From 9f0610fb83ae064e2e2c854fb2e9c9dc4cbc1646 Mon Sep 17 00:00:00 2001
From: Sandeep Nemuri
Date: Thu, 24 Oct 2019 20:13:44 +0530
Subject: [PATCH] HADOOP-16654: Delete hadoop-ozone and hadoop-hdds subprojects
 from apache trunk

Signed-off-by: Dinesh Chitlangia
---
 BUILDING.txt | 2 - .../main/resources/assemblies/hadoop-src.xml | 2 - hadoop-hdds/client/pom.xml | 44 - .../hdds/scm/ClientCredentialInterceptor.java | 65 - .../hadoop/hdds/scm/XceiverClientGrpc.java | 466 - .../hadoop/hdds/scm/XceiverClientManager.java | 390 - .../hadoop/hdds/scm/XceiverClientMetrics.java | 113 - .../hadoop/hdds/scm/XceiverClientRatis.java | 367 - .../scm/client/ContainerOperationClient.java | 495 - .../hdds/scm/client/HddsClientUtils.java | 350 - .../hadoop/hdds/scm/client/package-info.java | 23 - .../apache/hadoop/hdds/scm/package-info.java | 23 - .../hdds/scm/storage/BlockInputStream.java | 388 - .../hdds/scm/storage/BlockOutputStream.java | 640 - .../hadoop/hdds/scm/storage/BufferPool.java | 129 - .../hdds/scm/storage/ChunkInputStream.java | 544 - .../hdds/scm/storage/CommitWatcher.java | 240 - .../hadoop/hdds/scm/storage/package-info.java | 23 - .../scm/storage/TestBlockInputStream.java | 234 - .../scm/storage/TestChunkInputStream.java | 222 - .../hadoop/hdds/scm/storage/package-info.java | 21 - .../dev-support/findbugsExcludeFile.xml | 33 - hadoop-hdds/common/pom.xml | 285 - .../common/src/main/bin/hadoop-config.cmd | 317 - .../common/src/main/bin/hadoop-config.sh | 165 - .../common/src/main/bin/hadoop-daemons.sh | 77 - .../common/src/main/bin/hadoop-functions.sh | 2732 --- hadoop-hdds/common/src/main/bin/workers.sh | 59 - .../common/src/main/conf/core-site.xml | 20 - .../common/src/main/conf/hadoop-env.cmd | 90 - .../common/src/main/conf/hadoop-env.sh | 439 - .../src/main/conf/hadoop-metrics2.properties | 99 - .../common/src/main/conf/hadoop-policy.xml | 275 - .../apache/hadoop/hdds/HddsConfigKeys.java | 252 - .../org/apache/hadoop/hdds/HddsIdFactory.java | 53 - .../org/apache/hadoop/hdds/HddsUtils.java | 505 - .../apache/hadoop/hdds/cli/GenericCli.java | 107 - .../hadoop/hdds/cli/GenericParentCommand.java | 29 - .../hadoop/hdds/cli/HddsVersionProvider.java | 35 - .../hdds/cli/MissingSubcommandException.java | 31 - .../apache/hadoop/hdds/cli/package-info.java | 22 - .../apache/hadoop/hdds/client/BlockID.java | 127 - .../hadoop/hdds/client/ContainerBlockID.java | 79 - .../apache/hadoop/hdds/client/OzoneQuota.java | 203 - .../hadoop/hdds/client/ReplicationFactor.java | 81 - .../hadoop/hdds/client/ReplicationType.java | 48 - .../hadoop/hdds/client/package-info.java | 23 - .../hadoop/hdds/conf/HddsConfServlet.java | 190 - .../hadoop/hdds/conf/OzoneConfiguration.java | 328 - .../apache/hadoop/hdds/conf/package-info.java | 18 - .../FunctionWithServiceException.java | 36 - .../hadoop/hdds/function/package-info.java | 22 - .../org/apache/hadoop/hdds/package-info.java | 23 - .../hadoop/hdds/protocol/DatanodeDetails.java | 493 - .../hdds/protocol/SCMSecurityProtocol.java | 80 - .../hadoop/hdds/protocol/package-info.java | 22 - ...ecurityProtocolClientSideTranslatorPB.java | 213 - .../protocolPB/SCMSecurityProtocolPB.java | 35 - .../hadoop/hdds/protocolPB/package-info.java | 22 - .../ratis/ContainerCommandRequestMessage.java | 107 - .../apache/hadoop/hdds/ratis/RatisHelper.java | 290 - .../hadoop/hdds/ratis/package-info.java | 22 - .../hadoop/hdds/scm/ByteStringConversion.java | 62 - .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 375 - .../org/apache/hadoop/hdds/scm/ScmInfo.java | 81 -
.../hadoop/hdds/scm/XceiverClientReply.java | 82 - .../hadoop/hdds/scm/XceiverClientSpi.java | 179 - .../hadoop/hdds/scm/client/ScmClient.java | 241 - .../hadoop/hdds/scm/client/package-info.java | 24 - .../scm/container/ContainerException.java | 46 - .../hdds/scm/container/ContainerID.java | 107 - .../hdds/scm/container/ContainerInfo.java | 471 - .../container/ContainerNotFoundException.java | 44 - .../ContainerReplicaNotFoundException.java | 45 - .../common/helpers/AllocatedBlock.java | 66 - .../helpers/BlockNotCommittedException.java | 36 - .../helpers/ContainerNotOpenException.java | 36 - .../common/helpers/ContainerWithPipeline.java | 137 - .../common/helpers/DeleteBlockResult.java | 53 - .../container/common/helpers/ExcludeList.java | 111 - .../InvalidContainerStateException.java | 35 - .../helpers/StorageContainerException.java | 104 - .../common/helpers/package-info.java | 22 - .../hdds/scm/container/package-info.java | 18 - .../algorithms/ContainerPlacementPolicy.java | 45 - .../placement/algorithms/package-info.java | 21 - .../hdds/scm/exceptions/SCMException.java | 127 - .../hdds/scm/exceptions/package-info.java | 21 - .../apache/hadoop/hdds/scm/net/InnerNode.java | 85 - .../hadoop/hdds/scm/net/InnerNodeImpl.java | 509 - .../hadoop/hdds/scm/net/NetConstants.java | 67 - .../apache/hadoop/hdds/scm/net/NetUtils.java | 161 - .../hadoop/hdds/scm/net/NetworkTopology.java | 229 - .../hdds/scm/net/NetworkTopologyImpl.java | 798 - .../org/apache/hadoop/hdds/scm/net/Node.java | 101 - .../apache/hadoop/hdds/scm/net/NodeImpl.java | 222 - .../hadoop/hdds/scm/net/NodeSchema.java | 183 - .../hadoop/hdds/scm/net/NodeSchemaLoader.java | 489 - .../hdds/scm/net/NodeSchemaManager.java | 135 - .../hadoop/hdds/scm/net/package-info.java | 21 - .../apache/hadoop/hdds/scm/package-info.java | 24 - .../hadoop/hdds/scm/pipeline/Pipeline.java | 390 - .../hadoop/hdds/scm/pipeline/PipelineID.java | 80 - .../pipeline/PipelineNotFoundException.java | 46 - .../UnknownPipelineStateException.java | 46 - .../hdds/scm/pipeline/package-info.java | 24 - .../hdds/scm/protocol/LocatedContainer.java | 127 - .../protocol/ScmBlockLocationProtocol.java | 86 - .../hdds/scm/protocol/ScmLocatedBlock.java | 100 - .../StorageContainerLocationProtocol.java | 214 - .../hdds/scm/protocol/package-info.java | 19 - ...ocationProtocolClientSideTranslatorPB.java | 273 - .../ScmBlockLocationProtocolPB.java | 39 - ...ocationProtocolClientSideTranslatorPB.java | 475 - .../StorageContainerLocationProtocolPB.java | 40 - .../hdds/scm/protocolPB/package-info.java | 24 - .../hdds/scm/storage/CheckedBiFunction.java | 31 - .../scm/storage/ContainerProtocolCalls.java | 573 - .../hadoop/hdds/scm/storage/package-info.java | 23 - .../exception/SCMSecurityException.java | 79 - .../hdds/security/exception/package-info.java | 23 - .../security/token/BlockTokenException.java | 53 - .../security/token/BlockTokenVerifier.java | 130 - .../token/OzoneBlockTokenIdentifier.java | 212 - .../token/OzoneBlockTokenSelector.java | 79 - .../hdds/security/token/TokenVerifier.java | 38 - .../hdds/security/token/package-info.java | 22 - .../hdds/security/x509/SecurityConfig.java | 371 - .../certificate/authority/BaseApprover.java | 249 - .../authority/CertificateApprover.java | 91 - .../authority/CertificateServer.java | 123 - .../authority/CertificateStore.java | 80 - .../authority/DefaultApprover.java | 157 - .../authority/DefaultCAServer.java | 491 - .../PKIProfiles/DefaultCAProfile.java | 46 - .../authority/PKIProfiles/DefaultProfile.java | 336 - 
.../authority/PKIProfiles/PKIProfile.java | 140 - .../authority/PKIProfiles/package-info.java | 33 - .../certificate/authority/package-info.java | 22 - .../certificate/client/CertificateClient.java | 203 - .../client/DNCertificateClient.java | 65 - .../client/DefaultCertificateClient.java | 828 - .../client/OMCertificateClient.java | 125 - .../x509/certificate/client/package-info.java | 22 - .../certificate/utils/CertificateCodec.java | 299 - .../x509/certificate/utils/package-info.java | 22 - .../utils/CertificateSignRequest.java | 289 - .../utils/SelfSignedCertificate.java | 238 - .../x509/certificates/utils/package-info.java | 22 - .../x509/exceptions/CertificateException.java | 89 - .../x509/exceptions/package-info.java | 23 - .../security/x509/keys/HDDSKeyGenerator.java | 120 - .../hdds/security/x509/keys/KeyCodec.java | 398 - .../hdds/security/x509/keys/SecurityUtil.java | 138 - .../hdds/security/x509/keys/package-info.java | 23 - .../hdds/security/x509/package-info.java | 99 - .../hdds/tracing/GrpcClientInterceptor.java | 57 - .../hdds/tracing/GrpcServerInterceptor.java | 51 - .../hadoop/hdds/tracing/StringCodec.java | 98 - .../hadoop/hdds/tracing/TraceAllMethod.java | 86 - .../hadoop/hdds/tracing/TracingUtil.java | 140 - .../hadoop/hdds/tracing/package-info.java | 23 - .../hadoop/hdds/utils/BackgroundService.java | 164 - .../hadoop/hdds/utils/BackgroundTask.java | 28 - .../hdds/utils/BackgroundTaskQueue.java | 64 - .../hdds/utils/BackgroundTaskResult.java | 44 - .../hadoop/hdds/utils/BatchOperation.java | 90 - .../hadoop/hdds/utils/EntryConsumer.java | 38 - .../hadoop/hdds/utils/HddsVersionInfo.java | 58 - .../hadoop/hdds/utils/LevelDBStore.java | 399 - .../hdds/utils/LevelDBStoreIterator.java | 63 - .../hadoop/hdds/utils/MetaStoreIterator.java | 39 - .../hadoop/hdds/utils/MetadataKeyFilters.java | 206 - .../hadoop/hdds/utils/MetadataStore.java | 233 - .../hdds/utils/MetadataStoreBuilder.java | 146 - .../hadoop/hdds/utils/RetriableTask.java | 78 - .../hadoop/hdds/utils/RocksDBStore.java | 405 - .../hdds/utils/RocksDBStoreIterator.java | 66 - .../hadoop/hdds/utils/RocksDBStoreMBean.java | 219 - .../apache/hadoop/hdds/utils/Scheduler.java | 104 - .../apache/hadoop/hdds/utils/UniqueId.java | 69 - .../apache/hadoop/hdds/utils/VersionInfo.java | 97 - .../hadoop/hdds/utils/db/BatchOperation.java | 27 - .../hdds/utils/db/ByteArrayKeyValue.java | 67 - .../apache/hadoop/hdds/utils/db/Codec.java | 43 - .../hadoop/hdds/utils/db/CodecRegistry.java | 113 - .../hadoop/hdds/utils/db/DBCheckpoint.java | 74 - .../hdds/utils/db/DBConfigFromFile.java | 146 - .../hadoop/hdds/utils/db/DBProfile.java | 120 - .../apache/hadoop/hdds/utils/db/DBStore.java | 196 - .../hadoop/hdds/utils/db/DBStoreBuilder.java | 243 - .../hdds/utils/db/DBUpdatesWrapper.java | 52 - .../hadoop/hdds/utils/db/IntegerCodec.java | 38 - .../hadoop/hdds/utils/db/LongCodec.java | 46 - .../hdds/utils/db/RDBBatchOperation.java | 73 - .../hdds/utils/db/RDBCheckpointManager.java | 100 - .../apache/hadoop/hdds/utils/db/RDBStore.java | 381 - .../hdds/utils/db/RDBStoreIterator.java | 107 - .../apache/hadoop/hdds/utils/db/RDBTable.java | 196 - .../hdds/utils/db/RocksDBCheckpoint.java | 93 - .../hdds/utils/db/RocksDBConfiguration.java | 62 - .../db/SequenceNumberNotFoundException.java | 37 - .../hadoop/hdds/utils/db/StringCodec.java | 46 - .../apache/hadoop/hdds/utils/db/Table.java | 168 - .../hadoop/hdds/utils/db/TableConfig.java | 93 - .../hadoop/hdds/utils/db/TableIterator.java | 63 - .../hadoop/hdds/utils/db/TypedTable.java | 361 - 
.../hadoop/hdds/utils/db/cache/CacheKey.java | 65 - .../hdds/utils/db/cache/CacheResult.java | 76 - .../hdds/utils/db/cache/CacheValue.java | 47 - .../hdds/utils/db/cache/EpochEntry.java | 74 - .../hdds/utils/db/cache/TableCache.java | 106 - .../hdds/utils/db/cache/TableCacheImpl.java | 169 - .../hdds/utils/db/cache/package-info.java | 18 - .../hadoop/hdds/utils/db/package-info.java | 22 - .../hadoop/hdds/utils/package-info.java | 18 - .../apache/hadoop/ozone/OzoneConfigKeys.java | 464 - .../org/apache/hadoop/ozone/OzoneConsts.java | 327 - .../hadoop/ozone/OzoneSecurityUtil.java | 118 - .../hadoop/ozone/audit/AuditAction.java | 30 - .../hadoop/ozone/audit/AuditEventStatus.java | 36 - .../hadoop/ozone/audit/AuditLogger.java | 86 - .../hadoop/ozone/audit/AuditLoggerType.java | 37 - .../hadoop/ozone/audit/AuditMarker.java | 38 - .../hadoop/ozone/audit/AuditMessage.java | 131 - .../apache/hadoop/ozone/audit/Auditable.java | 32 - .../apache/hadoop/ozone/audit/Auditor.java | 33 - .../apache/hadoop/ozone/audit/DNAction.java | 48 - .../apache/hadoop/ozone/audit/SCMAction.java | 51 - .../hadoop/ozone/audit/package-info.java | 138 - .../hadoop/ozone/common/BlockGroup.java | 106 - .../apache/hadoop/ozone/common/Checksum.java | 286 - .../ozone/common/ChecksumByteBuffer.java | 122 - .../hadoop/ozone/common/ChecksumData.java | 206 - .../ozone/common/DeleteBlockGroupResult.java | 97 - .../InconsistentStorageStateException.java | 51 - .../ozone/common/OzoneChecksumException.java | 66 - .../ozone/common/PureJavaCrc32ByteBuffer.java | 556 - .../common/PureJavaCrc32CByteBuffer.java | 559 - .../apache/hadoop/ozone/common/Storage.java | 261 - .../hadoop/ozone/common/StorageInfo.java | 182 - .../hadoop/ozone/common/package-info.java | 18 - .../InvalidStateTransitionException.java | 42 - .../common/statemachine/StateMachine.java | 68 - .../common/statemachine/package-info.java | 21 - .../container/common/helpers/BlockData.java | 273 - .../container/common/helpers/ChunkInfo.java | 185 - .../ContainerCommandRequestPBHelper.java | 196 - .../common/helpers/package-info.java | 23 - .../org/apache/hadoop/ozone/lease/Lease.java | 189 - .../lease/LeaseAlreadyExistException.java | 46 - .../ozone/lease/LeaseCallbackExecutor.java | 65 - .../hadoop/ozone/lease/LeaseException.java | 45 - .../ozone/lease/LeaseExpiredException.java | 45 - .../hadoop/ozone/lease/LeaseManager.java | 251 - .../LeaseManagerNotRunningException.java | 45 - .../ozone/lease/LeaseNotFoundException.java | 46 - .../hadoop/ozone/lease/package-info.java | 26 - .../apache/hadoop/ozone/lock/ActiveLock.java | 141 - .../apache/hadoop/ozone/lock/LockManager.java | 241 - .../hadoop/ozone/lock/PooledLockFactory.java | 48 - .../hadoop/ozone/lock/package-info.java | 21 - .../org/apache/hadoop/ozone/package-info.java | 35 - .../protocolPB/ProtocolMessageMetrics.java | 105 - .../hadoop/ozone/protocolPB/package-info.java | 24 - .../hadoop/ozone/web/utils/JsonUtils.java | 70 - .../hadoop/ozone/web/utils/package-info.java | 19 - .../proto/DatanodeContainerProtocol.proto | 469 - .../common/src/main/proto/FSProtos.proto | 78 - .../src/main/proto/SCMSecurityProtocol.proto | 129 - .../main/proto/ScmBlockLocationProtocol.proto | 212 - .../common/src/main/proto/Security.proto | 73 - .../StorageContainerLocationProtocol.proto | 330 - hadoop-hdds/common/src/main/proto/hdds.proto | 249 - .../javax.annotation.processing.Processor | 16 - .../resources/hdds-version-info.properties | 26 - .../resources/network-topology-default.xml | 68 - .../resources/network-topology-default.yaml | 
61 - .../resources/network-topology-nodegroup.xml | 74 - .../src/main/resources/ozone-default.xml | 2504 --- .../org/apache/hadoop/hdds/TestHddsUtils.java | 42 - .../hadoop/hdds/conf/SimpleConfiguration.java | 88 - .../hdds/conf/TestOzoneConfiguration.java | 175 - .../apache/hadoop/hdds/conf/package-info.java | 22 - .../TestContainerCommandRequestMessage.java | 152 - .../TestSCMExceptionResultCodes.java | 52 - .../hdds/scm/net/TestNetworkTopologyImpl.java | 953 - .../hdds/scm/net/TestNodeSchemaLoader.java | 103 - .../hdds/scm/net/TestNodeSchemaManager.java | 101 - .../hdds/scm/net/TestYamlSchemaLoader.java | 90 - .../apache/hadoop/hdds/scm/package-info.java | 21 - .../token/TestOzoneBlockTokenIdentifier.java | 313 - .../hdds/security/token/package-info.java | 22 - .../certificate/authority/MockApprover.java | 58 - .../certificate/authority/MockCAStore.java | 54 - .../authority/TestDefaultCAServer.java | 251 - .../authority/TestDefaultProfile.java | 364 - .../certificate/authority/package-info.java | 22 - .../client/TestCertificateClientInit.java | 224 - .../client/TestDefaultCertificateClient.java | 480 - .../utils/TestCertificateCodec.java | 222 - .../x509/certificate/utils/package-info.java | 23 - .../TestCertificateSignRequest.java | 267 - .../certificates/TestRootCertificate.java | 258 - .../x509/certificates/package-info.java | 22 - .../x509/keys/TestHDDSKeyGenerator.java | 87 - .../hdds/security/x509/keys/TestKeyCodec.java | 235 - .../hdds/security/x509/keys/package-info.java | 22 - .../hdds/security/x509/package-info.java | 22 - .../hadoop/hdds/tracing/TestStringCodec.java | 52 - .../hadoop/hdds/tracing/package-info.java | 21 - .../hadoop/hdds/utils/TestHddsIdFactory.java | 77 - .../hadoop/hdds/utils/TestMetadataStore.java | 590 - .../hadoop/hdds/utils/TestRetriableTask.java | 76 - .../hdds/utils/TestRocksDBStoreMBean.java | 234 - .../hdds/utils/db/TestDBConfigFromFile.java | 116 - .../hdds/utils/db/TestDBStoreBuilder.java | 173 - .../hadoop/hdds/utils/db/TestRDBStore.java | 349 - .../hdds/utils/db/TestRDBTableStore.java | 269 - .../hdds/utils/db/TestTypedRDBTableStore.java | 373 - .../utils/db/cache/TestTableCacheImpl.java | 173 - .../hdds/utils/db/cache/package-info.java | 22 - .../hadoop/hdds/utils/db/package-info.java | 22 - .../hadoop/hdds/utils/package-info.java | 22 - .../hadoop/ozone/audit/DummyAction.java | 44 - .../hadoop/ozone/audit/DummyEntity.java | 57 - .../ozone/audit/TestOzoneAuditLogger.java | 166 - .../hadoop/ozone/audit/package-info.java | 23 - .../hadoop/ozone/common/TestChecksum.java | 101 - .../ozone/common/TestChecksumByteBuffer.java | 102 - .../hadoop/ozone/common/TestStateMachine.java | 106 - .../hadoop/ozone/lease/TestLeaseManager.java | 388 - .../hadoop/ozone/lease/package-info.java | 21 - .../hadoop/ozone/lock/TestLockManager.java | 173 - .../hadoop/ozone/lock/package-info.java | 21 - .../org/apache/hadoop/ozone/package-info.java | 21 - .../src/test/resources/log4j2.properties | 76 - .../enforce-error.xml | 47 - .../networkTopologyTestFiles/good.xml | 49 - .../networkTopologyTestFiles/good.yaml | 59 - .../networkTopologyTestFiles/invalid-cost.xml | 43 - .../invalid-version.xml | 43 - .../networkTopologyTestFiles/middle-leaf.yaml | 59 - .../multiple-leaf.xml | 43 - .../multiple-root.xml | 43 - .../multiple-root.yaml | 59 - .../multiple-topology.xml | 47 - .../networkTopologyTestFiles/no-leaf.xml | 43 - .../networkTopologyTestFiles/no-root.xml | 43 - .../networkTopologyTestFiles/no-topology.xml | 39 - .../path-layers-size-mismatch.xml | 43 - 
.../path-with-id-reference-failure.xml | 43 - .../unknown-layer-type.xml | 43 - .../wrong-path-order-1.xml | 43 - .../wrong-path-order-2.xml | 43 - .../common/src/test/resources/test.db.ini | 145 - hadoop-hdds/config/pom.xml | 45 - .../org/apache/hadoop/hdds/conf/Config.java | 59 - .../hadoop/hdds/conf/ConfigFileAppender.java | 127 - .../hadoop/hdds/conf/ConfigFileGenerator.java | 114 - .../apache/hadoop/hdds/conf/ConfigGroup.java | 32 - .../apache/hadoop/hdds/conf/ConfigTag.java | 44 - .../apache/hadoop/hdds/conf/ConfigType.java | 34 - .../hdds/conf/ConfigurationException.java | 34 - .../apache/hadoop/hdds/conf/package-info.java | 22 - .../hdds/conf/ConfigurationExample.java | 89 - .../hdds/conf/TestConfigFileAppender.java | 48 - .../apache/hadoop/hdds/conf/package-info.java | 24 - .../javax.annotation.processing.Processor | 16 - .../dev-support/findbugsExcludeFile.xml | 33 - hadoop-hdds/container-service/pom.xml | 103 - .../hadoop/hdds/scm/HddsServerUtil.java | 384 - .../apache/hadoop/hdds/scm/VersionInfo.java | 81 - .../apache/hadoop/hdds/scm/package-info.java | 19 - .../hadoop/ozone/HddsDatanodeHttpServer.java | 86 - .../hadoop/ozone/HddsDatanodeService.java | 495 - .../hadoop/ozone/HddsDatanodeStopService.java | 27 - .../common/DataNodeLayoutVersion.java | 80 - .../common/helpers/ContainerMetrics.java | 128 - .../common/helpers/ContainerUtils.java | 305 - .../common/helpers/DatanodeIdYaml.java | 182 - .../common/helpers/DatanodeVersionFile.java | 95 - .../DeletedContainerBlocksSummary.java | 104 - .../common/helpers/package-info.java | 22 - .../common/impl/ChunkLayOutVersion.java | 98 - .../container/common/impl/ContainerData.java | 560 - .../common/impl/ContainerDataYaml.java | 323 - .../container/common/impl/ContainerSet.java | 281 - .../container/common/impl/HddsDispatcher.java | 597 - .../common/impl/OpenContainerBlockMap.java | 151 - ...RandomContainerDeletionChoosingPolicy.java | 71 - .../common/impl/StorageLocationReport.java | 300 - ...rderedContainerDeletionChoosingPolicy.java | 91 - .../container/common/impl/package-info.java | 22 - .../common/interfaces/BlockIterator.java | 57 - .../common/interfaces/Container.java | 188 - .../ContainerDeletionChoosingPolicy.java | 58 - .../interfaces/ContainerDispatcher.java | 86 - .../interfaces/ContainerLocationManager.java | 58 - .../ContainerLocationManagerMXBean.java | 34 - .../common/interfaces/ContainerPacker.java | 58 - .../container/common/interfaces/Handler.java | 189 - .../StorageLocationReportMXBean.java | 40 - .../interfaces/VolumeChoosingPolicy.java | 46 - .../common/interfaces/package-info.java | 20 - .../ozone/container/common/package-info.java | 28 - .../report/CommandStatusReportPublisher.java | 87 - .../report/ContainerReportPublisher.java | 86 - .../common/report/NodeReportPublisher.java | 66 - .../report/PipelineReportPublisher.java | 73 - .../common/report/ReportManager.java | 158 - .../common/report/ReportPublisher.java | 115 - .../common/report/ReportPublisherFactory.java | 79 - .../container/common/report/package-info.java | 80 - .../statemachine/DatanodeStateMachine.java | 489 - .../statemachine/EndpointStateMachine.java | 296 - .../EndpointStateMachineMBean.java | 34 - .../statemachine/SCMConnectionManager.java | 221 - .../SCMConnectionManagerMXBean.java | 27 - .../common/statemachine/StateContext.java | 502 - .../CloseContainerCommandHandler.java | 179 - .../commandhandler/CommandDispatcher.java | 188 - .../commandhandler/CommandHandler.java | 75 - .../DeleteBlocksCommandHandler.java | 281 - 
.../DeleteContainerCommandHandler.java | 81 - .../ReplicateContainerCommandHandler.java | 94 - .../commandhandler/package-info.java | 18 - .../common/statemachine/package-info.java | 28 - .../common/states/DatanodeState.java | 58 - .../states/datanode/InitDatanodeState.java | 182 - .../states/datanode/RunningDatanodeState.java | 187 - .../common/states/datanode/package-info.java | 21 - .../endpoint/HeartbeatEndpointTask.java | 402 - .../states/endpoint/RegisterEndpointTask.java | 261 - .../states/endpoint/VersionEndpointTask.java | 129 - .../common/states/endpoint/package-info.java | 20 - .../container/common/states/package-info.java | 18 - .../transport/server/GrpcXceiverService.java | 97 - .../server/ServerCredentialInterceptor.java | 74 - .../transport/server/XceiverServer.java | 89 - .../transport/server/XceiverServerGrpc.java | 206 - .../transport/server/XceiverServerSpi.java | 68 - .../common/transport/server/package-info.java | 24 - .../transport/server/ratis/CSMMetrics.java | 221 - .../server/ratis/ContainerStateMachine.java | 871 - .../server/ratis/DispatcherContext.java | 154 - .../ratis/RatisServerConfiguration.java | 48 - .../server/ratis/XceiverServerRatis.java | 689 - .../transport/server/ratis/package-info.java | 23 - .../common/utils/ContainerCache.java | 163 - .../common/utils/HddsVolumeUtil.java | 219 - .../common/utils/ReferenceCountedDB.java | 97 - .../container/common/utils/package-info.java | 18 - .../common/volume/AbstractFuture.java | 1298 -- .../container/common/volume/AsyncChecker.java | 65 - .../container/common/volume/HddsVolume.java | 455 - .../common/volume/HddsVolumeChecker.java | 424 - .../RoundRobinVolumeChoosingPolicy.java | 85 - .../common/volume/ThrottledAsyncChecker.java | 248 - .../common/volume/TimeoutFuture.java | 161 - .../common/volume/VolumeIOStats.java | 139 - .../container/common/volume/VolumeInfo.java | 141 - .../container/common/volume/VolumeSet.java | 519 - .../container/common/volume/VolumeUsage.java | 193 - .../container/common/volume/package-info.java | 21 - .../keyvalue/KeyValueBlockIterator.java | 157 - .../container/keyvalue/KeyValueContainer.java | 730 - .../keyvalue/KeyValueContainerCheck.java | 310 - .../keyvalue/KeyValueContainerData.java | 276 - .../container/keyvalue/KeyValueHandler.java | 1043 - .../keyvalue/TarContainerPacker.java | 249 - .../keyvalue/helpers/BlockUtils.java | 201 - .../keyvalue/helpers/ChunkUtils.java | 319 - .../KeyValueContainerLocationUtil.java | 112 - .../helpers/KeyValueContainerUtil.java | 237 - .../keyvalue/helpers/SmallFileUtils.java | 91 - .../keyvalue/helpers/package-info.java | 21 - .../keyvalue/impl/BlockManagerImpl.java | 291 - .../keyvalue/impl/ChunkManagerDummyImpl.java | 162 - .../keyvalue/impl/ChunkManagerFactory.java | 91 - .../keyvalue/impl/ChunkManagerImpl.java | 312 - .../container/keyvalue/impl/package-info.java | 21 - .../keyvalue/interfaces/BlockManager.java | 85 - .../keyvalue/interfaces/ChunkManager.java | 83 - .../keyvalue/interfaces/package-info.java | 21 - .../container/keyvalue/package-info.java | 21 - .../background/BlockDeletingService.java | 332 - .../statemachine/background/package-info.java | 18 - .../ozoneimpl/ContainerController.java | 177 - .../ozoneimpl/ContainerDataScanner.java | 178 - .../ContainerDataScrubberMetrics.java | 113 - .../ozoneimpl/ContainerMetadataScanner.java | 132 - .../ContainerMetadataScrubberMetrics.java | 92 - .../container/ozoneimpl/ContainerReader.java | 261 - .../ContainerScrubberConfiguration.java | 91 - .../container/ozoneimpl/OzoneContainer.java 
| 283 - .../container/ozoneimpl/package-info.java | 21 - .../replication/ContainerDownloader.java | 40 - .../ContainerReplicationSource.java | 49 - .../replication/ContainerReplicator.java | 27 - .../replication/ContainerStreamingOutput.java | 45 - .../DownloadAndImportReplicator.java | 135 - .../replication/GrpcReplicationClient.java | 175 - .../replication/GrpcReplicationService.java | 129 - .../OnDemandContainerReplicationSource.java | 68 - .../replication/ReplicationSupervisor.java | 137 - .../replication/ReplicationTask.java | 102 - .../SimpleContainerDownloader.java | 102 - .../container/replication/package-info.java | 21 - .../org/apache/hadoop/ozone/package-info.java | 23 - .../StorageContainerDatanodeProtocol.java | 86 - .../StorageContainerNodeProtocol.java | 78 - .../ozone/protocol/VersionResponse.java | 154 - .../commands/CloseContainerCommand.java | 82 - .../protocol/commands/CommandForDatanode.java | 52 - .../protocol/commands/CommandStatus.java | 164 - .../commands/DeleteBlockCommandStatus.java | 100 - .../commands/DeleteBlocksCommand.java | 71 - .../commands/DeleteContainerCommand.java | 86 - .../protocol/commands/RegisteredCommand.java | 164 - .../commands/ReplicateContainerCommand.java | 98 - .../protocol/commands/ReregisterCommand.java | 58 - .../ozone/protocol/commands/SCMCommand.java | 62 - .../ozone/protocol/commands/package-info.java | 21 - .../hadoop/ozone/protocol/package-info.java | 23 - ...atanodeProtocolClientSideTranslatorPB.java | 177 - .../StorageContainerDatanodeProtocolPB.java | 40 - ...atanodeProtocolServerSideTranslatorPB.java | 113 - .../hadoop/ozone/protocolPB/package-info.java | 19 - .../StorageContainerDatanodeProtocol.proto | 429 - ...sun.jersey.spi.container.ContainerProvider | 16 - .../resources/webapps/hddsDatanode/.gitkeep | 17 - .../hadoop/ozone/TestHddsDatanodeService.java | 95 - .../ozone/TestHddsSecureDatanodeInit.java | 274 - .../container/common/ContainerTestUtils.java | 68 - .../ozone/container/common/SCMTestUtils.java | 128 - .../ozone/container/common/ScmTestMock.java | 355 - .../common/TestChunkLayOutVersion.java | 42 - .../container/common/TestContainerCache.java | 128 - .../common/TestDatanodeLayOutVersion.java | 38 - .../common/TestDatanodeStateMachine.java | 444 - .../common/TestKeyValueContainerData.java | 93 - .../helpers/TestDatanodeVersionFile.java | 134 - .../common/impl/TestContainerDataYaml.java | 220 - .../common/impl/TestContainerSet.java | 227 - .../common/impl/TestHddsDispatcher.java | 300 - .../container/common/impl/package-info.java | 22 - .../common/interfaces/TestHandler.java | 101 - .../ozone/container/common/package-info.java | 22 - .../common/report/TestReportManager.java | 52 - .../common/report/TestReportPublisher.java | 191 - .../report/TestReportPublisherFactory.java | 68 - .../container/common/report/package-info.java | 22 - .../TestCloseContainerCommandHandler.java | 227 - .../commandhandler/package-info.java | 22 - .../endpoint/TestHeartbeatEndpointTask.java | 295 - .../common/states/endpoint/package-info.java | 18 - .../common/volume/TestHddsVolume.java | 139 - .../common/volume/TestHddsVolumeChecker.java | 211 - .../TestRoundRobinVolumeChoosingPolicy.java | 137 - .../common/volume/TestVolumeSet.java | 246 - .../volume/TestVolumeSetDiskChecks.java | 190 - .../container/common/volume/package-info.java | 22 - .../keyvalue/TestBlockManagerImpl.java | 197 - .../keyvalue/TestChunkManagerImpl.java | 292 - .../keyvalue/TestKeyValueBlockIterator.java | 284 - .../keyvalue/TestKeyValueContainer.java | 394 - 
.../keyvalue/TestKeyValueContainerCheck.java | 270 - .../TestKeyValueContainerMarkUnhealthy.java | 175 - .../keyvalue/TestKeyValueHandler.java | 316 - ...KeyValueHandlerWithUnhealthyContainer.java | 231 - .../keyvalue/TestTarContainerPacker.java | 234 - .../keyvalue/helpers/TestChunkUtils.java | 164 - .../container/keyvalue/package-info.java | 22 - .../TestContainerScrubberMetrics.java | 110 - .../ozoneimpl/TestOzoneContainer.java | 255 - .../TestReplicationSupervisor.java | 136 - .../container/replication/package-info.java | 22 - .../BlockDeletingServiceTestImpl.java | 105 - .../container/testutils/package-info.java | 18 - .../test/resources/additionalfields.container | 14 - .../resources/incorrect.checksum.container | 13 - .../src/test/resources/incorrect.container | 13 - .../src/test/resources/log4j.properties | 23 - .../checkstyle/checkstyle-noframes-sorted.xsl | 189 - .../dev-support/checkstyle/checkstyle.xml | 196 - .../dev-support/checkstyle/suppressions.xml | 21 - hadoop-hdds/docs/README.md | 55 - hadoop-hdds/docs/archetypes/default.md | 20 - hadoop-hdds/docs/config.yaml | 26 - hadoop-hdds/docs/content/_index.md | 45 - hadoop-hdds/docs/content/beyond/Containers.md | 235 - .../docs/content/beyond/DockerCheatSheet.md | 88 - .../docs/content/beyond/RunningWithHDFS.md | 70 - hadoop-hdds/docs/content/beyond/_index.md | 30 - .../content/concept/ContainerMetadata.png | Bin 98493 -> 0 bytes hadoop-hdds/docs/content/concept/Datanodes.md | 75 - .../docs/content/concept/FunctionalOzone.png | Bin 55334 -> 0 bytes hadoop-hdds/docs/content/concept/Hdds.md | 52 - hadoop-hdds/docs/content/concept/Overview.md | 81 - .../docs/content/concept/OzoneBlock.png | Bin 4650 -> 0 bytes .../docs/content/concept/OzoneManager.md | 87 - hadoop-hdds/docs/content/concept/_index.md | 33 - .../content/concept/ozoneBlockDiagram.png | Bin 45218 -> 0 bytes .../docs/content/design/decommissioning.md | 624 - .../design/ozone-enhancement-proposals.md | 197 - .../docs/content/gdpr/GDPR in Ozone.md | 42 - hadoop-hdds/docs/content/gdpr/_index.md | 38 - hadoop-hdds/docs/content/interface/JavaApi.md | 156 - hadoop-hdds/docs/content/interface/OzoneFS.md | 155 - hadoop-hdds/docs/content/interface/S3.md | 150 - hadoop-hdds/docs/content/interface/_index.md | 27 - hadoop-hdds/docs/content/recipe/Prometheus.md | 95 - .../docs/content/recipe/SparkOzoneFSK8S.md | 188 - hadoop-hdds/docs/content/recipe/_index.md | 29 - .../recipe/prometheus-key-allocate.png | Bin 51155 -> 0 bytes .../docs/content/recipe/prometheus.png | Bin 38962 -> 0 bytes .../content/security/SecuityWithRanger.md | 43 - .../docs/content/security/SecureOzone.md | 178 - .../content/security/SecuringDatanodes.md | 73 - .../docs/content/security/SecuringS3.md | 61 - .../docs/content/security/SecuringTDE.md | 65 - .../docs/content/security/SecurityAcls.md | 85 - hadoop-hdds/docs/content/security/_index.md | 36 - .../docs/content/shell/BucketCommands.md | 99 - hadoop-hdds/docs/content/shell/Format.md | 69 - hadoop-hdds/docs/content/shell/KeyCommands.md | 139 - .../docs/content/shell/VolumeCommands.md | 112 - hadoop-hdds/docs/content/shell/_index.md | 28 - hadoop-hdds/docs/content/start/FromSource.md | 68 - hadoop-hdds/docs/content/start/Kubernetes.md | 53 - hadoop-hdds/docs/content/start/Minikube.md | 70 - hadoop-hdds/docs/content/start/OnPrem.md | 187 - .../docs/content/start/RunningViaDocker.md | 61 - .../docs/content/start/StartFromDockerHub.md | 111 - hadoop-hdds/docs/content/start/_index.md | 88 - hadoop-hdds/docs/content/start/docker.png | Bin 7002 -> 0 bytes 
hadoop-hdds/docs/content/start/hadoop.png | Bin 4923 -> 0 bytes hadoop-hdds/docs/content/start/k8s.png | Bin 6270 -> 0 bytes hadoop-hdds/docs/content/start/minikube.png | Bin 5764 -> 0 bytes hadoop-hdds/docs/content/tools/AuditParser.md | 70 - hadoop-hdds/docs/content/tools/Genconf.md | 26 - hadoop-hdds/docs/content/tools/SCMCLI.md | 27 - hadoop-hdds/docs/content/tools/TestTools.md | 228 - hadoop-hdds/docs/content/tools/_index.md | 65 - .../docs/dev-support/bin/generate-site.sh | 29 - hadoop-hdds/docs/pom.xml | 75 - hadoop-hdds/docs/static/NOTES.md | 20 - hadoop-hdds/docs/static/OzoneOverview.png | Bin 41729 -> 0 bytes hadoop-hdds/docs/static/OzoneOverview.svg | 238 - hadoop-hdds/docs/static/SCMBlockDiagram.png | Bin 14714 -> 0 bytes hadoop-hdds/docs/static/ozone-logo-small.png | Bin 40237 -> 0 bytes hadoop-hdds/docs/static/ozone-usage.png | Bin 104961 -> 0 bytes .../ozonedoc/layouts/_default/section.html | 71 - .../ozonedoc/layouts/_default/single.html | 57 - .../docs/themes/ozonedoc/layouts/index.html | 37 - .../ozonedoc/layouts/partials/footer.html | 22 - .../ozonedoc/layouts/partials/header.html | 34 - .../ozonedoc/layouts/partials/navbar.html | 42 - .../ozonedoc/layouts/partials/sidebar.html | 71 - .../layouts/shortcodes/buttonlink.html | 20 - .../ozonedoc/layouts/shortcodes/card.html | 40 - .../layouts/shortcodes/jumbotron.html | 25 - .../layouts/shortcodes/requirements.html | 22 - .../static/css/bootstrap-theme.min.css | 6 - .../static/css/bootstrap-theme.min.css.map | 1 - .../ozonedoc/static/css/bootstrap.min.css | 6 - .../ozonedoc/static/css/bootstrap.min.css.map | 1 - .../themes/ozonedoc/static/css/ozonedoc.css | 169 - .../fonts/glyphicons-halflings-regular.eot | Bin 20127 -> 0 bytes .../fonts/glyphicons-halflings-regular.svg | 288 - .../fonts/glyphicons-halflings-regular.ttf | Bin 45404 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 23424 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 18028 -> 0 bytes .../ozonedoc/static/js/bootstrap.min.js | 6 - .../ozonedoc/static/js/jquery-3.4.1.min.js | 2 - .../themes/ozonedoc/static/js/ozonedoc.js | 23 - hadoop-hdds/docs/themes/ozonedoc/theme.toml | 2 - hadoop-hdds/framework/README.md | 24 - hadoop-hdds/framework/pom.xml | 42 - .../hadoop/hdds/server/BaseHttpServer.java | 258 - .../hadoop/hdds/server/LogStreamServlet.java | 58 - .../OzoneProtocolMessageDispatcher.java | 88 - .../hadoop/hdds/server/ProfileServlet.java | 507 - .../hdds/server/PrometheusMetricsSink.java | 145 - .../hadoop/hdds/server/PrometheusServlet.java | 46 - .../hadoop/hdds/server/ServerUtils.java | 227 - .../hdds/server/ServiceRuntimeInfo.java | 64 - .../hdds/server/ServiceRuntimeInfoImpl.java | 60 - .../hadoop/hdds/server/events/Event.java | 42 - .../hdds/server/events/EventExecutor.java | 68 - .../hdds/server/events/EventHandler.java | 33 - .../hdds/server/events/EventPublisher.java | 28 - .../hadoop/hdds/server/events/EventQueue.java | 262 - .../hdds/server/events/EventWatcher.java | 218 - .../server/events/EventWatcherMetrics.java | 79 - .../events/IdentifiableEventPayload.java | 28 - .../server/events/SingleThreadExecutor.java | 118 - .../hadoop/hdds/server/events/TypedEvent.java | 58 - .../hdds/server/events/package-info.java | 23 - .../hadoop/hdds/server/package-info.java | 23 - .../src/main/resources/webapps/datanode/dn.js | 92 - .../webapps/static/angular-1.6.4.min.js | 332 - .../webapps/static/angular-nvd3-1.0.9.min.js | 1 - .../webapps/static/angular-route-1.6.4.min.js | 17 - .../css/bootstrap-editable.css | 655 - 
.../bootstrap-3.4.1/css/bootstrap-theme.css | 587 - .../css/bootstrap-theme.css.map | 1 - .../css/bootstrap-theme.min.css | 6 - .../css/bootstrap-theme.min.css.map | 1 - .../static/bootstrap-3.4.1/css/bootstrap.css | 6834 ------ .../bootstrap-3.4.1/css/bootstrap.css.map | 1 - .../bootstrap-3.4.1/css/bootstrap.min.css | 6 - .../bootstrap-3.4.1/css/bootstrap.min.css.map | 1 - .../fonts/glyphicons-halflings-regular.eot | Bin 20127 -> 0 bytes .../fonts/glyphicons-halflings-regular.svg | 288 - .../fonts/glyphicons-halflings-regular.ttf | Bin 45404 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 23424 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 18028 -> 0 bytes .../js/bootstrap-editable.min.js | 7 - .../static/bootstrap-3.4.1/js/bootstrap.js | 2580 --- .../bootstrap-3.4.1/js/bootstrap.min.js | 6 - .../resources/webapps/static/d3-3.5.17.min.js | 5 - .../main/resources/webapps/static/hadoop.css | 331 - .../webapps/static/jquery-3.4.1.min.js | 2 - .../webapps/static/nvd3-1.8.5.min.css | 2 - .../webapps/static/nvd3-1.8.5.min.css.map | 1 - .../webapps/static/nvd3-1.8.5.min.js | 11 - .../webapps/static/nvd3-1.8.5.min.js.map | 1 - .../main/resources/webapps/static/ozone.css | 60 - .../main/resources/webapps/static/ozone.js | 385 - .../webapps/static/templates/config.html | 91 - .../webapps/static/templates/jvm.html | 26 - .../webapps/static/templates/menu.html | 60 - .../webapps/static/templates/overview.html | 39 - .../webapps/static/templates/rpc-metrics.html | 87 - .../hdds/server/TestBaseHttpServer.java | 98 - .../hdds/server/TestProfileServlet.java | 54 - .../server/TestPrometheusMetricsSink.java | 199 - .../hadoop/hdds/server/TestServerUtils.java | 123 - .../hdds/server/events/EventHandlerStub.java | 38 - .../hdds/server/events/TestEventQueue.java | 83 - .../server/events/TestEventQueueChain.java | 79 - .../hdds/server/events/TestEventWatcher.java | 305 - .../hdds/server/events/package-info.java | 22 - .../src/test/resources/ozone-site.xml | 24 - hadoop-hdds/pom.xml | 406 - hadoop-hdds/server-scm/pom.xml | 153 - .../org/apache/hadoop/hdds/scm/ScmUtils.java | 51 - .../hadoop/hdds/scm/block/BlockManager.java | 89 - .../hdds/scm/block/BlockManagerImpl.java | 362 - .../hdds/scm/block/BlockmanagerMXBean.java | 30 - .../DatanodeDeletedBlockTransactions.java | 146 - .../hdds/scm/block/DeletedBlockLog.java | 118 - .../hdds/scm/block/DeletedBlockLogImpl.java | 366 - .../hdds/scm/block/PendingDeleteHandler.java | 41 - .../scm/block/PendingDeleteStatusList.java | 85 - .../scm/block/SCMBlockDeletingService.java | 199 - .../hadoop/hdds/scm/block/package-info.java | 22 - .../command/CommandStatusReportHandler.java | 108 - .../hadoop/hdds/scm/command/package-info.java | 26 - .../AbstractContainerReportHandler.java | 246 - .../container/CloseContainerEventHandler.java | 113 - .../container/ContainerActionsHandler.java | 62 - .../hdds/scm/container/ContainerManager.java | 190 - .../hdds/scm/container/ContainerReplica.java | 231 - .../scm/container/ContainerReportHandler.java | 201 - .../scm/container/ContainerStateManager.java | 535 - .../IncrementalContainerReportHandler.java | 93 - .../scm/container/ReplicationManager.java | 875 - .../scm/container/SCMContainerManager.java | 592 - .../scm/container/closer/package-info.java | 23 - .../metrics/SCMContainerManagerMetrics.java | 144 - .../scm/container/metrics/package-info.java | 22 - .../hdds/scm/container/package-info.java | 22 - .../ContainerPlacementPolicyFactory.java | 76 - .../placement/algorithms/SCMCommonPolicy.java | 201 - 
.../SCMContainerPlacementCapacity.java | 140 - .../SCMContainerPlacementMetrics.java | 107 - .../SCMContainerPlacementRackAware.java | 348 - .../SCMContainerPlacementRandom.java | 97 - .../placement/algorithms/package-info.java | 18 - .../placement/metrics/ContainerStat.java | 165 - .../placement/metrics/DatanodeMetric.java | 90 - .../placement/metrics/LongMetric.java | 161 - .../container/placement/metrics/NodeStat.java | 67 - .../placement/metrics/SCMMetrics.java | 154 - .../placement/metrics/SCMNodeMetric.java | 224 - .../placement/metrics/SCMNodeStat.java | 141 - .../placement/metrics/package-info.java | 20 - .../scm/container/placement/package-info.java | 19 - .../ReplicationActivityStatus.java | 104 - .../ReplicationActivityStatusMXBean.java | 28 - .../container/replication/package-info.java | 23 - .../container/states/ContainerAttribute.java | 255 - .../container/states/ContainerQueryKey.java | 110 - .../scm/container/states/ContainerState.java | 80 - .../container/states/ContainerStateMap.java | 545 - .../scm/container/states/package-info.java | 22 - .../hadoop/hdds/scm/events/SCMEvents.java | 205 - .../hadoop/hdds/scm/events/package-info.java | 23 - .../hdds/scm/metadata/BigIntegerCodec.java | 39 - .../DeletedBlocksTransactionCodec.java | 50 - .../hadoop/hdds/scm/metadata/LongCodec.java | 40 - .../hdds/scm/metadata/SCMMetadataStore.java | 103 - .../scm/metadata/SCMMetadataStoreRDBImpl.java | 201 - .../scm/metadata/X509CertificateCodec.java | 54 - .../hdds/scm/metadata/package-info.java | 21 - .../hadoop/hdds/scm/node/CommandQueue.java | 190 - .../hadoop/hdds/scm/node/DatanodeInfo.java | 129 - .../hadoop/hdds/scm/node/DeadNodeHandler.java | 172 - .../hadoop/hdds/scm/node/NewNodeHandler.java | 45 - .../hadoop/hdds/scm/node/NodeManager.java | 202 - .../hdds/scm/node/NodeManagerMXBean.java | 45 - .../hdds/scm/node/NodeReportHandler.java | 54 - .../hdds/scm/node/NodeStateManager.java | 765 - .../node/NonHealthyToHealthyNodeHandler.java | 47 - .../hadoop/hdds/scm/node/SCMNodeManager.java | 684 - .../hadoop/hdds/scm/node/SCMNodeMetrics.java | 161 - .../scm/node/SCMNodeStorageStatMXBean.java | 77 - .../hdds/scm/node/SCMNodeStorageStatMap.java | 368 - .../hdds/scm/node/StaleNodeHandler.java | 70 - .../hdds/scm/node/StorageReportResult.java | 87 - .../hadoop/hdds/scm/node/package-info.java | 31 - .../scm/node/states/Node2ContainerMap.java | 92 - .../hdds/scm/node/states/Node2ObjectsMap.java | 164 - .../scm/node/states/Node2PipelineMap.java | 77 - .../states/NodeAlreadyExistsException.java | 45 - .../hdds/scm/node/states/NodeException.java | 44 - .../node/states/NodeNotFoundException.java | 47 - .../hdds/scm/node/states/NodeStateMap.java | 312 - .../hdds/scm/node/states/ReportResult.java | 110 - .../hdds/scm/node/states/package-info.java | 22 - .../apache/hadoop/hdds/scm/package-info.java | 22 - .../pipeline/BackgroundPipelineCreator.java | 110 - .../InsufficientDatanodesException.java | 36 - .../scm/pipeline/PipelineActionHandler.java | 74 - .../hdds/scm/pipeline/PipelineFactory.java | 69 - .../hdds/scm/pipeline/PipelineManager.java | 99 - .../scm/pipeline/PipelineManagerMXBean.java | 38 - .../hdds/scm/pipeline/PipelineProvider.java | 37 - .../scm/pipeline/PipelineReportHandler.java | 119 - .../scm/pipeline/PipelineStateManager.java | 164 - .../hdds/scm/pipeline/PipelineStateMap.java | 420 - .../scm/pipeline/RatisPipelineProvider.java | 256 - .../hdds/scm/pipeline/RatisPipelineUtils.java | 103 - .../hdds/scm/pipeline/SCMPipelineManager.java | 469 - 
.../hdds/scm/pipeline/SCMPipelineMetrics.java | 160 - .../scm/pipeline/SimplePipelineProvider.java | 80 - .../hdds/scm/pipeline/package-info.java | 24 - ...ecurityProtocolServerSideTranslatorPB.java | 186 - ...ocationProtocolServerSideTranslatorPB.java | 228 - ...ocationProtocolServerSideTranslatorPB.java | 393 - .../hdds/scm/protocol/package-info.java | 21 - .../hadoop/hdds/scm/ratis/package-info.java | 22 - .../scm/safemode/ContainerSafeModeRule.java | 128 - .../scm/safemode/DataNodeSafeModeRule.java | 82 - .../safemode/HealthyPipelineSafeModeRule.java | 170 - .../OneReplicaPipelineSafeModeRule.java | 158 - .../hadoop/hdds/scm/safemode/Precheck.java | 29 - .../hdds/scm/safemode/SCMSafeModeManager.java | 257 - .../hdds/scm/safemode/SafeModeExitRule.java | 114 - .../hdds/scm/safemode/SafeModeHandler.java | 147 - .../hdds/scm/safemode/SafeModeMetrics.java | 111 - .../hdds/scm/safemode/SafeModePrecheck.java | 69 - .../scm/safemode/SafeModeRestrictedOps.java | 41 - .../hdds/scm/safemode/package-info.java | 18 - .../scm/server/SCMBlockProtocolServer.java | 365 - .../hadoop/hdds/scm/server/SCMCertStore.java | 115 - .../scm/server/SCMClientProtocolServer.java | 610 - .../hdds/scm/server/SCMConfigurator.java | 222 - .../hdds/scm/server/SCMContainerMetrics.java | 88 - .../SCMDatanodeHeartbeatDispatcher.java | 289 - .../scm/server/SCMDatanodeProtocolServer.java | 404 - .../hadoop/hdds/scm/server/SCMMXBean.java | 67 - .../hdds/scm/server/SCMPolicyProvider.java | 80 - .../scm/server/SCMSecurityProtocolServer.java | 225 - .../hdds/scm/server/SCMStarterInterface.java | 37 - .../hdds/scm/server/SCMStorageConfig.java | 73 - .../scm/server/StorageContainerManager.java | 1103 - .../StorageContainerManagerHttpServer.java | 76 - .../StorageContainerManagerStarter.java | 153 - .../hadoop/hdds/scm/server/package-info.java | 22 - .../RetriableDatanodeEventWatcher.java | 58 - .../ozone/protocol/commands/package-info.java | 18 - .../src/main/resources/webapps/scm/index.html | 76 - .../src/main/resources/webapps/scm/main.html | 20 - .../resources/webapps/scm/scm-overview.html | 44 - .../src/main/resources/webapps/scm/scm.js | 54 - .../hadoop/hdds/scm/HddsServerUtilTest.java | 308 - .../apache/hadoop/hdds/scm/HddsTestUtils.java | 114 - .../hdds/scm/HddsWhiteboxTestUtils.java | 103 - .../hadoop/hdds/scm/TestHddsServerUtils.java | 229 - ...TestStorageContainerManagerHttpServer.java | 144 - .../org/apache/hadoop/hdds/scm/TestUtils.java | 597 - .../hdds/scm/block/TestBlockManager.java | 327 - .../hdds/scm/block/TestDeletedBlockLog.java | 437 - .../hadoop/hdds/scm/block/package-info.java | 23 - .../TestCommandStatusReportHandler.java | 118 - .../hadoop/hdds/scm/command/package-info.java | 22 - .../hdds/scm/container/MockNodeManager.java | 584 - .../TestCloseContainerEventHandler.java | 171 - .../TestContainerActionsHandler.java | 68 - .../container/TestContainerReportHandler.java | 510 - .../container/TestContainerStateManager.java | 128 - ...TestIncrementalContainerReportHandler.java | 223 - .../scm/container/TestReplicationManager.java | 662 - .../container/TestSCMContainerManager.java | 318 - .../scm/container/closer/package-info.java | 22 - .../hdds/scm/container/package-info.java | 22 - .../TestContainerPlacementFactory.java | 152 - .../TestSCMContainerPlacementCapacity.java | 110 - .../TestSCMContainerPlacementRackAware.java | 375 - .../TestSCMContainerPlacementRandom.java | 90 - .../container/replication/package-info.java | 23 - .../states/TestContainerAttribute.java | 143 - 
.../scm/container/states/package-info.java | 22 - .../hdds/scm/node/TestContainerPlacement.java | 179 - .../hdds/scm/node/TestDeadNodeHandler.java | 227 - .../hdds/scm/node/TestNodeReportHandler.java | 103 - .../hdds/scm/node/TestSCMNodeManager.java | 1225 -- .../scm/node/TestSCMNodeStorageStatMap.java | 262 - .../hdds/scm/node/TestStatisticsUpdate.java | 137 - .../hadoop/hdds/scm/node/package-info.java | 22 - .../node/states/TestNode2ContainerMap.java | 327 - .../hdds/scm/node/states/package-info.java | 23 - .../apache/hadoop/hdds/scm/package-info.java | 21 - .../pipeline/MockRatisPipelineProvider.java | 45 - .../TestHealthyPipelineSafeModeRule.java | 264 - .../TestOneReplicaPipelineSafeModeRule.java | 209 - .../scm/safemode/TestSCMSafeModeManager.java | 521 - .../scm/safemode/TestSafeModeHandler.java | 119 - .../hdds/scm/safemode/package-info.java | 21 - .../server/TestSCMBlockProtocolServer.java | 143 - .../server/TestSCMClientProtocolServer.java | 79 - .../scm/server/TestSCMContainerMetrics.java | 81 - .../TestSCMDatanodeHeartbeatDispatcher.java | 175 - .../server/TestSCMSecurityProtocolServer.java | 65 - .../TestStorageContainerManagerStarter.java | 166 - .../ozone/container/common/TestEndPoint.java | 542 - .../ozone/container/common/package-info.java | 22 - .../placement/TestContainerPlacement.java | 139 - .../placement/TestDatanodeMetrics.java | 56 - .../container/placement/package-info.java | 22 - .../container/replication/package-info.java | 18 - .../testutils/ReplicationNodeManagerMock.java | 330 - .../container/testutils/package-info.java | 18 - .../src/test/resources/nodegroup-mapping | 24 - .../src/test/resources/rack-mapping | 24 - hadoop-hdds/tools/pom.xml | 56 - .../scm/cli/ReplicationManagerCommands.java | 54 - .../ReplicationManagerStartSubcommand.java | 53 - .../ReplicationManagerStatusSubcommand.java | 60 - .../cli/ReplicationManagerStopSubcommand.java | 55 - .../apache/hadoop/hdds/scm/cli/SCMCLI.java | 168 - .../hdds/scm/cli/SafeModeCheckSubcommand.java | 61 - .../hadoop/hdds/scm/cli/SafeModeCommands.java | 59 - .../hdds/scm/cli/SafeModeExitSubcommand.java | 57 - .../hdds/scm/cli/TopologySubcommand.java | 124 - .../scm/cli/container/CloseSubcommand.java | 53 - .../scm/cli/container/ContainerCommands.java | 57 - .../scm/cli/container/CreateSubcommand.java | 64 - .../scm/cli/container/DeleteSubcommand.java | 59 - .../scm/cli/container/InfoSubcommand.java | 88 - .../scm/cli/container/ListSubcommand.java | 80 - .../hdds/scm/cli/container/package-info.java | 22 - .../hadoop/hdds/scm/cli/package-info.java | 23 - .../pipeline/ActivatePipelineSubcommand.java | 52 - .../cli/pipeline/ClosePipelineSubcommand.java | 52 - .../DeactivatePipelineSubcommand.java | 52 - .../cli/pipeline/ListPipelinesSubcommand.java | 74 - .../scm/cli/pipeline/PipelineCommands.java | 56 - .../hdds/scm/cli/pipeline/package-info.java | 22 - hadoop-ozone/.gitignore | 20 - hadoop-ozone/Jenkinsfile | 116 - hadoop-ozone/client/pom.xml | 42 - .../hadoop/ozone/client/BucketArgs.java | 173 - .../hadoop/ozone/client/ObjectStore.java | 498 - .../hadoop/ozone/client/OzoneBucket.java | 624 - .../hadoop/ozone/client/OzoneClient.java | 110 - .../ozone/client/OzoneClientException.java | 43 - .../ozone/client/OzoneClientFactory.java | 268 - .../client/OzoneClientInvocationHandler.java | 62 - .../hadoop/ozone/client/OzoneClientUtils.java | 37 - .../apache/hadoop/ozone/client/OzoneKey.java | 148 - .../hadoop/ozone/client/OzoneKeyDetails.java | 78 - .../hadoop/ozone/client/OzoneKeyLocation.java | 82 - 
.../ozone/client/OzoneMultipartUpload.java | 89 - .../client/OzoneMultipartUploadList.java | 46 - .../OzoneMultipartUploadPartListParts.java | 117 - .../hadoop/ozone/client/OzoneVolume.java | 328 - .../hadoop/ozone/client/VolumeArgs.java | 150 - .../client/io/BlockOutputStreamEntry.java | 354 - .../client/io/BlockOutputStreamEntryPool.java | 354 - .../ozone/client/io/KeyInputStream.java | 294 - .../ozone/client/io/KeyOutputStream.java | 629 - .../ozone/client/io/OzoneInputStream.java | 63 - .../ozone/client/io/OzoneOutputStream.java | 74 - .../hadoop/ozone/client/io/package-info.java | 23 - .../hadoop/ozone/client/package-info.java | 23 - .../ozone/client/protocol/ClientProtocol.java | 648 - .../ozone/client/protocol/package-info.java | 23 - .../hadoop/ozone/client/rpc/OzoneKMSUtil.java | 179 - .../hadoop/ozone/client/rpc/RpcClient.java | 1177 -- .../hadoop/ozone/client/rpc/package-info.java | 23 - .../ozone/client/TestHddsClientUtils.java | 220 - .../hadoop/ozone/client/package-info.java | 23 - .../ozone/client/rpc/TestOzoneKMSUtil.java | 51 - .../dev-support/findbugsExcludeFile.xml | 22 - hadoop-ozone/common/pom.xml | 189 - hadoop-ozone/common/src/main/bin/ozone | 312 - .../common/src/main/bin/ozone-config.sh | 56 - .../common/src/main/bin/start-ozone.sh | 140 - .../common/src/main/bin/stop-ozone.sh | 100 - .../hadoop/hdds/protocol/StorageType.java | 64 - .../hadoop/hdds/protocol/package-info.java | 20 - .../java/org/apache/hadoop/ozone/OmUtils.java | 528 - .../org/apache/hadoop/ozone/OzoneAcl.java | 343 - .../ozone/OzoneIllegalArgumentException.java | 40 - .../apache/hadoop/ozone/audit/OMAction.java | 82 - .../hadoop/ozone/audit/package-info.java | 22 - .../ozone/client/io/LengthInputStream.java | 49 - .../hadoop/ozone/client/io/package-info.java | 22 - .../hadoop/ozone/freon/OzoneGetConf.java | 271 - .../hadoop/ozone/freon/package-info.java | 21 - .../apache/hadoop/ozone/om/OMConfigKeys.java | 244 - .../hadoop/ozone/om/OMMetadataManager.java | 339 - .../hadoop/ozone/om/S3SecretManager.java | 36 - .../hadoop/ozone/om/S3SecretManagerImpl.java | 109 - .../ozone/om/codec/OmBucketInfoCodec.java | 53 - .../hadoop/ozone/om/codec/OmKeyInfoCodec.java | 53 - .../om/codec/OmMultipartKeyInfoCodec.java | 59 - .../ozone/om/codec/OmPrefixInfoCodec.java | 53 - .../ozone/om/codec/OmVolumeArgsCodec.java | 53 - .../om/codec/RepeatedOmKeyInfoCodec.java | 52 - .../ozone/om/codec/S3SecretValueCodec.java | 57 - .../ozone/om/codec/TokenIdentifierCodec.java | 52 - .../ozone/om/codec/UserVolumeInfoCodec.java | 53 - .../hadoop/ozone/om/codec/package-info.java | 24 - .../om/exceptions/NotLeaderException.java | 51 - .../ozone/om/exceptions/OMException.java | 216 - .../ozone/om/exceptions/package-info.java | 19 - .../ozone/om/ha/OMFailoverProxyProvider.java | 294 - .../hadoop/ozone/om/ha/OMProxyInfo.java | 59 - .../hadoop/ozone/om/ha/package-info.java | 23 - .../om/helpers/BucketEncryptionKeyInfo.java | 79 - .../om/helpers/EncryptionBucketInfo.java | 114 - .../hadoop/ozone/om/helpers/KeyValueUtil.java | 54 - .../ozone/om/helpers/OMRatisHelper.java | 112 - .../hadoop/ozone/om/helpers/OmBucketArgs.java | 202 - .../hadoop/ozone/om/helpers/OmBucketInfo.java | 369 - .../hadoop/ozone/om/helpers/OmKeyArgs.java | 269 - .../hadoop/ozone/om/helpers/OmKeyInfo.java | 421 - .../ozone/om/helpers/OmKeyLocationInfo.java | 230 - .../om/helpers/OmKeyLocationInfoGroup.java | 118 - .../OmMultipartCommitUploadPartInfo.java | 34 - .../ozone/om/helpers/OmMultipartInfo.java | 77 - .../ozone/om/helpers/OmMultipartKeyInfo.java | 104 - 
.../ozone/om/helpers/OmMultipartUpload.java | 149 - .../OmMultipartUploadCompleteInfo.java | 70 - .../OmMultipartUploadCompleteList.java | 63 - .../om/helpers/OmMultipartUploadList.java | 44 - .../helpers/OmMultipartUploadListParts.java | 97 - .../ozone/om/helpers/OmOzoneAclMap.java | 301 - .../hadoop/ozone/om/helpers/OmPartInfo.java | 60 - .../hadoop/ozone/om/helpers/OmPrefixInfo.java | 183 - .../hadoop/ozone/om/helpers/OmVolumeArgs.java | 359 - .../ozone/om/helpers/OpenKeySession.java | 50 - .../hadoop/ozone/om/helpers/OzoneAclUtil.java | 286 - .../hadoop/ozone/om/helpers/OzoneFSUtils.java | 89 - .../ozone/om/helpers/OzoneFileStatus.java | 116 - .../ozone/om/helpers/RepeatedOmKeyInfo.java | 91 - .../ozone/om/helpers/S3SecretValue.java | 90 - .../hadoop/ozone/om/helpers/ServiceInfo.java | 224 - .../ozone/om/helpers/ServiceInfoEx.java | 47 - .../hadoop/ozone/om/helpers/VolumeArgs.java | 140 - .../hadoop/ozone/om/helpers/WithMetadata.java | 45 - .../hadoop/ozone/om/helpers/package-info.java | 18 - .../ozone/om/lock/OzoneManagerLock.java | 477 - .../ozone/om/lock/OzoneManagerLockUtil.java | 75 - .../hadoop/ozone/om/lock/package-info.java | 22 - .../apache/hadoop/ozone/om/package-info.java | 21 - .../om/protocol/OzoneManagerHAProtocol.java | 37 - .../om/protocol/OzoneManagerProtocol.java | 530 - .../OzoneManagerSecurityProtocol.java | 67 - .../protocol/OzoneManagerServerProtocol.java | 28 - .../ozone/om/protocol/package-info.java | 19 - ...ManagerProtocolClientSideTranslatorPB.java | 1569 -- .../om/protocolPB/OzoneManagerProtocolPB.java | 41 - .../ozone/om/protocolPB/package-info.java | 19 - .../org/apache/hadoop/ozone/package-info.java | 22 - .../hadoop/ozone/protocolPB/OMPBHelper.java | 195 - .../ozone/protocolPB/OzonePBHelper.java | 30 - .../hadoop/ozone/protocolPB/package-info.java | 24 - .../ozone/security/AWSV4AuthValidator.java | 116 - .../ozone/security/GDPRSymmetricKey.java | 87 - .../OzoneBlockTokenSecretManager.java | 192 - .../OzoneDelegationTokenSecretManager.java | 560 - .../OzoneDelegationTokenSelector.java | 71 - .../hadoop/ozone/security/OzoneSecretKey.java | 176 - .../ozone/security/OzoneSecretManager.java | 258 - .../ozone/security/OzoneSecretStore.java | 115 - .../security/OzoneSecurityException.java | 105 - .../ozone/security/OzoneTokenIdentifier.java | 315 - .../ozone/security/acl/IAccessAuthorizer.java | 182 - .../hadoop/ozone/security/acl/IOzoneObj.java | 24 - .../security/acl/OzoneAccessAuthorizer.java | 31 - .../ozone/security/acl/OzoneAclConfig.java | 71 - .../hadoop/ozone/security/acl/OzoneObj.java | 147 - .../ozone/security/acl/OzoneObjInfo.java | 204 - .../ozone/security/acl/RequestContext.java | 122 - .../ozone/security/acl/package-info.java | 22 - .../hadoop/ozone/security/package-info.java | 21 - .../hadoop/ozone/util/BooleanBiFunction.java | 29 - .../hadoop/ozone/util/OzoneVersionInfo.java | 77 - .../apache/hadoop/ozone/util/RadixNode.java | 59 - .../apache/hadoop/ozone/util/RadixTree.java | 220 - .../hadoop/ozone/util/package-info.java | 22 - .../hadoop/ozone/web/utils/OzoneUtils.java | 174 - .../hadoop/ozone/web/utils/package-info.java | 18 - .../src/main/proto/OzoneManagerProtocol.proto | 1107 - .../resources/ozone-version-info.properties | 27 - .../src/main/shellprofile.d/hadoop-ozone.sh | 21 - .../org/apache/hadoop/ozone/TestOmUtils.java | 180 - .../apache/hadoop/ozone/TestOzoneAcls.java | 329 - .../om/codec/TestOmMultipartKeyInfoCodec.java | 66 - .../ozone/om/codec/TestOmPrefixInfoCodec.java | 99 - .../om/codec/TestS3SecretValueCodec.java | 88 - 
.../hadoop/ozone/om/codec/package-info.java | 24 - .../ozone/om/exceptions/TestResultCodes.java | 49 - .../ozone/om/helpers/TestOmBucketInfo.java | 46 - .../ozone/om/helpers/TestOmKeyInfo.java | 52 - .../om/helpers/TestOmMultipartUpload.java | 39 - .../ozone/om/helpers/TestOzoneAclUtil.java | 191 - .../hadoop/ozone/om/helpers/package-info.java | 21 - .../ozone/om/lock/TestOzoneManagerLock.java | 348 - .../hadoop/ozone/om/lock/package-info.java | 21 - .../security/TestAWSV4AuthValidator.java | 78 - .../ozone/security/TestGDPRSymmetricKey.java | 69 - .../TestOzoneDelegationTokenSelector.java | 87 - .../ozone/security/acl/TestOzoneObjInfo.java | 151 - .../hadoop/ozone/util/TestRadixTree.java | 127 - .../hadoop/ozone/util/package-info.java | 21 - .../csi/dev-support/findbugsExcludeFile.xml | 22 - hadoop-ozone/csi/pom.xml | 188 - .../hadoop/ozone/csi/ControllerService.java | 123 - .../apache/hadoop/ozone/csi/CsiServer.java | 160 - .../hadoop/ozone/csi/IdentitiyService.java | 72 - .../apache/hadoop/ozone/csi/NodeService.java | 142 - .../apache/hadoop/ozone/csi/package-info.java | 22 - hadoop-ozone/csi/src/main/proto/csi.proto | 1323 -- hadoop-ozone/datanode/pom.xml | 67 - hadoop-ozone/dev-support/checks/README.md | 27 - .../dev-support/checks/_mvn_unit_report.sh | 84 - hadoop-ozone/dev-support/checks/acceptance.sh | 35 - hadoop-ozone/dev-support/checks/author.sh | 33 - hadoop-ozone/dev-support/checks/blockade.sh | 28 - hadoop-ozone/dev-support/checks/build.sh | 21 - hadoop-ozone/dev-support/checks/checkstyle.sh | 43 - hadoop-ozone/dev-support/checks/findbugs.sh | 40 - .../dev-support/checks/integration.sh | 33 - hadoop-ozone/dev-support/checks/isolation.sh | 27 - hadoop-ozone/dev-support/checks/rat.sh | 38 - hadoop-ozone/dev-support/checks/shellcheck.sh | 37 - hadoop-ozone/dev-support/checks/unit.sh | 31 - hadoop-ozone/dev-support/docker/Dockerfile | 69 - .../intellij/install-runconfigs.sh | 21 - .../dev-support/intellij/log4j.properties | 18 - .../dev-support/intellij/ozone-site.xml | 70 - .../intellij/runConfigurations/Datanode.xml | 33 - .../runConfigurations/FreonStandalone.xml | 33 - .../runConfigurations/OzoneManager.xml | 33 - .../runConfigurations/OzoneManagerInit.xml | 33 - .../intellij/runConfigurations/OzoneShell.xml | 33 - .../intellij/runConfigurations/Recon.xml | 33 - .../intellij/runConfigurations/S3Gateway.xml | 33 - .../StorageContainerManager.xml | 33 - .../StorageContainerManagerInit.xml | 33 - hadoop-ozone/dist/README.md | 85 - .../dev-support/bin/dist-layout-stitching | 129 - .../dist/dev-support/bin/dist-tar-stitching | 45 - hadoop-ozone/dist/pom.xml | 428 - .../dist/src/main/assemblies/ozone-src.xml | 101 - hadoop-ozone/dist/src/main/compose/README.md | 51 - .../compose/common/grafana/conf/grafana.ini | 20 - .../dashboards/Ozone - Object Metrics.json | 1344 -- .../dashboards/Ozone - RPC Metrics.json | 875 - .../provisioning/dashboards/dashboards.yml | 22 - .../provisioning/datasources/datasources.yml | 25 - .../compose/common/prometheus/prometheus.yml | 25 - .../dist/src/main/compose/ozone-hdfs/.env | 18 - .../compose/ozone-hdfs/docker-compose.yaml | 69 - .../src/main/compose/ozone-hdfs/docker-config | 35 - .../src/main/compose/ozone-mr/common-config | 77 - .../src/main/compose/ozone-mr/hadoop27/.env | 22 - .../ozone-mr/hadoop27/docker-compose.yaml | 102 - .../compose/ozone-mr/hadoop27/docker-config | 18 - .../main/compose/ozone-mr/hadoop27/test.sh | 44 - .../src/main/compose/ozone-mr/hadoop31/.env | 22 - .../ozone-mr/hadoop31/docker-compose.yaml | 91 - 
.../compose/ozone-mr/hadoop31/docker-config | 18 - .../main/compose/ozone-mr/hadoop31/test.sh | 45 - .../src/main/compose/ozone-mr/hadoop32/.env | 20 - .../ozone-mr/hadoop32/docker-compose.yaml | 104 - .../compose/ozone-mr/hadoop32/docker-config | 18 - .../main/compose/ozone-mr/hadoop32/test.sh | 40 - .../dist/src/main/compose/ozone-om-ha/.env | 18 - .../compose/ozone-om-ha/docker-compose.yaml | 79 - .../main/compose/ozone-om-ha/docker-config | 40 - .../dist/src/main/compose/ozone-recon/.env | 18 - .../compose/ozone-recon/docker-compose.yaml | 65 - .../main/compose/ozone-recon/docker-config | 36 - .../dist/src/main/compose/ozone-recon/test.sh | 30 - .../dist/src/main/compose/ozone-topology/.env | 18 - .../ozone-topology/docker-compose.yaml | 110 - .../main/compose/ozone-topology/docker-config | 43 - .../compose/ozone-topology/network-config | 22 - .../src/main/compose/ozone-topology/test.sh | 38 - hadoop-ozone/dist/src/main/compose/ozone/.env | 18 - .../main/compose/ozone/docker-compose.yaml | 53 - .../dist/src/main/compose/ozone/docker-config | 34 - .../dist/src/main/compose/ozone/test.sh | 38 - .../dist/src/main/compose/ozoneblockade/.env | 18 - .../compose/ozoneblockade/docker-compose.yaml | 58 - .../main/compose/ozoneblockade/docker-config | 42 - .../dist/src/main/compose/ozoneperf/.env | 18 - .../dist/src/main/compose/ozoneperf/README.md | 56 - .../compose/ozoneperf/docker-compose.yaml | 89 - .../src/main/compose/ozoneperf/docker-config | 37 - .../dist/src/main/compose/ozoneperf/test.sh | 30 - .../src/main/compose/ozones3-haproxy/.env | 18 - .../ozones3-haproxy/docker-compose.yaml | 83 - .../compose/ozones3-haproxy/docker-config | 31 - .../ozones3-haproxy/haproxy-conf/haproxy.cfg | 38 - .../src/main/compose/ozones3-haproxy/test.sh | 30 - .../dist/src/main/compose/ozones3/.env | 18 - .../main/compose/ozones3/docker-compose.yaml | 58 - .../src/main/compose/ozones3/docker-config | 31 - .../dist/src/main/compose/ozones3/test.sh | 32 - .../dist/src/main/compose/ozonescripts/.env | 18 - .../compose/ozonescripts/.ssh/authorized_keys | 16 - .../src/main/compose/ozonescripts/.ssh/config | 18 - .../compose/ozonescripts/.ssh/environment | 16 - .../src/main/compose/ozonescripts/.ssh/id_rsa | 42 - .../main/compose/ozonescripts/.ssh/id_rsa.pub | 16 - .../src/main/compose/ozonescripts/Dockerfile | 34 - .../src/main/compose/ozonescripts/README.md | 38 - .../compose/ozonescripts/docker-compose.yaml | 42 - .../main/compose/ozonescripts/docker-config | 34 - .../dist/src/main/compose/ozonescripts/ps.sh | 17 - .../src/main/compose/ozonescripts/start.sh | 26 - .../src/main/compose/ozonescripts/stop.sh | 17 - .../dist/src/main/compose/ozonesecure-mr/.env | 19 - .../src/main/compose/ozonesecure-mr/README.md | 73 - .../ozonesecure-mr/docker-compose.yaml | 135 - .../main/compose/ozonesecure-mr/docker-config | 133 - .../docker-image/docker-krb5/Dockerfile-krb5 | 35 - .../docker-image/docker-krb5/README.md | 34 - .../docker-image/docker-krb5/kadm5.acl | 20 - .../docker-image/docker-krb5/krb5.conf | 41 - .../docker-image/docker-krb5/launcher.sh | 25 - .../src/main/compose/ozonesecure-mr/test.sh | 44 - .../dist/src/main/compose/ozonesecure/.env | 19 - .../src/main/compose/ozonesecure/README.md | 22 - .../compose/ozonesecure/docker-compose.yaml | 91 - .../main/compose/ozonesecure/docker-config | 90 - .../docker-image/docker-krb5/Dockerfile-krb5 | 34 - .../docker-image/docker-krb5/README.md | 34 - .../docker-image/docker-krb5/kadm5.acl | 20 - .../docker-image/docker-krb5/krb5.conf | 41 - 
.../docker-image/docker-krb5/launcher.sh | 25 - .../dist/src/main/compose/ozonesecure/test.sh | 42 - .../dist/src/main/compose/test-all.sh | 48 - .../dist/src/main/compose/test-single.sh | 55 - hadoop-ozone/dist/src/main/compose/testlib.sh | 157 - .../src/main/conf/dn-audit-log4j2.properties | 90 - .../dist/src/main/conf/log4j.properties | 158 - .../src/main/conf/om-audit-log4j2.properties | 90 - .../main/conf/ozone-shell-log4j.properties | 33 - .../dist/src/main/conf/ozone-site.xml | 24 - .../src/main/conf/scm-audit-log4j2.properties | 90 - hadoop-ozone/dist/src/main/docker/Dockerfile | 21 - .../dist/src/main/dockerbin/entrypoint.sh | 149 - .../dist/src/main/dockerbin/envtoconf.py | 117 - .../dist/src/main/dockerbin/transformation.py | 150 - .../k8s/definitions/jaeger/flekszible.yaml | 16 - .../main/k8s/definitions/jaeger/jaeger.yaml | 54 - .../definitions/ozone-csi/csi-controller.yaml | 53 - .../k8s/definitions/ozone-csi/csi-crd.yaml | 21 - .../k8s/definitions/ozone-csi/csi-node.yaml | 95 - .../k8s/definitions/ozone-csi/csi-rbac.yaml | 66 - .../ozone-csi/csi-storageclass.yaml | 20 - .../ozone-csi/definitions/csi.yaml | 28 - .../main/k8s/definitions/ozone/config.yaml | 33 - .../ozone/datanode-ss-service.yaml | 27 - .../k8s/definitions/ozone/datanode-ss.yaml | 54 - .../ozone/definitions/emptydir.yaml | 46 - .../ozone/definitions/persistence.yaml | 66 - .../ozone/definitions/profiler.yaml | 27 - .../ozone/definitions/prometheus.yaml | 26 - .../ozone/definitions/tracing.yaml | 33 - .../k8s/definitions/ozone/flekszible.yaml | 16 - .../definitions/ozone/freon/flekszible.yaml | 16 - .../k8s/definitions/ozone/freon/freon.yaml | 40 - .../k8s/definitions/ozone/om-ss-service.yaml | 27 - .../src/main/k8s/definitions/ozone/om-ss.yaml | 54 - .../k8s/definitions/ozone/s3g-ss-service.yaml | 27 - .../main/k8s/definitions/ozone/s3g-ss.yaml | 43 - .../k8s/definitions/ozone/scm-ss-service.yaml | 27 - .../main/k8s/definitions/ozone/scm-ss.yaml | 52 - .../ozone/transformations/config.yaml | 26 - .../k8s/definitions/prometheus/configmap.yaml | 49 - .../prometheus/definitions/enable.yaml | 26 - .../definitions/prometheus/deployment.yaml | 46 - .../definitions/prometheus/flekszible.yaml | 16 - .../main/k8s/definitions/prometheus/role.yaml | 33 - .../definitions/prometheus/rolebinding.yaml | 27 - .../prometheus/service-account.yaml | 19 - .../k8s/definitions/prometheus/service.yaml | 25 - .../k8s/definitions/pv-test/flekszible.yaml | 16 - .../pv-test/webserver-deployment.yaml | 50 - .../pv-test/webserver-service.yaml | 29 - .../definitions/pv-test/webserver-volume.yaml | 29 - .../k8s/examples/getting-started/Flekszible | 45 - .../examples/getting-started/LICENSE.header | 15 - .../getting-started/config-configmap.yaml | 34 - .../datanode-public-service.yaml | 28 - .../getting-started/datanode-service.yaml | 28 - .../getting-started/datanode-statefulset.yaml | 66 - .../freon/freon-deployment.yaml | 46 - .../getting-started/om-public-service.yaml | 28 - .../examples/getting-started/om-service.yaml | 28 - .../getting-started/om-statefulset.yaml | 65 - .../getting-started/s3g-public-service.yaml | 28 - .../examples/getting-started/s3g-service.yaml | 28 - .../getting-started/s3g-statefulset.yaml | 55 - .../getting-started/scm-public-service.yaml | 28 - .../examples/getting-started/scm-service.yaml | 28 - .../getting-started/scm-statefulset.yaml | 73 - .../src/main/k8s/examples/minikube/Flekszible | 54 - .../main/k8s/examples/minikube/LICENSE.header | 15 - .../examples/minikube/config-configmap.yaml | 34 - 
.../minikube/datanode-public-service.yaml | 28 - .../examples/minikube/datanode-service.yaml | 28 - .../minikube/datanode-statefulset.yaml | 56 - .../minikube/freon/freon-deployment.yaml | 46 - .../examples/minikube/om-public-service.yaml | 28 - .../k8s/examples/minikube/om-service.yaml | 28 - .../k8s/examples/minikube/om-statefulset.yaml | 65 - .../examples/minikube/s3g-public-service.yaml | 28 - .../k8s/examples/minikube/s3g-service.yaml | 28 - .../examples/minikube/s3g-statefulset.yaml | 55 - .../examples/minikube/scm-public-service.yaml | 28 - .../k8s/examples/minikube/scm-service.yaml | 28 - .../examples/minikube/scm-statefulset.yaml | 73 - .../main/k8s/examples/ozone-dev/Flekszible | 46 - .../k8s/examples/ozone-dev/LICENSE.header | 15 - .../examples/ozone-dev/config-configmap.yaml | 40 - .../ozone-dev/csi/csi-node-daemonset.yaml | 97 - .../ozone-dev/csi/csi-ozone-clusterrole.yaml | 98 - .../csi/csi-ozone-clusterrolebinding.yaml | 28 - .../csi/csi-ozone-serviceaccount.yaml | 21 - .../csi/csi-provisioner-deployment.yaml | 54 - .../org.apache.hadoop.ozone-csidriver.yaml | 22 - .../ozone-dev/csi/ozone-storageclass.yaml | 21 - .../ozone-dev/datanode-public-service.yaml | 28 - .../examples/ozone-dev/datanode-service.yaml | 28 - .../ozone-dev/datanode-statefulset.yaml | 73 - .../ozone-dev/freon/freon-deployment.yaml | 53 - .../ozone-dev/jaeger-public-service.yaml | 28 - .../examples/ozone-dev/jaeger-service.yaml | 28 - .../ozone-dev/jaeger-statefulset.yaml | 42 - .../examples/ozone-dev/om-public-service.yaml | 28 - .../k8s/examples/ozone-dev/om-service.yaml | 28 - .../examples/ozone-dev/om-statefulset.yaml | 71 - .../ozone-dev/prometheus-clusterrole.yaml | 42 - .../ozone-dev/prometheus-deployment.yaml | 48 - ...rometheus-operator-clusterrolebinding.yaml | 28 - .../prometheus-operator-serviceaccount.yaml | 20 - .../ozone-dev/prometheus-service.yaml | 26 - .../ozone-dev/prometheusconf-configmap.yaml | 50 - .../ozone-csi-test-webserver-deployment.yaml | 50 - ...-test-webserver-persistentvolumeclaim.yaml | 29 - .../ozone-csi-test-webserver-service.yaml | 29 - .../ozone-dev/s3g-public-service.yaml | 28 - .../k8s/examples/ozone-dev/s3g-service.yaml | 28 - .../examples/ozone-dev/s3g-statefulset.yaml | 62 - .../ozone-dev/scm-public-service.yaml | 28 - .../k8s/examples/ozone-dev/scm-service.yaml | 28 - .../examples/ozone-dev/scm-statefulset.yaml | 80 - .../src/main/k8s/examples/ozone/Flekszible | 37 - .../main/k8s/examples/ozone/LICENSE.header | 15 - .../k8s/examples/ozone/config-configmap.yaml | 37 - .../ozone/csi/csi-node-daemonset.yaml | 97 - .../ozone/csi/csi-ozone-clusterrole.yaml | 98 - .../csi/csi-ozone-clusterrolebinding.yaml | 28 - .../ozone/csi/csi-ozone-serviceaccount.yaml | 21 - .../ozone/csi/csi-provisioner-deployment.yaml | 54 - .../org.apache.hadoop.ozone-csidriver.yaml | 22 - .../ozone/csi/ozone-storageclass.yaml | 21 - .../k8s/examples/ozone/datanode-service.yaml | 28 - .../examples/ozone/datanode-statefulset.yaml | 72 - .../ozone/freon/freon-deployment.yaml | 46 - .../main/k8s/examples/ozone/om-service.yaml | 28 - .../k8s/examples/ozone/om-statefulset.yaml | 72 - .../ozone-csi-test-webserver-deployment.yaml | 50 - ...-test-webserver-persistentvolumeclaim.yaml | 29 - .../ozone-csi-test-webserver-service.yaml | 29 - .../main/k8s/examples/ozone/s3g-service.yaml | 28 - .../k8s/examples/ozone/s3g-statefulset.yaml | 61 - .../main/k8s/examples/ozone/scm-service.yaml | 28 - .../k8s/examples/ozone/scm-statefulset.yaml | 79 - .../dist/src/main/license/bin/LICENSE.txt | 443 - 
.../dist/src/main/license/bin/NOTICE.txt | 520 - .../bin/licenses/LICENSE-angular-nvd3.txt | 16 - .../license/bin/licenses/LICENSE-angular.txt | 21 - ...ICENSE-com.google.code.findbugs-jsr305.txt | 8 - .../licenses/LICENSE-com.google.re2j-re2j.txt | 32 - .../bin/licenses/LICENSE-com.jcraft-jsch.txt | 30 - .../bin/licenses/LICENSE-com.sun.jersey.txt | 274 - .../bin/licenses/LICENSE-com.sun.xml.bind.txt | 759 - ...E-com.thoughtworks.paranamer-paranamer.txt | 29 - .../main/license/bin/licenses/LICENSE-d3.txt | 26 - .../bin/licenses/LICENSE-dnsjava-dnsjava.txt | 24 - ...arta.annotation-jakarta.annotation-api.txt | 277 - .../LICENSE-javax.activation-activation.txt | 134 - ...-javax.annotation-javax.annotation-api.txt | 263 - .../LICENSE-javax.el-javax.el-api.txt | 263 - ...avax.interceptor-javax.interceptor-api.txt | 263 - ...ICENSE-javax.servlet-javax.servlet-api.txt | 263 - .../LICENSE-javax.servlet.jsp-jsp-api.txt | 759 - .../LICENSE-javax.ws.rs-javax.ws.rs-api.txt | 759 - .../LICENSE-javax.ws.rs-jsr311-api.txt | 759 - .../LICENSE-javax.xml.bind-jaxb-api.txt | 274 - .../license/bin/licenses/LICENSE-jersey.txt | 759 - .../license/bin/licenses/LICENSE-jetty.txt | 415 - .../license/bin/licenses/LICENSE-jquery.txt | 20 - ...LICENSE-net.sf.jopt-simple-jopt-simple.txt | 24 - .../license/bin/licenses/LICENSE-nvd3.txt | 10 - ...dehaus.mojo-animal-sniffer-annotations.txt | 21 - ...ICENSE-org.codehaus.woodstox-stax2-api.txt | 13 - ...g.fusesource.leveldbjni-leveldbjni-all.txt | 27 - .../licenses/LICENSE-org.glassfish.hk2.txt | 759 - .../LICENSE-org.openjdk.jmh-jmh-core.txt | 347 - ...g.openjdk.jmh-jmh-generator-annprocess.txt | 347 - .../bin/licenses/LICENSE-org.ow2.asm-asm.txt | 28 - .../bin/licenses/LICENSE-org.slf4j.txt | 21 - .../license/bin/licenses/LICENSE-protobuf.txt | 32 - .../LICENSE-ratis-thirdparty-misc.txt | 353 - .../licenses/NOTICE-ratis-thirtparty-misc.txt | 340 - .../dist/src/main/license/src/LICENSE.txt | 239 - .../dist/src/main/license/src/NOTICE.txt | 33 - .../main/license/src/licenses/IMPORTANT.md | 21 - .../src/licenses/LICENSE-angular-nvd3.txt | 16 - .../license/src/licenses/LICENSE-angular.txt | 21 - .../main/license/src/licenses/LICENSE-d3.txt | 26 - .../license/src/licenses/LICENSE-jquery.txt | 20 - .../license/src/licenses/LICENSE-nvd3.txt | 10 - hadoop-ozone/dist/src/main/ozone/README.txt | 51 - hadoop-ozone/dist/src/main/smoketest/.env | 17 - .../dist/src/main/smoketest/README.md | 63 - .../dist/src/main/smoketest/__init__.robot | 18 - .../smoketest/auditparser/auditparser.robot | 53 - .../dist/src/main/smoketest/basic/basic.robot | 35 - .../main/smoketest/basic/ozone-shell.robot | 138 - .../dist/src/main/smoketest/commonlib.robot | 65 - .../src/main/smoketest/createbucketenv.robot | 42 - .../dist/src/main/smoketest/createmrenv.robot | 47 - .../dist/src/main/smoketest/env-compose.robot | 32 - .../dist/src/main/smoketest/gdpr/gdpr.robot | 89 - .../src/main/smoketest/kinit-hadoop.robot | 25 - .../dist/src/main/smoketest/kinit.robot | 28 - .../dist/src/main/smoketest/mapreduce.robot | 37 - .../main/smoketest/ozonefs/hadoopo3fs.robot | 32 - .../src/main/smoketest/ozonefs/ozonefs.robot | 112 - .../main/smoketest/s3/MultipartUpload.robot | 274 - .../dist/src/main/smoketest/s3/README.md | 27 - .../dist/src/main/smoketest/s3/__init__.robot | 21 - .../dist/src/main/smoketest/s3/awss3.robot | 47 - .../src/main/smoketest/s3/bucketcreate.robot | 34 - .../src/main/smoketest/s3/buckethead.robot | 34 - .../src/main/smoketest/s3/bucketlist.robot | 32 - 
.../src/main/smoketest/s3/commonawslib.robot | 81 - .../src/main/smoketest/s3/objectcopy.robot | 66 - .../src/main/smoketest/s3/objectdelete.robot | 72 - .../main/smoketest/s3/objectmultidelete.robot | 48 - .../src/main/smoketest/s3/objectputget.robot | 154 - .../dist/src/main/smoketest/s3/webui.robot | 35 - .../src/main/smoketest/scmcli/pipeline.robot | 28 - .../smoketest/security/ozone-secure-fs.robot | 131 - .../smoketest/security/ozone-secure-s3.robot | 44 - hadoop-ozone/dist/src/main/smoketest/test.sh | 28 - .../src/main/smoketest/topology/scmcli.robot | 32 - .../network-tests/pom.xml | 103 - .../network-tests/src/test/blockade/README.md | 42 - .../src/test/blockade/conftest.py | 113 - .../src/test/blockade/ozone/__init__.py | 14 - .../src/test/blockade/ozone/blockade.py | 92 - .../src/test/blockade/ozone/client.py | 75 - .../src/test/blockade/ozone/cluster.py | 316 - .../src/test/blockade/ozone/constants.py | 23 - .../src/test/blockade/ozone/container.py | 138 - .../src/test/blockade/ozone/exceptions.py | 22 - .../src/test/blockade/ozone/util.py | 80 - .../blockade/test_blockade_client_failure.py | 117 - .../test_blockade_datanode_isolation.py | 156 - .../src/test/blockade/test_blockade_flaky.py | 57 - .../blockade/test_blockade_mixed_failure.py | 121 - ...ckade_mixed_failure_three_nodes_isolate.py | 164 - .../test_blockade_mixed_failure_two_nodes.py | 132 - .../blockade/test_blockade_scm_isolation.py | 126 - .../src/test/compose/docker-compose.yaml | 50 - .../src/test/compose/docker-config | 77 - hadoop-ozone/fault-injection-test/pom.xml | 35 - .../dev-support/findbugsExcludeFile.xml | 19 - hadoop-ozone/insight/pom.xml | 131 - .../ozone/insight/BaseInsightPoint.java | 188 - .../ozone/insight/BaseInsightSubCommand.java | 106 - .../hadoop/ozone/insight/Component.java | 116 - .../insight/ConfigurationSubCommand.java | 89 - .../apache/hadoop/ozone/insight/Insight.java | 41 - .../hadoop/ozone/insight/InsightPoint.java | 49 - .../hadoop/ozone/insight/ListSubCommand.java | 59 - .../hadoop/ozone/insight/LogSubcommand.java | 167 - .../hadoop/ozone/insight/LoggerSource.java | 72 - .../hadoop/ozone/insight/MetricDisplay.java | 69 - .../ozone/insight/MetricGroupDisplay.java | 69 - .../ozone/insight/MetricsSubCommand.java | 132 - .../ozone/insight/datanode/RatisInsight.java | 75 - .../ozone/insight/datanode/package-info.java | 23 - .../ozone/insight/om/KeyManagerInsight.java | 78 - .../ozone/insight/om/OmProtocolInsight.java | 67 - .../hadoop/ozone/insight/om/package-info.java | 23 - .../hadoop/ozone/insight/package-info.java | 24 - .../ozone/insight/scm/EventQueueInsight.java | 47 - .../ozone/insight/scm/NodeManagerInsight.java | 74 - .../insight/scm/ReplicaManagerInsight.java | 60 - .../scm/ScmProtocolBlockLocationInsight.java | 71 - .../ScmProtocolContainerLocationInsight.java | 71 - .../scm/ScmProtocolDatanodeInsight.java | 72 - .../scm/ScmProtocolSecurityInsight.java | 71 - .../ozone/insight/scm/package-info.java | 23 - .../ozone/insight/LogSubcommandTest.java | 41 - hadoop-ozone/integration-test/pom.xml | 136 - .../src/test/bin/start-chaos.sh | 35 - .../TestContainerStateManagerIntegration.java | 470 - .../TestSCMContainerManagerMetrics.java | 167 - .../scm/pipeline/TestNode2PipelineMap.java | 122 - .../hdds/scm/pipeline/TestNodeFailure.java | 134 - .../hdds/scm/pipeline/TestPipelineClose.java | 264 - .../pipeline/TestPipelineStateManager.java | 475 - .../TestRatisPipelineCreateAndDestroy.java | 131 - .../pipeline/TestRatisPipelineProvider.java | 207 - 
.../scm/pipeline/TestSCMPipelineManager.java | 317 - .../hdds/scm/pipeline/TestSCMRestart.java | 112 - .../pipeline/TestSimplePipelineProvider.java | 103 - .../hdds/scm/pipeline/package-info.java | 22 - .../TestSCMSafeModeWithPipelineRules.java | 202 - .../hadoop/ozone/MiniOzoneChaosCluster.java | 281 - .../apache/hadoop/ozone/MiniOzoneCluster.java | 472 - .../hadoop/ozone/MiniOzoneClusterImpl.java | 663 - .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 339 - .../hadoop/ozone/MiniOzoneLoadGenerator.java | 268 - .../apache/hadoop/ozone/OzoneTestUtils.java | 112 - .../apache/hadoop/ozone/RatisTestHelper.java | 135 - .../hadoop/ozone/TestContainerOperations.java | 88 - .../TestContainerStateMachineIdempotency.java | 119 - .../org/apache/hadoop/ozone/TestDataUtil.java | 95 - .../ozone/TestMiniChaosOzoneCluster.java | 131 - .../hadoop/ozone/TestMiniOzoneCluster.java | 326 - .../ozone/TestOzoneConfigurationFields.java | 61 - .../hadoop/ozone/TestSecureOzoneCluster.java | 896 - .../ozone/TestStorageContainerManager.java | 656 - .../TestStorageContainerManagerHelper.java | 153 - .../hadoop/ozone/chaos/TestProbability.java | 43 - .../client/CertificateClientTestImpl.java | 175 - .../hadoop/ozone/client/package-info.java | 20 - .../client/rpc/Test2WayCommitInRatis.java | 156 - .../hadoop/ozone/client/rpc/TestBCSID.java | 148 - .../client/rpc/TestBlockOutputStream.java | 696 - .../TestBlockOutputStreamWithFailures.java | 1218 -- .../TestCloseContainerHandlingByClient.java | 474 - .../ozone/client/rpc/TestCommitWatcher.java | 296 - .../rpc/TestContainerReplicationEndToEnd.java | 219 - .../client/rpc/TestContainerStateMachine.java | 211 - .../TestContainerStateMachineFailures.java | 504 - .../rpc/TestDeleteWithSlowFollower.java | 291 - .../rpc/TestFailureHandlingByClient.java | 415 - .../rpc/TestHybridPipelineOnDatanode.java | 166 - .../ozone/client/rpc/TestKeyInputStream.java | 175 - .../TestMultiBlockWritesWithDnFailures.java | 220 - .../client/rpc/TestOzoneAtRestEncryption.java | 239 - .../TestOzoneClientRetriesOnException.java | 233 - .../ozone/client/rpc/TestOzoneRpcClient.java | 55 - .../rpc/TestOzoneRpcClientAbstract.java | 2810 --- .../rpc/TestOzoneRpcClientForAclAuditLog.java | 305 - .../rpc/TestOzoneRpcClientWithRatis.java | 152 - .../ozone/client/rpc/TestReadRetries.java | 222 - .../client/rpc/TestSecureOzoneRpcClient.java | 258 - .../ozone/client/rpc/TestWatchForCommit.java | 463 - .../hadoop/ozone/client/rpc/package-info.java | 23 - .../ozone/container/ContainerTestHelper.java | 907 - .../container/TestContainerReplication.java | 192 - .../common/TestBlockDeletingService.java | 465 - .../common/helpers/TestBlockData.java | 132 - .../TestContainerDeletionChoosingPolicy.java | 191 - .../common/impl/TestContainerPersistence.java | 897 - .../commandhandler/TestBlockDeletion.java | 350 - .../TestCloseContainerByPipeline.java | 354 - .../TestCloseContainerHandler.java | 134 - .../TestDeleteContainerHandler.java | 279 - .../commandhandler/package-info.java | 21 - .../server/ratis/TestCSMMetrics.java | 237 - .../metrics/TestContainerMetrics.java | 173 - .../ozoneimpl/TestOzoneContainer.java | 574 - .../ozoneimpl/TestOzoneContainerRatis.java | 138 - .../ozoneimpl/TestOzoneContainerWithTLS.java | 204 - .../container/ozoneimpl/TestRatisManager.java | 124 - .../ozoneimpl/TestSecureOzoneContainer.java | 226 - .../container/server/TestContainerServer.java | 299 - .../server/TestSecureContainerServer.java | 291 - .../ozone/dn/scrubber/TestDataScrubber.java | 218 - .../hadoop/ozone/freon/package-info.java 
| 21 - .../ozone/om/TestContainerReportWithKeys.java | 132 - .../hadoop/ozone/om/TestKeyManagerImpl.java | 975 - .../hadoop/ozone/om/TestKeyPurging.java | 138 - .../ozone/om/TestOMDbCheckpointServlet.java | 172 - .../hadoop/ozone/om/TestOMRatisSnapshots.java | 191 - .../apache/hadoop/ozone/om/TestOmAcls.java | 151 - .../ozone/om/TestOmBlockVersioning.java | 222 - .../apache/hadoop/ozone/om/TestOmInit.java | 95 - .../apache/hadoop/ozone/om/TestOmMetrics.java | 443 - .../om/TestOzoneManagerConfiguration.java | 346 - .../hadoop/ozone/om/TestOzoneManagerHA.java | 1248 -- .../om/TestOzoneManagerRestInterface.java | 135 - .../ozone/om/TestOzoneManagerRestart.java | 210 - .../om/TestOzoneManagerRocksDBLogging.java | 97 - .../hadoop/ozone/om/TestScmSafeMode.java | 353 - .../ozone/om/TestSecureOzoneManager.java | 217 - .../apache/hadoop/ozone/om/package-info.java | 22 - .../om/snapshot/TestOMRatisSnapshotInfo.java | 65 - .../TestOzoneManagerSnapshotProvider.java | 129 - .../ozone/ozShell/TestOzoneDatanodeShell.java | 204 - .../ozone/ozShell/TestOzoneShellHA.java | 343 - .../hadoop/ozone/ozShell/TestS3Shell.java | 292 - .../org/apache/hadoop/ozone/package-info.java | 22 - .../ozone/scm/TestAllocateContainer.java | 84 - .../ozone/scm/TestContainerSmallFile.java | 203 - .../TestGetCommittedBlockLengthAndPutKey.java | 179 - ...estSCMContainerPlacementPolicyMetrics.java | 156 - .../hadoop/ozone/scm/TestSCMMXBean.java | 212 - .../ozone/scm/TestSCMNodeManagerMXBean.java | 120 - .../ozone/scm/TestXceiverClientManager.java | 258 - .../ozone/scm/TestXceiverClientMetrics.java | 179 - .../hadoop/ozone/scm/node/TestQueryNode.java | 132 - .../ozone/scm/node/TestSCMNodeMetrics.java | 174 - .../hadoop/ozone/scm/node/package-info.java | 24 - .../pipeline/TestPipelineManagerMXBean.java | 97 - .../scm/pipeline/TestSCMPipelineMetrics.java | 133 - .../ozone/scm/pipeline/package-info.java | 24 - .../acl/TestOzoneNativeAuthorizer.java | 470 - .../src/test/resources/auditlog.properties | 76 - .../src/test/resources/core-site.xml | 24 - .../src/test/resources/hdfs-site.xml | 24 - .../src/test/resources/log4j.properties | 21 - .../src/test/resources/ssl/ca.crt | 27 - .../src/test/resources/ssl/ca.key | 54 - .../src/test/resources/ssl/client.crt | 27 - .../src/test/resources/ssl/client.csr | 26 - .../src/test/resources/ssl/client.key | 51 - .../src/test/resources/ssl/client.pem | 52 - .../src/test/resources/ssl/generate.sh | 34 - .../src/test/resources/ssl/server.crt | 27 - .../src/test/resources/ssl/server.csr | 26 - .../src/test/resources/ssl/server.key | 51 - .../src/test/resources/ssl/server.pem | 52 - .../resources/webapps/ozoneManager/.gitkeep | 15 - .../src/test/resources/webapps/scm/.gitkeep | 15 - hadoop-ozone/ozone-manager/pom.xml | 135 - .../apache/hadoop/ozone/om/BucketManager.java | 82 - .../hadoop/ozone/om/BucketManagerImpl.java | 590 - .../org/apache/hadoop/ozone/om/IOzoneAcl.java | 80 - .../hadoop/ozone/om/KeyDeletingService.java | 256 - .../apache/hadoop/ozone/om/KeyManager.java | 250 - .../hadoop/ozone/om/KeyManagerImpl.java | 2157 -- .../ozone/om/OMDBCheckpointServlet.java | 177 - .../org/apache/hadoop/ozone/om/OMMXBean.java | 31 - .../org/apache/hadoop/ozone/om/OMMetrics.java | 763 - .../hadoop/ozone/om/OMPolicyProvider.java | 67 - .../hadoop/ozone/om/OMStarterInterface.java | 33 - .../org/apache/hadoop/ozone/om/OMStorage.java | 107 - .../ozone/om/OmMetadataManagerImpl.java | 943 - .../apache/hadoop/ozone/om/OmMetricsInfo.java | 43 - .../ozone/om/OpenKeyCleanupService.java | 119 - 
.../apache/hadoop/ozone/om/OzoneManager.java | 3295 --- .../ozone/om/OzoneManagerHttpServer.java | 83 - .../hadoop/ozone/om/OzoneManagerStarter.java | 130 - .../apache/hadoop/ozone/om/PrefixManager.java | 45 - .../hadoop/ozone/om/PrefixManagerImpl.java | 417 - .../hadoop/ozone/om/S3BucketManager.java | 87 - .../hadoop/ozone/om/S3BucketManagerImpl.java | 254 - .../org/apache/hadoop/ozone/om/ScmClient.java | 44 - .../ozone/om/ServiceListJSONServlet.java | 104 - .../apache/hadoop/ozone/om/VolumeManager.java | 103 - .../hadoop/ozone/om/VolumeManagerImpl.java | 705 - .../hadoop/ozone/om/fs/OzoneManagerFS.java | 54 - .../hadoop/ozone/om/fs/package-info.java | 21 - .../hadoop/ozone/om/ha/OMHANodeDetails.java | 306 - .../hadoop/ozone/om/ha/OMNodeDetails.java | 161 - .../hadoop/ozone/om/ha/package-info.java | 23 - .../apache/hadoop/ozone/om/package-info.java | 21 - .../ozone/om/ratis/OMRatisSnapshotInfo.java | 180 - .../om/ratis/OzoneManagerDoubleBuffer.java | 350 - .../om/ratis/OzoneManagerRatisClient.java | 210 - .../om/ratis/OzoneManagerRatisServer.java | 648 - .../om/ratis/OzoneManagerRatisSnapshot.java | 32 - .../om/ratis/OzoneManagerStateMachine.java | 377 - .../om/ratis/helpers/DoubleBufferEntry.java | 44 - .../ozone/om/ratis/helpers/package-info.java | 20 - .../OzoneManagerDoubleBufferMetrics.java | 89 - .../ozone/om/ratis/metrics/package-info.java | 21 - .../hadoop/ozone/om/ratis/package-info.java | 22 - .../utils/OzoneManagerDoubleBufferHelper.java | 33 - .../ratis/utils/OzoneManagerRatisUtils.java | 203 - .../ozone/om/ratis/utils/package-info.java | 21 - .../ozone/om/request/OMClientRequest.java | 220 - .../ozone/om/request/RequestAuditor.java | 81 - .../request/bucket/OMBucketCreateRequest.java | 280 - .../request/bucket/OMBucketDeleteRequest.java | 174 - .../bucket/OMBucketSetPropertyRequest.java | 205 - .../bucket/acl/OMBucketAclRequest.java | 188 - .../bucket/acl/OMBucketAddAclRequest.java | 122 - .../bucket/acl/OMBucketRemoveAclRequest.java | 119 - .../bucket/acl/OMBucketSetAclRequest.java | 120 - .../om/request/bucket/acl/package-info.java | 23 - .../ozone/om/request/bucket/package-info.java | 23 - .../file/OMDirectoryCreateRequest.java | 239 - .../om/request/file/OMFileCreateRequest.java | 357 - .../ozone/om/request/file/OMFileRequest.java | 116 - .../ozone/om/request/file/package-info.java | 23 - .../request/key/OMAllocateBlockRequest.java | 227 - .../om/request/key/OMKeyCommitRequest.java | 199 - .../om/request/key/OMKeyCreateRequest.java | 209 - .../om/request/key/OMKeyDeleteRequest.java | 175 - .../om/request/key/OMKeyPurgeRequest.java | 73 - .../om/request/key/OMKeyRenameRequest.java | 202 - .../ozone/om/request/key/OMKeyRequest.java | 536 - .../om/request/key/acl/OMKeyAclRequest.java | 175 - .../request/key/acl/OMKeyAddAclRequest.java | 108 - .../key/acl/OMKeyRemoveAclRequest.java | 109 - .../request/key/acl/OMKeySetAclRequest.java | 108 - .../om/request/key/acl/package-info.java | 24 - .../key/acl/prefix/OMPrefixAclRequest.java | 197 - .../key/acl/prefix/OMPrefixAddAclRequest.java | 122 - .../acl/prefix/OMPrefixRemoveAclRequest.java | 119 - .../key/acl/prefix/OMPrefixSetAclRequest.java | 120 - .../request/key/acl/prefix/package-info.java | 22 - .../ozone/om/request/key/package-info.java | 23 - .../hadoop/ozone/om/request/package-info.java | 21 - .../s3/bucket/S3BucketCreateRequest.java | 391 - .../s3/bucket/S3BucketDeleteRequest.java | 199 - .../om/request/s3/bucket/package-info.java | 23 - .../S3InitiateMultipartUploadRequest.java | 219 - 
.../S3MultipartUploadAbortRequest.java | 173 - .../S3MultipartUploadCommitPartRequest.java | 228 - .../S3MultipartUploadCompleteRequest.java | 319 - .../om/request/s3/multipart/package-info.java | 23 - .../s3/security/S3GetSecretRequest.java | 195 - .../om/request/s3/security/package-info.java | 22 - .../OMCancelDelegationTokenRequest.java | 125 - .../security/OMGetDelegationTokenRequest.java | 187 - .../OMRenewDelegationTokenRequest.java | 164 - .../om/request/security/package-info.java | 22 - .../ozone/om/request/util/ObjectParser.java | 74 - .../ozone/om/request/util/package-info.java | 23 - .../request/volume/OMVolumeCreateRequest.java | 204 - .../request/volume/OMVolumeDeleteRequest.java | 191 - .../om/request/volume/OMVolumeRequest.java | 143 - .../volume/OMVolumeSetOwnerRequest.java | 211 - .../volume/OMVolumeSetQuotaRequest.java | 172 - .../volume/acl/OMVolumeAclRequest.java | 175 - .../volume/acl/OMVolumeAddAclRequest.java | 110 - .../volume/acl/OMVolumeRemoveAclRequest.java | 109 - .../volume/acl/OMVolumeSetAclRequest.java | 108 - .../om/request/volume/acl/package-info.java | 22 - .../ozone/om/request/volume/package-info.java | 22 - .../ozone/om/response/OMClientResponse.java | 69 - .../bucket/OMBucketCreateResponse.java | 68 - .../bucket/OMBucketDeleteResponse.java | 69 - .../bucket/OMBucketSetPropertyResponse.java | 60 - .../bucket/acl/OMBucketAclResponse.java | 62 - .../om/response/bucket/acl/package-info.java | 22 - .../om/response/bucket/package-info.java | 23 - .../file/OMDirectoryCreateResponse.java | 68 - .../response/file/OMFileCreateResponse.java | 41 - .../ozone/om/response/file/package-info.java | 23 - .../response/key/OMAllocateBlockResponse.java | 61 - .../om/response/key/OMKeyCommitResponse.java | 69 - .../om/response/key/OMKeyCreateResponse.java | 63 - .../om/response/key/OMKeyDeleteResponse.java | 100 - .../om/response/key/OMKeyPurgeResponse.java | 56 - .../om/response/key/OMKeyRenameResponse.java | 69 - .../om/response/key/acl/OMKeyAclResponse.java | 63 - .../om/response/key/acl/package-info.java | 24 - .../key/acl/prefix/OMPrefixAclResponse.java | 71 - .../response/key/acl/prefix/package-info.java | 24 - .../ozone/om/response/key/package-info.java | 23 - .../ozone/om/response/package-info.java | 24 - .../s3/bucket/S3BucketCreateResponse.java | 79 - .../s3/bucket/S3BucketDeleteResponse.java | 55 - .../om/response/s3/bucket/package-info.java | 24 - .../S3InitiateMultipartUploadResponse.java | 80 - .../S3MultipartUploadAbortResponse.java | 90 - .../S3MultipartUploadCommitPartResponse.java | 125 - .../S3MultipartUploadCompleteResponse.java | 65 - .../response/s3/multipart/package-info.java | 22 - .../s3/security/S3GetSecretResponse.java | 56 - .../om/response/s3/security/package-info.java | 22 - .../OMCancelDelegationTokenResponse.java | 56 - .../OMGetDelegationTokenResponse.java | 59 - .../OMRenewDelegationTokenResponse.java | 58 - .../om/response/security/package-info.java | 22 - .../volume/OMVolumeAclOpResponse.java | 69 - .../volume/OMVolumeCreateResponse.java | 75 - .../volume/OMVolumeDeleteResponse.java | 72 - .../volume/OMVolumeSetOwnerResponse.java | 81 - .../volume/OMVolumeSetQuotaResponse.java | 57 - .../om/response/volume/package-info.java | 22 - .../OzoneManagerSnapshotProvider.java | 210 - .../ozone/om/snapshot/package-info.java | 23 - .../OzoneManagerHARequestHandler.java | 39 - .../OzoneManagerHARequestHandlerImpl.java | 105 - ...ManagerProtocolServerSideTranslatorPB.java | 246 - .../OzoneManagerRequestHandler.java | 1132 - 
.../ozone/protocolPB/RequestHandler.java | 48 - .../hadoop/ozone/protocolPB/package-info.java | 22 - .../security/acl/OzoneNativeAuthorizer.java | 120 - .../ozone/security/acl/package-info.java | 22 - .../hadoop/ozone/web/ozShell/Handler.java | 57 - .../ozone/web/ozShell/ObjectPrinter.java | 38 - .../ozone/web/ozShell/OzoneAddress.java | 263 - .../hadoop/ozone/web/ozShell/OzoneShell.java | 64 - .../hadoop/ozone/web/ozShell/Shell.java | 76 - .../ozShell/bucket/AddAclBucketHandler.java | 100 - .../web/ozShell/bucket/BucketCommands.java | 69 - .../ozShell/bucket/CreateBucketHandler.java | 105 - .../ozShell/bucket/DeleteBucketHandler.java | 62 - .../ozShell/bucket/GetAclBucketHandler.java | 84 - .../web/ozShell/bucket/InfoBucketHandler.java | 66 - .../web/ozShell/bucket/ListBucketHandler.java | 102 - .../bucket/RemoveAclBucketHandler.java | 100 - .../web/ozShell/bucket/S3BucketMapping.java | 66 - .../ozShell/bucket/SetAclBucketHandler.java | 100 - .../web/ozShell/bucket/package-info.java | 23 - .../web/ozShell/keys/AddAclKeyHandler.java | 103 - .../web/ozShell/keys/DeleteKeyHandler.java | 67 - .../web/ozShell/keys/GetAclKeyHandler.java | 87 - .../ozone/web/ozShell/keys/GetKeyHandler.java | 115 - .../web/ozShell/keys/InfoKeyHandler.java | 75 - .../ozone/web/ozShell/keys/KeyCommands.java | 71 - .../web/ozShell/keys/ListKeyHandler.java | 110 - .../ozone/web/ozShell/keys/PutKeyHandler.java | 125 - .../web/ozShell/keys/RemoveAclKeyHandler.java | 103 - .../web/ozShell/keys/RenameKeyHandler.java | 73 - .../web/ozShell/keys/SetAclKeyHandler.java | 102 - .../ozone/web/ozShell/keys/package-info.java | 23 - .../ozone/web/ozShell/package-info.java | 27 - .../web/ozShell/s3/GetS3SecretHandler.java | 61 - .../hadoop/ozone/web/ozShell/s3/S3Shell.java | 56 - .../ozone/web/ozShell/s3/package-info.java | 21 - .../web/ozShell/token/CancelTokenHandler.java | 72 - .../web/ozShell/token/GetTokenHandler.java | 77 - .../web/ozShell/token/PrintTokenHandler.java | 71 - .../web/ozShell/token/RenewTokenHandler.java | 75 - .../web/ozShell/token/TokenCommands.java | 64 - .../ozone/web/ozShell/token/package-info.java | 26 - .../ozShell/volume/AddAclVolumeHandler.java | 97 - .../ozShell/volume/CreateVolumeHandler.java | 100 - .../ozShell/volume/DeleteVolumeHandler.java | 59 - .../ozShell/volume/GetAclVolumeHandler.java | 78 - .../web/ozShell/volume/InfoVolumeHandler.java | 58 - .../web/ozShell/volume/ListVolumeHandler.java | 108 - .../volume/RemoveAclVolumeHandler.java | 97 - .../ozShell/volume/SetAclVolumeHandler.java | 100 - .../ozShell/volume/UpdateVolumeHandler.java | 76 - .../web/ozShell/volume/VolumeCommands.java | 71 - .../web/ozShell/volume/package-info.java | 23 - .../apache/hadoop/ozone/web/package-info.java | 24 - .../resources/webapps/ozoneManager/index.html | 70 - .../resources/webapps/ozoneManager/main.css | 23 - .../resources/webapps/ozoneManager/main.html | 18 - .../webapps/ozoneManager/om-metrics.html | 44 - .../webapps/ozoneManager/ozoneManager.js | 112 - .../om/ScmBlockLocationTestingClient.java | 195 - .../ozone/om/TestBucketManagerImpl.java | 344 - .../hadoop/ozone/om/TestChunkStreams.java | 166 - .../ozone/om/TestKeyDeletingService.java | 213 - .../hadoop/ozone/om/TestKeyManagerUnit.java | 180 - .../ozone/om/TestOmMetadataManager.java | 417 - .../ozone/om/TestOzoneManagerHttpServer.java | 141 - .../ozone/om/TestOzoneManagerStarter.java | 154 - .../hadoop/ozone/om/TestS3BucketManager.java | 115 - .../apache/hadoop/ozone/om/package-info.java | 21 - ...eManagerDoubleBufferWithDummyResponse.java | 171 - 
...zoneManagerDoubleBufferWithOMResponse.java | 496 - .../om/ratis/TestOzoneManagerRatisServer.java | 227 - .../TestOMClientRequestWithUserInfo.java | 119 - .../ozone/om/request/TestOMRequestUtils.java | 579 - .../om/request/bucket/TestBucketRequest.java | 83 - .../bucket/TestOMBucketCreateRequest.java | 205 - .../bucket/TestOMBucketDeleteRequest.java | 114 - .../TestOMBucketSetPropertyRequest.java | 127 - .../ozone/om/request/bucket/package-info.java | 23 - .../file/TestOMDirectoryCreateRequest.java | 337 - .../request/file/TestOMFileCreateRequest.java | 374 - .../ozone/om/request/file/package-info.java | 23 - .../key/TestOMAllocateBlockRequest.java | 245 - .../request/key/TestOMKeyCommitRequest.java | 300 - .../request/key/TestOMKeyCreateRequest.java | 329 - .../request/key/TestOMKeyDeleteRequest.java | 166 - .../key/TestOMKeyPurgeRequestAndResponse.java | 147 - .../request/key/TestOMKeyRenameRequest.java | 230 - .../om/request/key/TestOMKeyRequest.java | 158 - .../ozone/om/request/key/package-info.java | 23 - .../hadoop/ozone/om/request/package-info.java | 22 - .../s3/bucket/TestS3BucketCreateRequest.java | 202 - .../s3/bucket/TestS3BucketDeleteRequest.java | 124 - .../s3/bucket/TestS3BucketRequest.java | 82 - .../om/request/s3/bucket/package-info.java | 23 - .../TestS3InitiateMultipartUploadRequest.java | 153 - .../s3/multipart/TestS3MultipartRequest.java | 208 - .../TestS3MultipartUploadAbortRequest.java | 180 - ...estS3MultipartUploadCommitPartRequest.java | 209 - .../TestS3MultipartUploadCompleteRequest.java | 195 - .../om/request/s3/multipart/package-info.java | 24 - .../volume/TestOMVolumeCreateRequest.java | 258 - .../volume/TestOMVolumeDeleteRequest.java | 168 - .../request/volume/TestOMVolumeRequest.java | 80 - .../volume/TestOMVolumeSetOwnerRequest.java | 161 - .../volume/TestOMVolumeSetQuotaRequest.java | 154 - .../volume/acl/TestOMVolumeAddAclRequest.java | 122 - .../acl/TestOMVolumeRemoveAclRequest.java | 133 - .../volume/acl/TestOMVolumeSetAclRequest.java | 135 - .../om/request/volume/acl/package-info.java | 21 - .../ozone/om/request/volume/package-info.java | 21 - .../om/response/TestOMResponseUtils.java | 84 - .../bucket/TestOMBucketCreateResponse.java | 96 - .../bucket/TestOMBucketDeleteResponse.java | 97 - .../TestOMBucketSetPropertyResponse.java | 95 - .../om/response/bucket/package-info.java | 23 - .../file/TestOMDirectoryCreateResponse.java | 123 - .../ozone/om/response/file/package-info.java | 23 - .../key/TestOMAllocateBlockResponse.java | 93 - .../response/key/TestOMKeyCommitResponse.java | 108 - .../response/key/TestOMKeyCreateResponse.java | 92 - .../response/key/TestOMKeyDeleteResponse.java | 165 - .../response/key/TestOMKeyRenameResponse.java | 148 - .../om/response/key/TestOMKeyResponse.java | 75 - .../ozone/om/response/key/package-info.java | 23 - .../ozone/om/response/package-info.java | 22 - .../s3/bucket/TestS3BucketCreateResponse.java | 91 - .../s3/bucket/TestS3BucketDeleteResponse.java | 91 - .../om/response/s3/bucket/package-info.java | 23 - ...TestS3InitiateMultipartUploadResponse.java | 63 - .../s3/multipart/TestS3MultipartResponse.java | 142 - .../TestS3MultipartUploadAbortResponse.java | 148 - .../response/s3/multipart/package-info.java | 24 - .../volume/TestOMVolumeCreateResponse.java | 128 - .../volume/TestOMVolumeDeleteResponse.java | 133 - .../volume/TestOMVolumeSetOwnerResponse.java | 156 - .../volume/TestOMVolumeSetQuotaResponse.java | 124 - .../om/response/volume/package-info.java | 21 - .../TestOzoneBlockTokenSecretManager.java | 186 - 
...TestOzoneDelegationTokenSecretManager.java | 403 - .../security/TestOzoneManagerBlockToken.java | 251 - .../security/TestOzoneTokenIdentifier.java | 305 - .../hadoop/ozone/security/package-info.java | 21 - .../ozone/web/ozShell/TestObjectPrinter.java | 50 - .../ozone/web/ozShell/TestOzoneAddress.java | 98 - .../ozone/web/ozShell/package-info.java | 21 - .../org.mockito.plugins.MockMaker | 16 - hadoop-ozone/ozonefs-lib-current/pom.xml | 214 - .../services/org.apache.hadoop.fs.FileSystem | 16 - hadoop-ozone/ozonefs-lib-legacy/pom.xml | 139 - .../services/org.apache.hadoop.fs.FileSystem | 16 - .../src/main/resources/ozonefs.txt | 21 - hadoop-ozone/ozonefs/pom.xml | 219 - .../apache/hadoop/fs/ozone/BasicKeyInfo.java | 53 - .../org/apache/hadoop/fs/ozone/BasicOzFs.java | 45 - .../fs/ozone/BasicOzoneClientAdapterImpl.java | 446 - .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 787 - .../org/apache/hadoop/fs/ozone/Constants.java | 42 - .../hadoop/fs/ozone/FileStatusAdapter.java | 108 - .../hadoop/fs/ozone/FilteredClassLoader.java | 95 - .../apache/hadoop/fs/ozone/O3fsDtFetcher.java | 84 - .../java/org/apache/hadoop/fs/ozone/OzFs.java | 44 - .../hadoop/fs/ozone/OzoneClientAdapter.java | 71 - .../fs/ozone/OzoneClientAdapterFactory.java | 170 - .../fs/ozone/OzoneClientAdapterImpl.java | 60 - .../hadoop/fs/ozone/OzoneFSInputStream.java | 79 - .../hadoop/fs/ozone/OzoneFSOutputStream.java | 58 - .../fs/ozone/OzoneFSStorageStatistics.java | 126 - .../hadoop/fs/ozone/OzoneFileSystem.java | 106 - .../apache/hadoop/fs/ozone/OzoneFsShell.java | 90 - .../org/apache/hadoop/fs/ozone/Statistic.java | 119 - .../apache/hadoop/fs/ozone/package-info.java | 30 - ...org.apache.hadoop.security.token.DtFetcher | 19 - ...ache.hadoop.security.token.TokenIdentifier | 17 - ....apache.hadoop.security.token.TokenRenewer | 19 - .../fs/ozone/TestFilteredClassLoader.java | 63 - .../fs/ozone/TestOzoneFSInputStream.java | 137 - .../fs/ozone/TestOzoneFileInterfaces.java | 343 - .../hadoop/fs/ozone/TestOzoneFileSystem.java | 305 - .../ozone/TestOzoneFileSystemWithMocks.java | 148 - .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 348 - .../hadoop/fs/ozone/TestOzoneFsRenameDir.java | 102 - .../contract/ITestOzoneContractCreate.java | 48 - .../contract/ITestOzoneContractDelete.java | 48 - .../contract/ITestOzoneContractDistCp.java | 50 - .../ITestOzoneContractGetFileStatus.java | 65 - .../contract/ITestOzoneContractMkdir.java | 48 - .../contract/ITestOzoneContractOpen.java | 47 - .../contract/ITestOzoneContractRename.java | 49 - .../contract/ITestOzoneContractRootDir.java | 51 - .../contract/ITestOzoneContractSeek.java | 47 - .../fs/ozone/contract/OzoneContract.java | 99 - .../apache/hadoop/fs/ozone/package-info.java | 22 - .../services/org.apache.hadoop.fs.FileSystem | 16 - .../src/test/resources/contract/ozone.xml | 113 - .../src/test/resources/log4j.properties | 27 - hadoop-ozone/pom.xml | 414 - hadoop-ozone/recon-codegen/pom.xml | 70 - .../recon/codegen/JooqCodeGenerator.java | 170 - .../codegen/ReconSchemaGenerationModule.java | 42 - .../recon/codegen/TableNamingStrategy.java | 48 - .../ozone/recon/codegen/package-info.java | 22 - .../schema/ReconInternalSchemaDefinition.java | 65 - .../recon/schema/ReconSchemaDefinition.java | 34 - .../recon/schema/StatsSchemaDefinition.java | 61 - .../schema/UtilizationSchemaDefinition.java | 80 - .../ozone/recon/schema/package-info.java | 22 - .../recon/dev-support/findbugsExcludeFile.xml | 28 - hadoop-ozone/recon/pom.xml | 311 - .../ozone/recon/ConfigurationProvider.java | 43 - 
.../hadoop/ozone/recon/ReconConstants.java | 51 - .../ozone/recon/ReconControllerModule.java | 185 - .../ReconGuiceServletContextListener.java | 40 - .../hadoop/ozone/recon/ReconHttpServer.java | 88 - .../ozone/recon/ReconRestServletModule.java | 134 - .../hadoop/ozone/recon/ReconServer.java | 117 - .../ozone/recon/ReconServerConfigKeys.java | 124 - .../ozone/recon/ReconTaskBindingModule.java | 40 - .../apache/hadoop/ozone/recon/ReconUtils.java | 274 - .../ozone/recon/api/ContainerKeyService.java | 210 - .../ozone/recon/api/UtilizationService.java | 67 - .../hadoop/ozone/recon/api/package-info.java | 23 - .../recon/api/types/ContainerKeyPrefix.java | 89 - .../recon/api/types/ContainerMetadata.java | 56 - .../recon/api/types/ContainersResponse.java | 94 - .../ozone/recon/api/types/IsoDateAdapter.java | 48 - .../ozone/recon/api/types/KeyMetadata.java | 147 - .../ozone/recon/api/types/KeysResponse.java | 93 - .../ozone/recon/api/types/package-info.java | 22 - .../hadoop/ozone/recon/package-info.java | 22 - .../persistence/DataSourceConfiguration.java | 86 - .../DefaultDataSourceProvider.java | 74 - .../persistence/JooqPersistenceModule.java | 110 - .../TransactionalMethodInterceptor.java | 76 - .../ozone/recon/persistence/package-info.java | 22 - .../recovery/ReconOMMetadataManager.java | 38 - .../recovery/ReconOmMetadataManagerImpl.java | 111 - .../ozone/recon/recovery/package-info.java | 22 - .../recon/spi/ContainerDBServiceProvider.java | 159 - .../spi/HddsDatanodeServiceProvider.java | 25 - .../spi/OzoneManagerServiceProvider.java | 43 - .../spi/StorageContainerServiceProvider.java | 25 - .../impl/ContainerDBServiceProviderImpl.java | 402 - .../spi/impl/ContainerKeyPrefixCodec.java | 87 - .../impl/OzoneManagerServiceProviderImpl.java | 362 - .../spi/impl/ReconContainerDBProvider.java | 109 - .../ozone/recon/spi/impl/package-info.java | 22 - .../hadoop/ozone/recon/spi/package-info.java | 24 - .../recon/tasks/ContainerKeyMapperTask.java | 235 - .../ozone/recon/tasks/FileSizeCountTask.java | 251 - .../ozone/recon/tasks/OMDBUpdateEvent.java | 125 - .../ozone/recon/tasks/OMDBUpdatesHandler.java | 225 - .../ozone/recon/tasks/OMUpdateEventBatch.java | 77 - .../ozone/recon/tasks/ReconDBUpdateTask.java | 58 - .../recon/tasks/ReconTaskController.java | 69 - .../recon/tasks/ReconTaskControllerImpl.java | 245 - .../ozone/recon/tasks/package-info.java | 22 - .../resources/webapps/recon/WEB-INF/web.xml | 28 - .../webapps/recon/ozone-recon-web/.gitignore | 23 - .../webapps/recon/ozone-recon-web/LICENSE | 17279 ---------------- .../webapps/recon/ozone-recon-web/NOTICE | 5 - .../webapps/recon/ozone-recon-web/README.md | 45 - .../recon/ozone-recon-web/config-overrides.js | 33 - .../recon/ozone-recon-web/package.json | 47 - .../recon/ozone-recon-web/public/favicon.ico | Bin 17470 -> 0 bytes .../recon/ozone-recon-web/public/index.html | 56 - .../ozone-recon-web/public/manifest.json | 15 - .../recon/ozone-recon-web/src/App.less | 49 - .../recon/ozone-recon-web/src/App.test.tsx | 27 - .../webapps/recon/ozone-recon-web/src/App.tsx | 88 - .../components/Breadcrumbs/Breadcrumbs.tsx | 58 - .../src/components/NavBar/NavBar.less | 41 - .../src/components/NavBar/NavBar.tsx | 67 - .../src/constants/breadcrumbs.constants.tsx | 26 - .../recon/ozone-recon-web/src/index.less | 32 - .../recon/ozone-recon-web/src/index.tsx | 30 - .../recon/ozone-recon-web/src/logo.png | Bin 22480 -> 0 bytes .../src/makeRouteWithSubRoutes.tsx | 32 - .../ozone-recon-web/src/react-app-env.d.ts | 18 - .../recon/ozone-recon-web/src/routes.tsx | 
37 - .../ozone-recon-web/src/routes.types.tsx | 23 - .../ozone-recon-web/src/serviceWorker.ts | 161 - .../ContainerBrowser/ContainerBrowser.tsx | 33 - .../src/views/Dashboard/Dashboard.tsx | 32 - .../src/views/NotFound/NotFound.tsx | 29 - .../recon/ozone-recon-web/tsconfig.json | 27 - .../webapps/recon/ozone-recon-web/yarn.lock | 11114 ---------- .../recon/AbstractOMMetadataManagerTest.java | 232 - .../recon/GuiceInjectorUtilsForTestsImpl.java | 28 - .../hadoop/ozone/recon/TestReconCodecs.java | 58 - .../hadoop/ozone/recon/TestReconUtils.java | 207 - .../recon/api/TestContainerKeyService.java | 373 - .../recon/api/TestUtilizationService.java | 77 - .../hadoop/ozone/recon/api/package-info.java | 21 - .../hadoop/ozone/recon/package-info.java | 21 - .../persistence/AbstractSqlDatabaseTest.java | 161 - .../TestReconInternalSchemaDefinition.java | 143 - .../TestStatsSchemaDefinition.java | 147 - .../TestUtilizationSchemaDefinition.java | 234 - .../ozone/recon/persistence/package-info.java | 22 - .../TestReconOmMetadataManagerImpl.java | 187 - .../ozone/recon/recovery/package-info.java | 21 - .../TestContainerDBServiceProviderImpl.java | 405 - .../TestOzoneManagerServiceProviderImpl.java | 338 - .../impl/TestReconContainerDBProvider.java | 74 - .../ozone/recon/spi/impl/package-info.java | 21 - .../ozone/recon/tasks/DummyReconDBTask.java | 83 - .../tasks/TestContainerKeyMapperTask.java | 314 - .../recon/tasks/TestFileSizeCountTask.java | 130 - .../recon/tasks/TestOMDBUpdatesHandler.java | 207 - .../tasks/TestReconTaskControllerImpl.java | 191 - .../ozone/recon/tasks/package-info.java | 22 - .../types/GuiceInjectorUtilsForTests.java | 117 - .../org.mockito.plugins.MockMaker | 16 - hadoop-ozone/s3gateway/pom.xml | 256 - .../apache/hadoop/ozone/s3/AWSAuthParser.java | 78 - .../hadoop/ozone/s3/AWSV4AuthParser.java | 304 - .../CommonHeadersContainerResponseFilter.java | 48 - .../org/apache/hadoop/ozone/s3/Gateway.java | 66 - .../hadoop/ozone/s3/GatewayApplication.java | 29 - .../hadoop/ozone/s3/HeaderPreprocessor.java | 76 - .../hadoop/ozone/s3/OzoneClientProducer.java | 130 - .../ozone/s3/OzoneConfigurationHolder.java | 43 - .../hadoop/ozone/s3/OzoneServiceProvider.java | 94 - .../hadoop/ozone/s3/RequestIdentifier.java | 48 - .../hadoop/ozone/s3/S3GatewayConfigKeys.java | 55 - .../hadoop/ozone/s3/S3GatewayHttpServer.java | 90 - .../ozone/s3/SignedChunksInputStream.java | 139 - .../ozone/s3/VirtualHostStyleFilter.java | 169 - .../ozone/s3/commontypes/BucketMetadata.java | 53 - .../ozone/s3/commontypes/CommonPrefix.java | 47 - .../ozone/s3/commontypes/IsoDateAdapter.java | 47 - .../ozone/s3/commontypes/KeyMetadata.java | 87 - .../ozone/s3/commontypes/package-info.java | 29 - .../ozone/s3/endpoint/BucketEndpoint.java | 347 - .../CompleteMultipartUploadRequest.java | 77 - .../CompleteMultipartUploadResponse.java | 78 - .../ozone/s3/endpoint/CopyObjectResponse.java | 63 - .../ozone/s3/endpoint/CopyPartResult.java | 69 - .../ozone/s3/endpoint/EndpointBase.java | 216 - .../ozone/s3/endpoint/ListBucketResponse.java | 55 - .../endpoint/ListMultipartUploadsResult.java | 268 - .../ozone/s3/endpoint/ListObjectResponse.java | 180 - .../ozone/s3/endpoint/ListPartsResponse.java | 196 - .../ozone/s3/endpoint/MultiDeleteRequest.java | 96 - .../MultiDeleteRequestUnmarshaller.java | 84 - .../s3/endpoint/MultiDeleteResponse.java | 154 - .../MultipartUploadInitiateResponse.java | 69 - .../ozone/s3/endpoint/ObjectEndpoint.java | 766 - .../PlainTextMultipartUploadReader.java | 66 - .../ozone/s3/endpoint/RootEndpoint.java | 
84 - .../ozone/s3/endpoint/XmlNamespaceFilter.java | 54 - .../ozone/s3/endpoint/package-info.java | 30 - .../ozone/s3/exception/OS3Exception.java | 161 - .../s3/exception/OS3ExceptionMapper.java | 52 - .../ozone/s3/exception/S3ErrorTable.java | 107 - .../ozone/s3/exception/package-info.java | 21 - .../s3/header/AuthenticationHeaderParser.java | 65 - .../s3/header/AuthorizationHeaderV2.java | 97 - .../s3/header/AuthorizationHeaderV4.java | 253 - .../hadoop/ozone/s3/header/Credential.java | 110 - .../hadoop/ozone/s3/header/package-info.java | 22 - .../ozone/s3/io/S3WrapperInputStream.java | 79 - .../hadoop/ozone/s3/io/package-info.java | 23 - .../apache/hadoop/ozone/s3/package-info.java | 22 - .../hadoop/ozone/s3/util/ContinueToken.java | 173 - .../hadoop/ozone/s3/util/OzoneS3Util.java | 80 - .../hadoop/ozone/s3/util/RFC1123Util.java | 98 - .../hadoop/ozone/s3/util/RangeHeader.java | 89 - .../ozone/s3/util/RangeHeaderParserUtil.java | 95 - .../apache/hadoop/ozone/s3/util/S3Consts.java | 55 - .../hadoop/ozone/s3/util/S3StorageType.java | 64 - .../hadoop/ozone/s3/util/package-info.java | 22 - .../src/main/resources/META-INF/beans.xml | 20 - .../s3gateway/src/main/resources/browser.html | 617 - .../webapps/s3gateway/WEB-INF/beans.xml | 20 - .../webapps/s3gateway/WEB-INF/web.xml | 36 - .../resources/webapps/static/images/ozone.ico | Bin 1150 -> 0 bytes .../main/resources/webapps/static/index.html | 83 - .../src/main/resources/webapps/static/s3g.js | 23 - .../hadoop/ozone/client/ObjectStoreStub.java | 244 - .../hadoop/ozone/client/OzoneBucketStub.java | 317 - .../hadoop/ozone/client/OzoneClientStub.java | 37 - .../ozone/client/OzoneOutputStreamStub.java | 73 - .../hadoop/ozone/client/OzoneVolumeStub.java | 109 - .../hadoop/ozone/client/package-info.java | 21 - .../ozone/s3/TestOzoneClientProducer.java | 145 - .../ozone/s3/TestSignedChunksInputStream.java | 114 - .../ozone/s3/TestVirtualHostStyleFilter.java | 220 - .../s3/endpoint/TestAbortMultipartUpload.java | 83 - .../ozone/s3/endpoint/TestBucketDelete.java | 100 - .../ozone/s3/endpoint/TestBucketGet.java | 380 - .../ozone/s3/endpoint/TestBucketHead.java | 71 - .../ozone/s3/endpoint/TestBucketResponse.java | 38 - .../endpoint/TestInitiateMultipartUpload.java | 79 - .../ozone/s3/endpoint/TestListParts.java | 129 - .../TestMultiDeleteRequestUnmarshaller.java | 78 - .../endpoint/TestMultipartUploadComplete.java | 222 - .../endpoint/TestMultipartUploadWithCopy.java | 233 - .../ozone/s3/endpoint/TestObjectDelete.java | 60 - .../ozone/s3/endpoint/TestObjectEndpoint.java | 53 - .../ozone/s3/endpoint/TestObjectGet.java | 90 - .../ozone/s3/endpoint/TestObjectHead.java | 104 - .../s3/endpoint/TestObjectMultiDelete.java | 116 - .../ozone/s3/endpoint/TestObjectPut.java | 270 - .../ozone/s3/endpoint/TestPartUpload.java | 126 - .../ozone/s3/endpoint/TestRootList.java | 75 - .../ozone/s3/endpoint/package-info.java | 21 - .../ozone/s3/exception/TestOS3Exception.java | 50 - .../ozone/s3/exception/package-info.java | 22 - .../s3/header/TestAuthorizationHeaderV2.java | 88 - .../s3/header/TestAuthorizationHeaderV4.java | 354 - .../apache/hadoop/ozone/s3/package-info.java | 21 - .../ozone/s3/util/TestContinueToken.java | 50 - .../hadoop/ozone/s3/util/TestOzoneS3Util.java | 130 - .../hadoop/ozone/s3/util/TestRFC1123Util.java | 44 - .../s3/util/TestRangeHeaderParserUtil.java | 93 - .../src/test/resources/log4j.properties | 21 - .../tools/dev-support/findbugsExcludeFile.xml | 19 - hadoop-ozone/tools/pom.xml | 146 - .../ozone/audit/parser/AuditParser.java | 55 - 
.../audit/parser/common/DatabaseHelper.java | 245 - .../audit/parser/common/ParserConsts.java | 35 - .../audit/parser/common/package-info.java | 20 - .../parser/handler/LoadCommandHandler.java | 52 - .../parser/handler/QueryCommandHandler.java | 57 - .../handler/TemplateCommandHandler.java | 61 - .../audit/parser/handler/package-info.java | 20 - .../ozone/audit/parser/model/AuditEntry.java | 188 - .../audit/parser/model/package-info.java | 20 - .../ozone/audit/parser/package-info.java | 20 - .../ozone/freon/BaseFreonGenerator.java | 334 - .../hadoop/ozone/freon/ContentGenerator.java | 62 - .../org/apache/hadoop/ozone/freon/Freon.java | 93 - .../hadoop/ozone/freon/FreonHttpServer.java | 74 - .../hadoop/ozone/freon/HadoopFsGenerator.java | 99 - .../hadoop/ozone/freon/HadoopFsValidator.java | 100 - .../hadoop/ozone/freon/OmBucketGenerator.java | 85 - .../hadoop/ozone/freon/OmKeyGenerator.java | 100 - .../ozone/freon/OzoneClientKeyGenerator.java | 114 - .../ozone/freon/OzoneClientKeyValidator.java | 99 - .../apache/hadoop/ozone/freon/PathSchema.java | 38 - .../hadoop/ozone/freon/ProgressBar.java | 147 - .../ozone/freon/RandomKeyGenerator.java | 1112 - .../hadoop/ozone/freon/S3KeyGenerator.java | 109 - .../hadoop/ozone/freon/SameKeyReader.java | 104 - .../hadoop/ozone/freon/package-info.java | 22 - .../hadoop/ozone/fsck/BlockIdDetails.java | 83 - .../hadoop/ozone/fsck/ContainerMapper.java | 134 - .../hadoop/ozone/fsck/package-info.java | 44 - .../GenerateOzoneRequiredConfigurations.java | 174 - .../hadoop/ozone/genconf/package-info.java | 24 - .../genesis/BenchMarkContainerStateMap.java | 200 - .../genesis/BenchMarkDatanodeDispatcher.java | 331 - .../genesis/BenchMarkMetadataStoreReads.java | 70 - .../genesis/BenchMarkMetadataStoreWrites.java | 62 - .../ozone/genesis/BenchMarkOMClient.java | 153 - .../genesis/BenchMarkOMKeyAllocation.java | 135 - .../ozone/genesis/BenchMarkOzoneManager.java | 185 - .../ozone/genesis/BenchMarkRocksDbStore.java | 119 - .../hadoop/ozone/genesis/BenchMarkSCM.java | 120 - .../apache/hadoop/ozone/genesis/Genesis.java | 94 - .../ozone/genesis/GenesisMemoryProfiler.java | 61 - .../hadoop/ozone/genesis/GenesisUtil.java | 196 - .../hadoop/ozone/genesis/package-info.java | 25 - .../apache/hadoop/ozone/scm/cli/SQLCLI.java | 565 - .../hadoop/ozone/scm/cli/package-info.java | 22 - .../services/org.apache.hadoop.fs.FileSystem | 16 - .../src/main/resources/commands.properties | 22 - .../src/main/resources/webapps/freon/.gitkeep | 17 - .../ozone/audit/parser/TestAuditParser.java | 192 - .../ozone/audit/parser/package-info.java | 21 - .../hadoop/ozone/freon/TestDataValidate.java | 115 - .../TestDataValidateWithDummyContainers.java | 74 - ...estDataValidateWithSafeByteOperations.java | 52 - ...tDataValidateWithUnsafeByteOperations.java | 52 - .../TestFreonWithDatanodeFastRestart.java | 126 - .../freon/TestFreonWithDatanodeRestart.java | 108 - .../freon/TestFreonWithPipelineDestroy.java | 107 - .../hadoop/ozone/freon/TestProgressBar.java | 73 - .../ozone/freon/TestRandomKeyGenerator.java | 164 - .../hadoop/ozone/freon/package-info.java | 22 - .../ozone/fsck/TestContainerMapper.java | 117 - .../hadoop/ozone/fsck/package-info.java | 44 - ...stGenerateOzoneRequiredConfigurations.java | 248 - .../hadoop/ozone/genconf/package-info.java | 22 - .../apache/hadoop/ozone/om/TestOmSQLCli.java | 248 - .../apache/hadoop/ozone/om/package-info.java | 22 - .../apache/hadoop/ozone/scm/package-info.java | 22 - .../apache/hadoop/test/OzoneTestDriver.java | 59 - 
.../src/test/resources/commands.properties | 22 - .../tools/src/test/resources/testaudit.log | 15 - hadoop-ozone/upgrade/pom.xml | 53 - .../apache/hadoop/ozone/upgrade/Balance.java | 38 - .../apache/hadoop/ozone/upgrade/Execute.java | 37 - .../hadoop/ozone/upgrade/InPlaceUpgrade.java | 45 - .../org/apache/hadoop/ozone/upgrade/Plan.java | 38 - .../hadoop/ozone/upgrade/package-info.java | 23 - pom.ozone.xml | 2032 -- pom.xml | 12 - 2334 files changed, 349955 deletions(-) delete mode 100644 hadoop-hdds/client/pom.xml delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java delete mode 100644 hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java delete mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java delete mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java delete mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java delete mode 100644 hadoop-hdds/common/dev-support/findbugsExcludeFile.xml delete mode 100644 hadoop-hdds/common/pom.xml delete mode 100644 hadoop-hdds/common/src/main/bin/hadoop-config.cmd delete mode 100755 hadoop-hdds/common/src/main/bin/hadoop-config.sh delete mode 100755 hadoop-hdds/common/src/main/bin/hadoop-daemons.sh delete mode 100755 hadoop-hdds/common/src/main/bin/hadoop-functions.sh delete mode 100755 hadoop-hdds/common/src/main/bin/workers.sh delete mode 100644 hadoop-hdds/common/src/main/conf/core-site.xml delete mode 100644 hadoop-hdds/common/src/main/conf/hadoop-env.cmd delete mode 100644 hadoop-hdds/common/src/main/conf/hadoop-env.sh delete mode 100644 hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties delete mode 100644 hadoop-hdds/common/src/main/conf/hadoop-policy.xml delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java 
delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java delete mode 100644 
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java 
delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto delete mode 100644 hadoop-hdds/common/src/main/proto/FSProtos.proto delete mode 100644 hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto delete mode 100644 hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto delete mode 100644 hadoop-hdds/common/src/main/proto/Security.proto delete mode 100644 hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto delete mode 100644 hadoop-hdds/common/src/main/proto/hdds.proto delete mode 100644 hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor delete mode 100644 hadoop-hdds/common/src/main/resources/hdds-version-info.properties delete mode 100644 hadoop-hdds/common/src/main/resources/network-topology-default.xml delete mode 100644 hadoop-hdds/common/src/main/resources/network-topology-default.yaml delete mode 100644 hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml delete mode 100644 hadoop-hdds/common/src/main/resources/ozone-default.xml delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java delete mode 100644 
hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java delete mode 100644 
hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java delete mode 100644 hadoop-hdds/common/src/test/resources/log4j2.properties delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml delete mode 100644 hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml delete mode 100644 hadoop-hdds/common/src/test/resources/test.db.ini delete mode 100644 hadoop-hdds/config/pom.xml delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java delete mode 100644 
hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java delete mode 100644 hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java delete mode 100644 hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java delete mode 100644 hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java delete mode 100644 hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java delete mode 100644 hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor delete mode 100644 hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml delete mode 100644 hadoop-hdds/container-service/pom.xml delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java delete mode 100644 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java delete mode 100644 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java delete mode 100644 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java delete mode 100644 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java delete mode 100644 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
delete mode 100644 hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider
delete mode 100644 hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
delete mode 100644 hadoop-hdds/container-service/src/test/resources/additionalfields.container
delete mode 100644 hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container
delete mode 100644 hadoop-hdds/container-service/src/test/resources/incorrect.container
delete mode 100644 hadoop-hdds/container-service/src/test/resources/log4j.properties
delete mode 100644 hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl
delete mode 100644 hadoop-hdds/dev-support/checkstyle/checkstyle.xml
delete mode 100644 hadoop-hdds/dev-support/checkstyle/suppressions.xml
delete mode 100644 hadoop-hdds/docs/README.md
delete mode 100644 hadoop-hdds/docs/archetypes/default.md
delete mode 100644 hadoop-hdds/docs/config.yaml
delete mode 100644 hadoop-hdds/docs/content/_index.md
delete mode 100644 hadoop-hdds/docs/content/beyond/Containers.md
delete mode 100644 hadoop-hdds/docs/content/beyond/DockerCheatSheet.md
delete mode 100644 hadoop-hdds/docs/content/beyond/RunningWithHDFS.md
delete mode 100644 hadoop-hdds/docs/content/beyond/_index.md
delete mode 100644 hadoop-hdds/docs/content/concept/ContainerMetadata.png
delete mode 100644 hadoop-hdds/docs/content/concept/Datanodes.md
delete mode 100644 hadoop-hdds/docs/content/concept/FunctionalOzone.png
delete mode 100644 hadoop-hdds/docs/content/concept/Hdds.md
delete mode 100644 hadoop-hdds/docs/content/concept/Overview.md
delete mode 100644 hadoop-hdds/docs/content/concept/OzoneBlock.png
delete mode 100644 hadoop-hdds/docs/content/concept/OzoneManager.md
delete mode 100644 hadoop-hdds/docs/content/concept/_index.md
delete mode 100644 hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png
delete mode 100644 hadoop-hdds/docs/content/design/decommissioning.md
delete mode 100644 hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
delete mode 100644 hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
delete mode 100644 hadoop-hdds/docs/content/gdpr/_index.md
delete mode 100644 hadoop-hdds/docs/content/interface/JavaApi.md
delete mode 100644 hadoop-hdds/docs/content/interface/OzoneFS.md
delete mode 100644 hadoop-hdds/docs/content/interface/S3.md
delete mode 100644 hadoop-hdds/docs/content/interface/_index.md
delete mode 100644 hadoop-hdds/docs/content/recipe/Prometheus.md
delete mode 100644 hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md
delete mode 100644 hadoop-hdds/docs/content/recipe/_index.md
delete mode 100644 hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png
delete mode 100644 hadoop-hdds/docs/content/recipe/prometheus.png
delete mode 100644 hadoop-hdds/docs/content/security/SecuityWithRanger.md
delete mode 100644 hadoop-hdds/docs/content/security/SecureOzone.md
delete mode 100644 hadoop-hdds/docs/content/security/SecuringDatanodes.md
delete mode 100644 hadoop-hdds/docs/content/security/SecuringS3.md
delete mode 100644 hadoop-hdds/docs/content/security/SecuringTDE.md
delete mode 100644 hadoop-hdds/docs/content/security/SecurityAcls.md
delete mode 100644 hadoop-hdds/docs/content/security/_index.md
delete mode 100644 hadoop-hdds/docs/content/shell/BucketCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/Format.md
delete mode 100644 hadoop-hdds/docs/content/shell/KeyCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/VolumeCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/_index.md
delete mode 100644 hadoop-hdds/docs/content/start/FromSource.md
delete mode 100644 hadoop-hdds/docs/content/start/Kubernetes.md
delete mode 100644 hadoop-hdds/docs/content/start/Minikube.md
delete mode 100644 hadoop-hdds/docs/content/start/OnPrem.md
delete mode 100644 hadoop-hdds/docs/content/start/RunningViaDocker.md
delete mode 100644 hadoop-hdds/docs/content/start/StartFromDockerHub.md
delete mode 100644 hadoop-hdds/docs/content/start/_index.md
delete mode 100644 hadoop-hdds/docs/content/start/docker.png
delete mode 100644 hadoop-hdds/docs/content/start/hadoop.png
delete mode 100644 hadoop-hdds/docs/content/start/k8s.png
delete mode 100644 hadoop-hdds/docs/content/start/minikube.png
delete mode 100644 hadoop-hdds/docs/content/tools/AuditParser.md
delete mode 100644 hadoop-hdds/docs/content/tools/Genconf.md
delete mode 100644 hadoop-hdds/docs/content/tools/SCMCLI.md
delete mode 100644 hadoop-hdds/docs/content/tools/TestTools.md
delete mode 100644 hadoop-hdds/docs/content/tools/_index.md
delete mode 100755 hadoop-hdds/docs/dev-support/bin/generate-site.sh
delete mode 100644 hadoop-hdds/docs/pom.xml
delete mode 100644 hadoop-hdds/docs/static/NOTES.md
delete mode 100644 hadoop-hdds/docs/static/OzoneOverview.png
delete mode 100644 hadoop-hdds/docs/static/OzoneOverview.svg
delete mode 100644 hadoop-hdds/docs/static/SCMBlockDiagram.png
delete mode 100644 hadoop-hdds/docs/static/ozone-logo-small.png
delete mode 100644 hadoop-hdds/docs/static/ozone-usage.png
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/_default/section.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/_default/single.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/index.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/footer.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/header.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/navbar.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/partials/sidebar.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/buttonlink.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/card.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/jumbotron.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/requirements.html
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-theme.min.css.map
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap.min.css.map
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/css/ozonedoc.css
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.eot
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.ttf
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-halflings-regular.woff2
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
delete mode 100644 hadoop-hdds/docs/themes/ozonedoc/theme.toml
delete mode 100644 hadoop-hdds/framework/README.md
delete mode 100644 hadoop-hdds/framework/pom.xml
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusServlet.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java
delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-editable.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.min.css.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.eot
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.svg
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.ttf
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/fonts/glyphicons-halflings-regular.woff2
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap-editable.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/js/bootstrap.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/hadoop.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java
delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java
delete mode 100644 hadoop-hdds/framework/src/test/resources/ozone-site.xml
delete mode 100644 hadoop-hdds/pom.xml
delete mode 100644 hadoop-hdds/server-scm/pom.xml
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
delete mode 100644 hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
delete mode 100644 hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
delete mode 100644 hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
delete mode 100644 hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping
delete mode 100644 hadoop-hdds/server-scm/src/test/resources/rack-mapping
delete mode 100644 hadoop-hdds/tools/pom.xml
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java
delete mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java
delete mode 100644 hadoop-ozone/.gitignore
delete mode 100644 hadoop-ozone/Jenkinsfile
delete mode 100644 hadoop-ozone/client/pom.xml
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
delete mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java
delete mode 100644 hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
delete mode 100644 hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java
delete mode 100644 hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
delete mode 100644 hadoop-ozone/common/dev-support/findbugsExcludeFile.xml
delete mode 100644 hadoop-ozone/common/pom.xml
delete mode 100755 hadoop-ozone/common/src/main/bin/ozone
delete mode 100755 hadoop-ozone/common/src/main/bin/ozone-config.sh
delete mode 100755 hadoop-ozone/common/src/main/bin/start-ozone.sh
delete mode 100755 hadoop-ozone/common/src/main/bin/stop-ozone.sh
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java
delete mode 100644 hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
delete mode 100644 hadoop-ozone/common/src/main/resources/ozone-version-info.properties
delete mode 100644 hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java
delete mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java
delete mode 100644 hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml
delete mode 100644 hadoop-ozone/csi/pom.xml
delete mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java
delete mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
delete mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java
delete mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java
delete mode 100644 hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java
delete mode 100644 hadoop-ozone/csi/src/main/proto/csi.proto
delete mode 100644 hadoop-ozone/datanode/pom.xml
delete mode 100755 hadoop-ozone/dev-support/checks/README.md
delete mode 100755 hadoop-ozone/dev-support/checks/_mvn_unit_report.sh
delete mode 100755 hadoop-ozone/dev-support/checks/acceptance.sh
delete mode 100755 hadoop-ozone/dev-support/checks/author.sh
delete mode 100755 hadoop-ozone/dev-support/checks/blockade.sh
delete mode 100755 hadoop-ozone/dev-support/checks/build.sh
delete mode 100755 hadoop-ozone/dev-support/checks/checkstyle.sh
delete mode 100755 hadoop-ozone/dev-support/checks/findbugs.sh
delete mode 100755 hadoop-ozone/dev-support/checks/integration.sh
delete mode 100755 hadoop-ozone/dev-support/checks/isolation.sh
delete mode 100755 hadoop-ozone/dev-support/checks/rat.sh
delete mode 100755 hadoop-ozone/dev-support/checks/shellcheck.sh
delete mode 100755 hadoop-ozone/dev-support/checks/unit.sh
delete mode 100644 hadoop-ozone/dev-support/docker/Dockerfile
delete mode 100755 hadoop-ozone/dev-support/intellij/install-runconfigs.sh
delete mode 100644 hadoop-ozone/dev-support/intellij/log4j.properties
delete mode 100644 hadoop-ozone/dev-support/intellij/ozone-site.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml
delete mode 100644 hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml
delete mode 100644 hadoop-ozone/dist/README.md
delete mode 100755 hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
delete mode 100755 hadoop-ozone/dist/dev-support/bin/dist-tar-stitching
delete mode 100644 hadoop-ozone/dist/pom.xml
delete mode 100644 hadoop-ozone/dist/src/main/assemblies/ozone-src.xml
delete mode 100644 hadoop-ozone/dist/src/main/compose/README.md
delete mode 100644 hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini
delete mode 100644 hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json
delete mode 100644 hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json
delete mode 100755 hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml
delete mode 100755 hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml
delete mode 100644 hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml
delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env
delete mode 100644
hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/common-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-recon/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-topology/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone-topology/network-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozone/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozone/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneblockade/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneperf/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneperf/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg delete mode 100755 hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh delete mode 100644 
hadoop-ozone/dist/src/main/compose/ozones3/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozones3/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozones3/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5 delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5 delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf delete mode 100644 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/test-all.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/test-single.sh delete mode 100755 hadoop-ozone/dist/src/main/compose/testlib.sh delete mode 100644 hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties delete mode 100644 hadoop-ozone/dist/src/main/conf/log4j.properties delete mode 100644 
hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties delete mode 100644 hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties delete mode 100644 hadoop-ozone/dist/src/main/conf/ozone-site.xml delete mode 100644 hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties delete mode 100644 hadoop-ozone/dist/src/main/docker/Dockerfile delete mode 100755 hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh delete mode 100755 hadoop-ozone/dist/src/main/dockerbin/envtoconf.py delete mode 100755 hadoop-ozone/dist/src/main/dockerbin/transformation.py delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml delete mode 100644 
hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible delete mode 100644 
hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml 
delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml delete mode 100644 hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml delete mode 100644 hadoop-ozone/dist/src/main/license/bin/LICENSE.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/NOTICE.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt delete mode 100644 
hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/LICENSE.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/NOTICE.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt delete mode 100644 hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt delete mode 100644 hadoop-ozone/dist/src/main/ozone/README.txt delete mode 100644 hadoop-ozone/dist/src/main/smoketest/.env delete mode 100644 hadoop-ozone/dist/src/main/smoketest/README.md delete mode 100644 hadoop-ozone/dist/src/main/smoketest/__init__.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/basic/basic.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/commonlib.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/createmrenv.robot delete mode 100644 
hadoop-ozone/dist/src/main/smoketest/env-compose.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/kinit.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/mapreduce.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/README.md delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/webui.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot delete mode 100644 hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot delete mode 100755 hadoop-ozone/dist/src/main/smoketest/test.sh delete mode 100644 hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/pom.xml delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py delete mode 100644 
hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml delete mode 100644 hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config delete mode 100644 hadoop-ozone/fault-injection-test/pom.xml delete mode 100644 hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml delete mode 100644 hadoop-ozone/insight/pom.xml delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java delete mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java delete mode 100644 
hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java delete mode 100644 hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java delete mode 100644 hadoop-ozone/integration-test/pom.xml delete mode 100755 hadoop-ozone/integration-test/src/test/bin/start-chaos.sh delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java delete mode 100644 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java delete mode 
100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java delete mode 100644 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java delete mode 100644 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java delete mode 100644 hadoop-ozone/integration-test/src/test/resources/auditlog.properties delete mode 100644 hadoop-ozone/integration-test/src/test/resources/core-site.xml delete mode 100644 hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml delete mode 100644 hadoop-ozone/integration-test/src/test/resources/log4j.properties delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/ca.key delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/client.crt delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/client.csr delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/client.key delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/client.pem delete mode 100755 hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/server.crt delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/server.csr delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/server.key delete mode 100644 hadoop-ozone/integration-test/src/test/resources/ssl/server.pem delete mode 100644 hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep delete mode 100644 hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep delete mode 100644 hadoop-ozone/ozone-manager/pom.xml delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java 
delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java delete mode 100644 
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html delete mode 100644 
hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css delete mode 100644 hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html delete mode 100644 hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html delete mode 100644 hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java delete mode 100644 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java delete mode 100644 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java delete mode 100644 
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker delete mode 100644 hadoop-ozone/ozonefs-lib-current/pom.xml delete mode 100644 hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem delete mode 100644 hadoop-ozone/ozonefs-lib-legacy/pom.xml delete mode 100644 hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem delete mode 100644 hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt delete mode 100644 hadoop-ozone/ozonefs/pom.xml delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java delete mode 100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java delete mode 
100644 hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java delete mode 100644 hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher delete mode 100644 hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier delete mode 100644 hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java delete mode 100644 hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java delete mode 100644 hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem delete mode 100644 hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml delete mode 100644 hadoop-ozone/ozonefs/src/test/resources/log4j.properties delete mode 100644 hadoop-ozone/pom.xml delete mode 100644 hadoop-ozone/recon-codegen/pom.xml delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java delete mode 100644 
hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java delete mode 100644 hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java delete mode 100644 hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml delete mode 100644 hadoop-ozone/recon/pom.xml delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java delete mode 100644 
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java delete mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/index.html delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json delete mode 100644 
hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/makeRouteWithSubRoutes.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json delete mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java delete mode 100644 
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java delete mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java delete mode 100644 hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker delete mode 100644 hadoop-ozone/s3gateway/pom.xml delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java 
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java delete mode 100644 
hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/browser.html
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml
delete mode 100755 hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html
delete mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java
delete mode 100644 hadoop-ozone/s3gateway/src/test/resources/log4j.properties
delete mode 100644 hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml
delete mode 100644 hadoop-ozone/tools/pom.xml
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/ParserConsts.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/LoadCommandHandler.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/QueryCommandHandler.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/PathSchema.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ProgressBar.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/BlockIdDetails.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/ContainerMapper.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/fsck/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreReads.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkMetadataStoreWrites.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMClient.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOMKeyAllocation.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkRocksDbStore.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java
delete mode 100644 hadoop-ozone/tools/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
delete mode 100644 hadoop-ozone/tools/src/main/resources/commands.properties
delete mode 100644 hadoop-ozone/tools/src/main/resources/webapps/freon/.gitkeep
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithDummyContainers.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithSafeByteOperations.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidateWithUnsafeByteOperations.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestProgressBar.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestRandomKeyGenerator.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/TestContainerMapper.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/fsck/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/om/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/package-info.java
delete mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java
delete mode 100644 hadoop-ozone/tools/src/test/resources/commands.properties
delete mode 100644 hadoop-ozone/tools/src/test/resources/testaudit.log
delete mode 100644 hadoop-ozone/upgrade/pom.xml
delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Balance.java
delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Execute.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/InPlaceUpgrade.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/Plan.java delete mode 100644 hadoop-ozone/upgrade/src/main/java/org/apache/hadoop/ozone/upgrade/package-info.java delete mode 100644 pom.ozone.xml diff --git a/BUILDING.txt b/BUILDING.txt index d3c9a1a7f51..6d2cddf9250 100644 --- a/BUILDING.txt +++ b/BUILDING.txt @@ -104,8 +104,6 @@ Maven main modules: - hadoop-hdfs-project (Hadoop HDFS) - hadoop-yarn-project (Hadoop YARN) - hadoop-mapreduce-project (Hadoop MapReduce) - - hadoop-ozone (Hadoop Ozone) - - hadoop-hdds (Hadoop Distributed Data Store) - hadoop-tools (Hadoop tools like Streaming, Distcp, etc.) - hadoop-dist (Hadoop distribution assembler) - hadoop-client-modules (Hadoop client modules) diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml index b47b4bcc333..7da999c001a 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml @@ -56,8 +56,6 @@ **/build/** **/file:/** **/SecurityAuth.audit* - hadoop-ozone/** - hadoop-hdds/** hadoop-submarine/** diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml deleted file mode 100644 index 673af41aeef..00000000000 --- a/hadoop-hdds/client/pom.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - - hadoop-hdds-client - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Client Library - Apache Hadoop HDDS Client - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - - io.netty - netty-all - - - - diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java deleted file mode 100644 index 7a15808b2ea..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ClientCredentialInterceptor.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.ratis.thirdparty.io.grpc.CallOptions; -import org.apache.ratis.thirdparty.io.grpc.Channel; -import org.apache.ratis.thirdparty.io.grpc.ClientCall; -import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor; -import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; - -import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY; - -/** - * GRPC client interceptor for ozone block token. - */ -public class ClientCredentialInterceptor implements ClientInterceptor { - - private final String user; - private final String token; - - public ClientCredentialInterceptor(String user, String token) { - this.user = user; - this.token = token; - } - - @Override - public ClientCall interceptCall( - MethodDescriptor method, - CallOptions callOptions, - Channel next) { - - return new ForwardingClientCall.SimpleForwardingClientCall( - next.newCall(method, callOptions)) { - @Override - public void start(Listener responseListener, Metadata headers) { - if (token != null) { - headers.put(OBT_METADATA_KEY, token); - } - if (user != null) { - headers.put(USER_METADATA_KEY, user); - } - super.start(responseListener, headers); - } - }; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java deleted file mode 100644 index 04a8a1aaa1d..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ /dev/null @@ -1,466 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
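The ClientCredentialInterceptor above follows the standard gRPC client-interceptor pattern: wrap each outgoing call and add metadata headers (the Ozone block token and user name, via the imported OBT_METADATA_KEY and USER_METADATA_KEY constants) before the call starts. A minimal sketch of that pattern against the plain io.grpc API; the removed code uses the Ratis-shaded copies of these classes, and the header key below is a placeholder rather than the real Ozone constant.

import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.ForwardingClientCall.SimpleForwardingClientCall;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;

/** Minimal sketch: attaches a credential header to every outgoing gRPC call. */
public class TokenHeaderInterceptor implements ClientInterceptor {

  // Placeholder key name; the removed code uses constants from OzoneConsts instead.
  private static final Metadata.Key<String> TOKEN_KEY =
      Metadata.Key.of("example-token", Metadata.ASCII_STRING_MARSHALLER);

  private final String token;

  public TokenHeaderInterceptor(String token) {
    this.token = token;
  }

  @Override
  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(
      MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) {
    return new SimpleForwardingClientCall<ReqT, RespT>(next.newCall(method, callOptions)) {
      @Override
      public void start(Listener<RespT> responseListener, Metadata headers) {
        if (token != null) {
          // Attach the credential before the call is started.
          headers.put(TOKEN_KEY, token);
        }
        super.start(responseListener, headers);
      }
    };
  }
}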
- */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; -import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; -import org.apache.ratis.thirdparty.io.grpc.Status; -import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.security.cert.X509Certificate; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * A Client for the storageContainer protocol for read object data. - */ -public class XceiverClientGrpc extends XceiverClientSpi { - static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class); - private static final String COMPONENT = "dn"; - private final Pipeline pipeline; - private final Configuration config; - private Map asyncStubs; - private XceiverClientMetrics metrics; - private Map channels; - private final Semaphore semaphore; - private boolean closed = false; - private SecurityConfig secConfig; - private final boolean topologyAwareRead; - private X509Certificate caCert; - - /** - * Constructs a client that can communicate with the Container framework on - * data nodes. - * - * @param pipeline - Pipeline that defines the machines. - * @param config -- Ozone Config - * @param caCert - SCM ca certificate. 
- */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config, - X509Certificate caCert) { - super(); - Preconditions.checkNotNull(pipeline); - Preconditions.checkNotNull(config); - this.pipeline = pipeline; - this.config = config; - this.secConfig = new SecurityConfig(config); - this.semaphore = - new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config)); - this.metrics = XceiverClientManager.getXceiverClientMetrics(); - this.channels = new HashMap<>(); - this.asyncStubs = new HashMap<>(); - this.topologyAwareRead = config.getBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); - this.caCert = caCert; - } - - /** - * Constructs a client that can communicate with the Container framework on - * data nodes. - * - * @param pipeline - Pipeline that defines the machines. - * @param config -- Ozone Config - */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config) { - this(pipeline, config, null); - } - - /** - * To be used when grpc token is not enabled. - */ - @Override - public void connect() throws Exception { - // connect to the closest node, if closest node doesn't exist, delegate to - // first node, which is usually the leader in the pipeline. - DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() : - this.pipeline.getFirstNode(); - // just make a connection to the picked datanode at the beginning - connectToDatanode(dn, null); - } - - /** - * Passed encoded token to GRPC header when security is enabled. - */ - @Override - public void connect(String encodedToken) throws Exception { - // connect to the closest node, if closest node doesn't exist, delegate to - // first node, which is usually the leader in the pipeline. - DatanodeDetails dn = topologyAwareRead ? this.pipeline.getClosestNode() : - this.pipeline.getFirstNode(); - // just make a connection to the picked datanode at the beginning - connectToDatanode(dn, encodedToken); - } - - private void connectToDatanode(DatanodeDetails dn, String encodedToken) - throws IOException { - // read port from the data node, on failure use default configured - // port. - int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); - if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - } - - // Add credential context to the client call - String userName = UserGroupInformation.getCurrentUser().getShortUserName(); - if (LOG.isDebugEnabled()) { - LOG.debug("Nodes in pipeline : {}", pipeline.getNodes().toString()); - LOG.debug("Connecting to server : {}", dn.getIpAddress()); - } - NettyChannelBuilder channelBuilder = - NettyChannelBuilder.forAddress(dn.getIpAddress(), port).usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .intercept(new ClientCredentialInterceptor(userName, encodedToken), - new GrpcClientInterceptor()); - if (secConfig.isGrpcTlsEnabled()) { - SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); - if (caCert != null) { - sslContextBuilder.trustManager(caCert); - } - if (secConfig.useTestCert()) { - channelBuilder.overrideAuthority("localhost"); - } - channelBuilder.useTransportSecurity(). 
- sslContext(sslContextBuilder.build()); - } else { - channelBuilder.usePlaintext(); - } - ManagedChannel channel = channelBuilder.build(); - XceiverClientProtocolServiceStub asyncStub = - XceiverClientProtocolServiceGrpc.newStub(channel); - asyncStubs.put(dn.getUuid(), asyncStub); - channels.put(dn.getUuid(), channel); - } - - /** - * Returns if the xceiver client connects to all servers in the pipeline. - * - * @return True if the connection is alive, false otherwise. - */ - @VisibleForTesting - public boolean isConnected(DatanodeDetails details) { - return isConnected(channels.get(details.getUuid())); - } - - private boolean isConnected(ManagedChannel channel) { - return channel != null && !channel.isTerminated() && !channel.isShutdown(); - } - - @Override - public void close() { - closed = true; - for (ManagedChannel channel : channels.values()) { - channel.shutdownNow(); - try { - channel.awaitTermination(60, TimeUnit.MINUTES); - } catch (Exception e) { - LOG.error("Unexpected exception while waiting for channel termination", - e); - } - } - } - - @Override - public Pipeline getPipeline() { - return pipeline; - } - - @Override - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request) throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandWithTraceIDAndRetry(request, null); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to execute command " + request, e); - } - } - - @Override - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request, List validators) - throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandWithTraceIDAndRetry(request, validators); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to execute command " + request, e); - } - } - - private XceiverClientReply sendCommandWithTraceIDAndRetry( - ContainerCommandRequestProto request, List validators) - throws IOException { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientGrpc." + request.getCmdType().name()) - .startActive(true)) { - ContainerCommandRequestProto finalPayload = - ContainerCommandRequestProto.newBuilder(request) - .setTraceID(TracingUtil.exportCurrentSpan()).build(); - return sendCommandWithRetry(finalPayload, validators); - } - } - - private XceiverClientReply sendCommandWithRetry( - ContainerCommandRequestProto request, List validators) - throws IOException { - ContainerCommandResponseProto responseProto = null; - IOException ioException = null; - - // In case of an exception or an error, we will try to read from the - // datanodes in the pipeline in a round robin fashion. - - // TODO: cache the correct leader info in here, so that any subsequent calls - // should first go to leader - XceiverClientReply reply = new XceiverClientReply(null); - List datanodeList; - if ((request.getCmdType() == ContainerProtos.Type.ReadChunk || - request.getCmdType() == ContainerProtos.Type.GetSmallFile) && - topologyAwareRead) { - datanodeList = pipeline.getNodesInOrder(); - } else { - datanodeList = pipeline.getNodes(); - // Shuffle datanode list so that clients do not read in the same order - // every time. 
- Collections.shuffle(datanodeList); - } - for (DatanodeDetails dn : datanodeList) { - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Executing command " + request + " on datanode " + dn); - } - // In case the command gets retried on a 2nd datanode, - // sendCommandAsyncCall will create a new channel and async stub - // in case these don't exist for the specific datanode. - reply.addDatanode(dn); - responseProto = sendCommandAsync(request, dn).getResponse().get(); - if (validators != null && !validators.isEmpty()) { - for (CheckedBiFunction validator : validators) { - validator.apply(request, responseProto); - } - } - break; - } catch (ExecutionException | InterruptedException | IOException e) { - LOG.error("Failed to execute command " + request + " on datanode " + dn - .getUuidString(), e); - if (!(e instanceof IOException)) { - if (Status.fromThrowable(e.getCause()).getCode() - == Status.UNAUTHENTICATED.getCode()) { - throw new SCMSecurityException("Failed to authenticate with " - + "GRPC XceiverServer with Ozone block token."); - } - ioException = new IOException(e); - } else { - ioException = (IOException) e; - } - responseProto = null; - } - } - - if (responseProto != null) { - reply.setResponse(CompletableFuture.completedFuture(responseProto)); - return reply; - } else { - Preconditions.checkNotNull(ioException); - LOG.error("Failed to execute command {} on the pipeline {}.", request, - pipeline); - throw ioException; - } - } - - // TODO: for a true async API, once the waitable future while executing - // the command on one channel fails, it should be retried asynchronously - // on the future Task for all the remaining datanodes. - - // Note: this Async api is not used currently used in any active I/O path. - // In case it gets used, the asynchronous retry logic needs to be plugged - // in here. - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - * @throws IOException - */ - @Override - public XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request) - throws IOException, ExecutionException, InterruptedException { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientGrpc." + request.getCmdType().name()) - .startActive(true)) { - - ContainerCommandRequestProto finalPayload = - ContainerCommandRequestProto.newBuilder(request) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - XceiverClientReply asyncReply = - sendCommandAsync(finalPayload, pipeline.getFirstNode()); - // TODO : for now make this API sync in nature as async requests are - // served out of order over XceiverClientGrpc. This needs to be fixed - // if this API is to be used for I/O path. Currently, this is not - // used for Read/Write Operation but for tests. 
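The retry loop above walks the pipeline's datanodes one by one (topology-ordered for topology-aware chunk reads, shuffled otherwise), returns the first response that passes the validators, and rethrows the last failure if every replica fails. A stripped-down sketch of that control flow, with generic node/request/response types standing in for the HDDS protobuf classes:

import java.io.IOException;
import java.util.List;

/** Minimal sketch of try-each-replica-until-one-succeeds, as in sendCommandWithRetry. */
final class ReplicaRetry {

  interface Call<N, Req, Resp> {
    Resp send(N node, Req request) throws IOException;
  }

  static <N, Req, Resp> Resp sendWithRetry(List<N> nodes, Req request,
      Call<N, Req, Resp> call) throws IOException {
    IOException lastFailure = null;
    for (N node : nodes) {
      try {
        // First replica that answers successfully wins.
        return call.send(node, request);
      } catch (IOException e) {
        // Remember the failure and move on to the next replica.
        lastFailure = e;
      }
    }
    throw lastFailure != null ? lastFailure : new IOException("no nodes to try");
  }
}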
- if (!HddsUtils.isReadOnly(request)) { - asyncReply.getResponse().get(); - } - return asyncReply; - } - } - - private XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request, DatanodeDetails dn) - throws IOException, ExecutionException, InterruptedException { - if (closed) { - throw new IOException("This channel is not connected."); - } - - UUID dnId = dn.getUuid(); - ManagedChannel channel = channels.get(dnId); - // If the channel doesn't exist for this specific datanode or the channel - // is closed, just reconnect - String token = request.getEncodedToken(); - if (!isConnected(channel)) { - reconnect(dn, token); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Send command {} to datanode {}", - request.getCmdType().toString(), dn.getNetworkFullPath()); - } - final CompletableFuture replyFuture = - new CompletableFuture<>(); - semaphore.acquire(); - long requestTime = Time.monotonicNowNanos(); - metrics.incrPendingContainerOpsMetrics(request.getCmdType()); - // create a new grpc stream for each non-async call. - - // TODO: for async calls, we should reuse StreamObserver resources. - final StreamObserver requestObserver = - asyncStubs.get(dnId) - .send(new StreamObserver() { - @Override - public void onNext(ContainerCommandResponseProto value) { - replyFuture.complete(value); - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - semaphore.release(); - } - - @Override - public void onError(Throwable t) { - replyFuture.completeExceptionally(t); - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - semaphore.release(); - } - - @Override - public void onCompleted() { - if (!replyFuture.isDone()) { - replyFuture.completeExceptionally(new IOException( - "Stream completed but no reply for request " + request)); - } - } - }); - requestObserver.onNext(request); - requestObserver.onCompleted(); - return new XceiverClientReply(replyFuture); - } - - private void reconnect(DatanodeDetails dn, String encodedToken) - throws IOException { - ManagedChannel channel; - try { - connectToDatanode(dn, encodedToken); - channel = channels.get(dn.getUuid()); - } catch (Exception e) { - LOG.error("Error while connecting: ", e); - throw new IOException(e); - } - - if (channel == null || !isConnected(channel)) { - throw new IOException("This channel is not connected."); - } - } - - @Override - public XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException { - // there is no notion of watch for commit index in standalone pipeline - return null; - }; - - public long getReplicatedMinCommitIndex() { - return 0; - } - /** - * Returns pipeline Type. - * - * @return - Stand Alone as the type. - */ - @Override - public HddsProtos.ReplicationType getPipelineType() { - return HddsProtos.ReplicationType.STAND_ALONE; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java deleted file mode 100644 index b15828a1530..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
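The async send above bounds the number of in-flight requests with a Semaphore: a permit is taken before the gRPC stream is opened and released in the onNext/onError callbacks. The same back-pressure idiom in isolation, with a placeholder unit of work; the supplier is assumed not to throw synchronously.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

/** Minimal sketch: cap concurrent async operations, releasing the permit on completion. */
final class BoundedAsync {

  private final Semaphore permits;

  BoundedAsync(int maxOutstanding) {
    this.permits = new Semaphore(maxOutstanding);
  }

  <T> CompletableFuture<T> submit(Supplier<CompletableFuture<T>> work)
      throws InterruptedException {
    // Blocks the caller when too many requests are already pending.
    permits.acquire();
    // Release the permit whether the future completes normally or exceptionally.
    return work.get().whenComplete((result, error) -> permits.release());
  }
}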
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; -import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE; - -/** - * XceiverClientManager is responsible for the lifecycle of XceiverClient - * instances. Callers use this class to acquire an XceiverClient instance - * connected to the desired container pipeline. When done, the caller also uses - * this class to release the previously acquired XceiverClient instance. - * - * - * This class caches connection to container for reuse purpose, such that - * accessing same container frequently will be through the same connection - * without reestablishing connection. But the connection will be closed if - * not being used for a period of time. - */ -public class XceiverClientManager implements Closeable { - private static final Logger LOG = - LoggerFactory.getLogger(XceiverClientManager.class); - //TODO : change this to SCM configuration class - private final Configuration conf; - private final Cache clientCache; - private final boolean useRatis; - private X509Certificate caCert; - - private static XceiverClientMetrics metrics; - private boolean isSecurityEnabled; - private final boolean topologyAwareRead; - /** - * Creates a new XceiverClientManager for non secured ozone cluster. 
- * For security enabled ozone cluster, client should use the other constructor - * with a valid ca certificate in pem string format. - * - * @param conf configuration - */ - public XceiverClientManager(Configuration conf) throws IOException { - this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class), - null); - } - - public XceiverClientManager(Configuration conf, ScmClientConfig clientConf, - String caCertPem) throws IOException { - Preconditions.checkNotNull(clientConf); - Preconditions.checkNotNull(conf); - long staleThresholdMs = clientConf.getStaleThreshold(MILLISECONDS); - this.useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); - this.conf = conf; - this.isSecurityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf); - if (isSecurityEnabled) { - Preconditions.checkNotNull(caCertPem); - try { - this.caCert = CertificateCodec.getX509Cert(caCertPem); - } catch (CertificateException ex) { - throw new SCMSecurityException("Error: Fail to get SCM CA certificate", - ex); - } - } - - this.clientCache = CacheBuilder.newBuilder() - .expireAfterAccess(staleThresholdMs, MILLISECONDS) - .maximumSize(clientConf.getMaxSize()) - .removalListener( - new RemovalListener() { - @Override - public void onRemoval( - RemovalNotification - removalNotification) { - synchronized (clientCache) { - // Mark the entry as evicted - XceiverClientSpi info = removalNotification.getValue(); - info.setEvicted(); - } - } - }).build(); - topologyAwareRead = conf.getBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); - } - - @VisibleForTesting - public Cache getClientCache() { - return clientCache; - } - - /** - * Acquires a XceiverClientSpi connected to a container capable of - * storing the specified key. - * - * If there is already a cached XceiverClientSpi, simply return - * the cached otherwise create a new one. - * - * @param pipeline the container pipeline for the client connection - * @return XceiverClientSpi connected to a container - * @throws IOException if a XceiverClientSpi cannot be acquired - */ - public XceiverClientSpi acquireClient(Pipeline pipeline) - throws IOException { - return acquireClient(pipeline, false); - } - - /** - * Acquires a XceiverClientSpi connected to a container for read. - * - * If there is already a cached XceiverClientSpi, simply return - * the cached otherwise create a new one. - * - * @param pipeline the container pipeline for the client connection - * @return XceiverClientSpi connected to a container - * @throws IOException if a XceiverClientSpi cannot be acquired - */ - public XceiverClientSpi acquireClientForReadData(Pipeline pipeline) - throws IOException { - return acquireClient(pipeline, true); - } - - private XceiverClientSpi acquireClient(Pipeline pipeline, boolean read) - throws IOException { - Preconditions.checkNotNull(pipeline); - Preconditions.checkArgument(pipeline.getNodes() != null); - Preconditions.checkArgument(!pipeline.getNodes().isEmpty()); - - synchronized (clientCache) { - XceiverClientSpi info = getClient(pipeline, read); - info.incrementReference(); - return info; - } - } - - /** - * Releases a XceiverClientSpi after use. 
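The client cache being built here is a Guava Cache keyed per pipeline, with expire-after-access eviction, a size cap, and a removal listener (the removed code marks the evicted client rather than closing it, so callers still holding a reference can finish). A compact, hypothetical sketch of the same construction with illustrative limits and a close-on-evict listener:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

final class ClientCacheSketch {

  /** Stand-in for a pooled connection; a real client would wrap a channel. */
  interface Client extends AutoCloseable {
    @Override
    void close();
  }

  // Closing on removal keeps evicted connections from leaking (a simplification of the
  // removed code, which only marks the client as evicted).
  private final RemovalListener<String, Client> onEvict =
      notification -> notification.getValue().close();

  private final Cache<String, Client> cache = CacheBuilder.newBuilder()
      .expireAfterAccess(10, TimeUnit.SECONDS)   // idle threshold, illustrative value
      .maximumSize(256)                          // cache size cap, illustrative value
      .removalListener(onEvict)
      .build();

  Client acquire(String key, Callable<Client> loader) throws ExecutionException {
    // Loads a new client only when nothing is cached for this key.
    return cache.get(key, loader);
  }
}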
- * - * @param client client to release - * @param invalidateClient if true, invalidates the client in cache - */ - public void releaseClient(XceiverClientSpi client, boolean invalidateClient) { - releaseClient(client, invalidateClient, false); - } - - /** - * Releases a read XceiverClientSpi after use. - * - * @param client client to release - * @param invalidateClient if true, invalidates the client in cache - */ - public void releaseClientForReadData(XceiverClientSpi client, - boolean invalidateClient) { - releaseClient(client, invalidateClient, true); - } - - private void releaseClient(XceiverClientSpi client, boolean invalidateClient, - boolean read) { - Preconditions.checkNotNull(client); - synchronized (clientCache) { - client.decrementReference(); - if (invalidateClient) { - Pipeline pipeline = client.getPipeline(); - String key = getPipelineCacheKey(pipeline, read); - XceiverClientSpi cachedClient = clientCache.getIfPresent(key); - if (cachedClient == client) { - clientCache.invalidate(key); - } - } - } - } - - private XceiverClientSpi getClient(Pipeline pipeline, boolean forRead) - throws IOException { - HddsProtos.ReplicationType type = pipeline.getType(); - try { - // create different client for read different pipeline node based on - // network topology - String key = getPipelineCacheKey(pipeline, forRead); - // Append user short name to key to prevent a different user - // from using same instance of xceiverClient. - key = isSecurityEnabled ? - key + UserGroupInformation.getCurrentUser().getShortUserName() : key; - return clientCache.get(key, new Callable() { - @Override - public XceiverClientSpi call() throws Exception { - XceiverClientSpi client = null; - switch (type) { - case RATIS: - client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf, - caCert); - client.connect(); - break; - case STAND_ALONE: - client = new XceiverClientGrpc(pipeline, conf, caCert); - break; - case CHAINED: - default: - throw new IOException("not implemented" + pipeline.getType()); - } - return client; - } - }); - } catch (Exception e) { - throw new IOException( - "Exception getting XceiverClient: " + e.toString(), e); - } - } - - private String getPipelineCacheKey(Pipeline pipeline, boolean forRead) { - String key = pipeline.getId().getId().toString() + pipeline.getType(); - if (topologyAwareRead && forRead) { - try { - key += pipeline.getClosestNode().getHostName(); - } catch (IOException e) { - LOG.error("Failed to get closest node to create pipeline cache key:" + - e.getMessage()); - } - } - return key; - } - - /** - * Close and remove all the cached clients. - */ - @Override - public void close() { - //closing is done through RemovalListener - clientCache.invalidateAll(); - clientCache.cleanUp(); - - if (metrics != null) { - metrics.unRegister(); - } - } - - /** - * Tells us if Ratis is enabled for this cluster. - * @return True if Ratis is enabled. - */ - public boolean isUseRatis() { - return useRatis; - } - - /** - * Returns hard coded 3 as replication factor. - * @return 3 - */ - public HddsProtos.ReplicationFactor getFactor() { - if(isUseRatis()) { - return HddsProtos.ReplicationFactor.THREE; - } - return HddsProtos.ReplicationFactor.ONE; - } - - /** - * Returns the default replication type. - * @return Ratis or Standalone - */ - public HddsProtos.ReplicationType getType() { - // TODO : Fix me and make Ratis default before release. 
- // TODO: Remove this as replication factor and type are pipeline properties - if(isUseRatis()) { - return HddsProtos.ReplicationType.RATIS; - } - return HddsProtos.ReplicationType.STAND_ALONE; - } - - public Function byteBufferToByteStringConversion(){ - return ByteStringConversion.createByteBufferConversion(conf); - } - - /** - * Get xceiver client metric. - */ - public synchronized static XceiverClientMetrics getXceiverClientMetrics() { - if (metrics == null) { - metrics = XceiverClientMetrics.create(); - } - - return metrics; - } - - /** - * Configuration for HDDS client. - */ - @ConfigGroup(prefix = "scm.container.client") - public static class ScmClientConfig { - - private int maxSize; - private long staleThreshold; - private int maxOutstandingRequests; - - public long getStaleThreshold(TimeUnit unit) { - return unit.convert(staleThreshold, MILLISECONDS); - } - - @Config(key = "idle.threshold", - type = ConfigType.TIME, timeUnit = MILLISECONDS, - defaultValue = "10s", - tags = { OZONE, PERFORMANCE }, - description = - "In the standalone pipelines, the SCM clients use netty to " - + " communicate with the container. It also uses connection pooling" - + " to reduce client side overheads. This allows a connection to" - + " stay idle for a while before the connection is closed." - ) - public void setStaleThreshold(long staleThreshold) { - this.staleThreshold = staleThreshold; - } - - public int getMaxSize() { - return maxSize; - } - - @Config(key = "max.size", - defaultValue = "256", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of connections that are cached via" - + " client connection pooling. If the number of connections" - + " exceed this count, then the oldest idle connection is evicted." - ) - public void setMaxSize(int maxSize) { - this.maxSize = maxSize; - } - - public int getMaxOutstandingRequests() { - return maxOutstandingRequests; - } - - @Config(key = "max.outstanding.requests", - defaultValue = "100", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of outstanding async requests that can" - + " be handled by the Standalone as well as Ratis client." - ) - public void setMaxOutstandingRequests(int maxOutstandingRequests) { - this.maxOutstandingRequests = maxOutstandingRequests; - } - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java deleted file mode 100644 index 5d43c5ef225..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
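The XceiverClientMetrics class whose deletion starts just below (and whose counters the gRPC client above increments and decrements around each request) follows the usual Hadoop metrics2 pattern: annotate a source, create counters and rates in a MetricsRegistry, and register the source with the default metrics system. A minimal sketch with illustrative metric names:

import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;

@Metrics(about = "Example client metrics", context = "dfs")
public class ExampleClientMetrics {

  private static final String SOURCE_NAME = ExampleClientMetrics.class.getSimpleName();

  // Injected by the metrics system when the source is registered.
  @Metric private MutableCounterLong pendingOps;

  private final MetricsRegistry registry = new MetricsRegistry(SOURCE_NAME);
  private final MutableCounterLong readOps =
      registry.newCounter("readOps", "number of read ops", 0L);
  private final MutableRate readLatency =
      registry.newRate("readLatency", "latency of read ops");

  public static ExampleClientMetrics create() {
    DefaultMetricsSystem.initialize(SOURCE_NAME);
    MetricsSystem ms = DefaultMetricsSystem.instance();
    return ms.register(SOURCE_NAME, "Example client metrics", new ExampleClientMetrics());
  }

  public void recordRead(long latencyNanos) {
    pendingOps.incr();
    readOps.incr();
    readLatency.add(latencyNanos);
    // The read completes immediately in this sketch, so the pending count drops again.
    pendingOps.incr(-1);
  }
}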
- */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; - -/** - * The client metrics for the Storage Container protocol. - */ -@InterfaceAudience.Private -@Metrics(about = "Storage Container Client Metrics", context = "dfs") -public class XceiverClientMetrics { - public static final String SOURCE_NAME = XceiverClientMetrics.class - .getSimpleName(); - - private @Metric MutableCounterLong pendingOps; - private @Metric MutableCounterLong totalOps; - private MutableCounterLong[] pendingOpsArray; - private MutableCounterLong[] opsArray; - private MutableRate[] containerOpsLatency; - private MetricsRegistry registry; - - public XceiverClientMetrics() { - int numEnumEntries = ContainerProtos.Type.values().length; - this.registry = new MetricsRegistry(SOURCE_NAME); - - this.pendingOpsArray = new MutableCounterLong[numEnumEntries]; - this.opsArray = new MutableCounterLong[numEnumEntries]; - this.containerOpsLatency = new MutableRate[numEnumEntries]; - for (int i = 0; i < numEnumEntries; i++) { - pendingOpsArray[i] = registry.newCounter( - "numPending" + ContainerProtos.Type.forNumber(i + 1), - "number of pending" + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - opsArray[i] = registry - .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1), - "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - - containerOpsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1) + "Latency", - "latency of " + ContainerProtos.Type.forNumber(i + 1) - + " ops"); - } - } - - public static XceiverClientMetrics create() { - DefaultMetricsSystem.initialize(SOURCE_NAME); - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "Storage Container Client Metrics", - new XceiverClientMetrics()); - } - - public void incrPendingContainerOpsMetrics(ContainerProtos.Type type) { - pendingOps.incr(); - totalOps.incr(); - opsArray[type.ordinal()].incr(); - pendingOpsArray[type.ordinal()].incr(); - } - - public void decrPendingContainerOpsMetrics(ContainerProtos.Type type) { - pendingOps.incr(-1); - pendingOpsArray[type.ordinal()].incr(-1); - } - - public void addContainerOpsLatency(ContainerProtos.Type type, - long latencyNanos) { - containerOpsLatency[type.ordinal()].add(latencyNanos); - } - - public long getContainerOpsMetrics(ContainerProtos.Type type) { - return pendingOpsArray[type.ordinal()].value(); - } - - @VisibleForTesting - public long getTotalOpCount() { - return totalOps.value(); - } - - @VisibleForTesting - public long getContainerOpCountMetrics(ContainerProtos.Type type) { - return opsArray[type.ordinal()].value(); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java deleted file mode 100644 index 
04fababf504..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import java.io.IOException; -import java.security.cert.X509Certificate; -import java.util.Collection; -import java.util.List; -import java.util.Objects; -import java.util.OptionalLong; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftException; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; - -/** - * An abstract implementation of {@link XceiverClientSpi} using Ratis. - * The underlying RPC mechanism can be chosen via the constructor. 
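XceiverClientRatis guards its underlying RaftClient with an AtomicReference, so connect() can only succeed once and close() atomically takes ownership of the instance to shut it down (see its connect() and close() methods further down). That idiom in isolation, with a generic connection type:

import java.util.Objects;
import java.util.concurrent.atomic.AtomicReference;

/** Minimal sketch: connect-once / close-once guarded by an AtomicReference. */
final class ConnectOnce<C> {

  private final AtomicReference<C> ref = new AtomicReference<>();

  void connect(C newConnection) {
    // compareAndSet fails if some other caller already connected.
    if (!ref.compareAndSet(null, newConnection)) {
      throw new IllegalStateException("Client is already connected.");
    }
  }

  C get() {
    return Objects.requireNonNull(ref.get(), "client is null");
  }

  C takeForClose() {
    // getAndSet(null) ensures exactly one caller ends up closing the connection.
    return ref.getAndSet(null);
  }
}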
- */ -public final class XceiverClientRatis extends XceiverClientSpi { - public static final Logger LOG = - LoggerFactory.getLogger(XceiverClientRatis.class); - - public static XceiverClientRatis newXceiverClientRatis( - org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf) { - return newXceiverClientRatis(pipeline, ozoneConf, null); - } - - public static XceiverClientRatis newXceiverClientRatis( - org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf, X509Certificate caCert) { - final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final TimeDuration clientRequestTimeout = - RatisHelper.getClientRequestTimeout(ozoneConf); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(ozoneConf); - final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); - final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new - SecurityConfig(ozoneConf), caCert); - return new XceiverClientRatis(pipeline, - SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests, - retryPolicy, tlsConfig, clientRequestTimeout); - } - - private final Pipeline pipeline; - private final RpcType rpcType; - private final AtomicReference client = new AtomicReference<>(); - private final int maxOutstandingRequests; - private final RetryPolicy retryPolicy; - private final GrpcTlsConfig tlsConfig; - private final TimeDuration clientRequestTimeout; - - // Map to track commit index at every server - private final ConcurrentHashMap commitInfoMap; - - private XceiverClientMetrics metrics; - - /** - * Constructs a client. - */ - private XceiverClientRatis(Pipeline pipeline, RpcType rpcType, - int maxOutStandingChunks, RetryPolicy retryPolicy, - GrpcTlsConfig tlsConfig, TimeDuration timeout) { - super(); - this.pipeline = pipeline; - this.rpcType = rpcType; - this.maxOutstandingRequests = maxOutStandingChunks; - this.retryPolicy = retryPolicy; - commitInfoMap = new ConcurrentHashMap<>(); - this.tlsConfig = tlsConfig; - this.clientRequestTimeout = timeout; - metrics = XceiverClientManager.getXceiverClientMetrics(); - } - - private void updateCommitInfosMap( - Collection commitInfoProtos) { - // if the commitInfo map is empty, just update the commit indexes for each - // of the servers - if (commitInfoMap.isEmpty()) { - commitInfoProtos.forEach(proto -> commitInfoMap - .put(RatisHelper.toDatanodeId(proto.getServer()), - proto.getCommitIndex())); - // In case the commit is happening 2 way, just update the commitIndex - // for the servers which have been successfully updating the commit - // indexes. This is important because getReplicatedMinCommitIndex() - // should always return the min commit index out of the nodes which have - // been replicating data successfully. - } else { - commitInfoProtos.forEach(proto -> commitInfoMap - .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()), - (address, index) -> { - index = proto.getCommitIndex(); - return index; - })); - } - } - - /** - * Returns Ratis as pipeline Type. 
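The commit-info bookkeeping above keeps one entry per datanode: the first reply seeds every server, later replies only update servers that are still replicating successfully, and getReplicatedMinCommitIndex (further down) reports the smallest index, i.e. the highest log index known to be on all healthy replicas. Reduced to its essentials:

import java.util.Map;
import java.util.OptionalLong;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

/** Minimal sketch of per-server commit-index tracking. */
final class CommitIndexTracker {

  private final ConcurrentHashMap<UUID, Long> commitIndexes = new ConcurrentHashMap<>();

  void update(Map<UUID, Long> reported) {
    if (commitIndexes.isEmpty()) {
      // First reply seeds an entry for every server in the pipeline.
      commitIndexes.putAll(reported);
    } else {
      // Later replies only refresh servers that are still being tracked.
      reported.forEach((id, index) ->
          commitIndexes.computeIfPresent(id, (k, old) -> index));
    }
  }

  /** Smallest index replicated to every tracked server; 0 when nothing is tracked yet. */
  long replicatedMin() {
    OptionalLong min = commitIndexes.values().stream().mapToLong(Long::longValue).min();
    return min.orElse(0);
  }
}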
- * - * @return - Ratis - */ - @Override - public HddsProtos.ReplicationType getPipelineType() { - return HddsProtos.ReplicationType.RATIS; - } - - @Override - public Pipeline getPipeline() { - return pipeline; - } - - @Override - public void connect() throws Exception { - if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(), - RatisHelper.toRaftPeerId(pipeline.getFirstNode())); - } - // TODO : XceiverClient ratis should pass the config value of - // maxOutstandingRequests so as to set the upper bound on max no of async - // requests to be handled by raft client - if (!client.compareAndSet(null, - RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy, - maxOutstandingRequests, tlsConfig, clientRequestTimeout))) { - throw new IllegalStateException("Client is already connected."); - } - } - - @Override - public void connect(String encodedToken) throws Exception { - throw new UnsupportedOperationException("Block tokens are not " + - "implemented for Ratis clients."); - } - - @Override - public void close() { - final RaftClient c = client.getAndSet(null); - if (c != null) { - closeRaftClient(c); - } - } - - private void closeRaftClient(RaftClient raftClient) { - try { - raftClient.close(); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - private RaftClient getClient() { - return Objects.requireNonNull(client.get(), "client is null"); - } - - - @VisibleForTesting - public ConcurrentHashMap getCommitInfoMap() { - return commitInfoMap; - } - - private CompletableFuture sendRequestAsync( - ContainerCommandRequestProto request) { - try (Scope scope = GlobalTracer.get() - .buildSpan("XceiverClientRatis." + request.getCmdType().name()) - .startActive(true)) { - final ContainerCommandRequestMessage message - = ContainerCommandRequestMessage.toMessage( - request, TracingUtil.exportCurrentSpan()); - if (HddsUtils.isReadOnly(request)) { - if (LOG.isDebugEnabled()) { - LOG.debug("sendCommandAsync ReadOnly {}", message); - } - return getClient().sendReadOnlyAsync(message); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("sendCommandAsync {}", message); - } - return getClient().sendAsync(message); - } - } - } - - // gets the minimum log index replicated to all servers - @Override - public long getReplicatedMinCommitIndex() { - OptionalLong minIndex = - commitInfoMap.values().parallelStream().mapToLong(v -> v).min(); - return minIndex.isPresent() ? 
minIndex.getAsLong() : 0; - } - - private void addDatanodetoReply(UUID address, XceiverClientReply reply) { - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(address.toString()); - reply.addDatanode(builder.build()); - } - - @Override - public XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException { - long commitIndex = getReplicatedMinCommitIndex(); - XceiverClientReply clientReply = new XceiverClientReply(null); - if (commitIndex >= index) { - // return the min commit index till which the log has been replicated to - // all servers - clientReply.setLogIndex(commitIndex); - return clientReply; - } - if (LOG.isDebugEnabled()) { - LOG.debug("commit index : {} watch timeout : {}", index, timeout); - } - RaftClientReply reply; - try { - CompletableFuture replyFuture = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED); - replyFuture.get(timeout, TimeUnit.MILLISECONDS); - } catch (Exception e) { - Throwable t = HddsClientUtils.checkForException(e); - LOG.warn("3 way commit failed on pipeline {}", pipeline, e); - if (t instanceof GroupMismatchException) { - throw e; - } - reply = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED) - .get(timeout, TimeUnit.MILLISECONDS); - List commitInfoProtoList = - reply.getCommitInfos().stream() - .filter(i -> i.getCommitIndex() < index) - .collect(Collectors.toList()); - commitInfoProtoList.parallelStream().forEach(proto -> { - UUID address = RatisHelper.toDatanodeId(proto.getServer()); - addDatanodetoReply(address, clientReply); - // since 3 way commit has failed, the updated map from now on will - // only store entries for those datanodes which have had successful - // replication. - commitInfoMap.remove(address); - LOG.info( - "Could not commit index {} on pipeline {} to all the nodes. " + - "Server {} has failed. Committed by majority.", - index, pipeline, address); - }); - } - clientReply.setLogIndex(index); - return clientReply; - } - - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - */ - @Override - public XceiverClientReply sendCommandAsync( - ContainerCommandRequestProto request) { - XceiverClientReply asyncReply = new XceiverClientReply(null); - long requestTime = Time.monotonicNowNanos(); - CompletableFuture raftClientReply = - sendRequestAsync(request); - metrics.incrPendingContainerOpsMetrics(request.getCmdType()); - CompletableFuture containerCommandResponse = - raftClientReply.whenComplete((reply, e) -> { - if (LOG.isDebugEnabled()) { - LOG.debug("received reply {} for request: cmdType={} containerID={}" - + " pipelineID={} traceID={} exception: {}", reply, - request.getCmdType(), request.getContainerID(), - request.getPipelineID(), request.getTraceID(), e); - } - metrics.decrPendingContainerOpsMetrics(request.getCmdType()); - metrics.addContainerOpsLatency(request.getCmdType(), - Time.monotonicNowNanos() - requestTime); - }).thenApply(reply -> { - try { - if (!reply.isSuccess()) { - // in case of raft retry failure, the raft client is - // not able to connect to the leader hence the pipeline - // can not be used but this instance of RaftClient will close - // and refreshed again. In case the client cannot connect to - // leader, getClient call will fail. - - // No need to set the failed Server ID here. 
Ozone client - // will directly exclude this pipeline in next allocate block - // to SCM as in this case, it is the raft client which is not - // able to connect to leader in the pipeline, though the - // pipeline can still be functional. - RaftException exception = reply.getException(); - Preconditions.checkNotNull(exception, "Raft reply failure but " + - "no exception propagated."); - throw new CompletionException(exception); - } - ContainerCommandResponseProto response = - ContainerCommandResponseProto - .parseFrom(reply.getMessage().getContent()); - UUID serverId = RatisHelper.toDatanodeId(reply.getReplierId()); - if (response.getResult() == ContainerProtos.Result.SUCCESS) { - updateCommitInfosMap(reply.getCommitInfos()); - } - asyncReply.setLogIndex(reply.getLogIndex()); - addDatanodetoReply(serverId, asyncReply); - return response; - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); - asyncReply.setResponse(containerCommandResponse); - return asyncReply; - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java deleted file mode 100644 index 982fb8ea1ee..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.client; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -/** - * This class provides the client-facing APIs of container operations. 
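For orientation before the class body below, here is a minimal usage sketch of this client. It assumes the SCM location proxy and the XceiverClientManager are wired up elsewhere, and the sketch's class and variable names are invented for illustration; only the ContainerOperationClient calls themselves come from the code in this file.

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

public final class ContainerLifecycleSketch {

  // Drives a single container through create, read, close and delete.
  public static void runLifecycle(StorageContainerLocationProtocol scmLocation,
      XceiverClientManager clientManager) throws IOException {
    ContainerOperationClient scmClient =
        new ContainerOperationClient(scmLocation, clientManager);
    try {
      // Allocate a container on SCM and create it on the pipeline's datanodes.
      ContainerWithPipeline created = scmClient.createContainer("ozone");
      long containerId = created.getContainerInfo().getContainerID();

      // Read the container metadata back from a datanode.
      ContainerDataProto containerData = scmClient.readContainer(containerId);

      // Close the container (datanodes first, then SCM is notified).
      scmClient.closeContainer(containerId);

      // Delete the now-closed container, forcing removal.
      scmClient.deleteContainer(containerId, true);
    } finally {
      scmClient.close();
    }
  }

  private ContainerLifecycleSketch() {
  }
}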
- */ -public class ContainerOperationClient implements ScmClient { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerOperationClient.class); - private static long containerSizeB = -1; - private final StorageContainerLocationProtocol - storageContainerLocationClient; - private final XceiverClientManager xceiverClientManager; - - public ContainerOperationClient( - StorageContainerLocationProtocol - storageContainerLocationClient, - XceiverClientManager xceiverClientManager) { - this.storageContainerLocationClient = storageContainerLocationClient; - this.xceiverClientManager = xceiverClientManager; - } - - /** - * Return the capacity of containers. The current assumption is that all - * containers have the same capacity. Therefore one static is sufficient for - * any container. - * @return The capacity of one container in number of bytes. - */ - public static long getContainerSizeB() { - return containerSizeB; - } - - /** - * Set the capacity of container. Should be exactly once on system start. - * @param size Capacity of one container in number of bytes. - */ - public static void setContainerSizeB(long size) { - containerSizeB = size; - } - - - @Override - public ContainerWithPipeline createContainer(String owner) - throws IOException { - XceiverClientSpi client = null; - try { - ContainerWithPipeline containerWithPipeline = - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), owner); - Pipeline pipeline = containerWithPipeline.getPipeline(); - client = xceiverClientManager.acquireClient(pipeline); - - Preconditions.checkState(pipeline.isOpen(), String - .format("Unexpected state=%s for pipeline=%s, expected state=%s", - pipeline.getPipelineState(), pipeline.getId(), - Pipeline.PipelineState.OPEN)); - createContainer(client, - containerWithPipeline.getContainerInfo().getContainerID()); - return containerWithPipeline; - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Create a container over pipeline specified by the SCM. - * - * @param client - Client to communicate with Datanodes. - * @param containerId - Container ID. - * @throws IOException - */ - public void createContainer(XceiverClientSpi client, - long containerId) throws IOException { - ContainerProtocolCalls.createContainer(client, containerId, null); - - // Let us log this info after we let SCM know that we have completed the - // creation state. - if (LOG.isDebugEnabled()) { - LOG.debug("Created container " + containerId - + " machines:" + client.getPipeline().getNodes()); - } - } - - /** - * Creates a pipeline over the machines choosen by the SCM. - * - * @param client - Client - * @param pipeline - pipeline to be createdon Datanodes. - * @throws IOException - */ - private void createPipeline(XceiverClientSpi client, Pipeline pipeline) - throws IOException { - - Preconditions.checkNotNull(pipeline.getId(), "Pipeline " + - "name cannot be null when client create flag is set."); - - // Pipeline creation is a three step process. - // - // 1. Notify SCM that this client is doing a create pipeline on - // datanodes. - // - // 2. Talk to Datanodes to create the pipeline. - // - // 3. update SCM that pipeline creation was successful. 
- - // TODO: this has not been fully implemented on server side - // SCMClientProtocolServer#notifyObjectStageChange - // TODO: when implement the pipeline state machine, change - // the pipeline name (string) to pipeline id (long) - //storageContainerLocationClient.notifyObjectStageChange( - // ObjectStageChangeRequestProto.Type.pipeline, - // pipeline.getPipelineName(), - // ObjectStageChangeRequestProto.Op.create, - // ObjectStageChangeRequestProto.Stage.begin); - - // client.createPipeline(); - // TODO: Use PipelineManager to createPipeline - - //storageContainerLocationClient.notifyObjectStageChange( - // ObjectStageChangeRequestProto.Type.pipeline, - // pipeline.getPipelineName(), - // ObjectStageChangeRequestProto.Op.create, - // ObjectStageChangeRequestProto.Stage.complete); - - // TODO : Should we change the state on the client side ?? - // That makes sense, but it is not needed for the client to work. - if (LOG.isDebugEnabled()) { - LOG.debug("Pipeline creation successful. Pipeline: {}", - pipeline.toString()); - } - } - - @Override - public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, String owner) throws IOException { - XceiverClientSpi client = null; - try { - // allocate container on SCM. - ContainerWithPipeline containerWithPipeline = - storageContainerLocationClient.allocateContainer(type, factor, - owner); - Pipeline pipeline = containerWithPipeline.getPipeline(); - client = xceiverClientManager.acquireClient(pipeline); - - // connect to pipeline leader and allocate container on leader datanode. - client = xceiverClientManager.acquireClient(pipeline); - createContainer(client, - containerWithPipeline.getContainerInfo().getContainerID()); - return containerWithPipeline; - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Returns a set of Nodes that meet a query criteria. - * - * @param nodeStatuses - Criteria that we want the node to have. - * @param queryScope - Query scope - Cluster or pool. - * @param poolName - if it is pool, a pool name is required. - * @return A set of nodes that meet the requested criteria. - * @throws IOException - */ - @Override - public List queryNode(HddsProtos.NodeState - nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) - throws IOException { - return storageContainerLocationClient.queryNode(nodeStatuses, queryScope, - poolName); - } - - /** - * Creates a specified replication pipeline. 
- */ - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException { - return storageContainerLocationClient.createReplicationPipeline(type, - factor, nodePool); - } - - @Override - public List listPipelines() throws IOException { - return storageContainerLocationClient.listPipelines(); - } - - @Override - public void activatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.activatePipeline(pipelineID); - } - - @Override - public void deactivatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.deactivatePipeline(pipelineID); - } - - @Override - public void closePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - storageContainerLocationClient.closePipeline(pipelineID); - } - - @Override - public void close() { - try { - xceiverClientManager.close(); - } catch (Exception ex) { - LOG.error("Can't close " + this.getClass().getSimpleName(), ex); - } - } - - /** - * Deletes an existing container. - * - * @param containerId - ID of the container. - * @param pipeline - Pipeline that represents the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - @Override - public void deleteContainer(long containerId, Pipeline pipeline, - boolean force) throws IOException { - XceiverClientSpi client = null; - try { - client = xceiverClientManager.acquireClient(pipeline); - ContainerProtocolCalls - .deleteContainer(client, containerId, force, null); - storageContainerLocationClient - .deleteContainer(containerId); - if (LOG.isDebugEnabled()) { - LOG.debug("Deleted container {}, machines: {} ", containerId, - pipeline.getNodes()); - } - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Delete the container, this will release any resource it uses. - * @param containerID - containerID. - * @param force - True to forcibly delete the container. - * @throws IOException - */ - @Override - public void deleteContainer(long containerID, boolean force) - throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerID); - deleteContainer(containerID, info.getPipeline(), force); - } - - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - return storageContainerLocationClient.listContainer( - startContainerID, count); - } - - /** - * Get meta data from an existing container. - * - * @param containerID - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @return ContainerInfo - * @throws IOException - */ - @Override - public ContainerDataProto readContainer(long containerID, - Pipeline pipeline) throws IOException { - XceiverClientSpi client = null; - try { - client = xceiverClientManager.acquireClient(pipeline); - ReadContainerResponseProto response = - ContainerProtocolCalls.readContainer(client, containerID, null); - if (LOG.isDebugEnabled()) { - LOG.debug("Read container {}, machines: {} ", containerID, - pipeline.getNodes()); - } - return response.getContainerData(); - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Get meta data from an existing container. - * @param containerID - ID of the container. - * @return ContainerInfo - a message of protobuf which has basic info - * of a container. 
- * @throws IOException - */ - @Override - public ContainerDataProto readContainer(long containerID) throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerID); - return readContainer(containerID, info.getPipeline()); - } - - /** - * Given an id, return the pipeline associated with the container. - * @param containerId - String Container ID - * @return Pipeline of the existing container, corresponding to the given id. - * @throws IOException - */ - @Override - public ContainerInfo getContainer(long containerId) throws - IOException { - return storageContainerLocationClient.getContainer(containerId); - } - - /** - * Gets a container by Name -- Throws if the container does not exist. - * - * @param containerId - Container ID - * @return ContainerWithPipeline - * @throws IOException - */ - @Override - public ContainerWithPipeline getContainerWithPipeline(long containerId) - throws IOException { - return storageContainerLocationClient.getContainerWithPipeline(containerId); - } - - /** - * Close a container. - * - * @param pipeline the container to be closed. - * @throws IOException - */ - @Override - public void closeContainer(long containerId, Pipeline pipeline) - throws IOException { - XceiverClientSpi client = null; - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Close container {}", pipeline); - } - /* - TODO: two orders here, revisit this later: - 1. close on SCM first, then on data node - 2. close on data node first, then on SCM - - with 1: if client failed after closing on SCM, then there is a - container SCM thinks as closed, but is actually open. Then SCM will no - longer allocate block to it, which is fine. But SCM may later try to - replicate this "closed" container, which I'm not sure is safe. - - with 2: if client failed after close on datanode, then there is a - container SCM thinks as open, but is actually closed. Then SCM will still - try to allocate block to it. Which will fail when actually doing the - write. No more data can be written, but at least the correctness and - consistency of existing data will maintain. - - For now, take the #2 way. - */ - // Actually close the container on Datanode - client = xceiverClientManager.acquireClient(pipeline); - - storageContainerLocationClient.notifyObjectStageChange( - ObjectStageChangeRequestProto.Type.container, - containerId, - ObjectStageChangeRequestProto.Op.close, - ObjectStageChangeRequestProto.Stage.begin); - - ContainerProtocolCalls.closeContainer(client, containerId, - null); - // Notify SCM to close the container - storageContainerLocationClient.notifyObjectStageChange( - ObjectStageChangeRequestProto.Type.container, - containerId, - ObjectStageChangeRequestProto.Op.close, - ObjectStageChangeRequestProto.Stage.complete); - } finally { - if (client != null) { - xceiverClientManager.releaseClient(client, false); - } - } - } - - /** - * Close a container. - * - * @throws IOException - */ - @Override - public void closeContainer(long containerId) - throws IOException { - ContainerWithPipeline info = getContainerWithPipeline(containerId); - Pipeline pipeline = info.getPipeline(); - closeContainer(containerId, pipeline); - } - - /** - * Get the the current usage information. - * @param containerID - ID of the container. - * @return the size of the given container. - * @throws IOException - */ - @Override - public long getContainerSize(long containerID) throws IOException { - // TODO : Fix this, it currently returns the capacity - // but not the current usage. 
- long size = getContainerSizeB(); - if (size == -1) { - throw new IOException("Container size unknown!"); - } - return size; - } - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - public boolean inSafeMode() throws IOException { - return storageContainerLocationClient.inSafeMode(); - } - - /** - * Force SCM out of safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - public boolean forceExitSafeMode() throws IOException { - return storageContainerLocationClient.forceExitSafeMode(); - } - - @Override - public void startReplicationManager() throws IOException { - storageContainerLocationClient.startReplicationManager(); - } - - @Override - public void stopReplicationManager() throws IOException { - storageContainerLocationClient.stopReplicationManager(); - } - - @Override - public boolean getReplicationManagerStatus() throws IOException { - return storageContainerLocationClient.getReplicationManagerStatus(); - } - - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java deleted file mode 100644 index d3bb31aa698..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; -import org.apache.ratis.protocol.AlreadyClosedException; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.NotReplicatedException; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Utility methods for Ozone and Container Clients. - * - * The methods to retrieve SCM service endpoints assume there is a single - * SCM service instance. This will change when we switch to replicated service - * instances for redundancy. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class HddsClientUtils { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsClientUtils.class); - - private static final int NO_PORT = -1; - - private HddsClientUtils() { - } - - private static final List> EXCEPTION_LIST = - new ArrayList>() {{ - add(TimeoutException.class); - add(StorageContainerException.class); - add(RaftRetryFailureException.class); - add(AlreadyClosedException.class); - add(GroupMismatchException.class); - // Not Replicated Exception will be thrown if watch For commit - // does not succeed - add(NotReplicatedException.class); - }}; - - /** - * Date format that used in ozone. Here the format is thread safe to use. 
- */ - private static final ThreadLocal DATE_FORMAT = - ThreadLocal.withInitial(() -> { - DateTimeFormatter format = - DateTimeFormatter.ofPattern(OzoneConsts.OZONE_DATE_FORMAT); - return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); - }); - - - /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time - */ - public static String formatDateTime(long millis) { - ZonedDateTime dateTime = ZonedDateTime.ofInstant( - Instant.ofEpochMilli(millis), DATE_FORMAT.get().getZone()); - return DATE_FORMAT.get().format(dateTime); - } - - /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds - */ - public static long formatDateTime(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return ZonedDateTime.parse(date, DATE_FORMAT.get()) - .toInstant().toEpochMilli(); - } - - /** - * verifies that bucket name / volume name is a valid DNS name. - * - * @param resName Bucket or volume Name to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyResourceName(String resName) - throws IllegalArgumentException { - if (resName == null) { - throw new IllegalArgumentException("Bucket or Volume name is null"); - } - - if (resName.length() < OzoneConsts.OZONE_MIN_BUCKET_NAME_LENGTH || - resName.length() > OzoneConsts.OZONE_MAX_BUCKET_NAME_LENGTH) { - throw new IllegalArgumentException( - "Bucket or Volume length is illegal, " - + "valid length is 3-63 characters"); - } - - if (resName.charAt(0) == '.' || resName.charAt(0) == '-') { - throw new IllegalArgumentException( - "Bucket or Volume name cannot start with a period or dash"); - } - - if (resName.charAt(resName.length() - 1) == '.' || - resName.charAt(resName.length() - 1) == '-') { - throw new IllegalArgumentException("Bucket or Volume name " - + "cannot end with a period or dash"); - } - - boolean isIPv4 = true; - char prev = (char) 0; - - for (int index = 0; index < resName.length(); index++) { - char currChar = resName.charAt(index); - if (currChar != '.') { - isIPv4 = ((currChar >= '0') && (currChar <= '9')) && isIPv4; - } - if (currChar > 'A' && currChar < 'Z') { - throw new IllegalArgumentException( - "Bucket or Volume name does not support uppercase characters"); - } - if (currChar != '.' && currChar != '-') { - if (currChar < '0' || (currChar > '9' && currChar < 'a') || - currChar > 'z') { - throw new IllegalArgumentException("Bucket or Volume name has an " + - "unsupported character : " + - currChar); - } - } - if (prev == '.' && currChar == '.') { - throw new IllegalArgumentException("Bucket or Volume name should not " + - "have two contiguous periods"); - } - if (prev == '-' && currChar == '.') { - throw new IllegalArgumentException( - "Bucket or Volume name should not have period after dash"); - } - if (prev == '.' && currChar == '-') { - throw new IllegalArgumentException( - "Bucket or Volume name should not have dash after period"); - } - prev = currChar; - } - - if (isIPv4) { - throw new IllegalArgumentException( - "Bucket or Volume name cannot be an IPv4 address or all numeric"); - } - } - - /** - * verifies that bucket / volume name is a valid DNS name. - * - * @param resourceNames Array of bucket / volume names to be verified. - */ - public static void verifyResourceName(String... 
resourceNames) { - for (String resourceName : resourceNames) { - HddsClientUtils.verifyResourceName(resourceName); - } - } - - /** - * Checks that object parameters passed as reference is not null. - * - * @param references Array of object references to be checked. - * @param - */ - public static void checkNotNull(T... references) { - for (T ref: references) { - Preconditions.checkNotNull(ref); - } - } - - /** - * Returns the cache value to be used for list calls. - * @param conf Configuration object - * @return list cache size - */ - public static int getListCacheSize(Configuration conf) { - return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, - OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); - } - - /** - * @return a default instance of {@link CloseableHttpClient}. - */ - public static CloseableHttpClient newHttpClient() { - return HddsClientUtils.newHttpClient(new Configuration()); - } - - /** - * Returns a {@link CloseableHttpClient} configured by given configuration. - * If conf is null, returns a default instance. - * - * @param conf configuration - * @return a {@link CloseableHttpClient} instance. - */ - public static CloseableHttpClient newHttpClient(Configuration conf) { - long socketTimeout = OzoneConfigKeys - .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT; - long connectionTimeout = OzoneConfigKeys - .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT; - if (conf != null) { - socketTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - connectionTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - } - - CloseableHttpClient client = HttpClients.custom() - .setDefaultRequestConfig( - RequestConfig.custom() - .setSocketTimeout(Math.toIntExact(socketTimeout)) - .setConnectTimeout(Math.toIntExact(connectionTimeout)) - .build()) - .build(); - return client; - } - - /** - * Returns the maximum no of outstanding async requests to be handled by - * Standalone and Ratis client. - */ - public static int getMaxOutstandingRequests(Configuration config) { - return OzoneConfiguration.of(config) - .getObject(ScmClientConfig.class) - .getMaxOutstandingRequests(); - } - - /** - * Create a scm block client, used by putKey() and getKey(). 
- * - * @return {@link ScmBlockLocationProtocol} - * @throws IOException - */ - public static SCMSecurityProtocol getScmSecurityClient( - OzoneConfiguration conf, UserGroupInformation ugi) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmSecurityProtoAdd = - HddsUtils.getScmAddressForSecurityProtocol(conf); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion, - scmSecurityProtoAdd, ugi, conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - return scmSecurityClient; - } - - public static Throwable checkForException(Exception e) { - Throwable t = e; - while (t != null) { - for (Class cls : getExceptionList()) { - if (cls.isInstance(t)) { - return t; - } - } - t = t.getCause(); - } - return t; - } - - public static RetryPolicy createRetryPolicy(int maxRetryCount, - long retryInterval) { - // retry with fixed sleep between retries - return RetryPolicies.retryUpToMaximumCountWithFixedSleep( - maxRetryCount, retryInterval, TimeUnit.MILLISECONDS); - } - - public static Map, - RetryPolicy> getRetryPolicyByException(int maxRetryCount, - long retryInterval) { - Map, RetryPolicy> policyMap = new HashMap<>(); - for (Class ex : EXCEPTION_LIST) { - if (ex == TimeoutException.class - || ex == RaftRetryFailureException.class) { - // retry without sleep - policyMap.put(ex, createRetryPolicy(maxRetryCount, 0)); - } else { - // retry with fixed sleep between retries - policyMap.put(ex, createRetryPolicy(maxRetryCount, retryInterval)); - } - } - // Default retry policy - policyMap - .put(Exception.class, createRetryPolicy(maxRetryCount, retryInterval)); - return policyMap; - } - - public static List> getExceptionList() { - return EXCEPTION_LIST; - } -} \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java deleted file mode 100644 index 73ad78cd787..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -/** - * Client facing classes for the container operations. 
- */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 9390bc10203..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * Classes for different type of container service client. - */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java deleted file mode 100644 index 40bbd93b16f..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * An {@link InputStream} called from KeyInputStream to read a block from the - * container. - * This class encapsulates all state management for iterating - * through the sequence of chunks through {@link ChunkInputStream}. - */ -public class BlockInputStream extends InputStream implements Seekable { - - private static final Logger LOG = - LoggerFactory.getLogger(BlockInputStream.class); - - private static final int EOF = -1; - - private final BlockID blockID; - private final long length; - private Pipeline pipeline; - private final Token token; - private final boolean verifyChecksum; - private XceiverClientManager xceiverClientManager; - private XceiverClientSpi xceiverClient; - private boolean initialized = false; - - // List of ChunkInputStreams, one for each chunk in the block - private List chunkStreams; - - // chunkOffsets[i] stores the index of the first data byte in - // chunkStream i w.r.t the block data. - // Let’s say we have chunk size as 40 bytes. And let's say the parent - // block stores data from index 200 and has length 400. - // The first 40 bytes of this block will be stored in chunk[0], next 40 in - // chunk[1] and so on. But since the chunkOffsets are w.r.t the block only - // and not the key, the values in chunkOffsets will be [0, 40, 80,....]. - private long[] chunkOffsets = null; - - // Index of the chunkStream corresponding to the current position of the - // BlockInputStream i.e offset of the data to be read next from this block - private int chunkIndex; - - // Position of the BlockInputStream is maintainted by this variable till - // the stream is initialized. This position is w.r.t to the block only and - // not the key. - // For the above example, if we seek to position 240 before the stream is - // initialized, then value of blockPosition will be set to 40. - // Once, the stream is initialized, the position of the stream - // will be determined by the current chunkStream and its position. - private long blockPosition = 0; - - // Tracks the chunkIndex corresponding to the last blockPosition so that it - // can be reset if a new position is seeked. 
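To make the chunk-offset arithmetic described in the comments above concrete, here is a small self-contained sketch of how a block-relative position maps to a chunk index and an in-chunk offset. The class and method names are invented for illustration; the real logic lives in seek() further down, and the remaining fields of BlockInputStream continue right after this sketch.

import java.util.Arrays;

public final class ChunkOffsetSketch {

  // Returns the index of the chunk holding pos, mirroring the
  // Arrays.binarySearch based lookup used by seek().
  static int chunkIndexFor(long[] chunkOffsets, long pos) {
    int idx = Arrays.binarySearch(chunkOffsets, pos);
    // binarySearch returns -(insertionPoint) - 1 when pos is not an exact
    // chunk boundary; the containing chunk is then insertionPoint - 1.
    return idx >= 0 ? idx : -idx - 2;
  }

  public static void main(String[] args) {
    // Ten 40-byte chunks, as in the example above: offsets [0, 40, 80, ...].
    long[] chunkOffsets = new long[10];
    for (int i = 0; i < chunkOffsets.length; i++) {
      chunkOffsets[i] = 40L * i;
    }

    long pos = 90;
    int chunkIndex = chunkIndexFor(chunkOffsets, pos);    // 2
    long offsetInChunk = pos - chunkOffsets[chunkIndex];  // 10, i.e. seek(90)
                                                          // lands 10 bytes into chunk[2]

    System.out.println("pos " + pos + " -> chunk " + chunkIndex
        + ", offset " + offsetInChunk);
  }

  private ChunkOffsetSketch() {
  }
}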
- private int chunkIndexOfPrevPosition; - - public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, - Token token, boolean verifyChecksum, - XceiverClientManager xceiverClientManager) { - this.blockID = blockId; - this.length = blockLen; - this.pipeline = pipeline; - this.token = token; - this.verifyChecksum = verifyChecksum; - this.xceiverClientManager = xceiverClientManager; - } - - /** - * Initialize the BlockInputStream. Get the BlockData (list of chunks) from - * the Container and create the ChunkInputStreams for each Chunk in the Block. - */ - public synchronized void initialize() throws IOException { - - // Pre-check that the stream has not been intialized already - if (initialized) { - return; - } - - List chunks = getChunkInfos(); - if (chunks != null && !chunks.isEmpty()) { - // For each chunk in the block, create a ChunkInputStream and compute - // its chunkOffset - this.chunkOffsets = new long[chunks.size()]; - long tempOffset = 0; - - this.chunkStreams = new ArrayList<>(chunks.size()); - for (int i = 0; i < chunks.size(); i++) { - addStream(chunks.get(i)); - chunkOffsets[i] = tempOffset; - tempOffset += chunks.get(i).getLen(); - } - - initialized = true; - this.chunkIndex = 0; - - if (blockPosition > 0) { - // Stream was seeked to blockPosition before initialization. Seek to the - // blockPosition now. - seek(blockPosition); - } - } - } - - /** - * Send RPC call to get the block info from the container. - * @return List of chunks in this block. - */ - protected List getChunkInfos() throws IOException { - // irrespective of the container state, we will always read via Standalone - // protocol. - if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { - pipeline = Pipeline.newBuilder(pipeline) - .setType(HddsProtos.ReplicationType.STAND_ALONE).build(); - } - xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline); - boolean success = false; - List chunks; - try { - if (LOG.isDebugEnabled()) { - LOG.debug("Initializing BlockInputStream for get key to access {}", - blockID.getContainerID()); - } - - if (token != null) { - UserGroupInformation.getCurrentUser().addToken(token); - } - DatanodeBlockID datanodeBlockID = blockID - .getDatanodeBlockIDProtobuf(); - GetBlockResponseProto response = ContainerProtocolCalls - .getBlock(xceiverClient, datanodeBlockID); - - chunks = response.getBlockData().getChunksList(); - success = true; - } finally { - if (!success) { - xceiverClientManager.releaseClientForReadData(xceiverClient, false); - } - } - - return chunks; - } - - /** - * Append another ChunkInputStream to the end of the list. Note that the - * ChunkInputStream is only created here. The chunk will be read from the - * Datanode only when a read operation is performed on for that chunk. 
- */ - protected synchronized void addStream(ChunkInfo chunkInfo) { - chunkStreams.add(new ChunkInputStream(chunkInfo, blockID, - xceiverClient, verifyChecksum)); - } - - public synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read() throws IOException { - byte[] buf = new byte[1]; - if (read(buf, 0, 1) == EOF) { - return EOF; - } - return Byte.toUnsignedInt(buf[0]); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - - if (!initialized) { - initialize(); - } - - checkOpen(); - int totalReadLen = 0; - while (len > 0) { - // if we are at the last chunk and have read the entire chunk, return - if (chunkStreams.size() == 0 || - (chunkStreams.size() - 1 <= chunkIndex && - chunkStreams.get(chunkIndex) - .getRemaining() == 0)) { - return totalReadLen == 0 ? EOF : totalReadLen; - } - - // Get the current chunkStream and read data from it - ChunkInputStream current = chunkStreams.get(chunkIndex); - int numBytesToRead = Math.min(len, (int)current.getRemaining()); - int numBytesRead = current.read(b, off, numBytesToRead); - if (numBytesRead != numBytesToRead) { - // This implies that there is either data loss or corruption in the - // chunk entries. Even EOF in the current stream would be covered in - // this case. - throw new IOException(String.format( - "Inconsistent read for chunkName=%s length=%d numBytesRead=%d", - current.getChunkName(), current.getLength(), numBytesRead)); - } - totalReadLen += numBytesRead; - off += numBytesRead; - len -= numBytesRead; - if (current.getRemaining() <= 0 && - ((chunkIndex + 1) < chunkStreams.size())) { - chunkIndex += 1; - } - } - return totalReadLen; - } - - /** - * Seeks the BlockInputStream to the specified position. If the stream is - * not initialized, save the seeked position via blockPosition. Otherwise, - * update the position in 2 steps: - * 1. Updating the chunkIndex to the chunkStream corresponding to the - * seeked position. - * 2. Seek the corresponding chunkStream to the adjusted position. - * - * Let’s say we have chunk size as 40 bytes. And let's say the parent block - * stores data from index 200 and has length 400. If the key was seeked to - * position 90, then this block will be seeked to position 90. - * When seek(90) is called on this blockStream, then - * 1. chunkIndex will be set to 2 (as indices 80 - 120 reside in chunk[2]). - * 2. chunkStream[2] will be seeked to position 10 - * (= 90 - chunkOffset[2] (= 80)). - */ - @Override - public synchronized void seek(long pos) throws IOException { - if (!initialized) { - // Stream has not been initialized yet. Save the position so that it - // can be seeked when the stream is initialized. 
- blockPosition = pos; - return; - } - - checkOpen(); - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException( - "EOF encountered at pos: " + pos + " for block: " + blockID); - } - - if (chunkIndex >= chunkStreams.size()) { - chunkIndex = Arrays.binarySearch(chunkOffsets, pos); - } else if (pos < chunkOffsets[chunkIndex]) { - chunkIndex = - Arrays.binarySearch(chunkOffsets, 0, chunkIndex, pos); - } else if (pos >= chunkOffsets[chunkIndex] + chunkStreams - .get(chunkIndex).getLength()) { - chunkIndex = Arrays.binarySearch(chunkOffsets, - chunkIndex + 1, chunkStreams.size(), pos); - } - if (chunkIndex < 0) { - // Binary search returns -insertionPoint - 1 if element is not present - // in the array. insertionPoint is the point at which element would be - // inserted in the sorted array. We need to adjust the chunkIndex - // accordingly so that chunkIndex = insertionPoint - 1 - chunkIndex = -chunkIndex - 2; - } - - // Reset the previous chunkStream's position - chunkStreams.get(chunkIndexOfPrevPosition).resetPosition(); - - // seek to the proper offset in the ChunkInputStream - chunkStreams.get(chunkIndex).seek(pos - chunkOffsets[chunkIndex]); - chunkIndexOfPrevPosition = chunkIndex; - } - - @Override - public synchronized long getPos() throws IOException { - if (length == 0) { - return 0; - } - - if (!initialized) { - // The stream is not initialized yet. Return the blockPosition - return blockPosition; - } else { - return chunkOffsets[chunkIndex] + chunkStreams.get(chunkIndex).getPos(); - } - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public synchronized void close() { - if (xceiverClientManager != null && xceiverClient != null) { - xceiverClientManager.releaseClient(xceiverClient, false); - xceiverClientManager = null; - xceiverClient = null; - } - } - - public synchronized void resetPosition() { - this.blockPosition = 0; - } - - /** - * Checks if the stream is open. If not, throw an exception. - * - * @throws IOException if stream is closed - */ - protected synchronized void checkOpen() throws IOException { - if (xceiverClient == null) { - throw new IOException("BlockInputStream has been closed."); - } - } - - public BlockID getBlockID() { - return blockID; - } - - public long getLength() { - return length; - } - - @VisibleForTesting - synchronized int getChunkIndex() { - return chunkIndex; - } - - @VisibleForTesting - synchronized long getBlockPosition() { - return blockPosition; - } - - @VisibleForTesting - synchronized List getChunkStreams() { - return chunkStreams; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java deleted file mode 100644 index b15ca3f6c85..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ /dev/null @@ -1,640 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.client.BlockID; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls - .putBlockAsync; -import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls - .writeChunkAsync; - -/** - * An {@link OutputStream} used by the REST service in combination with the - * SCMClient to write the value of a key to a sequence - * of container chunks. Writes are buffered locally and periodically written to - * the container as a new chunk. In order to preserve the semantics that - * replacement of a pre-existing key is atomic, each instance of the stream has - * an internal unique identifier. This unique identifier and a monotonically - * increasing chunk index form a composite key that is used as the chunk name. - * After all data is written, a putKey call creates or updates the corresponding - * container key, and this call includes the full list of chunks that make up - * the key data. The list of chunks is updated all at once. Therefore, a - * concurrent reader never can see an intermediate state in which different - * chunks of data from different versions of the key data are interleaved. - * This class encapsulates all state management for buffering and writing - * through to the container. 
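The buffering and naming scheme this paragraph describes can be reduced to a short self-contained sketch. Everything in it is illustrative: the class name, the exact chunk-name composition and the in-memory stand-ins for writeChunk and putBlock are assumptions, while the real implementation with buffer pools, checksums and async replies follows below.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

public final class ChunkedWriteSketch {

  private final String streamId = UUID.randomUUID().toString();
  private final List<String> chunkNames = new ArrayList<>();
  private final ByteBuffer current;
  private int chunkIndex;

  ChunkedWriteSketch(int chunkSize) {
    this.current = ByteBuffer.allocate(chunkSize);
  }

  // Buffer bytes locally; whenever a full chunk accumulates, "write" it out
  // under a name built from the stream id and a monotonically increasing index.
  void write(byte[] data, int off, int len) {
    while (len > 0) {
      int n = Math.min(len, current.remaining());
      current.put(data, off, n);
      off += n;
      len -= n;
      if (!current.hasRemaining()) {
        flushChunk();
      }
    }
  }

  private void flushChunk() {
    String chunkName = streamId + "_chunk_" + (++chunkIndex);
    chunkNames.add(chunkName);   // stand-in for writeChunkAsync to the datanodes
    current.clear();
  }

  // Stand-in for the final putBlock call: the complete chunk list is committed
  // in one shot, so a reader never sees chunks from two versions interleaved.
  List<String> commit() {
    if (current.position() > 0) {
      flushChunk();
    }
    return new ArrayList<>(chunkNames);
  }
}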
- */ -public class BlockOutputStream extends OutputStream { - public static final Logger LOG = - LoggerFactory.getLogger(BlockOutputStream.class); - - private volatile BlockID blockID; - - private final BlockData.Builder containerBlockData; - private XceiverClientManager xceiverClientManager; - private XceiverClientSpi xceiverClient; - private final ContainerProtos.ChecksumType checksumType; - private final int bytesPerChecksum; - private int chunkIndex; - private int chunkSize; - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private BufferPool bufferPool; - // The IOException will be set by response handling thread in case there is an - // exception received in the response. If the exception is set, the next - // request will fail upfront. - private AtomicReference ioException; - private ExecutorService responseExecutor; - - // the effective length of data flushed so far - private long totalDataFlushedLength; - - // effective data write attempted so far for the block - private long writtenDataLength; - - // List containing buffers for which the putBlock call will - // update the length in the datanodes. This list will just maintain - // references to the buffers in the BufferPool which will be cleared - // when the watchForCommit acknowledges a putBlock logIndex has been - // committed on all datanodes. This list will be a place holder for buffers - // which got written between successive putBlock calls. - private List bufferList; - - // This object will maintain the commitIndexes and byteBufferList in order - // Also, corresponding to the logIndex, the corresponding list of buffers will - // be released from the buffer pool. - private final CommitWatcher commitWatcher; - - private List failedServers; - - /** - * Creates a new BlockOutputStream. 
- * - * @param blockID block ID - * @param xceiverClientManager client manager that controls client - * @param pipeline pipeline where block will be written - * @param chunkSize chunk size - * @param bufferPool pool of buffers - * @param streamBufferFlushSize flush size - * @param streamBufferMaxSize max size of the currentBuffer - * @param watchTimeout watch timeout - * @param checksumType checksum type - * @param bytesPerChecksum Bytes per checksum - */ - @SuppressWarnings("parameternumber") - public BlockOutputStream(BlockID blockID, - XceiverClientManager xceiverClientManager, Pipeline pipeline, - int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize, - long watchTimeout, BufferPool bufferPool, ChecksumType checksumType, - int bytesPerChecksum) - throws IOException { - this.blockID = blockID; - this.chunkSize = chunkSize; - KeyValue keyValue = - KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build(); - this.containerBlockData = - BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .addMetadata(keyValue); - this.xceiverClientManager = xceiverClientManager; - this.xceiverClient = xceiverClientManager.acquireClient(pipeline); - this.chunkIndex = 0; - this.streamBufferFlushSize = streamBufferFlushSize; - this.streamBufferMaxSize = streamBufferMaxSize; - this.bufferPool = bufferPool; - this.checksumType = checksumType; - this.bytesPerChecksum = bytesPerChecksum; - - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); - commitWatcher = new CommitWatcher(bufferPool, xceiverClient, watchTimeout); - bufferList = null; - totalDataFlushedLength = 0; - writtenDataLength = 0; - failedServers = new ArrayList<>(0); - ioException = new AtomicReference<>(null); - } - - - public BlockID getBlockID() { - return blockID; - } - - public long getTotalAckDataLength() { - return commitWatcher.getTotalAckDataLength(); - } - - public long getWrittenDataLength() { - return writtenDataLength; - } - - public List getFailedServers() { - return failedServers; - } - - @VisibleForTesting - public XceiverClientSpi getXceiverClient() { - return xceiverClient; - } - - @VisibleForTesting - public long getTotalDataFlushedLength() { - return totalDataFlushedLength; - } - - @VisibleForTesting - public BufferPool getBufferPool() { - return bufferPool; - } - - public IOException getIoException() { - return ioException.get(); - } - - @VisibleForTesting - public Map> getCommitIndex2flushedDataMap() { - return commitWatcher.getCommitIndex2flushedDataMap(); - } - - @Override - public void write(int b) throws IOException { - checkOpen(); - byte[] buf = new byte[1]; - buf[0] = (byte) b; - write(buf, 0, 1); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - checkOpen(); - if (b == null) { - throw new NullPointerException(); - } - if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length) - || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return; - } - - while (len > 0) { - int writeLen; - // Allocate a buffer if needed. The buffer will be allocated only - // once as needed and will be reused again for multiple blockOutputStream - // entries. 
- ByteBuffer currentBuffer = bufferPool.allocateBufferIfNeeded(); - int pos = currentBuffer.position(); - writeLen = - Math.min(chunkSize - pos % chunkSize, len); - currentBuffer.put(b, off, writeLen); - if (!currentBuffer.hasRemaining()) { - writeChunk(currentBuffer); - } - off += writeLen; - len -= writeLen; - writtenDataLength += writeLen; - if (shouldFlush()) { - updateFlushLength(); - executePutBlock(); - } - // Data in the bufferPool can not exceed streamBufferMaxSize - if (isBufferPoolFull()) { - handleFullBuffer(); - } - } - } - - private boolean shouldFlush() { - return bufferPool.computeBufferData() % streamBufferFlushSize == 0; - } - - private void updateFlushLength() { - totalDataFlushedLength += writtenDataLength - totalDataFlushedLength; - } - - private boolean isBufferPoolFull() { - return bufferPool.computeBufferData() == streamBufferMaxSize; - } - /** - * Will be called on the retryPath in case closedContainerException/ - * TimeoutException. - * @param len length of data to write - * @throws IOException if error occurred - */ - - // In this case, the data is already cached in the currentBuffer. - public void writeOnRetry(long len) throws IOException { - if (len == 0) { - return; - } - int count = 0; - Preconditions.checkArgument(len <= streamBufferMaxSize); - while (len > 0) { - long writeLen; - writeLen = Math.min(chunkSize, len); - if (writeLen == chunkSize) { - writeChunk(bufferPool.getBuffer(count)); - } - len -= writeLen; - count++; - writtenDataLength += writeLen; - // we should not call isBufferFull/shouldFlush here. - // The buffer might already be full as whole data is already cached in - // the buffer. We should just validate - // if we wrote data of size streamBufferMaxSize/streamBufferFlushSize to - // call for handling full buffer/flush buffer condition. - if (writtenDataLength % streamBufferFlushSize == 0) { - // reset the position to zero as now we will be reading the - // next buffer in the list - updateFlushLength(); - executePutBlock(); - } - if (writtenDataLength == streamBufferMaxSize) { - handleFullBuffer(); - } - } - } - - /** - * This is a blocking call. It will wait for the flush till the commit index - * at the head of the commitIndex2flushedDataMap gets replicated to all or - * majority. - * @throws IOException - */ - private void handleFullBuffer() throws IOException { - try { - checkOpen(); - if (!commitWatcher.getFutureMap().isEmpty()) { - waitOnFlushFutures(); - } - } catch (InterruptedException | ExecutionException e) { - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } - watchForCommit(true); - } - - - // It may happen that once the exception is encountered , we still might - // have successfully flushed up to a certain index. Make sure the buffers - // only contain data which have not been sufficiently replicated - private void adjustBuffersOnException() { - commitWatcher.releaseBuffersOnException(); - } - - /** - * calls watchForCommit API of the Ratis Client. For Standalone client, - * it is a no op. - * @param bufferFull flag indicating whether bufferFull condition is hit or - * its called as part flush/close - * @return minimum commit index replicated to all nodes - * @throws IOException IOException in case watch gets timed out - */ - private void watchForCommit(boolean bufferFull) throws IOException { - checkOpen(); - try { - XceiverClientReply reply = bufferFull ? 
- commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex(); - if (reply != null) { - List dnList = reply.getDatanodes(); - if (!dnList.isEmpty()) { - Pipeline pipe = xceiverClient.getPipeline(); - - LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}", - blockID, pipe, dnList); - failedServers.addAll(dnList); - } - } - } catch (IOException ioe) { - setIoException(ioe); - throw getIoException(); - } - } - - private CompletableFuture executePutBlock() - throws IOException { - checkOpen(); - long flushPos = totalDataFlushedLength; - Preconditions.checkNotNull(bufferList); - List byteBufferList = bufferList; - bufferList = null; - Preconditions.checkNotNull(byteBufferList); - - CompletableFuture flushFuture; - try { - XceiverClientReply asyncReply = - putBlockAsync(xceiverClient, containerBlockData.build()); - CompletableFuture future = - asyncReply.getResponse(); - flushFuture = future.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - throw new CompletionException(sce); - } - // if the ioException is not set, putBlock is successful - if (getIoException() == null) { - BlockID responseBlockID = BlockID.getFromProtobuf( - e.getPutBlock().getCommittedBlockLength().getBlockID()); - Preconditions.checkState(blockID.getContainerBlockID() - .equals(responseBlockID.getContainerBlockID())); - // updates the bcsId of the block - blockID = responseBlockID; - if (LOG.isDebugEnabled()) { - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " commitMap size " - + commitWatcher.getCommitInfoMapSize() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); - } - // for standalone protocol, logIndex will always be 0. - commitWatcher - .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList); - } - return e; - }, responseExecutor).exceptionally(e -> { - if (LOG.isDebugEnabled()) { - LOG.debug( - "putBlock failed for blockID " + blockID + " with exception " + e - .getLocalizedMessage()); - } - CompletionException ce = new CompletionException(e); - setIoException(ce); - throw ce; - }); - } catch (IOException | InterruptedException | ExecutionException e) { - throw new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - } - commitWatcher.getFutureMap().put(flushPos, flushFuture); - return flushFuture; - } - - @Override - public void flush() throws IOException { - if (xceiverClientManager != null && xceiverClient != null - && bufferPool != null && bufferPool.getSize() > 0) { - try { - handleFlush(); - } catch (InterruptedException | ExecutionException e) { - // just set the exception here as well in order to maintain sanctity of - // ioException field - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } - } - } - - - private void writeChunk(ByteBuffer buffer) - throws IOException { - // This data in the buffer will be pushed to datanode and a reference will - // be added to the bufferList. Once putBlock gets executed, this list will - // be marked null. Hence, during first writeChunk call after every putBlock - // call or during the first call to writeChunk here, the list will be null. 
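[Editorial note, not part of the deleted file: a condensed sketch of the handoff executePutBlock above performs. The buffers referenced since the last putBlock are detached from bufferList and parked under the log index returned for that putBlock, so they can be released once the index is acknowledged. The map and the fake log index below are illustrative stand-ins, not the Ratis client API.]

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

// Illustrative sketch: track which buffers each putBlock covered, keyed by log index.
public final class PutBlockHandoffSketch {
  private List<ByteBuffer> bufferList = new ArrayList<>();
  private final ConcurrentSkipListMap<Long, List<ByteBuffer>> commitIndex2flushedDataMap =
      new ConcurrentSkipListMap<>();
  private long nextLogIndex = 1; // stand-in for the index returned by the datanode

  void writeChunk(ByteBuffer chunk) {
    bufferList.add(chunk); // reference only; the data itself was already sent as a chunk
  }

  void executePutBlock() {
    List<ByteBuffer> flushed = bufferList;
    bufferList = new ArrayList<>();   // the original code sets the field to null instead
    long logIndex = nextLogIndex++;   // stand-in for reply.getLogIndex()
    commitIndex2flushedDataMap.put(logIndex, flushed);
  }

  public static void main(String[] args) {
    PutBlockHandoffSketch s = new PutBlockHandoffSketch();
    s.writeChunk(ByteBuffer.allocate(16));
    s.writeChunk(ByteBuffer.allocate(16));
    s.executePutBlock();
    System.out.println("buffers awaiting commit: " + s.commitIndex2flushedDataMap.keySet());
  }
}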
- - if (bufferList == null) { - bufferList = new ArrayList<>(); - } - bufferList.add(buffer); - // Please note : We are not flipping the slice when we write since - // the slices are pointing the currentBuffer start and end as needed for - // the chunk write. Also please note, Duplicate does not create a - // copy of data, it only creates metadata that points to the data - // stream. - ByteBuffer chunk = buffer.duplicate(); - chunk.position(0); - chunk.limit(buffer.position()); - writeChunkToContainer(chunk); - } - - private void handleFlush() - throws IOException, InterruptedException, ExecutionException { - checkOpen(); - // flush the last chunk data residing on the currentBuffer - if (totalDataFlushedLength < writtenDataLength) { - ByteBuffer currentBuffer = bufferPool.getCurrentBuffer(); - Preconditions.checkArgument(currentBuffer.position() > 0); - if (currentBuffer.position() != chunkSize) { - writeChunk(currentBuffer); - } - // This can be a partially filled chunk. Since we are flushing the buffer - // here, we just limit this buffer to the current position. So that next - // write will happen in new buffer - updateFlushLength(); - executePutBlock(); - } - waitOnFlushFutures(); - watchForCommit(false); - // just check again if the exception is hit while waiting for the - // futures to ensure flush has indeed succeeded - - // irrespective of whether the commitIndex2flushedDataMap is empty - // or not, ensure there is no exception set - checkOpen(); - } - - @Override - public void close() throws IOException { - if (xceiverClientManager != null && xceiverClient != null - && bufferPool != null && bufferPool.getSize() > 0) { - try { - handleFlush(); - } catch (InterruptedException | ExecutionException e) { - setIoException(e); - adjustBuffersOnException(); - throw getIoException(); - } finally { - cleanup(false); - } - // TODO: Turn the below buffer empty check on when Standalone pipeline - // is removed in the write path in tests - // Preconditions.checkArgument(buffer.position() == 0); - // bufferPool.checkBufferPoolEmpty(); - - } - } - - private void waitOnFlushFutures() - throws InterruptedException, ExecutionException { - CompletableFuture combinedFuture = CompletableFuture.allOf( - commitWatcher.getFutureMap().values().toArray( - new CompletableFuture[commitWatcher.getFutureMap().size()])); - // wait for all the transactions to complete - combinedFuture.get(); - } - - private void validateResponse( - ContainerProtos.ContainerCommandResponseProto responseProto) - throws IOException { - try { - // if the ioException is already set, it means a prev request has failed - // just throw the exception. 
The current operation will fail with the - // original error - IOException exception = getIoException(); - if (exception != null) { - throw exception; - } - ContainerProtocolCalls.validateContainerResponse(responseProto); - } catch (StorageContainerException sce) { - LOG.error("Unexpected Storage Container Exception: ", sce); - setIoException(sce); - throw sce; - } - } - - - private void setIoException(Exception e) { - if (getIoException() == null) { - IOException exception = new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - ioException.compareAndSet(null, exception); - } - } - - public void cleanup(boolean invalidateClient) { - if (xceiverClientManager != null) { - xceiverClientManager.releaseClient(xceiverClient, invalidateClient); - } - xceiverClientManager = null; - xceiverClient = null; - commitWatcher.cleanup(); - if (bufferList != null) { - bufferList.clear(); - } - bufferList = null; - responseExecutor.shutdown(); - } - - /** - * Checks if the stream is open or exception has occured. - * If not, throws an exception. - * - * @throws IOException if stream is closed - */ - private void checkOpen() throws IOException { - if (isClosed()) { - throw new IOException("BlockOutputStream has been closed."); - } else if (getIoException() != null) { - adjustBuffersOnException(); - throw getIoException(); - } - } - - public boolean isClosed() { - return xceiverClient == null; - } - - /** - * Writes buffered data as a new chunk to the container and saves chunk - * information to be used later in putKey call. - * - * @throws IOException if there is an I/O error while performing the call - * @throws OzoneChecksumException if there is an error while computing - * checksum - */ - private void writeChunkToContainer(ByteBuffer chunk) throws IOException { - int effectiveChunkSize = chunk.remaining(); - ByteString data = bufferPool.byteStringConversion().apply(chunk); - Checksum checksum = new Checksum(checksumType, bytesPerChecksum); - ChecksumData checksumData = checksum.computeChecksum(chunk); - ChunkInfo chunkInfo = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk_" + ++chunkIndex) - .setOffset(0) - .setLen(effectiveChunkSize) - .setChecksumData(checksumData.getProtoBufMessage()) - .build(); - - try { - XceiverClientReply asyncReply = - writeChunkAsync(xceiverClient, chunkInfo, blockID, data); - CompletableFuture future = - asyncReply.getResponse(); - future.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - future.completeExceptionally(sce); - } - return e; - }, responseExecutor).exceptionally(e -> { - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk failed " + chunkInfo.getChunkName() + " blockID " - + blockID + " with exception " + e.getLocalizedMessage()); - } - CompletionException ce = new CompletionException(e); - setIoException(ce); - throw ce; - }); - } catch (IOException | InterruptedException | ExecutionException e) { - throw new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - } - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk " + chunkInfo.getChunkName() + " blockID " + blockID - + " length " + effectiveChunkSize); - } - containerBlockData.addChunks(chunkInfo); - } - - @VisibleForTesting - public void setXceiverClient(XceiverClientSpi xceiverClient) { - this.xceiverClient = xceiverClient; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java deleted file mode 100644 index 6d534579c86..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.ByteStringConversion; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; - -/** - * This class creates and manages pool of n buffers. - */ -public class BufferPool { - - private List bufferList; - private int currentBufferIndex; - private final int bufferSize; - private final int capacity; - private final Function byteStringConversion; - - public BufferPool(int bufferSize, int capacity) { - this(bufferSize, capacity, - ByteStringConversion.createByteBufferConversion(null)); - } - - public BufferPool(int bufferSize, int capacity, - Function byteStringConversion){ - this.capacity = capacity; - this.bufferSize = bufferSize; - bufferList = new ArrayList<>(capacity); - currentBufferIndex = -1; - this.byteStringConversion = byteStringConversion; - } - - public Function byteStringConversion(){ - return byteStringConversion; - } - - public ByteBuffer getCurrentBuffer() { - return currentBufferIndex == -1 ? null : bufferList.get(currentBufferIndex); - } - - /** - * If the currentBufferIndex is less than the buffer size - 1, - * it means, the next buffer in the list has been freed up for - * rewriting. Reuse the next available buffer in such cases. - * - * In case, the currentBufferIndex == buffer.size and buffer size is still - * less than the capacity to be allocated, just allocate a buffer of size - * chunk size. - * - */ - public ByteBuffer allocateBufferIfNeeded() { - ByteBuffer buffer = getCurrentBuffer(); - if (buffer != null && buffer.hasRemaining()) { - return buffer; - } - if (currentBufferIndex < bufferList.size() - 1) { - buffer = getBuffer(currentBufferIndex + 1); - } else { - buffer = ByteBuffer.allocate(bufferSize); - bufferList.add(buffer); - } - Preconditions.checkArgument(bufferList.size() <= capacity); - currentBufferIndex++; - // TODO: Turn the below precondition check on when Standalone pipeline - // is removed in the write path in tests - // Preconditions.checkArgument(buffer.position() == 0); - return buffer; - } - - public void releaseBuffer(ByteBuffer byteBuffer) { - // always remove from head of the list and append at last - ByteBuffer buffer = bufferList.remove(0); - // Ensure the buffer to be removed is always at the head of the list. 
- Preconditions.checkArgument(buffer.equals(byteBuffer)); - buffer.clear(); - bufferList.add(buffer); - Preconditions.checkArgument(currentBufferIndex >= 0); - currentBufferIndex--; - } - - public void clearBufferPool() { - bufferList.clear(); - currentBufferIndex = -1; - } - - public void checkBufferPoolEmpty() { - Preconditions.checkArgument(computeBufferData() == 0); - } - - public long computeBufferData() { - return bufferList.stream().mapToInt(value -> value.position()) - .sum(); - } - - public int getSize() { - return bufferList.size(); - } - - public ByteBuffer getBuffer(int index) { - return bufferList.get(index); - } - - int getCurrentBufferIndex() { - return currentBufferIndex; - } - -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java deleted file mode 100644 index f94d2d87340..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java +++ /dev/null @@ -1,544 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * An {@link InputStream} called from BlockInputStream to read a chunk from the - * container. Each chunk may contain multiple underlying {@link ByteBuffer} - * instances. 
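[Editorial note, not part of the deleted file: a minimal sketch of how a caller (BlockInputStream in this codebase) drives the class described above: seek to an offset within the chunk, then issue buffered reads. The chunk metadata and client are assumed to be already set up; the constructor is package-private, so the sketch is assumed to live in org.apache.hadoop.hdds.scm.storage, and it is not runnable against a live datanode.]

package org.apache.hadoop.hdds.scm.storage;

import java.io.IOException;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;

// Illustrative sketch, assuming chunkInfo, blockId and client are already available.
final class ChunkReadSketch {
  static byte[] readRange(ChunkInfo chunkInfo, BlockID blockId,
      XceiverClientSpi client, long offset, int len) throws IOException {
    ChunkInputStream in = new ChunkInputStream(chunkInfo, blockId, client, true);
    try {
      in.seek(offset);                 // may only record the position; data is read lazily
      byte[] buf = new byte[len];
      int n = 0;
      while (n < len) {
        int r = in.read(buf, n, len - n);
        if (r < 0) {
          break;                       // EOF within the chunk
        }
        n += r;
      }
      return buf;
    } finally {
      in.close();
    }
  }
}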
- */ -public class ChunkInputStream extends InputStream implements Seekable { - - private ChunkInfo chunkInfo; - private final long length; - private final BlockID blockID; - private XceiverClientSpi xceiverClient; - private boolean verifyChecksum; - private boolean allocated = false; - - // Buffer to store the chunk data read from the DN container - private List buffers; - - // Index of the buffers corresponding to the current position of the buffers - private int bufferIndex; - - // The offset of the current data residing in the buffers w.r.t the start - // of chunk data - private long bufferOffset; - - // The number of bytes of chunk data residing in the buffers currently - private long bufferLength; - - // Position of the ChunkInputStream is maintained by this variable (if a - // seek is performed. This position is w.r.t to the chunk only and not the - // block or key. This variable is set only if either the buffers are not - // yet allocated or the if the allocated buffers do not cover the seeked - // position. Once the chunk is read, this variable is reset. - private long chunkPosition = -1; - - private static final int EOF = -1; - - ChunkInputStream(ChunkInfo chunkInfo, BlockID blockId, - XceiverClientSpi xceiverClient, boolean verifyChecksum) { - this.chunkInfo = chunkInfo; - this.length = chunkInfo.getLen(); - this.blockID = blockId; - this.xceiverClient = xceiverClient; - this.verifyChecksum = verifyChecksum; - } - - public synchronized long getRemaining() throws IOException { - return length - getPos(); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read() throws IOException { - checkOpen(); - int available = prepareRead(1); - int dataout = EOF; - - if (available == EOF) { - // There is no more data in the chunk stream. The buffers should have - // been released by now - Preconditions.checkState(buffers == null); - } else { - dataout = Byte.toUnsignedInt(buffers.get(bufferIndex).get()); - } - - if (chunkStreamEOF()) { - // consumer might use getPos to determine EOF, - // so release buffers when serving the last byte of data - releaseBuffers(); - } - - return dataout; - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - // According to the JavaDocs for InputStream, it is recommended that - // subclasses provide an override of bulk read if possible for performance - // reasons. In addition to performance, we need to do it for correctness - // reasons. The Ozone REST service uses PipedInputStream and - // PipedOutputStream to relay HTTP response data between a Jersey thread and - // a Netty thread. It turns out that PipedInputStream/PipedOutputStream - // have a subtle dependency (bug?) on the wrapped stream providing separate - // implementations of single-byte read and bulk read. Without this, get key - // responses might close the connection before writing all of the bytes - // advertised in the Content-Length. - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - checkOpen(); - int total = 0; - while (len > 0) { - int available = prepareRead(len); - if (available == EOF) { - // There is no more data in the chunk stream. The buffers should have - // been released by now - Preconditions.checkState(buffers == null); - return total != 0 ? 
total : EOF; - } - buffers.get(bufferIndex).get(b, off + total, available); - len -= available; - total += available; - } - - if (chunkStreamEOF()) { - // smart consumers determine EOF by calling getPos() - // so we release buffers when serving the final bytes of data - releaseBuffers(); - } - - return total; - } - - /** - * Seeks the ChunkInputStream to the specified position. This is done by - * updating the chunkPosition to the seeked position in case the buffers - * are not allocated or buffers do not contain the data corresponding to - * the seeked position (determined by buffersHavePosition()). Otherwise, - * the buffers position is updated to the seeked position. - */ - @Override - public synchronized void seek(long pos) throws IOException { - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException("EOF encountered at pos: " + pos + " for chunk: " - + chunkInfo.getChunkName()); - } - - if (buffersHavePosition(pos)) { - // The bufferPosition is w.r.t the current chunk. - // Adjust the bufferIndex and position to the seeked position. - adjustBufferPosition(pos - bufferOffset); - } else { - chunkPosition = pos; - } - } - - @Override - public synchronized long getPos() throws IOException { - if (chunkPosition >= 0) { - return chunkPosition; - } - if (chunkStreamEOF()) { - return length; - } - if (buffersHaveData()) { - return bufferOffset + buffers.get(bufferIndex).position(); - } - if (buffersAllocated()) { - return bufferOffset + bufferLength; - } - return 0; - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public synchronized void close() { - if (xceiverClient != null) { - xceiverClient = null; - } - } - - /** - * Checks if the stream is open. If not, throw an exception. - * - * @throws IOException if stream is closed - */ - protected synchronized void checkOpen() throws IOException { - if (xceiverClient == null) { - throw new IOException("BlockInputStream has been closed."); - } - } - - /** - * Prepares to read by advancing through buffers or allocating new buffers, - * as needed until it finds data to return, or encounters EOF. - * @param len desired lenght of data to read - * @return length of data available to read, possibly less than desired length - */ - private synchronized int prepareRead(int len) throws IOException { - for (;;) { - if (chunkPosition >= 0) { - if (buffersHavePosition(chunkPosition)) { - // The current buffers have the seeked position. Adjust the buffer - // index and position to point to the chunkPosition. - adjustBufferPosition(chunkPosition - bufferOffset); - } else { - // Read a required chunk data to fill the buffers with seeked - // position data - readChunkFromContainer(len); - } - } - if (buffersHaveData()) { - // Data is available from buffers - ByteBuffer bb = buffers.get(bufferIndex); - return len > bb.remaining() ? bb.remaining() : len; - } else if (dataRemainingInChunk()) { - // There is more data in the chunk stream which has not - // been read into the buffers yet. - readChunkFromContainer(len); - } else { - // All available input from this chunk stream has been consumed. - return EOF; - } - } - } - - /** - * Reads full or partial Chunk from DN Container based on the current - * position of the ChunkInputStream, the number of bytes of data to read - * and the checksum boundaries. 
- * If successful, then the read data in saved in the buffers so that - * subsequent read calls can utilize it. - * @param len number of bytes of data to be read - * @throws IOException if there is an I/O error while performing the call - * to Datanode - */ - private synchronized void readChunkFromContainer(int len) throws IOException { - - // index of first byte to be read from the chunk - long startByteIndex; - if (chunkPosition >= 0) { - // If seek operation was called to advance the buffer position, the - // chunk should be read from that position onwards. - startByteIndex = chunkPosition; - } else { - // Start reading the chunk from the last chunkPosition onwards. - startByteIndex = bufferOffset + bufferLength; - } - - if (verifyChecksum) { - // Update the bufferOffset and bufferLength as per the checksum - // boundary requirement. - computeChecksumBoundaries(startByteIndex, len); - } else { - // Read from the startByteIndex - bufferOffset = startByteIndex; - bufferLength = len; - } - - // Adjust the chunkInfo so that only the required bytes are read from - // the chunk. - final ChunkInfo adjustedChunkInfo = ChunkInfo.newBuilder(chunkInfo) - .setOffset(bufferOffset) - .setLen(bufferLength) - .build(); - - ByteString byteString = readChunk(adjustedChunkInfo); - - buffers = byteString.asReadOnlyByteBufferList(); - bufferIndex = 0; - allocated = true; - - // If the stream was seeked to position before, then the buffer - // position should be adjusted as the reads happen at checksum boundaries. - // The buffers position might need to be adjusted for the following - // scenarios: - // 1. Stream was seeked to a position before the chunk was read - // 2. Chunk was read from index < the current position to account for - // checksum boundaries. - adjustBufferPosition(startByteIndex - bufferOffset); - } - - /** - * Send RPC call to get the chunk from the container. - */ - @VisibleForTesting - protected ByteString readChunk(ChunkInfo readChunkInfo) throws IOException { - ReadChunkResponseProto readChunkResponse; - - try { - List validators = - ContainerProtocolCalls.getValidatorList(); - validators.add(validator); - - readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient, - readChunkInfo, blockID, validators); - - } catch (IOException e) { - if (e instanceof StorageContainerException) { - throw e; - } - throw new IOException("Unexpected OzoneException: " + e.toString(), e); - } - - return readChunkResponse.getData(); - } - - private CheckedBiFunction validator = - (request, response) -> { - final ChunkInfo reqChunkInfo = - request.getReadChunk().getChunkData(); - - ReadChunkResponseProto readChunkResponse = response.getReadChunk(); - ByteString byteString = readChunkResponse.getData(); - - if (byteString.size() != reqChunkInfo.getLen()) { - // Bytes read from chunk should be equal to chunk size. - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s len=%d bytesRead=%d", - reqChunkInfo.getChunkName(), reqChunkInfo.getLen(), - byteString.size())); - } - - if (verifyChecksum) { - ChecksumData checksumData = ChecksumData.getFromProtoBuf( - chunkInfo.getChecksumData()); - - // ChecksumData stores checksum for each 'numBytesPerChecksum' - // number of bytes in a list. 
Compute the index of the first - // checksum to match with the read data - - int checkumStartIndex = (int) (reqChunkInfo.getOffset() / - checksumData.getBytesPerChecksum()); - Checksum.verifyChecksum( - byteString, checksumData, checkumStartIndex); - } - }; - - /** - * Return the offset and length of bytes that need to be read from the - * chunk file to cover the checksum boundaries covering the actual start and - * end of the chunk index to be read. - * For example, lets say the client is reading from index 120 to 450 in the - * chunk. And let's say checksum is stored for every 100 bytes in the chunk - * i.e. the first checksum is for bytes from index 0 to 99, the next for - * bytes from index 100 to 199 and so on. To verify bytes from 120 to 450, - * we would need to read from bytes 100 to 499 so that checksum - * verification can be done. - * - * @param startByteIndex the first byte index to be read by client - * @param dataLen number of bytes to be read from the chunk - */ - private void computeChecksumBoundaries(long startByteIndex, int dataLen) { - - int bytesPerChecksum = chunkInfo.getChecksumData().getBytesPerChecksum(); - // index of the last byte to be read from chunk, inclusively. - final long endByteIndex = startByteIndex + dataLen - 1; - - bufferOffset = (startByteIndex / bytesPerChecksum) - * bytesPerChecksum; // inclusive - final long endIndex = ((endByteIndex / bytesPerChecksum) + 1) - * bytesPerChecksum; // exclusive - bufferLength = Math.min(endIndex, length) - bufferOffset; - } - - /** - * Adjust the buffers position to account for seeked position and/ or checksum - * boundary reads. - * @param bufferPosition the position to which the buffers must be advanced - */ - private void adjustBufferPosition(long bufferPosition) { - // The bufferPosition is w.r.t the current chunk. - // Adjust the bufferIndex and position to the seeked chunkPosition. - long tempOffest = 0; - for (int i = 0; i < buffers.size(); i++) { - if (bufferPosition - tempOffest >= buffers.get(i).capacity()) { - tempOffest += buffers.get(i).capacity(); - } else { - bufferIndex = i; - break; - } - } - buffers.get(bufferIndex).position((int) (bufferPosition - tempOffest)); - - // Reset the chunkPosition as chunk stream has been initialized i.e. the - // buffers have been allocated. - resetPosition(); - } - - /** - * Check if the buffers have been allocated data and false otherwise. - */ - private boolean buffersAllocated() { - return buffers != null && !buffers.isEmpty(); - } - - /** - * Check if the buffers have any data remaining between the current - * position and the limit. - */ - private boolean buffersHaveData() { - boolean hasData = false; - - if (buffersAllocated()) { - while (bufferIndex < (buffers.size())) { - if (buffers.get(bufferIndex).hasRemaining()) { - // current buffer has data - hasData = true; - break; - } else { - if (buffersRemaining()) { - // move to next available buffer - ++bufferIndex; - Preconditions.checkState(bufferIndex < buffers.size()); - } else { - // no more buffers remaining - break; - } - } - } - } - - return hasData; - } - - private boolean buffersRemaining() { - return (bufferIndex < (buffers.size() - 1)); - } - - /** - * Check if curernt buffers have the data corresponding to the input position. 
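[Editorial note, not part of the deleted file: to make the boundary arithmetic in computeChecksumBoundaries above concrete, here is a self-contained sketch reproducing its alignment math for the example given in its javadoc (bytes 120 to 450, a checksum every 100 bytes). The chunk length of 600 is an assumed value for the illustration.]

// Illustrative only: same alignment math as computeChecksumBoundaries above.
public final class ChecksumBoundaryExample {
  public static void main(String[] args) {
    long chunkLength = 600;        // assumed total chunk length
    int bytesPerChecksum = 100;
    long startByteIndex = 120;     // first byte the client asked for
    int dataLen = 331;             // bytes 120..450 inclusive

    long endByteIndex = startByteIndex + dataLen - 1;                               // 450
    long bufferOffset = (startByteIndex / bytesPerChecksum) * bytesPerChecksum;     // 100
    long endIndex = ((endByteIndex / bytesPerChecksum) + 1) * bytesPerChecksum;     // 500
    long bufferLength = Math.min(endIndex, chunkLength) - bufferOffset;             // 400

    System.out.println("read chunk bytes " + bufferOffset + " .. "
        + (bufferOffset + bufferLength - 1));   // prints 100 .. 499, as in the javadoc
  }
}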
- */ - private boolean buffersHavePosition(long pos) { - // Check if buffers have been allocated - if (buffersAllocated()) { - // Check if the current buffers cover the input position - return pos >= bufferOffset && - pos < bufferOffset + bufferLength; - } - return false; - } - - /** - * Check if there is more data in the chunk which has not yet been read - * into the buffers. - */ - private boolean dataRemainingInChunk() { - long bufferPos; - if (chunkPosition >= 0) { - bufferPos = chunkPosition; - } else { - bufferPos = bufferOffset + bufferLength; - } - - return bufferPos < length; - } - - /** - * Check if end of chunkStream has been reached. - */ - private boolean chunkStreamEOF() { - if (!allocated) { - // Chunk data has not been read yet - return false; - } - - if (buffersHaveData() || dataRemainingInChunk()) { - return false; - } else { - Preconditions.checkState(bufferOffset + bufferLength == length, - "EOF detected, but not at the last byte of the chunk"); - return true; - } - } - - /** - * If EOF is reached, release the buffers. - */ - private void releaseBuffers() { - buffers = null; - bufferIndex = 0; - } - - /** - * Reset the chunkPosition once the buffers are allocated. - */ - void resetPosition() { - this.chunkPosition = -1; - } - - String getChunkName() { - return chunkInfo.getChunkName(); - } - - protected long getLength() { - return length; - } - - @VisibleForTesting - protected long getChunkPosition() { - return chunkPosition; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java deleted file mode 100644 index 1d9d55bfbfb..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This class maintains the map of the commitIndexes to be watched for - * successful replication in the datanodes in a given pipeline. It also releases - * the buffers associated with the user data back to {@Link BufferPool} once - * minimum replication criteria is achieved during an ozone key write. 
- */ -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.ExecutionException; - -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.stream.Collectors; - -/** - * This class executes watchForCommit on ratis pipeline and releases - * buffers once data successfully gets replicated. - */ -public class CommitWatcher { - - private static final Logger LOG = - LoggerFactory.getLogger(CommitWatcher.class); - - // A reference to the pool of buffers holding the data - private BufferPool bufferPool; - - // The map should maintain the keys (logIndexes) in order so that while - // removing we always end up updating incremented data flushed length. - // Also, corresponding to the logIndex, the corresponding list of buffers will - // be released from the buffer pool. - private ConcurrentSkipListMap> - commitIndex2flushedDataMap; - - // future Map to hold up all putBlock futures - private ConcurrentHashMap> - futureMap; - - private XceiverClientSpi xceiverClient; - - private final long watchTimeout; - - // total data which has been successfully flushed and acknowledged - // by all servers - private long totalAckDataLength; - - public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient, - long watchTimeout) { - this.bufferPool = bufferPool; - this.xceiverClient = xceiverClient; - this.watchTimeout = watchTimeout; - commitIndex2flushedDataMap = new ConcurrentSkipListMap<>(); - totalAckDataLength = 0; - futureMap = new ConcurrentHashMap<>(); - } - - /** - * just update the totalAckDataLength. In case of failure, - * we will read the data starting from totalAckDataLength. - */ - private long releaseBuffers(List indexes) { - Preconditions.checkArgument(!commitIndex2flushedDataMap.isEmpty()); - for (long index : indexes) { - Preconditions.checkState(commitIndex2flushedDataMap.containsKey(index)); - List buffers = commitIndex2flushedDataMap.remove(index); - long length = buffers.stream().mapToLong(value -> { - int pos = value.position(); - return pos; - }).sum(); - totalAckDataLength += length; - // clear the future object from the future Map - Preconditions.checkNotNull(futureMap.remove(totalAckDataLength)); - for (ByteBuffer byteBuffer : buffers) { - bufferPool.releaseBuffer(byteBuffer); - } - } - return totalAckDataLength; - } - - public void updateCommitInfoMap(long index, List byteBufferList) { - commitIndex2flushedDataMap - .put(index, byteBufferList); - } - - int getCommitInfoMapSize() { - return commitIndex2flushedDataMap.size(); - } - - /** - * Calls watch for commit for the first index in commitIndex2flushedDataMap to - * the Ratis client. 
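[Editorial note, not part of the deleted file: a condensed, self-contained sketch of the bookkeeping the surrounding methods implement. Pending log indexes map to the buffers they cover; the watcher waits on either the lowest pending index (buffer-full case) or the highest (flush/close case); and every entry at or below the acknowledged index is released. The acknowledged index below is a hard-coded stand-in for the reply from the Ratis client.]

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentSkipListMap;

// Illustrative sketch of the commit-index bookkeeping, not the Ratis client API.
public final class CommitWatchSketch {
  public static void main(String[] args) {
    ConcurrentSkipListMap<Long, List<ByteBuffer>> pending = new ConcurrentSkipListMap<>();
    pending.put(7L, Arrays.asList(ByteBuffer.allocate(8)));
    pending.put(9L, Arrays.asList(ByteBuffer.allocate(8)));
    pending.put(12L, Arrays.asList(ByteBuffer.allocate(8)));

    long firstIndex = pending.firstKey();   // 7  -> watched when the buffer pool is full
    long lastIndex = pending.lastKey();     // 12 -> watched on flush/close

    long acknowledgedIndex = 9L;            // stand-in for reply.getLogIndex()
    // Release everything replicated up to and including the acknowledged index.
    pending.headMap(acknowledgedIndex, true).clear();

    System.out.println("watched first=" + firstIndex + " last=" + lastIndex
        + ", still pending=" + pending.keySet());  // prints [12]
  }
}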
- * @return reply reply from raft client - * @throws IOException in case watchForCommit fails - */ - public XceiverClientReply watchOnFirstIndex() throws IOException { - if (!commitIndex2flushedDataMap.isEmpty()) { - // wait for the first commit index in the commitIndex2flushedDataMap - // to get committed to all or majority of nodes in case timeout - // happens. - long index = - commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).min() - .getAsLong(); - if (LOG.isDebugEnabled()) { - LOG.debug("waiting for first index " + index + " to catch up"); - } - return watchForCommit(index); - } else { - return null; - } - } - - /** - * Calls watch for commit for the first index in commitIndex2flushedDataMap to - * the Ratis client. - * @return reply reply from raft client - * @throws IOException in case watchForCommit fails - */ - public XceiverClientReply watchOnLastIndex() - throws IOException { - if (!commitIndex2flushedDataMap.isEmpty()) { - // wait for the commit index in the commitIndex2flushedDataMap - // to get committed to all or majority of nodes in case timeout - // happens. - long index = - commitIndex2flushedDataMap.keySet().stream().mapToLong(v -> v).max() - .getAsLong(); - if (LOG.isDebugEnabled()) { - LOG.debug("waiting for last flush Index " + index + " to catch up"); - } - return watchForCommit(index); - } else { - return null; - } - } - - - private void adjustBuffers(long commitIndex) { - List keyList = commitIndex2flushedDataMap.keySet().stream() - .filter(p -> p <= commitIndex).collect(Collectors.toList()); - if (keyList.isEmpty()) { - return; - } else { - releaseBuffers(keyList); - } - } - - // It may happen that once the exception is encountered , we still might - // have successfully flushed up to a certain index. Make sure the buffers - // only contain data which have not been sufficiently replicated - void releaseBuffersOnException() { - adjustBuffers(xceiverClient.getReplicatedMinCommitIndex()); - } - - - /** - * calls watchForCommit API of the Ratis Client. For Standalone client, - * it is a no op. 
- * @param commitIndex log index to watch for - * @return minimum commit index replicated to all nodes - * @throws IOException IOException in case watch gets timed out - */ - public XceiverClientReply watchForCommit(long commitIndex) - throws IOException { - long index; - try { - XceiverClientReply reply = - xceiverClient.watchForCommit(commitIndex, watchTimeout); - if (reply == null) { - index = 0; - } else { - index = reply.getLogIndex(); - } - adjustBuffers(index); - return reply; - } catch (TimeoutException | InterruptedException | ExecutionException e) { - LOG.warn("watchForCommit failed for index " + commitIndex, e); - IOException ioException = new IOException( - "Unexpected Storage Container Exception: " + e.toString(), e); - releaseBuffersOnException(); - throw ioException; - } - } - - @VisibleForTesting - public ConcurrentSkipListMap> getCommitIndex2flushedDataMap() { - return commitIndex2flushedDataMap; - } - - public ConcurrentHashMap> getFutureMap() { - return futureMap; - } - - public long getTotalAckDataLength() { - return totalAckDataLength; - } - - public void cleanup() { - if (commitIndex2flushedDataMap != null) { - commitIndex2flushedDataMap.clear(); - } - if (futureMap != null) { - futureMap.clear(); - } - commitIndex2flushedDataMap = null; - } -} diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index 6e7ce948784..00000000000 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -/** - * Low level IO streams to upload/download chunks from container service. - */ \ No newline at end of file diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java deleted file mode 100644 index 042bfd94174..00000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import com.google.common.primitives.Bytes; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.security.token.Token; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.EOFException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; - -import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData; - -/** - * Tests for {@link BlockInputStream}'s functionality. - */ -public class TestBlockInputStream { - - private static final int CHUNK_SIZE = 100; - private static Checksum checksum; - - private BlockInputStream blockStream; - private byte[] blockData; - private int blockSize; - private List chunks; - private Map chunkDataMap; - - @Before - public void setup() throws Exception { - BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); - checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); - createChunkList(5); - - blockStream = new DummyBlockInputStream(blockID, blockSize, null, null, - false, null); - } - - /** - * Create a mock list of chunks. The first n-1 chunks of length CHUNK_SIZE - * and the last chunk with length CHUNK_SIZE/2. - */ - private void createChunkList(int numChunks) - throws Exception { - - chunks = new ArrayList<>(numChunks); - chunkDataMap = new HashMap<>(); - blockData = new byte[0]; - int i, chunkLen; - byte[] byteData; - String chunkName; - - for (i = 0; i < numChunks; i++) { - chunkName = "chunk-" + i; - chunkLen = CHUNK_SIZE; - if (i == numChunks - 1) { - chunkLen = CHUNK_SIZE / 2; - } - byteData = generateRandomData(chunkLen); - ChunkInfo chunkInfo = ChunkInfo.newBuilder() - .setChunkName(chunkName) - .setOffset(0) - .setLen(chunkLen) - .setChecksumData(checksum.computeChecksum( - byteData, 0, chunkLen).getProtoBufMessage()) - .build(); - - chunkDataMap.put(chunkName, byteData); - chunks.add(chunkInfo); - - blockSize += chunkLen; - blockData = Bytes.concat(blockData, byteData); - } - } - - /** - * A dummy BlockInputStream to mock read block call to DN. 
- */ - private class DummyBlockInputStream extends BlockInputStream { - - DummyBlockInputStream(BlockID blockId, - long blockLen, - Pipeline pipeline, - Token token, - boolean verifyChecksum, - XceiverClientManager xceiverClientManager) { - super(blockId, blockLen, pipeline, token, verifyChecksum, - xceiverClientManager); - } - - @Override - protected List getChunkInfos() { - return chunks; - } - - @Override - protected void addStream(ChunkInfo chunkInfo) { - TestChunkInputStream testChunkInputStream = new TestChunkInputStream(); - getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream( - chunkInfo, null, null, false, - chunkDataMap.get(chunkInfo.getChunkName()).clone())); - } - - @Override - protected synchronized void checkOpen() throws IOException { - // No action needed - } - } - - private void seekAndVerify(int pos) throws Exception { - blockStream.seek(pos); - Assert.assertEquals("Current position of buffer does not match with the " + - "seeked position", pos, blockStream.getPos()); - } - - /** - * Match readData with the chunkData byte-wise. - * @param readData Data read through ChunkInputStream - * @param inputDataStartIndex first index (inclusive) in chunkData to compare - * with read data - * @param length the number of bytes of data to match starting from - * inputDataStartIndex - */ - private void matchWithInputData(byte[] readData, int inputDataStartIndex, - int length) { - for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { - Assert.assertEquals(blockData[i], readData[i - inputDataStartIndex]); - } - } - - @Test - public void testSeek() throws Exception { - // Seek to position 0 - int pos = 0; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 0, - blockStream.getChunkIndex()); - - // Before BlockInputStream is initialized (initialization happens during - // read operation), seek should update the BlockInputStream#blockPosition - pos = CHUNK_SIZE; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 0, - blockStream.getChunkIndex()); - Assert.assertEquals(pos, blockStream.getBlockPosition()); - - // Initialize the BlockInputStream. After initializtion, the chunkIndex - // should be updated to correspond to the seeked position. - blockStream.initialize(); - Assert.assertEquals("ChunkIndex is incorrect", 1, - blockStream.getChunkIndex()); - - pos = (CHUNK_SIZE * 4) + 5; - seekAndVerify(pos); - Assert.assertEquals("ChunkIndex is incorrect", 4, - blockStream.getChunkIndex()); - - try { - // Try seeking beyond the blockSize. - pos = blockSize + 10; - seekAndVerify(pos); - Assert.fail("Seek to position beyond block size should fail."); - } catch (EOFException e) { - System.out.println(e); - } - - // Seek to random positions between 0 and the block size. - Random random = new Random(); - for (int i = 0; i < 10; i++) { - pos = random.nextInt(blockSize); - seekAndVerify(pos); - } - } - - @Test - public void testRead() throws Exception { - // read 200 bytes of data starting from position 50. Chunk0 contains - // indices 0 to 99, chunk1 from 100 to 199 and chunk3 from 200 to 299. So - // the read should result in 3 ChunkInputStream reads - seekAndVerify(50); - byte[] b = new byte[200]; - blockStream.read(b, 0, 200); - matchWithInputData(b, 50, 200); - - // The new position of the blockInputStream should be the last index read - // + 1. 
- Assert.assertEquals(250, blockStream.getPos()); - Assert.assertEquals(2, blockStream.getChunkIndex()); - } - - @Test - public void testSeekAndRead() throws Exception { - // Seek to a position and read data - seekAndVerify(50); - byte[] b1 = new byte[100]; - blockStream.read(b1, 0, 100); - matchWithInputData(b1, 50, 100); - - // Next read should start from the position of the last read + 1 i.e. 100 - byte[] b2 = new byte[100]; - blockStream.read(b2, 0, 100); - matchWithInputData(b2, 150, 100); - } -} diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java deleted file mode 100644 index a5fe26b5619..00000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.EOFException; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -/** - * Tests for {@link ChunkInputStream}'s functionality. 
- */ -public class TestChunkInputStream { - - private static final int CHUNK_SIZE = 100; - private static final int BYTES_PER_CHECKSUM = 20; - private static final String CHUNK_NAME = "dummyChunk"; - private static final Random RANDOM = new Random(); - private static Checksum checksum; - - private DummyChunkInputStream chunkStream; - private ChunkInfo chunkInfo; - private byte[] chunkData; - - @Before - public void setup() throws Exception { - checksum = new Checksum(ChecksumType.valueOf( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT), - BYTES_PER_CHECKSUM); - - chunkData = generateRandomData(CHUNK_SIZE); - - chunkInfo = ChunkInfo.newBuilder() - .setChunkName(CHUNK_NAME) - .setOffset(0) - .setLen(CHUNK_SIZE) - .setChecksumData(checksum.computeChecksum( - chunkData, 0, CHUNK_SIZE).getProtoBufMessage()) - .build(); - - chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true); - } - - static byte[] generateRandomData(int length) { - byte[] bytes = new byte[length]; - RANDOM.nextBytes(bytes); - return bytes; - } - - /** - * A dummy ChunkInputStream to mock read chunk calls to DN. - */ - public class DummyChunkInputStream extends ChunkInputStream { - - // Stores the read chunk data in each readChunk call - private List readByteBuffers = new ArrayList<>(); - - DummyChunkInputStream(ChunkInfo chunkInfo, - BlockID blockId, - XceiverClientSpi xceiverClient, - boolean verifyChecksum) { - super(chunkInfo, blockId, xceiverClient, verifyChecksum); - } - - public DummyChunkInputStream(ChunkInfo chunkInfo, - BlockID blockId, - XceiverClientSpi xceiverClient, - boolean verifyChecksum, - byte[] data) { - super(chunkInfo, blockId, xceiverClient, verifyChecksum); - chunkData = data; - } - - @Override - protected ByteString readChunk(ChunkInfo readChunkInfo) { - ByteString byteString = ByteString.copyFrom(chunkData, - (int) readChunkInfo.getOffset(), - (int) readChunkInfo.getLen()); - readByteBuffers.add(byteString); - return byteString; - } - - @Override - protected void checkOpen() { - // No action needed - } - } - - /** - * Match readData with the chunkData byte-wise. - * @param readData Data read through ChunkInputStream - * @param inputDataStartIndex first index (inclusive) in chunkData to compare - * with read data - * @param length the number of bytes of data to match starting from - * inputDataStartIndex - */ - private void matchWithInputData(byte[] readData, int inputDataStartIndex, - int length) { - for (int i = inputDataStartIndex; i < inputDataStartIndex + length; i++) { - Assert.assertEquals(chunkData[i], readData[i - inputDataStartIndex]); - } - } - - /** - * Seek to a position and verify through getPos(). - */ - private void seekAndVerify(int pos) throws Exception { - chunkStream.seek(pos); - Assert.assertEquals("Current position of buffer does not match with the " + - "seeked position", pos, chunkStream.getPos()); - } - - @Test - public void testFullChunkRead() throws Exception { - byte[] b = new byte[CHUNK_SIZE]; - chunkStream.read(b, 0, CHUNK_SIZE); - - matchWithInputData(b, 0, CHUNK_SIZE); - } - - @Test - public void testPartialChunkRead() throws Exception { - int len = CHUNK_SIZE / 2; - byte[] b = new byte[len]; - - chunkStream.read(b, 0, len); - - matchWithInputData(b, 0, len); - - // To read chunk data from index 0 to 49 (len = 50), we need to read - // chunk from offset 0 to 60 as the checksum boundary is at every 20 - // bytes. Verify that 60 bytes of chunk data are read and stored in the - // buffers. 
- matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), - 0, 60); - - } - - @Test - public void testSeek() throws Exception { - seekAndVerify(0); - - try { - seekAndVerify(CHUNK_SIZE); - Assert.fail("Seeking to Chunk Length should fail."); - } catch (EOFException e) { - GenericTestUtils.assertExceptionContains("EOF encountered at pos: " - + CHUNK_SIZE + " for chunk: " + CHUNK_NAME, e); - } - - // Seek before read should update the ChunkInputStream#chunkPosition - seekAndVerify(25); - Assert.assertEquals(25, chunkStream.getChunkPosition()); - - // Read from the seeked position. - // Reading from index 25 to 54 should result in the ChunkInputStream - // copying chunk data from index 20 to 59 into the buffers (checksum - // boundaries). - byte[] b = new byte[30]; - chunkStream.read(b, 0, 30); - matchWithInputData(b, 25, 30); - matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(), - 20, 40); - - // After read, the position of the chunkStream is evaluated from the - // buffers and the chunkPosition should be reset to -1. - Assert.assertEquals(-1, chunkStream.getChunkPosition()); - - // Seek to a position within the current buffers. Current buffers contain - // data from index 20 to 59. ChunkPosition should still not be used to - // set the position. - seekAndVerify(35); - Assert.assertEquals(-1, chunkStream.getChunkPosition()); - - // Seek to a position outside the current buffers. In this case, the - // chunkPosition should be updated to the seeked position. - seekAndVerify(75); - Assert.assertEquals(75, chunkStream.getChunkPosition()); - } - - @Test - public void testSeekAndRead() throws Exception { - // Seek to a position and read data - seekAndVerify(50); - byte[] b1 = new byte[20]; - chunkStream.read(b1, 0, 20); - matchWithInputData(b1, 50, 20); - - // Next read should start from the position of the last read + 1 i.e. 70 - byte[] b2 = new byte[20]; - chunkStream.read(b2, 0, 20); - matchWithInputData(b2, 70, 20); - } -} \ No newline at end of file diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index abdd04ea967..00000000000 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * This package contains Ozone InputStream related tests. - */ -package org.apache.hadoop.hdds.scm.storage; \ No newline at end of file diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 4441b69d868..00000000000 --- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml deleted file mode 100644 index 9af807f8b9e..00000000000 --- a/hadoop-hdds/common/pom.xml +++ /dev/null @@ -1,285 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-common - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Common - Apache Hadoop HDDS Common - jar - - - 0.5.0-SNAPSHOT - 2.11.0 - 3.4.2 - ${hdds.version} - - - - - org.apache.hadoop - hadoop-hdds-config - - - - javax.annotation - javax.annotation-api - 1.2 - - - - org.fusesource.leveldbjni - leveldbjni-all - - - - ratis-server - org.apache.ratis - - - org.slf4j - slf4j-log4j12 - - - io.dropwizard.metrics - metrics-core - - - org.bouncycastle - bcprov-jdk15on - - - - - ratis-netty - org.apache.ratis - - - ratis-grpc - org.apache.ratis - - - com.google.errorprone - error_prone_annotations - 2.2.0 - true - - - - org.rocksdb - rocksdbjni - 6.0.1 - - - org.apache.hadoop - hadoop-common - test - test-jar - - - - org.apache.logging.log4j - log4j-api - ${log4j2.version} - - - org.apache.logging.log4j - log4j-core - ${log4j2.version} - - - com.lmax - disruptor - ${disruptor.version} - - - org.apache.commons - commons-pool2 - 2.6.0 - - - org.bouncycastle - bcpkix-jdk15on - ${bouncycastle.version} - - - - commons-validator - commons-validator - 1.6 - - - org.junit.jupiter - junit-jupiter-api - - - io.jaegertracing - jaeger-client - ${jaeger.version} - - - io.opentracing - opentracing-util - 0.31.0 - - - org.yaml - snakeyaml - 1.16 - - - - - - - ${basedir}/src/main/resources - - hdds-version-info.properties - - false - - - ${basedir}/src/main/resources - - hdds-version-info.properties - - true - - - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf-maven-plugin.version} - true - - - com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier} - - ${basedir}/src/main/proto/ - - DatanodeContainerProtocol.proto - - target/generated-sources/java - false - - - - compile-protoc - - compile - test-compile - compile-custom - test-compile-custom - - - grpc-java - - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} - - - - - - - maven-antrun-plugin - - - generate-sources - - - - - - - - - - run - - - - - - org.apache.hadoop - hadoop-maven-plugins - - - version-info - generate-resources - - version-info - - - - ${basedir}/../ - - */src/main/java/**/*.java - */src/main/proto/*.proto - - - - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - StorageContainerLocationProtocol.proto - hdds.proto - ScmBlockLocationProtocol.proto - 
SCMSecurityProtocol.proto - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd b/hadoop-hdds/common/src/main/bin/hadoop-config.cmd deleted file mode 100644 index d77dc5346a1..00000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-config.cmd +++ /dev/null @@ -1,317 +0,0 @@ -@echo off -@rem Licensed to the Apache Software Foundation (ASF) under one or more -@rem contributor license agreements. See the NOTICE file distributed with -@rem this work for additional information regarding copyright ownership. -@rem The ASF licenses this file to You under the Apache License, Version 2.0 -@rem (the "License"); you may not use this file except in compliance with -@rem the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - -@rem included in all the hadoop scripts with source command -@rem should not be executable directly -@rem also should not be passed any arguments, since we need original %* - -if not defined HADOOP_COMMON_DIR ( - set HADOOP_COMMON_DIR=share\hadoop\common -) -if not defined HADOOP_COMMON_LIB_JARS_DIR ( - set HADOOP_COMMON_LIB_JARS_DIR=share\hadoop\common\lib -) -if not defined HADOOP_COMMON_LIB_NATIVE_DIR ( - set HADOOP_COMMON_LIB_NATIVE_DIR=lib\native -) -if not defined HDFS_DIR ( - set HDFS_DIR=share\hadoop\hdfs -) -if not defined HDFS_LIB_JARS_DIR ( - set HDFS_LIB_JARS_DIR=share\hadoop\hdfs\lib -) -if not defined YARN_DIR ( - set YARN_DIR=share\hadoop\yarn -) -if not defined YARN_LIB_JARS_DIR ( - set YARN_LIB_JARS_DIR=share\hadoop\yarn\lib -) -if not defined MAPRED_DIR ( - set MAPRED_DIR=share\hadoop\mapreduce -) -if not defined MAPRED_LIB_JARS_DIR ( - set MAPRED_LIB_JARS_DIR=share\hadoop\mapreduce\lib -) - -@rem the root of the Hadoop installation -set HADOOP_HOME=%~dp0 -for %%i in (%HADOOP_HOME%.) do ( - set HADOOP_HOME=%%~dpi -) -if "%HADOOP_HOME:~-1%" == "\" ( - set HADOOP_HOME=%HADOOP_HOME:~0,-1% -) - -if not exist %HADOOP_HOME%\share\hadoop\common\hadoop-common-*.jar ( - @echo +================================================================+ - @echo ^| Error: HADOOP_HOME is not set correctly ^| - @echo +----------------------------------------------------------------+ - @echo ^| Please set your HADOOP_HOME variable to the absolute path of ^| - @echo ^| the directory that contains the hadoop distribution ^| - @echo +================================================================+ - exit /b 1 -) - -if not defined HADOOP_CONF_DIR ( - set HADOOP_CONF_DIR=%HADOOP_HOME%\etc\hadoop -) - -@rem -@rem Allow alternate conf dir location. -@rem - -if "%1" == "--config" ( - set HADOOP_CONF_DIR=%2 - shift - shift -) - -@rem -@rem check to see it is specified whether to use the workers or the -@rem masters file -@rem - -if "%1" == "--hosts" ( - set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2 - shift - shift -) - -@rem -@rem Set log level. Default to INFO. 
-@rem - -if "%1" == "--loglevel" ( - set HADOOP_LOGLEVEL=%2 - shift - shift -) - -if exist %HADOOP_CONF_DIR%\hadoop-env.cmd ( - call %HADOOP_CONF_DIR%\hadoop-env.cmd -) - -@rem -@rem setup java environment variables -@rem - -if not defined JAVA_HOME ( - echo Error: JAVA_HOME is not set. - goto :eof -) - -if not exist %JAVA_HOME%\bin\java.exe ( - echo Error: JAVA_HOME is incorrectly set. - echo Please update %HADOOP_CONF_DIR%\hadoop-env.cmd - goto :eof -) - -set JAVA=%JAVA_HOME%\bin\java -@rem some Java parameters -set JAVA_HEAP_MAX=-Xmx1000m - -@rem -@rem check envvars which might override default args -@rem - -if defined HADOOP_HEAPSIZE ( - set JAVA_HEAP_MAX=-Xmx%HADOOP_HEAPSIZE%m -) - -@rem -@rem CLASSPATH initially contains %HADOOP_CONF_DIR% -@rem - -set CLASSPATH=%HADOOP_CONF_DIR% - -if not defined HADOOP_COMMON_HOME ( - if exist %HADOOP_HOME%\share\hadoop\common ( - set HADOOP_COMMON_HOME=%HADOOP_HOME% - ) -) - -@rem -@rem for releases, add core hadoop jar & webapps to CLASSPATH -@rem - -if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR% -) - -if exist %HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_COMMON_HOME%\%HADOOP_COMMON_DIR%\* - -@rem -@rem default log directory % file -@rem - -if not defined HADOOP_LOG_DIR ( - set HADOOP_LOG_DIR=%HADOOP_HOME%\logs -) - -if not defined HADOOP_LOGFILE ( - set HADOOP_LOGFILE=hadoop.log -) - -if not defined HADOOP_LOGLEVEL ( - set HADOOP_LOGLEVEL=INFO -) - -if not defined HADOOP_ROOT_LOGGER ( - set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console -) - -@rem -@rem default policy file for service-level authorization -@rem - -if not defined HADOOP_POLICYFILE ( - set HADOOP_POLICYFILE=hadoop-policy.xml -) - -@rem -@rem Determine the JAVA_PLATFORM -@rem - -for /f "delims=" %%A in ('%JAVA% -Xmx32m %HADOOP_JAVA_PLATFORM_OPTS% -classpath "%CLASSPATH%" org.apache.hadoop.util.PlatformName') do set JAVA_PLATFORM=%%A -@rem replace space with underscore -set JAVA_PLATFORM=%JAVA_PLATFORM: =_% - -@rem -@rem setup 'java.library.path' for native hadoop code if necessary -@rem - -@rem Check if we're running hadoop directly from the build -if exist %HADOOP_COMMON_HOME%\target\bin ( - if defined JAVA_LIBRARY_PATH ( - set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\target\bin - ) else ( - set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\target\bin - ) -) - -@rem For the distro case, check the bin folder -if exist %HADOOP_COMMON_HOME%\bin ( - if defined JAVA_LIBRARY_PATH ( - set JAVA_LIBRARY_PATH=%JAVA_LIBRARY_PATH%;%HADOOP_COMMON_HOME%\bin - ) else ( - set JAVA_LIBRARY_PATH=%HADOOP_COMMON_HOME%\bin - ) -) - -@rem -@rem setup a default TOOL_PATH -@rem -set TOOL_PATH=%HADOOP_HOME%\share\hadoop\tools\lib\* - -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.dir=%HADOOP_LOG_DIR% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.log.file=%HADOOP_LOGFILE% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.home.dir=%HADOOP_HOME% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.id.str=%HADOOP_IDENT_STRING% -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.root.logger=%HADOOP_ROOT_LOGGER% - -if defined JAVA_LIBRARY_PATH ( - set HADOOP_OPTS=%HADOOP_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% -) -set HADOOP_OPTS=%HADOOP_OPTS% -Dhadoop.policy.file=%HADOOP_POLICYFILE% - -@rem -@rem Disable ipv6 as it can cause issues -@rem - -set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true - -@rem -@rem put hdfs in 
classpath if present -@rem - -if not defined HADOOP_HDFS_HOME ( - if exist %HADOOP_HOME%\%HDFS_DIR% ( - set HADOOP_HDFS_HOME=%HADOOP_HOME% - ) -) - -if exist %HADOOP_HDFS_HOME%\%HDFS_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR% -) - -if exist %HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_HDFS_HOME%\%HDFS_DIR%\* - -@rem -@rem put yarn in classpath if present -@rem - -if not defined HADOOP_YARN_HOME ( - if exist %HADOOP_HOME%\%YARN_DIR% ( - set HADOOP_YARN_HOME=%HADOOP_HOME% - ) -) - -if exist %HADOOP_YARN_HOME%\%YARN_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR% -) - -if exist %HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\* -) - -set CLASSPATH=!CLASSPATH!;%HADOOP_YARN_HOME%\%YARN_DIR%\* - -@rem -@rem put mapred in classpath if present AND different from YARN -@rem - -if not defined HADOOP_MAPRED_HOME ( - if exist %HADOOP_HOME%\%MAPRED_DIR% ( - set HADOOP_MAPRED_HOME=%HADOOP_HOME% - ) -) - -if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" ( - - if exist %HADOOP_MAPRED_HOME%\%MAPRED_DIR%\webapps ( - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR% - ) - - if exist %HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR% ( - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_LIB_JARS_DIR%\* - ) - - set CLASSPATH=!CLASSPATH!;%HADOOP_MAPRED_HOME%\%MAPRED_DIR%\* -) - -@rem -@rem add user-specified CLASSPATH last -@rem - -if defined HADOOP_CLASSPATH ( - if not defined HADOOP_USE_CLIENT_CLASSLOADER ( - if defined HADOOP_USER_CLASSPATH_FIRST ( - set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%; - ) else ( - set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%; - ) - ) -) - -:eof diff --git a/hadoop-hdds/common/src/main/bin/hadoop-config.sh b/hadoop-hdds/common/src/main/bin/hadoop-config.sh deleted file mode 100755 index 444b79a3629..00000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-config.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#### -# IMPORTANT -#### - -## The hadoop-config.sh tends to get executed by non-Hadoop scripts. -## Those parts expect this script to parse/manipulate $@. In order -## to maintain backward compatibility, this means a surprising -## lack of functions for bits that would be much better off in -## a function. -## -## In other words, yes, there is some bad things happen here and -## unless we break the rest of the ecosystem, we can't change it. 
:( - - -# included in all the hadoop scripts with source command -# should not be executable directly -# also should not be passed any arguments, since we need original $* -# -# after doing more config, caller should also exec finalize -# function to finish last minute/default configs for -# settings that might be different between daemons & interactive - -# you must be this high to ride the ride -if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \ - || [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then - echo "bash v3.2+ is required. Sorry." - exit 1 -fi - -# In order to get partially bootstrapped, we need to figure out where -# we are located. Chances are good that our caller has already done -# this work for us, but just in case... - -if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - _hadoop_common_this="${BASH_SOURCE-$0}" - HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P) -fi - -# get our functions defined for usage later -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh - . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh - . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" -else - echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2 - exit 1 -fi - -hadoop_deprecate_envvar HADOOP_PREFIX HADOOP_HOME - -# allow overrides of the above and pre-defines of the below -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example - . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example - . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" -fi - -# -# IMPORTANT! We are not executing user provided code yet! -# - -# Let's go! Base definitions so we can move forward -hadoop_bootstrap - -# let's find our conf. -# -# first, check and process params passed to us -# we process this in-line so that we can directly modify $@ -# if something downstream is processing that directly, -# we need to make sure our params have been ripped out -# note that we do many of them here for various utilities. -# this provides consistency and forces a more consistent -# user experience - - -# save these off in case our caller needs them -# shellcheck disable=SC2034 -HADOOP_USER_PARAMS=("$@") - -hadoop_parse_args "$@" -shift "${HADOOP_PARSE_COUNTER}" - -# -# Setup the base-line environment -# -hadoop_find_confdir -hadoop_exec_hadoopenv -hadoop_import_shellprofiles -hadoop_exec_userfuncs - -# -# IMPORTANT! User provided code is now available! -# - -hadoop_exec_user_hadoopenv -hadoop_verify_confdir - -hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS -hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES -hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP - -# do all the OS-specific startup bits here -# this allows us to get a decent JAVA_HOME, -# call crle for LD_LIBRARY_PATH, etc. -hadoop_os_tricks - -hadoop_java_setup - -hadoop_basic_init - -# inject any sub-project overrides, defaults, etc. 
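The check just below is the extension point that lets a sub-project wrapper (an hdfs-config.sh-style script that sources this file) layer its own defaults on top of the common bootstrap. A minimal, hypothetical hook is sketched here; the MYPROJ_* names are placeholders rather than the real HDFS or YARN contract.

# Hypothetical sub-project hook, defined by the wrapper script before
# hadoop-config.sh reaches this point. Names are illustrative only.
function hadoop_subproject_init
{
  # fall back to the shared conf and log dirs unless already overridden
  MYPROJ_CONF_DIR="${MYPROJ_CONF_DIR:-${HADOOP_CONF_DIR}}"
  MYPROJ_LOG_DIR="${MYPROJ_LOG_DIR:-${HADOOP_LOG_DIR}}"
  export MYPROJ_CONF_DIR MYPROJ_LOG_DIR
}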
-if declare -F hadoop_subproject_init >/dev/null ; then - hadoop_subproject_init -fi - -hadoop_shellprofiles_init - -# get the native libs in there pretty quick -hadoop_add_javalibpath "${HADOOP_HOME}/build/native" -hadoop_add_javalibpath "${HADOOP_HOME}/${HADOOP_COMMON_LIB_NATIVE_DIR}" - -hadoop_shellprofiles_nativelib - -# get the basic java class path for these subprojects -# in as quickly as possible since other stuff -# will definitely depend upon it. - -hadoop_add_common_to_classpath -hadoop_shellprofiles_classpath - -# user API commands can now be run since the runtime -# environment has been configured -hadoop_exec_hadooprc - -# -# backwards compatibility. new stuff should -# call this when they are ready -# -if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then - hadoop_finalize -fi diff --git a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh b/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh deleted file mode 100755 index 55304916ad1..00000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a Hadoop command on all slave hosts. - -function hadoop_usage -{ - echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) " -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1 - exit 1 -fi - -if [[ $# = 0 ]]; then - hadoop_exit_with_usage 1 -fi - -daemonmode=$1 -shift - -if [[ -z "${HADOOP_HDFS_HOME}" ]]; then - hdfsscript="${HADOOP_HOME}/bin/hdfs" -else - hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs" -fi - -hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated." -hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead." - -# -# Original input was usually: -# hadoop-daemons.sh (shell options) (start|stop) (datanode|...) 
(daemon options) -# we're going to turn this into -# hdfs --workers --daemon (start|stop) (rest of options) -# -for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ )) -do - if [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^start$ ]] || - [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^stop$ ]] || - [[ "${HADOOP_USER_PARAMS[$i]}" =~ ^status$ ]]; then - unset HADOOP_USER_PARAMS[$i] - fi -done - -${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}" diff --git a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh b/hadoop-hdds/common/src/main/bin/hadoop-functions.sh deleted file mode 100755 index 484fe2302f9..00000000000 --- a/hadoop-hdds/common/src/main/bin/hadoop-functions.sh +++ /dev/null @@ -1,2732 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# we need to declare this globally as an array, which can only -# be done outside of a function -declare -a HADOOP_SUBCMD_USAGE -declare -a HADOOP_OPTION_USAGE -declare -a HADOOP_SUBCMD_USAGE_TYPES - -## @description Print a message to stderr -## @audience public -## @stability stable -## @replaceable no -## @param string -function hadoop_error -{ - echo "$*" 1>&2 -} - -## @description Print a message to stderr if --debug is turned on -## @audience public -## @stability stable -## @replaceable no -## @param string -function hadoop_debug -{ - if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then - echo "DEBUG: $*" 1>&2 - fi -} - -## @description Given a filename or dir, return the absolute version of it -## @description This works as an alternative to readlink, which isn't -## @description portable. -## @audience public -## @stability stable -## @param fsobj -## @replaceable no -## @return 0 success -## @return 1 failure -## @return stdout abspath -function hadoop_abs -{ - declare obj=$1 - declare dir - declare fn - declare dirret - - if [[ ! -e ${obj} ]]; then - return 1 - elif [[ -d ${obj} ]]; then - dir=${obj} - else - dir=$(dirname -- "${obj}") - fn=$(basename -- "${obj}") - fn="/${fn}" - fi - - dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P) - dirret=$? - if [[ ${dirret} = 0 ]]; then - echo "${dir}${fn}" - return 0 - fi - return 1 -} - -## @description Given variable $1 delete $2 from it -## @audience public -## @stability stable -## @replaceable no -function hadoop_delete_entry -{ - if [[ ${!1} =~ \ ${2}\ ]] ; then - hadoop_debug "Removing ${2} from ${1}" - eval "${1}"=\""${!1// ${2} }"\" - fi -} - -## @description Given variable $1 add $2 to it -## @audience public -## @stability stable -## @replaceable no -function hadoop_add_entry -{ - if [[ ! 
${!1} =~ \ ${2}\ ]] ; then - hadoop_debug "Adding ${2} to ${1}" - #shellcheck disable=SC2140 - eval "${1}"=\""${!1} ${2} "\" - fi -} - -## @description Given variable $1 determine if $2 is in it -## @audience public -## @stability stable -## @replaceable no -## @return 0 = yes, 1 = no -function hadoop_verify_entry -{ - # this unfortunately can't really be tested by bats. :( - # so if this changes, be aware that unit tests effectively - # do this function in them - [[ ${!1} =~ \ ${2}\ ]] -} - -## @description Check if an array has a given value -## @audience public -## @stability stable -## @replaceable yes -## @param element -## @param array -## @returns 0 = yes -## @returns 1 = no -function hadoop_array_contains -{ - declare element=$1 - shift - declare val - - if [[ "$#" -eq 0 ]]; then - return 1 - fi - - for val in "${@}"; do - if [[ "${val}" == "${element}" ]]; then - return 0 - fi - done - return 1 -} - -## @description Add the `appendstring` if `checkstring` is not -## @description present in the given array -## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param appendstring -function hadoop_add_array_param -{ - declare arrname=$1 - declare add=$2 - - declare arrref="${arrname}[@]" - declare array=("${!arrref}") - - if ! hadoop_array_contains "${add}" "${array[@]}"; then - #shellcheck disable=SC1083,SC2086 - eval ${arrname}=\(\"\${array[@]}\" \"${add}\" \) - hadoop_debug "$1 accepted $2" - else - hadoop_debug "$1 declined $2" - fi -} - -## @description Sort an array (must not contain regexps) -## @description present in the given array -## @audience public -## @stability stable -## @replaceable yes -## @param arrayvar -function hadoop_sort_array -{ - declare arrname=$1 - declare arrref="${arrname}[@]" - declare array=("${!arrref}") - declare oifs - - declare globstatus - declare -a sa - - globstatus=$(set -o | grep noglob | awk '{print $NF}') - - set -f - oifs=${IFS} - - # shellcheck disable=SC2034 - IFS=$'\n' sa=($(sort <<<"${array[*]}")) - - # shellcheck disable=SC1083 - eval "${arrname}"=\(\"\${sa[@]}\"\) - - IFS=${oifs} - if [[ "${globstatus}" = off ]]; then - set +f - fi -} - -## @description Check if we are running with priv -## @description by default, this implementation looks for -## @description EUID=0. For OSes that have true priv -## @description separation, this should be something more complex -## @audience private -## @stability evolving -## @replaceable yes -## @return 1 = no priv -## @return 0 = priv -function hadoop_privilege_check -{ - [[ "${EUID}" = 0 ]] -} - -## @description Execute a command via su when running as root -## @description if the given user is found or exit with -## @description failure if not. -## @description otherwise just run it. (This is intended to -## @description be used by the start-*/stop-* scripts.) -## @audience private -## @stability evolving -## @replaceable yes -## @param user -## @param commandstring -## @return exitstatus -function hadoop_su -{ - declare user=$1 - shift - - if hadoop_privilege_check; then - if hadoop_verify_user_resolves user; then - su -l "${user}" -- "$@" - else - hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting." - return 1 - fi - else - "$@" - fi -} - -## @description Execute a command via su when running as root -## @description with extra support for commands that might -## @description legitimately start as root (e.g., datanode) -## @description (This is intended to -## @description be used by the start-*/stop-* scripts.) 
-## @audience private -## @stability evolving -## @replaceable no -## @param user -## @param commandstring -## @return exitstatus -function hadoop_uservar_su -{ - - ## startup matrix: - # - # if $EUID != 0, then exec - # if $EUID =0 then - # if hdfs_subcmd_user is defined, call hadoop_su to exec - # if hdfs_subcmd_user is not defined, error - # - # For secure daemons, this means both the secure and insecure env vars need to be - # defined. e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs - # This function will pick up the "normal" var, switch to that user, then - # execute the command which will then pick up the "secure" version. - # - - declare program=$1 - declare command=$2 - shift 2 - - declare uprogram - declare ucommand - declare uvar - declare svar - - if hadoop_privilege_check; then - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - - svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER) - - if [[ -n "${!uvar}" ]]; then - hadoop_su "${!uvar}" "$@" - elif [[ -n "${!svar}" ]]; then - ## if we are here, then SECURE_USER with no USER defined - ## we are already privileged, so just run the command and hope - ## for the best - "$@" - else - hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root" - hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation." - return 1 - fi - else - "$@" - fi -} - -## @description Add a subcommand to the usage output -## @audience private -## @stability evolving -## @replaceable no -## @param subcommand -## @param subcommandtype -## @param subcommanddesc -function hadoop_add_subcommand -{ - declare subcmd=$1 - declare subtype=$2 - declare text=$3 - - hadoop_debug "${subcmd} as a ${subtype}" - - hadoop_add_array_param HADOOP_SUBCMD_USAGE_TYPES "${subtype}" - - # done in this order so that sort works later - HADOOP_SUBCMD_USAGE[${HADOOP_SUBCMD_USAGE_COUNTER}]="${subcmd}@${subtype}@${text}" - ((HADOOP_SUBCMD_USAGE_COUNTER=HADOOP_SUBCMD_USAGE_COUNTER+1)) -} - -## @description Add an option to the usage output -## @audience private -## @stability evolving -## @replaceable no -## @param subcommand -## @param subcommanddesc -function hadoop_add_option -{ - local option=$1 - local text=$2 - - HADOOP_OPTION_USAGE[${HADOOP_OPTION_USAGE_COUNTER}]="${option}@${text}" - ((HADOOP_OPTION_USAGE_COUNTER=HADOOP_OPTION_USAGE_COUNTER+1)) -} - -## @description Reset the usage information to blank -## @audience private -## @stability evolving -## @replaceable no -function hadoop_reset_usage -{ - HADOOP_SUBCMD_USAGE=() - HADOOP_OPTION_USAGE=() - HADOOP_SUBCMD_USAGE_TYPES=() - HADOOP_SUBCMD_USAGE_COUNTER=0 - HADOOP_OPTION_USAGE_COUNTER=0 -} - -## @description Print a screen-size aware two-column output -## @description if reqtype is not null, only print those requested -## @audience private -## @stability evolving -## @replaceable no -## @param reqtype -## @param array -function hadoop_generic_columnprinter -{ - declare reqtype=$1 - shift - declare -a input=("$@") - declare -i i=0 - declare -i counter=0 - declare line - declare text - declare option - declare giventext - declare -i maxoptsize - declare -i foldsize - declare -a tmpa - declare numcols - declare brup - - if [[ -n "${COLUMNS}" ]]; then - numcols=${COLUMNS} - else - numcols=$(tput cols) 2>/dev/null - COLUMNS=${numcols} - fi - - if [[ -z "${numcols}" - || ! 
"${numcols}" =~ ^[0-9]+$ ]]; then - numcols=75 - else - ((numcols=numcols-5)) - fi - - while read -r line; do - tmpa[${counter}]=${line} - ((counter=counter+1)) - IFS='@' read -ra brup <<< "${line}" - option="${brup[0]}" - if [[ ${#option} -gt ${maxoptsize} ]]; then - maxoptsize=${#option} - fi - done < <(for text in "${input[@]}"; do - echo "${text}" - done | sort) - - i=0 - ((foldsize=numcols-maxoptsize)) - - until [[ $i -eq ${#tmpa[@]} ]]; do - IFS='@' read -ra brup <<< "${tmpa[$i]}" - - option="${brup[0]}" - cmdtype="${brup[1]}" - giventext="${brup[2]}" - - if [[ -n "${reqtype}" ]]; then - if [[ "${cmdtype}" != "${reqtype}" ]]; then - ((i=i+1)) - continue - fi - fi - - if [[ -z "${giventext}" ]]; then - giventext=${cmdtype} - fi - - while read -r line; do - printf "%-${maxoptsize}s %-s\n" "${option}" "${line}" - option=" " - done < <(echo "${giventext}"| fold -s -w ${foldsize}) - ((i=i+1)) - done -} - -## @description generate standard usage output -## @description and optionally takes a class -## @audience private -## @stability evolving -## @replaceable no -## @param execname -## @param true|false -## @param [text to use in place of SUBCOMMAND] -function hadoop_generate_usage -{ - declare cmd=$1 - declare takesclass=$2 - declare subcmdtext=${3:-"SUBCOMMAND"} - declare haveoptions - declare optstring - declare havesubs - declare subcmdstring - declare cmdtype - - cmd=${cmd##*/} - - if [[ -n "${HADOOP_OPTION_USAGE_COUNTER}" - && "${HADOOP_OPTION_USAGE_COUNTER}" -gt 0 ]]; then - haveoptions=true - optstring=" [OPTIONS]" - fi - - if [[ -n "${HADOOP_SUBCMD_USAGE_COUNTER}" - && "${HADOOP_SUBCMD_USAGE_COUNTER}" -gt 0 ]]; then - havesubs=true - subcmdstring=" ${subcmdtext} [${subcmdtext} OPTIONS]" - fi - - echo "Usage: ${cmd}${optstring}${subcmdstring}" - if [[ ${takesclass} = true ]]; then - echo " or ${cmd}${optstring} CLASSNAME [CLASSNAME OPTIONS]" - echo " where CLASSNAME is a user-provided Java class" - fi - - if [[ "${haveoptions}" = true ]]; then - echo "" - echo " OPTIONS is none or any of:" - echo "" - - hadoop_generic_columnprinter "" "${HADOOP_OPTION_USAGE[@]}" - fi - - if [[ "${havesubs}" = true ]]; then - echo "" - echo " ${subcmdtext} is one of:" - echo "" - - if [[ "${#HADOOP_SUBCMD_USAGE_TYPES[@]}" -gt 0 ]]; then - - hadoop_sort_array HADOOP_SUBCMD_USAGE_TYPES - for subtype in "${HADOOP_SUBCMD_USAGE_TYPES[@]}"; do - #shellcheck disable=SC2086 - cmdtype="$(tr '[:lower:]' '[:upper:]' <<< ${subtype:0:1})${subtype:1}" - printf "\n %s Commands:\n\n" "${cmdtype}" - hadoop_generic_columnprinter "${subtype}" "${HADOOP_SUBCMD_USAGE[@]}" - done - else - hadoop_generic_columnprinter "" "${HADOOP_SUBCMD_USAGE[@]}" - fi - echo "" - echo "${subcmdtext} may print help when invoked w/o parameters or with -h." - fi -} - -## @description Replace `oldvar` with `newvar` if `oldvar` exists. -## @audience public -## @stability stable -## @replaceable yes -## @param oldvar -## @param newvar -function hadoop_deprecate_envvar -{ - local oldvar=$1 - local newvar=$2 - local oldval=${!oldvar} - local newval=${!newvar} - - if [[ -n "${oldval}" ]]; then - hadoop_error "WARNING: ${oldvar} has been replaced by ${newvar}. Using value of ${oldvar}." - # shellcheck disable=SC2086 - eval ${newvar}=\"${oldval}\" - - # shellcheck disable=SC2086 - newval=${oldval} - - # shellcheck disable=SC2086 - eval ${newvar}=\"${newval}\" - fi -} - -## @description Declare `var` being used and print its value. 
-## @audience public -## @stability stable -## @replaceable yes -## @param var -function hadoop_using_envvar -{ - local var=$1 - local val=${!var} - - if [[ -n "${val}" ]]; then - hadoop_debug "${var} = ${val}" - fi -} - -## @description Create the directory 'dir'. -## @audience public -## @stability stable -## @replaceable yes -## @param dir -function hadoop_mkdir -{ - local dir=$1 - - if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then - hadoop_error "WARNING: ${dir} does not exist. Creating." - if ! mkdir -p "${dir}"; then - hadoop_error "ERROR: Unable to create ${dir}. Aborting." - exit 1 - fi - fi -} - -## @description Bootstraps the Hadoop shell environment -## @audience private -## @stability evolving -## @replaceable no -function hadoop_bootstrap -{ - # the root of the Hadoop installation - # See HADOOP-6255 for the expected directory structure layout - - if [[ -n "${DEFAULT_LIBEXEC_DIR}" ]]; then - hadoop_error "WARNING: DEFAULT_LIBEXEC_DIR ignored. It has been replaced by HADOOP_DEFAULT_LIBEXEC_DIR." - fi - - # By now, HADOOP_LIBEXEC_DIR should have been defined upstream - # We can piggyback off of that to figure out where the default - # HADOOP_FREFIX should be. This allows us to run without - # HADOOP_HOME ever being defined by a human! As a consequence - # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful - # env var within Hadoop. - if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - hadoop_error "HADOOP_LIBEXEC_DIR is not defined. Exiting." - exit 1 - fi - HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P) - HADOOP_HOME=${HADOOP_HOME:-$HADOOP_DEFAULT_PREFIX} - export HADOOP_HOME - - # - # short-cuts. vendors may redefine these as well, preferably - # in hadoop-layout.sh - # - HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"} - HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"} - HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"} - HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"} - HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"} - YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"} - YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} - MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} - MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} - HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"} - HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"} - OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} - OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} - OZONEFS_DIR=${OZONEFS_DIR:-"share/hadoop/ozonefs"} - - HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_HOME}} - HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"} - HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"} - - # by default, whatever we are about to run doesn't support - # daemonization - HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false - - # by default, we have not been self-re-execed - HADOOP_REEXECED_CMD=false - - HADOOP_SUBCMD_SECURESERVICE=false - - # This is the default we claim in hadoop-env.sh - JSVC_HOME=${JSVC_HOME:-"/usr/bin"} - - # usage output set to zero - hadoop_reset_usage - - export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} - - # defaults - export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"} - hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}" -} - -## @description Locate Hadoop's configuration directory -## @audience private -## @stability evolving -## @replaceable no -function hadoop_find_confdir -{ - 
local conf_dir - - # An attempt at compatibility with some Hadoop 1.x - # installs. - if [[ -e "${HADOOP_HOME}/conf/hadoop-env.sh" ]]; then - conf_dir="conf" - else - conf_dir="etc/hadoop" - fi - export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_HOME}/${conf_dir}}" - - hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}" -} - -## @description Validate ${HADOOP_CONF_DIR} -## @audience public -## @stability stable -## @replaceable yes -## @return will exit on failure conditions -function hadoop_verify_confdir -{ - # Check only log4j.properties by default. - # --loglevel does not work without logger settings in log4j.log4j.properties. - if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then - hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete." - fi -} - -## @description Import the hadoop-env.sh settings -## @audience private -## @stability evolving -## @replaceable no -function hadoop_exec_hadoopenv -{ - if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then - if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then - export HADOOP_ENV_PROCESSED=true - # shellcheck source=./hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh - . "${HADOOP_CONF_DIR}/hadoop-env.sh" - fi - fi -} - -## @description Import the replaced functions -## @audience private -## @stability evolving -## @replaceable no -function hadoop_exec_userfuncs -{ - if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then - # shellcheck disable=SC1090 - . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" - fi -} - -## @description Read the user's settings. This provides for users to -## @description override and/or append hadoop-env.sh. It is not meant -## @description as a complete system override. -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_exec_user_hadoopenv -{ - if [[ -f "${HOME}/.hadoop-env" ]]; then - hadoop_debug "Applying the user's .hadoop-env" - # shellcheck disable=SC1090 - . "${HOME}/.hadoop-env" - fi -} - -## @description Read the user's settings. This provides for users to -## @description run Hadoop Shell API after system bootstrap -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_exec_hadooprc -{ - if [[ -f "${HOME}/.hadooprc" ]]; then - hadoop_debug "Applying the user's .hadooprc" - # shellcheck disable=SC1090 - . "${HOME}/.hadooprc" - fi -} - -## @description Import shellprofile.d content -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_import_shellprofiles -{ - local i - local files1 - local files2 - - if [[ -d "${HADOOP_LIBEXEC_DIR}/shellprofile.d" ]]; then - files1=(${HADOOP_LIBEXEC_DIR}/shellprofile.d/*.sh) - hadoop_debug "shellprofiles: ${files1[*]}" - else - hadoop_error "WARNING: ${HADOOP_LIBEXEC_DIR}/shellprofile.d doesn't exist. Functionality may not work." - fi - - if [[ -d "${HADOOP_CONF_DIR}/shellprofile.d" ]]; then - files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh) - fi - - # enable bundled shellprofiles that come - # from hadoop-tools. This converts the user-facing HADOOP_OPTIONAL_TOOLS - # to the HADOOP_TOOLS_OPTIONS that the shell profiles expect. - # See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS - # gets populated into hadoop-env.sh - - for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do - hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}" - done - - for i in "${files1[@]}" "${files2[@]}" - do - if [[ -n "${i}" - && -f "${i}" ]]; then - hadoop_debug "Profiles: importing ${i}" - # shellcheck disable=SC1090 - . 
"${i}" - fi - done -} - -## @description Initialize the registered shell profiles -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_init -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_init >/dev/null ; then - hadoop_debug "Profiles: ${i} init" - # shellcheck disable=SC2086 - _${i}_hadoop_init - fi - done -} - -## @description Apply the shell profile classpath additions -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_classpath -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_classpath >/dev/null ; then - hadoop_debug "Profiles: ${i} classpath" - # shellcheck disable=SC2086 - _${i}_hadoop_classpath - fi - done -} - -## @description Apply the shell profile native library additions -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_nativelib -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_nativelib >/dev/null ; then - hadoop_debug "Profiles: ${i} nativelib" - # shellcheck disable=SC2086 - _${i}_hadoop_nativelib - fi - done -} - -## @description Apply the shell profile final configuration -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_shellprofiles_finalize -{ - local i - - for i in ${HADOOP_SHELL_PROFILES} - do - if declare -F _${i}_hadoop_finalize >/dev/null ; then - hadoop_debug "Profiles: ${i} finalize" - # shellcheck disable=SC2086 - _${i}_hadoop_finalize - fi - done -} - -## @description Initialize the Hadoop shell environment, now that -## @description user settings have been imported -## @audience private -## @stability evolving -## @replaceable no -function hadoop_basic_init -{ - # Some of these are also set in hadoop-env.sh. - # we still set them here just in case hadoop-env.sh is - # broken in some way, set up defaults, etc. - # - # but it is important to note that if you update these - # you also need to update hadoop-env.sh as well!!! - - CLASSPATH="" - hadoop_debug "Initialize CLASSPATH" - - if [[ -z "${HADOOP_COMMON_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${HADOOP_COMMON_DIR}" ]]; then - export HADOOP_COMMON_HOME="${HADOOP_HOME}" - fi - - # default policy file for service-level authorization - HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"} - - # define HADOOP_HDFS_HOME - if [[ -z "${HADOOP_HDFS_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${HDFS_DIR}" ]]; then - export HADOOP_HDFS_HOME="${HADOOP_HOME}" - fi - - # define HADOOP_YARN_HOME - if [[ -z "${HADOOP_YARN_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${YARN_DIR}" ]]; then - export HADOOP_YARN_HOME="${HADOOP_HOME}" - fi - - # define HADOOP_MAPRED_HOME - if [[ -z "${HADOOP_MAPRED_HOME}" ]] && - [[ -d "${HADOOP_HOME}/${MAPRED_DIR}" ]]; then - export HADOOP_MAPRED_HOME="${HADOOP_HOME}" - fi - - if [[ ! -d "${HADOOP_COMMON_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_COMMON_HOME" - exit 1 - fi - - if [[ ! -d "${HADOOP_HDFS_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_HDFS_HOME" - exit 1 - fi - - if [[ ! -d "${HADOOP_YARN_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_YARN_HOME" - exit 1 - fi - - if [[ ! 
-d "${HADOOP_MAPRED_HOME}" ]]; then - hadoop_error "ERROR: Invalid HADOOP_MAPRED_HOME" - exit 1 - fi - - # if for some reason the shell doesn't have $USER defined - # (e.g., ssh'd in to execute a command) - # let's get the effective username and use that - USER=${USER:-$(id -nu)} - HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER} - HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"} - HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log} - HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO} - HADOOP_NICENESS=${HADOOP_NICENESS:-0} - HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5} - HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp} - HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console} - HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA} - HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender} - HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"} - HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}} - HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}} - HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10} -} - -## @description Set the worker support information to the contents -## @description of `filename` -## @audience public -## @stability stable -## @replaceable no -## @param filename -## @return will exit if file does not exist -function hadoop_populate_workers_file -{ - local workersfile=$1 - shift - if [[ -f "${workersfile}" ]]; then - HADOOP_WORKERS="${workersfile}" - elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then - HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}" - else - hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\"" - hadoop_exit_with_usage 1 - fi -} - -## @description Rotates the given `file` until `number` of -## @description files exist. -## @audience public -## @stability stable -## @replaceable no -## @param filename -## @param [number] -## @return $? will contain last mv's return value -function hadoop_rotate_log -{ - # - # Users are likely to replace this one for something - # that gzips or uses dates or who knows what. - # - # be aware that &1 and &2 might go through here - # so don't do anything too crazy... - # - local log=$1; - local num=${2:-5}; - - if [[ -f "${log}" ]]; then # rotate logs - while [[ ${num} -gt 1 ]]; do - #shellcheck disable=SC2086 - let prev=${num}-1 - if [[ -f "${log}.${prev}" ]]; then - mv "${log}.${prev}" "${log}.${num}" - fi - num=${prev} - done - mv "${log}" "${log}.${num}" - fi -} - -## @description Via ssh, log into `hostname` and run `command` -## @audience private -## @stability evolving -## @replaceable yes -## @param hostname -## @param command -## @param [...] -function hadoop_actual_ssh -{ - # we are passing this function to xargs - # should get hostname followed by rest of command line - local worker=$1 - shift - - # shellcheck disable=SC2086 - ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /" -} - -## @description Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES} -## @description and execute command. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param [...] 
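A sketch of how the fan-out helper defined next is typically driven. It assumes the normal hadoop-config.sh bootstrap has already populated HADOOP_SSH_OPTS, HADOOP_SSH_PARALLEL and HADOOP_CONF_DIR; the hostnames and the command are placeholders.

# Exactly one of the two variables should be set: HADOOP_WORKERS points at
# a hosts file, HADOOP_WORKER_NAMES lists hosts inline. Setting both makes
# the helper abort with an error.
unset HADOOP_WORKERS
export HADOOP_WORKER_NAMES="worker1 worker2 worker3"   # placeholder hosts

# Runs "jps" on every worker, via pdsh when it is installed and otherwise
# through the plain ssh loop defined further below.
hadoop_connect_to_hosts jps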
-function hadoop_connect_to_hosts -{ - # shellcheck disable=SC2124 - local params="$@" - local worker_file - local tmpslvnames - - # - # ssh (or whatever) to a host - # - # User can specify hostnames or a file where the hostnames are (not both) - if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then - hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting." - exit 1 - elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then - if [[ -n "${HADOOP_WORKERS}" ]]; then - worker_file=${HADOOP_WORKERS} - elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then - worker_file=${HADOOP_CONF_DIR}/workers - elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then - hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead." - worker_file=${HADOOP_CONF_DIR}/slaves - fi - fi - - # if pdsh is available, let's use it. otherwise default - # to a loop around ssh. (ugh) - if [[ -e '/usr/bin/pdsh' ]]; then - if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then - # if we were given a file, just let pdsh deal with it. - # shellcheck disable=SC2086 - PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ - -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1 - else - # no spaces allowed in the pdsh arg host list - # shellcheck disable=SC2086 - tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,) - PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \ - -f "${HADOOP_SSH_PARALLEL}" \ - -w "${tmpslvnames}" $"${@// /\\ }" 2>&1 - fi - else - if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then - HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}") - fi - hadoop_connect_to_hosts_without_pdsh "${params}" - fi -} - -## @description Connect to ${HADOOP_WORKER_NAMES} and execute command -## @description under the environment which does not support pdsh. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param [...] -function hadoop_connect_to_hosts_without_pdsh -{ - # shellcheck disable=SC2124 - local params="$@" - local workers=(${HADOOP_WORKER_NAMES}) - for (( i = 0; i < ${#workers[@]}; i++ )) - do - if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then - wait - fi - # shellcheck disable=SC2086 - hadoop_actual_ssh "${workers[$i]}" ${params} & - done - wait -} - -## @description Utility routine to handle --workers mode -## @audience private -## @stability evolving -## @replaceable yes -## @param commandarray -function hadoop_common_worker_mode_execute -{ - # - # input should be the command line as given by the user - # in the form of an array - # - local argv=("$@") - - # if --workers is still on the command line, remove it - # to prevent loops - # Also remove --hostnames and --hosts along with arg values - local argsSize=${#argv[@]}; - for (( i = 0; i < argsSize; i++ )) - do - if [[ "${argv[$i]}" =~ ^--workers$ ]]; then - unset argv[$i] - elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] || - [[ "${argv[$i]}" =~ ^--hosts$ ]]; then - unset argv[$i]; - let i++; - unset argv[$i]; - fi - done - if [[ ${QATESTMODE} = true ]]; then - echo "${argv[@]}" - return - fi - hadoop_connect_to_hosts -- "${argv[@]}" -} - -## @description Verify that a shell command was passed a valid -## @description class name -## @audience public -## @stability stable -## @replaceable yes -## @param classname -## @return 0 = success -## @return 1 = failure w/user message -function hadoop_validate_classname -{ - local class=$1 - shift 1 - - if [[ ! ${class} =~ \. ]]; then - # assuming the arg is typo of command if it does not conatain ".". 
- # class belonging to no package is not allowed as a result. - hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME." - return 1 - fi - return 0 -} - -## @description Append the `appendstring` if `checkstring` is not -## @description present in the given `envvar` -## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param checkstring -## @param appendstring -function hadoop_add_param -{ - # - # general param dedupe.. - # $1 is what we are adding to - # $2 is the name of what we want to add (key) - # $3 is the key+value of what we're adding - # - # doing it this way allows us to support all sorts of - # different syntaxes, just so long as they are space - # delimited - # - if [[ ! ${!1} =~ $2 ]] ; then - #shellcheck disable=SC2140 - eval "$1"="'${!1} $3'" - if [[ ${!1:0:1} = ' ' ]]; then - #shellcheck disable=SC2140 - eval "$1"="'${!1# }'" - fi - hadoop_debug "$1 accepted $3" - else - hadoop_debug "$1 declined $3" - fi -} - -## @description Register the given `shellprofile` to the Hadoop -## @description shell subsystem -## @audience public -## @stability stable -## @replaceable yes -## @param shellprofile -function hadoop_add_profile -{ - # shellcheck disable=SC2086 - hadoop_add_param HADOOP_SHELL_PROFILES $1 $1 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the classpath. Optionally provide -## @description a hint as to where in the classpath it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_classpath -{ - # However, with classpath (& JLP), we can do dedupe - # along with some sanity checking (e.g., missing directories) - # since we have a better idea of what is legal - # - # for wildcard at end, we can - # at least check the dir exists - if [[ $1 =~ ^.*\*$ ]]; then - local mp - mp=$(dirname "$1") - if [[ ! -d "${mp}" ]]; then - hadoop_debug "Rejected CLASSPATH: $1 (not a dir)" - return 1 - fi - - # no wildcard in the middle, so check existence - # (doesn't matter *what* it is) - elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then - hadoop_debug "Rejected CLASSPATH: $1 (does not exist)" - return 1 - fi - if [[ -z "${CLASSPATH}" ]]; then - CLASSPATH=$1 - hadoop_debug "Initial CLASSPATH=$1" - elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then - if [[ "$2" = "before" ]]; then - CLASSPATH="$1:${CLASSPATH}" - hadoop_debug "Prepend CLASSPATH: $1" - else - CLASSPATH+=:$1 - hadoop_debug "Append CLASSPATH: $1" - fi - else - hadoop_debug "Dupe CLASSPATH: $1" - fi - return 0 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the colonpath. Optionally provide -## @description a hint as to where in the colonpath it should go. -## @description Prior to adding, objects are checked for duplication -## @description and check for existence. Many other functions use -## @description this function as their base implementation -## @description including `hadoop_add_javalibpath` and `hadoop_add_ldlibpath`. 
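The dedupe and ordering rules described above are easiest to see in a small throwaway session. The sketch below assumes hadoop-functions.sh has been sourced from its install location (the path shown is only an example) and uses temporary directories so that the existence checks pass.

. /opt/hadoop/libexec/hadoop-functions.sh    # example path, adjust as needed

confdir=$(mktemp -d)
libdir=$(mktemp -d)

CLASSPATH=""
hadoop_add_classpath "${libdir}/*"          # wildcard: only the parent dir must exist
hadoop_add_classpath "${confdir}" before    # prepended ahead of the jar wildcard
hadoop_add_classpath "${confdir}"           # duplicate, silently skipped
echo "${CLASSPATH}"                         # confdir first, then libdir/*

# hadoop_add_param dedupes on the key (second argument), not on the full value:
HADOOP_OPTS=""
hadoop_add_param HADOOP_OPTS java.net.preferIPv4Stack "-Djava.net.preferIPv4Stack=true"
hadoop_add_param HADOOP_OPTS java.net.preferIPv4Stack "-Djava.net.preferIPv4Stack=false"   # declined, key already present
echo "${HADOOP_OPTS}"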
-## @audience public -## @stability stable -## @replaceable yes -## @param envvar -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_colonpath -{ - # this is CLASSPATH, JLP, etc but with dedupe but no - # other checking - if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then - if [[ -z "${!1}" ]]; then - # shellcheck disable=SC2086 - eval $1="'$2'" - hadoop_debug "Initial colonpath($1): $2" - elif [[ "$3" = "before" ]]; then - # shellcheck disable=SC2086 - eval $1="'$2:${!1}'" - hadoop_debug "Prepend colonpath($1): $2" - else - # shellcheck disable=SC2086 - eval $1+=":'$2'" - hadoop_debug "Append colonpath($1): $2" - fi - return 0 - fi - hadoop_debug "Rejected colonpath($1): $2" - return 1 -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the Java JNI path. Optionally -## @description provide a hint as to where in the Java JNI path -## @description it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_javalibpath -{ - # specialized function for a common use case - hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2" -} - -## @description Add a file system object (directory, file, -## @description wildcard, ...) to the LD_LIBRARY_PATH. Optionally -## @description provide a hint as to where in the LD_LIBRARY_PATH -## @description it should go. -## @audience public -## @stability stable -## @replaceable yes -## @param object -## @param [before|after] -## @return 0 = success (added or duplicate) -## @return 1 = failure (doesn't exist or some other reason) -function hadoop_add_ldlibpath -{ - local status - # specialized function for a common use case - hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2" - status=$? - - # note that we export this - export LD_LIBRARY_PATH - return ${status} -} - -## @description Add the common/core Hadoop components to the -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -## @returns 1 on failure, may exit -## @returns 0 on success -function hadoop_add_common_to_classpath -{ - # - # get all of the common jars+config in the path - # - - if [[ -z "${HADOOP_COMMON_HOME}" - || -z "${HADOOP_COMMON_DIR}" - || -z "${HADOOP_COMMON_LIB_JARS_DIR}" ]]; then - hadoop_debug "COMMON_HOME=${HADOOP_COMMON_HOME}" - hadoop_debug "COMMON_DIR=${HADOOP_COMMON_DIR}" - hadoop_debug "COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR}" - hadoop_error "ERROR: HADOOP_COMMON_HOME or related vars are not configured." - exit 1 - fi - - # developers - if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then - hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes" - fi - - hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*' - hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*' -} - -## @description Run libexec/tools/module.sh to add to the classpath -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -## @param module -function hadoop_add_to_classpath_tools -{ - declare module=$1 - - if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then - # shellcheck disable=SC1090 - . 
"${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" - else - hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found." - fi - - if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then - "hadoop_classpath_tools_${module}" - fi -} - -## @description Add the user's custom classpath settings to the -## @description environment -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_add_to_classpath_userpath -{ - # Add the user-specified HADOOP_CLASSPATH to the - # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER - # is not set. - # Add it first or last depending on if user has - # set env-var HADOOP_USER_CLASSPATH_FIRST - # we'll also dedupe it, because we're cool like that. - # - declare -a array - declare -i c=0 - declare -i j - declare -i i - declare idx - - if [[ -n "${HADOOP_CLASSPATH}" ]]; then - # I wonder if Java runs on VMS. - for idx in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do - array[${c}]=${idx} - ((c=c+1)) - done - - # bats gets confused by j getting set to 0 - ((j=c-1)) || ${QATESTMODE} - - if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then - if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then - for ((i=0; i<=j; i++)); do - hadoop_add_classpath "${array[$i]}" after - done - else - for ((i=j; i>=0; i--)); do - hadoop_add_classpath "${array[$i]}" before - done - fi - fi - fi -} - -## @description Routine to configure any OS-specific settings. -## @audience public -## @stability stable -## @replaceable yes -## @return may exit on failure conditions -function hadoop_os_tricks -{ - local bindv6only - - HADOOP_IS_CYGWIN=false - case ${HADOOP_OS_TYPE} in - Darwin) - if [[ -z "${JAVA_HOME}" ]]; then - if [[ -x /usr/libexec/java_home ]]; then - JAVA_HOME="$(/usr/libexec/java_home)" - export JAVA_HOME - else - JAVA_HOME=/Library/Java/Home - export JAVA_HOME - fi - fi - ;; - Linux) - - # Newer versions of glibc use an arena memory allocator that - # causes virtual # memory usage to explode. This interacts badly - # with the many threads that we use in Hadoop. Tune the variable - # down to prevent vmem explosion. - export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4} - # we put this in QA test mode off so that non-Linux can test - if [[ "${QATESTMODE}" = true ]]; then - return - fi - - # NOTE! HADOOP_ALLOW_IPV6 is a developer hook. We leave it - # undocumented in hadoop-env.sh because we don't want users to - # shoot themselves in the foot while devs make IPv6 work. - - bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null) - - if [[ -n "${bindv6only}" ]] && - [[ "${bindv6only}" -eq "1" ]] && - [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then - hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 " - hadoop_error "ERROR: Hadoop networking could be broken. Aborting." - hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6" - exit 1 - fi - ;; - CYGWIN*) - # Flag that we're running on Cygwin to trigger path translation later. - HADOOP_IS_CYGWIN=true - ;; - esac -} - -## @description Configure/verify ${JAVA_HOME} -## @audience public -## @stability stable -## @replaceable yes -## @return may exit on failure conditions -function hadoop_java_setup -{ - # Bail if we did not detect it - if [[ -z "${JAVA_HOME}" ]]; then - hadoop_error "ERROR: JAVA_HOME is not set and could not be found." - exit 1 - fi - - if [[ ! -d "${JAVA_HOME}" ]]; then - hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist." - exit 1 - fi - - JAVA="${JAVA_HOME}/bin/java" - - if [[ ! 
-x "$JAVA" ]]; then - hadoop_error "ERROR: $JAVA is not executable." - exit 1 - fi -} - -## @description Finish Java JNI paths prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_libpaths -{ - if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then - hadoop_translate_cygwin_path JAVA_LIBRARY_PATH - hadoop_add_param HADOOP_OPTS java.library.path \ - "-Djava.library.path=${JAVA_LIBRARY_PATH}" - export LD_LIBRARY_PATH - fi -} - -## @description Finish Java heap parameters prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_hadoop_heap -{ - if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then - if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m" - fi - hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}" - fi - - # backwards compatibility - if [[ -n "${HADOOP_HEAPSIZE}" ]]; then - if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m" - fi - hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}" - fi - - if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then - if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then - HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m" - fi - hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}" - fi -} - -## @description Converts the contents of the variable name -## @description `varnameref` into the equivalent Windows path. -## @description If the second parameter is true, then `varnameref` -## @description is treated as though it was a path list. -## @audience public -## @stability stable -## @replaceable yes -## @param varnameref -## @param [true] -function hadoop_translate_cygwin_path -{ - if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then - if [[ "$2" = "true" ]]; then - #shellcheck disable=SC2016 - eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)' - else - #shellcheck disable=SC2016 - eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)' - fi - fi -} - -## @description Adds the HADOOP_CLIENT_OPTS variable to -## @description HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false -## @audience public -## @stability stable -## @replaceable yes -function hadoop_add_client_opts -{ - if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false - || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then - hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}" - fi -} - -## @description Finish configuring Hadoop specific system properties -## @description prior to executing Java -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize_hadoop_opts -{ - hadoop_translate_cygwin_path HADOOP_LOG_DIR - hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}" - hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}" - hadoop_translate_cygwin_path HADOOP_HOME - export HADOOP_HOME - hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}" - hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}" - hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}" - hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}" - hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}" -} - -## @description Finish Java classpath prior to execution -## @audience private -## @stability evolving -## @replaceable yes -function 
hadoop_finalize_classpath -{ - hadoop_add_classpath "${HADOOP_CONF_DIR}" before - - # user classpath gets added at the last minute. this allows - # override of CONF dirs and more - hadoop_add_to_classpath_userpath - hadoop_translate_cygwin_path CLASSPATH true -} - -## @description Finish all the remaining environment settings prior -## @description to executing Java. This is a wrapper that calls -## @description the other `finalize` routines. -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_finalize -{ - hadoop_shellprofiles_finalize - - hadoop_finalize_classpath - hadoop_finalize_libpaths - hadoop_finalize_hadoop_heap - hadoop_finalize_hadoop_opts - - hadoop_translate_cygwin_path HADOOP_HOME - hadoop_translate_cygwin_path HADOOP_CONF_DIR - hadoop_translate_cygwin_path HADOOP_COMMON_HOME - hadoop_translate_cygwin_path HADOOP_HDFS_HOME - hadoop_translate_cygwin_path HADOOP_YARN_HOME - hadoop_translate_cygwin_path HADOOP_MAPRED_HOME -} - -## @description Print usage information and exit with the passed -## @description `exitcode` -## @audience public -## @stability stable -## @replaceable no -## @param exitcode -## @return This function will always exit. -function hadoop_exit_with_usage -{ - local exitcode=$1 - if [[ -z $exitcode ]]; then - exitcode=1 - fi - # shellcheck disable=SC2034 - if declare -F hadoop_usage >/dev/null ; then - hadoop_usage - elif [[ -x /usr/bin/cowsay ]]; then - /usr/bin/cowsay -f elephant "Sorry, no help available." - else - hadoop_error "Sorry, no help available." - fi - exit $exitcode -} - -## @description Verify that prerequisites have been met prior to -## @description excuting a privileged program. -## @audience private -## @stability evolving -## @replaceable yes -## @return This routine may exit. -function hadoop_verify_secure_prereq -{ - # if you are on an OS like Illumos that has functional roles - # and you are using pfexec, you'll probably want to change - # this. - - if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then - hadoop_error "ERROR: You must be a privileged user in order to run a secure service." - exit 1 - else - return 0 - fi -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_setup_secure_service -{ - # need a more complicated setup? replace me! - - HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR} - HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR} -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_verify_piddir -{ - if [[ -z "${HADOOP_PID_DIR}" ]]; then - hadoop_error "No pid directory defined." - exit 1 - fi - hadoop_mkdir "${HADOOP_PID_DIR}" - touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting." - exit 1 - fi - rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1 -} - -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_verify_logdir -{ - if [[ -z "${HADOOP_LOG_DIR}" ]]; then - hadoop_error "No log directory defined." - exit 1 - fi - hadoop_mkdir "${HADOOP_LOG_DIR}" - touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting." 
- exit 1 - fi - rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1 -} - -## @description Determine the status of the daemon referenced -## @description by `pidfile` -## @audience public -## @stability stable -## @replaceable yes -## @param pidfile -## @return (mostly) LSB 4.1.0 compatible status -function hadoop_status_daemon -{ - # - # LSB 4.1.0 compatible status command (1) - # - # 0 = program is running - # 1 = dead, but still a pid (2) - # 2 = (not used by us) - # 3 = not running - # - # 1 - this is not an endorsement of the LSB - # - # 2 - technically, the specification says /var/run/pid, so - # we should never return this value, but we're giving - # them the benefit of a doubt and returning 1 even if - # our pid is not in in /var/run . - # - - local pidfile=$1 - shift - - local pid - local pspid - - if [[ -f "${pidfile}" ]]; then - pid=$(cat "${pidfile}") - if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then - # this is to check that the running process we found is actually the same - # daemon that we're interested in - if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then - return 0 - fi - fi - return 1 - fi - return 3 -} - -## @description Execute the Java `class`, passing along any `options`. -## @description Additionally, set the Java property -Dproc_`command`. -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param class -## @param [options] -function hadoop_java_exec -{ - # run a java command. this is used for - # non-daemons - - local command=$1 - local class=$2 - shift 2 - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}" - hadoop_debug "java: ${JAVA}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - export CLASSPATH - #shellcheck disable=SC2086 - exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" -} - -## @description Start a non-privileged daemon in the foreground. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param pidfile -## @param [options] -function hadoop_start_daemon -{ - # this is our non-privileged daemon starter - # that fires up a daemon in the *foreground* - # so complex! so wow! much java! - local command=$1 - local class=$2 - local pidfile=$3 - shift 3 - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}" - hadoop_debug "java: ${JAVA}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - # this is for the non-daemon pid creation - #shellcheck disable=SC2086 - echo $$ > "${pidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${command} pid ${pidfile}." - fi - - export CLASSPATH - #shellcheck disable=SC2086 - exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@" -} - -## @description Start a non-privileged daemon in the background. 
-## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param pidfile -## @param outfile -## @param [options] -function hadoop_start_daemon_wrapper -{ - local daemonname=$1 - local class=$2 - local pidfile=$3 - local outfile=$4 - shift 4 - - local counter - - hadoop_rotate_log "${outfile}" - - hadoop_start_daemon "${daemonname}" \ - "$class" \ - "${pidfile}" \ - "$@" >> "${outfile}" 2>&1 < /dev/null & - - # we need to avoid a race condition here - # so let's wait for the fork to finish - # before overriding with the daemonized pid - (( counter=0 )) - while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do - sleep 1 - (( counter++ )) - done - - # this is for daemon pid creation - #shellcheck disable=SC2086 - echo $! > "${pidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${pidfile}." - fi - - # shellcheck disable=SC2086 - renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!" - fi - - # shellcheck disable=SC2086 - disown %+ >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!" - fi - sleep 1 - - # capture the ulimit output - ulimit -a >> "${outfile}" 2>&1 - - # shellcheck disable=SC2086 - if ! ps -p $! >/dev/null 2>&1; then - return 1 - fi - return 0 -} - -## @description Start a privileged daemon in the foreground. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param daemonerrfile -## @param wrapperpidfile -## @param [options] -function hadoop_start_secure_daemon -{ - # this is used to launch a secure daemon in the *foreground* - # - local daemonname=$1 - local class=$2 - - # pid file to create for our daemon - local daemonpidfile=$3 - - # where to send stdout. jsvc has bad habits so this *may* be &1 - # which means you send it to stdout! - local daemonoutfile=$4 - - # where to send stderr. same thing, except &2 = stderr - local daemonerrfile=$5 - local privpidfile=$6 - shift 6 - - hadoop_rotate_log "${daemonoutfile}" - hadoop_rotate_log "${daemonerrfile}" - - # shellcheck disable=SC2153 - jsvc="${JSVC_HOME}/jsvc" - if [[ ! -f "${jsvc}" ]]; then - hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure" - hadoop_error "or privileged daemons. Please download and install jsvc from " - hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ " - hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary." - exit 1 - fi - - # note that shellcheck will throw a - # bogus for-our-use-case 2086 here. - # it doesn't properly support multi-line situations - - hadoop_debug "Final CLASSPATH: ${CLASSPATH}" - hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}" - hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}" - hadoop_debug "jsvc: ${jsvc}" - hadoop_debug "Final HADOOP_DAEMON_JSVC_EXTRA_OPTS: ${HADOOP_DAEMON_JSVC_EXTRA_OPTS}" - hadoop_debug "Class name: ${class}" - hadoop_debug "Command line options: $*" - - #shellcheck disable=SC2086 - echo $$ > "${privpidfile}" 2>/dev/null - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${privpidfile}." 
- fi - - # shellcheck disable=SC2086 - exec "${jsvc}" \ - "-Dproc_${daemonname}" \ - ${HADOOP_DAEMON_JSVC_EXTRA_OPTS} \ - -outfile "${daemonoutfile}" \ - -errfile "${daemonerrfile}" \ - -pidfile "${daemonpidfile}" \ - -nodetach \ - -user "${HADOOP_SECURE_USER}" \ - -cp "${CLASSPATH}" \ - ${HADOOP_OPTS} \ - "${class}" "$@" -} - -## @description Start a privileged daemon in the background. -## @audience private -## @stability evolving -## @replaceable yes -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param wrapperpidfile -## @param warpperoutfile -## @param daemonerrfile -## @param [options] -function hadoop_start_secure_daemon_wrapper -{ - # this wraps hadoop_start_secure_daemon to take care - # of the dirty work to launch a daemon in the background! - local daemonname=$1 - local class=$2 - - # same rules as hadoop_start_secure_daemon except we - # have some additional parameters - - local daemonpidfile=$3 - - local daemonoutfile=$4 - - # the pid file of the subprocess that spawned our - # secure launcher - local jsvcpidfile=$5 - - # the output of the subprocess that spawned our secure - # launcher - local jsvcoutfile=$6 - - local daemonerrfile=$7 - shift 7 - - local counter - - hadoop_rotate_log "${jsvcoutfile}" - - hadoop_start_secure_daemon \ - "${daemonname}" \ - "${class}" \ - "${daemonpidfile}" \ - "${daemonoutfile}" \ - "${daemonerrfile}" \ - "${jsvcpidfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null & - - # we need to avoid a race condition here - # so let's wait for the fork to finish - # before overriding with the daemonized pid - (( counter=0 )) - while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do - sleep 1 - (( counter++ )) - done - - #shellcheck disable=SC2086 - if ! echo $! > "${jsvcpidfile}"; then - hadoop_error "ERROR: Cannot write ${daemonname} pid ${jsvcpidfile}." - fi - - sleep 1 - #shellcheck disable=SC2086 - renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!" - fi - if [[ -f "${daemonpidfile}" ]]; then - #shellcheck disable=SC2046 - renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}" 2>/dev/null) >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot set priority of ${daemonname} process $(cat "${daemonpidfile}" 2>/dev/null)" - fi - fi - #shellcheck disable=SC2046 - disown %+ >/dev/null 2>&1 - if [[ $? -gt 0 ]]; then - hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!" - fi - # capture the ulimit output - su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1 - #shellcheck disable=SC2086 - if ! ps -p $! >/dev/null 2>&1; then - return 1 - fi - return 0 -} - -## @description Wait till process dies or till timeout -## @audience private -## @stability evolving -## @param pid -## @param timeout -function wait_process_to_die_or_timeout -{ - local pid=$1 - local timeout=$2 - - # Normalize timeout - # Round up or down - timeout=$(printf "%.0f\n" "${timeout}") - if [[ ${timeout} -lt 1 ]]; then - # minimum 1 second - timeout=1 - fi - - # Wait to see if it's still alive - for (( i=0; i < "${timeout}"; i++ )) - do - if kill -0 "${pid}" > /dev/null 2>&1; then - sleep 1 - else - break - fi - done -} - -## @description Stop the non-privileged `command` daemon with that -## @description that is running at `pidfile`. 
-## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param pidfile -function hadoop_stop_daemon -{ - local cmd=$1 - local pidfile=$2 - shift 2 - - local pid - local cur_pid - - if [[ -f "${pidfile}" ]]; then - pid=$(cat "$pidfile") - - kill "${pid}" >/dev/null 2>&1 - - wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" - - if kill -0 "${pid}" > /dev/null 2>&1; then - hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9" - kill -9 "${pid}" >/dev/null 2>&1 - fi - wait_process_to_die_or_timeout "${pid}" "${HADOOP_STOP_TIMEOUT}" - if ps -p "${pid}" > /dev/null 2>&1; then - hadoop_error "ERROR: Unable to kill ${pid}" - else - cur_pid=$(cat "$pidfile") - if [[ "${pid}" = "${cur_pid}" ]]; then - rm -f "${pidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: pid has changed for ${cmd}, skip deleting pid file" - fi - fi - fi -} - -## @description Stop the privileged `command` daemon with that -## @description that is running at `daemonpidfile` and launched with -## @description the wrapper at `wrapperpidfile`. -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param daemonpidfile -## @param wrapperpidfile -function hadoop_stop_secure_daemon -{ - local command=$1 - local daemonpidfile=$2 - local privpidfile=$3 - shift 3 - local ret - - local daemon_pid - local priv_pid - local cur_daemon_pid - local cur_priv_pid - - daemon_pid=$(cat "$daemonpidfile") - priv_pid=$(cat "$privpidfile") - - hadoop_stop_daemon "${command}" "${daemonpidfile}" - ret=$? - - cur_daemon_pid=$(cat "$daemonpidfile") - cur_priv_pid=$(cat "$privpidfile") - - if [[ "${daemon_pid}" = "${cur_daemon_pid}" ]]; then - rm -f "${daemonpidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: daemon pid has changed for ${command}, skip deleting daemon pid file" - fi - - if [[ "${priv_pid}" = "${cur_priv_pid}" ]]; then - rm -f "${privpidfile}" >/dev/null 2>&1 - else - hadoop_error "WARNING: priv pid has changed for ${command}, skip deleting priv pid file" - fi - return ${ret} -} - -## @description Manage a non-privileged daemon. -## @audience private -## @stability evolving -## @replaceable yes -## @param [start|stop|status|default] -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param [options] -function hadoop_daemon_handler -{ - local daemonmode=$1 - local daemonname=$2 - local class=$3 - local daemon_pidfile=$4 - local daemon_outfile=$5 - shift 5 - - case ${daemonmode} in - status) - hadoop_status_daemon "${daemon_pidfile}" - exit $? - ;; - - stop) - hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}" - exit $? - ;; - - ##COMPAT -- older hadoops would also start daemons by default - start|default) - hadoop_verify_piddir - hadoop_verify_logdir - hadoop_status_daemon "${daemon_pidfile}" - if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." - exit 1 - else - # stale pid file, so just remove it and continue on - rm -f "${daemon_pidfile}" >/dev/null 2>&1 - fi - ##COMPAT - differenticate between --daemon start and nothing - # "nothing" shouldn't detach - if [[ "$daemonmode" = "default" ]]; then - hadoop_start_daemon "${daemonname}" "${class}" "${daemon_pidfile}" "$@" - else - hadoop_start_daemon_wrapper "${daemonname}" \ - "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@" - fi - ;; - esac -} - -## @description Manage a privileged daemon. 
-## @audience private -## @stability evolving -## @replaceable yes -## @param [start|stop|status|default] -## @param command -## @param class -## @param daemonpidfile -## @param daemonoutfile -## @param wrapperpidfile -## @param wrapperoutfile -## @param wrappererrfile -## @param [options] -function hadoop_secure_daemon_handler -{ - local daemonmode=$1 - local daemonname=$2 - local classname=$3 - local daemon_pidfile=$4 - local daemon_outfile=$5 - local priv_pidfile=$6 - local priv_outfile=$7 - local priv_errfile=$8 - shift 8 - - case ${daemonmode} in - status) - hadoop_status_daemon "${daemon_pidfile}" - exit $? - ;; - - stop) - hadoop_stop_secure_daemon "${daemonname}" \ - "${daemon_pidfile}" "${priv_pidfile}" - exit $? - ;; - - ##COMPAT -- older hadoops would also start daemons by default - start|default) - hadoop_verify_piddir - hadoop_verify_logdir - hadoop_status_daemon "${daemon_pidfile}" - if [[ $? == 0 ]]; then - hadoop_error "${daemonname} is running as process $(cat "${daemon_pidfile}"). Stop it first." - exit 1 - else - # stale pid file, so just remove it and continue on - rm -f "${daemon_pidfile}" >/dev/null 2>&1 - fi - - ##COMPAT - differenticate between --daemon start and nothing - # "nothing" shouldn't detach - if [[ "${daemonmode}" = "default" ]]; then - hadoop_start_secure_daemon "${daemonname}" "${classname}" \ - "${daemon_pidfile}" "${daemon_outfile}" \ - "${priv_errfile}" "${priv_pidfile}" "$@" - else - hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \ - "${daemon_pidfile}" "${daemon_outfile}" \ - "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@" - fi - ;; - esac -} - -## @description autodetect whether this is a priv subcmd -## @description by whether or not a priv user var exists -## @description and if HADOOP_SECURE_CLASSNAME is defined -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @return 1 = not priv -## @return 0 = priv -function hadoop_detect_priv_subcmd -{ - declare program=$1 - declare command=$2 - - if [[ -z "${HADOOP_SECURE_CLASSNAME}" ]]; then - hadoop_debug "No secure classname defined." - return 1 - fi - - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER) - if [[ -z "${!uvar}" ]]; then - hadoop_debug "No secure user defined." - return 1 - fi - return 0 -} - -## @description Build custom subcommand var -## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @param customid -## @return string -function hadoop_build_custom_subcmd_var -{ - declare program=$1 - declare command=$2 - declare custom=$3 - declare uprogram - declare ucommand - - if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then - uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]') - ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]') - else - uprogram=${program^^} - ucommand=${command^^} - fi - - echo "${uprogram}_${ucommand}_${custom}" -} - -## @description Verify that username in a var converts to user id -## @audience public -## @stability stable -## @replaceable yes -## @param userstring -## @return 0 for success -## @return 1 for failure -function hadoop_verify_user_resolves -{ - declare userstr=$1 - - if [[ -z ${userstr} || -z ${!userstr} ]] ; then - return 1 - fi - - id -u "${!userstr}" >/dev/null 2>&1 -} - -## @description Verify that ${USER} is allowed to execute the -## @description given subcommand. 
-## @audience public -## @stability stable -## @replaceable yes -## @param command -## @param subcommand -## @return return 0 on success -## @return exit 1 on failure -function hadoop_verify_user_perm -{ - declare program=$1 - declare command=$2 - declare uvar - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - - if [[ -n ${!uvar} ]]; then - if [[ ${!uvar} != "${USER}" ]]; then - hadoop_error "ERROR: ${command} can only be executed by ${!uvar}." - exit 1 - fi - fi - return 0 -} - -## @description Verify that ${USER} is allowed to execute the -## @description given subcommand. -## @audience public -## @stability stable -## @replaceable yes -## @param subcommand -## @return 1 on no re-exec needed -## @return 0 on need to re-exec -function hadoop_need_reexec -{ - declare program=$1 - declare command=$2 - declare uvar - - # we've already been re-execed, bail - - if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then - return 1 - fi - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - # if we have privilege, and the _USER is defined, and _USER is - # set to someone who isn't us, then yes, we should re-exec. - # otherwise no, don't re-exec and let the system deal with it. - - if hadoop_privilege_check; then - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER) - if [[ -n ${!uvar} ]]; then - if [[ ${!uvar} != "${USER}" ]]; then - return 0 - fi - fi - fi - return 1 -} - -## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS. -## @description Also handles the deprecated cases from pre-3.x. -## @audience public -## @stability evolving -## @replaceable yes -## @param program -## @param subcommand -## @return will exit on failure conditions -function hadoop_subcommand_opts -{ - declare program=$1 - declare command=$2 - declare uvar - declare depvar - declare uprogram - declare ucommand - - if [[ -z "${program}" || -z "${command}" ]]; then - return 1 - fi - - if [[ ${command} =~ \. ]]; then - return 1 - fi - - # bash 4 and up have built-in ways to upper and lower - # case the contents of vars. This is faster than - # calling tr. - - ## We don't call hadoop_build_custom_subcmd_var here - ## since we need to construct this for the deprecation - ## cases. For Hadoop 4.x, this needs to get cleaned up. - - if [[ -z "${BASH_VERSINFO[0]}" ]] \ - || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then - uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]') - ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]') - else - uprogram=${program^^} - ucommand=${command^^} - fi - - uvar="${uprogram}_${ucommand}_OPTS" - - # Let's handle all of the deprecation cases early - # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS - - depvar="HADOOP_${ucommand}_OPTS" - - if [[ "${depvar}" != "${uvar}" ]]; then - if [[ -n "${!depvar}" ]]; then - hadoop_deprecate_envvar "${depvar}" "${uvar}" - fi - fi - - if [[ -n ${!uvar} ]]; then - hadoop_debug "Appending ${uvar} onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}" - return 0 - fi -} - -## @description Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS. 
-## @description This *does not* handle the pre-3.x deprecated cases -## @audience public -## @stability stable -## @replaceable yes -## @param program -## @param subcommand -## @return will exit on failure conditions -function hadoop_subcommand_secure_opts -{ - declare program=$1 - declare command=$2 - declare uvar - declare uprogram - declare ucommand - - if [[ -z "${program}" || -z "${command}" ]]; then - return 1 - fi - - # HDFS_DATANODE_SECURE_EXTRA_OPTS - # HDFS_NFS3_SECURE_EXTRA_OPTS - # ... - uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_EXTRA_OPTS) - - if [[ -n ${!uvar} ]]; then - hadoop_debug "Appending ${uvar} onto HADOOP_OPTS" - HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}" - return 0 - fi -} - -## @description Perform the 'hadoop classpath', etc subcommand with the given -## @description parameters -## @audience private -## @stability evolving -## @replaceable yes -## @param [parameters] -## @return will print & exit with no params -function hadoop_do_classpath_subcommand -{ - if [[ "$#" -gt 1 ]]; then - eval "$1"=org.apache.hadoop.util.Classpath - else - hadoop_finalize - echo "${CLASSPATH}" - exit 0 - fi -} - -## @description generic shell script option parser. sets -## @description HADOOP_PARSE_COUNTER to set number the -## @description caller should shift -## @audience private -## @stability evolving -## @replaceable yes -## @param [parameters, typically "$@"] -function hadoop_parse_args -{ - HADOOP_DAEMON_MODE="default" - HADOOP_PARSE_COUNTER=0 - - # not all of the options supported here are supported by all commands - # however these are: - hadoop_add_option "--config dir" "Hadoop config directory" - hadoop_add_option "--debug" "turn on shell script debug mode" - hadoop_add_option "--help" "usage information" - - while true; do - hadoop_debug "hadoop_parse_args: processing $1" - case $1 in - --buildpaths) - HADOOP_ENABLE_BUILD_PATHS=true - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --config) - shift - confdir=$1 - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - if [[ -d "${confdir}" ]]; then - HADOOP_CONF_DIR="${confdir}" - elif [[ -z "${confdir}" ]]; then - hadoop_error "ERROR: No parameter provided for --config " - hadoop_exit_with_usage 1 - else - hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\"" - hadoop_exit_with_usage 1 - fi - ;; - --daemon) - shift - HADOOP_DAEMON_MODE=$1 - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - if [[ -z "${HADOOP_DAEMON_MODE}" || \ - ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then - hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"." - hadoop_exit_with_usage 1 - fi - ;; - --debug) - shift - HADOOP_SHELL_SCRIPT_DEBUG=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --help|-help|-h|help|--h|--\?|-\?|\?) 
- hadoop_exit_with_usage 0 - ;; - --hostnames) - shift - HADOOP_WORKER_NAMES="$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --hosts) - shift - hadoop_populate_workers_file "$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --loglevel) - shift - # shellcheck disable=SC2034 - HADOOP_LOGLEVEL="$1" - shift - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2)) - ;; - --reexec) - shift - if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then - hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called" - exit 1 - fi - HADOOP_REEXECED_CMD=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - --workers) - shift - # shellcheck disable=SC2034 - HADOOP_WORKER_MODE=true - ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1)) - ;; - *) - break - ;; - esac - done - - hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}" -} - -## @description Handle subcommands from main program entries -## @audience private -## @stability evolving -## @replaceable yes -function hadoop_generic_java_subcmd_handler -{ - declare priv_outfile - declare priv_errfile - declare priv_pidfile - declare daemon_outfile - declare daemon_pidfile - declare secureuser - - # The default/expected way to determine if a daemon is going to run in secure - # mode is defined by hadoop_detect_priv_subcmd. If this returns true - # then setup the secure user var and tell the world we're in secure mode - - if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then - HADOOP_SUBCMD_SECURESERVICE=true - secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER) - - if ! hadoop_verify_user_resolves "${secureuser}"; then - hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting." - exit 1 - fi - - HADOOP_SECURE_USER="${!secureuser}" - fi - - # check if we're running in secure mode. - # breaking this up from the above lets 3rd parties - # do things a bit different - # secure services require some extra setup - # if yes, then we need to define all of the priv and daemon stuff - # if not, then we just need to define daemon stuff. - # note the daemon vars are purposefully different between the two - - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - - hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - - hadoop_verify_secure_prereq - hadoop_setup_secure_service - priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err" - priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - else - daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out" - daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid" - fi - - # are we actually in daemon mode? - # if yes, use the daemon logger and the appropriate log file. 
- if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then - HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}" - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log" - else - HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log" - fi - fi - - # finish defining the environment: system properties, env vars, class paths, etc. - hadoop_finalize - - # do the hard work of launching a daemon or just executing our interactive - # java class - if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then - if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then - hadoop_secure_daemon_handler \ - "${HADOOP_DAEMON_MODE}" \ - "${HADOOP_SUBCMD}" \ - "${HADOOP_SECURE_CLASSNAME}" \ - "${daemon_pidfile}" \ - "${daemon_outfile}" \ - "${priv_pidfile}" \ - "${priv_outfile}" \ - "${priv_errfile}" \ - "${HADOOP_SUBCMD_ARGS[@]}" - else - hadoop_daemon_handler \ - "${HADOOP_DAEMON_MODE}" \ - "${HADOOP_SUBCMD}" \ - "${HADOOP_CLASSNAME}" \ - "${daemon_pidfile}" \ - "${daemon_outfile}" \ - "${HADOOP_SUBCMD_ARGS[@]}" - fi - exit $? - else - hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}" - fi -} diff --git a/hadoop-hdds/common/src/main/bin/workers.sh b/hadoop-hdds/common/src/main/bin/workers.sh deleted file mode 100755 index 05bc5fd8f0f..00000000000 --- a/hadoop-hdds/common/src/main/bin/workers.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Run a shell command on all worker hosts. -# -# Environment Variables -# -# HADOOP_WORKERS File naming remote hosts. -# Default is ${HADOOP_CONF_DIR}/workers. -# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf. -# HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands. -# HADOOP_SSH_OPTS Options passed to ssh when running remote commands. -## - -function hadoop_usage -{ - echo "Usage: workers.sh [--config confdir] command..." -} - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - this="${BASH_SOURCE-$0}" - bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 
2>&1 - exit 1 -fi - -# if no args specified, show usage -if [[ $# -le 0 ]]; then - hadoop_exit_with_usage 1 -fi - -hadoop_connect_to_hosts "$@" diff --git a/hadoop-hdds/common/src/main/conf/core-site.xml b/hadoop-hdds/common/src/main/conf/core-site.xml deleted file mode 100644 index d2ddf893e49..00000000000 --- a/hadoop-hdds/common/src/main/conf/core-site.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd b/hadoop-hdds/common/src/main/conf/hadoop-env.cmd deleted file mode 100644 index 971869597f5..00000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.cmd +++ /dev/null @@ -1,90 +0,0 @@ -@echo off -@rem Licensed to the Apache Software Foundation (ASF) under one or more -@rem contributor license agreements. See the NOTICE file distributed with -@rem this work for additional information regarding copyright ownership. -@rem The ASF licenses this file to You under the Apache License, Version 2.0 -@rem (the "License"); you may not use this file except in compliance with -@rem the License. You may obtain a copy of the License at -@rem -@rem http://www.apache.org/licenses/LICENSE-2.0 -@rem -@rem Unless required by applicable law or agreed to in writing, software -@rem distributed under the License is distributed on an "AS IS" BASIS, -@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -@rem See the License for the specific language governing permissions and -@rem limitations under the License. - -@rem Set Hadoop-specific environment variables here. - -@rem The only required environment variable is JAVA_HOME. All others are -@rem optional. When running a distributed configuration it is best to -@rem set JAVA_HOME in this file, so that it is correctly defined on -@rem remote nodes. - -@rem The java implementation to use. Required. -set JAVA_HOME=%JAVA_HOME% - -@rem The jsvc implementation to use. Jsvc is required to run secure datanodes. -@rem set JSVC_HOME=%JSVC_HOME% - -@rem set HADOOP_CONF_DIR= - -@rem Extra Java CLASSPATH elements. Automatically insert capacity-scheduler. -if exist %HADOOP_HOME%\contrib\capacity-scheduler ( - if not defined HADOOP_CLASSPATH ( - set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar - ) else ( - set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar - ) -) - -@rem The maximum amount of heap to use, in MB. Default is 1000. -@rem set HADOOP_HEAPSIZE= -@rem set HADOOP_NAMENODE_INIT_HEAPSIZE="" - -@rem Extra Java runtime options. Empty by default. 
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true - -@rem Command specific options appended to HADOOP_OPTS when specified -if not defined HADOOP_SECURITY_LOGGER ( - set HADOOP_SECURITY_LOGGER=INFO,RFAS -) -if not defined HDFS_AUDIT_LOGGER ( - set HDFS_AUDIT_LOGGER=INFO,NullAppender -) - -set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS% -set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS% -set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS% - -@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc) -set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS% -@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%" - -@rem On secure datanodes, user to run the datanode as after dropping privileges -set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER% - -@rem Where log files are stored. %HADOOP_HOME%/logs by default. -@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME% - -@rem Where log files are stored in the secure data environment. -set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER% - -@rem -@rem Router-based HDFS Federation specific parameters -@rem Specify the JVM options to be used when starting the RBF Routers. -@rem These options will be appended to the options specified as HADOOP_OPTS -@rem and therefore may override any similar flags set in HADOOP_OPTS -@rem -@rem set HADOOP_DFSROUTER_OPTS="" -@rem - -@rem The directory where pid files are stored. /tmp by default. -@rem NOTE: this should be set to a directory that can only be written to by -@rem the user that will run the hadoop daemons. Otherwise there is the -@rem potential for a symlink attack. -set HADOOP_PID_DIR=%HADOOP_PID_DIR% -set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR% - -@rem A string representing this instance of hadoop. %USERNAME% by default. -set HADOOP_IDENT_STRING=%USERNAME% diff --git a/hadoop-hdds/common/src/main/conf/hadoop-env.sh b/hadoop-hdds/common/src/main/conf/hadoop-env.sh deleted file mode 100644 index e43cd95b047..00000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-env.sh +++ /dev/null @@ -1,439 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Set Hadoop-specific environment variables here. - -## -## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS. -## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS. THEREFORE, -## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE -## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh. 
-## -## Precedence rules: -## -## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults -## -## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults -## - -# Many of the options here are built from the perspective that users -# may want to provide OVERWRITING values on the command line. -# For example: -# -# JAVA_HOME=/usr/java/testing hdfs dfs -ls -# -# Therefore, the vast majority (BUT NOT ALL!) of these defaults -# are configured for substitution and not append. If append -# is preferable, modify this file accordingly. - -### -# Generic settings for HADOOP -### - -# Technically, the only required environment variable is JAVA_HOME. -# All others are optional. However, the defaults are probably not -# preferred. Many sites configure these options outside of Hadoop, -# such as in /etc/profile.d - -# The java implementation to use. By default, this environment -# variable is REQUIRED on ALL platforms except OS X! -# export JAVA_HOME= - -# Location of Hadoop. By default, Hadoop will attempt to determine -# this location based upon its execution path. -# export HADOOP_HOME= - -# Location of Hadoop's configuration information. i.e., where this -# file is living. If this is not defined, Hadoop will attempt to -# locate it based upon its execution path. -# -# NOTE: It is recommend that this variable not be set here but in -# /etc/profile.d or equivalent. Some options (such as -# --config) may react strangely otherwise. -# -# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop - -# The maximum amount of heap to use (Java -Xmx). If no unit -# is provided, it will be converted to MB. Daemons will -# prefer any Xmx setting in their respective _OPT variable. -# There is no default; the JVM will autoscale based upon machine -# memory size. -# export HADOOP_HEAPSIZE_MAX= - -# The minimum amount of heap to use (Java -Xms). If no unit -# is provided, it will be converted to MB. Daemons will -# prefer any Xms setting in their respective _OPT variable. -# There is no default; the JVM will autoscale based upon machine -# memory size. -# export HADOOP_HEAPSIZE_MIN= - -# Enable extra debugging of Hadoop's JAAS binding, used to set up -# Kerberos security. -# export HADOOP_JAAS_DEBUG=true - -# Extra Java runtime options for all Hadoop commands. We don't support -# IPv6 yet/still, so by default the preference is set to IPv4. -# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true" -# For Kerberos debugging, an extended option set logs more information -# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug" - -# Some parts of the shell code may do special things dependent upon -# the operating system. We have to set this here. See the next -# section as to why.... -export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)} - -# Extra Java runtime options for some Hadoop commands -# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for -# such commands. In most cases, # this should be left empty and -# let users supply it on the command line. -# export HADOOP_CLIENT_OPTS="" - -# -# A note about classpaths. -# -# By default, Apache Hadoop overrides Java's CLASSPATH -# environment variable. It is configured such -# that it starts out blank with new entries added after passing -# a series of checks (file/dir exists, not already listed aka -# de-deduplication). During de-deduplication, wildcards and/or -# directories are *NOT* expanded to keep it simple. 
Therefore, -# if the computed classpath has two specific mentions of -# awesome-methods-1.0.jar, only the first one added will be seen. -# If two directories are in the classpath that both contain -# awesome-methods-1.0.jar, then Java will pick up both versions. - -# An additional, custom CLASSPATH. Site-wide configs should be -# handled via the shellprofile functionality, utilizing the -# hadoop_add_classpath function for greater control and much -# harder for apps/end-users to accidentally override. -# Similarly, end users should utilize ${HOME}/.hadooprc . -# This variable should ideally only be used as a short-cut, -# interactive way for temporary additions on the command line. -# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine" - -# Should HADOOP_CLASSPATH be first in the official CLASSPATH? -# export HADOOP_USER_CLASSPATH_FIRST="yes" - -# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along -# with the main jar are handled by a separate isolated -# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job' -# is utilized. If it is set, HADOOP_CLASSPATH and -# HADOOP_USER_CLASSPATH_FIRST are ignored. -# export HADOOP_USE_CLIENT_CLASSLOADER=true - -# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of -# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER -# is enabled. Names ending in '.' (period) are treated as package names, and -# names starting with a '-' are treated as negative matches. For example, -# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop." - -# Enable optional, bundled Hadoop features -# This is a comma delimited list. It may NOT be overridden via .hadooprc -# Entries may be added/removed as needed. -# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@" - -### -# Options for remote shell connectivity -### - -# There are some optional components of hadoop that allow for -# command and control of remote hosts. For example, -# start-dfs.sh will attempt to bring up all NNs, DNS, etc. - -# Options to pass to SSH when one of the "log into a host and -# start/stop daemons" scripts is executed -# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s" - -# The built-in ssh handler will limit itself to 10 simultaneous connections. -# For pdsh users, this sets the fanout size ( -f ) -# Change this to increase/decrease as necessary. -# export HADOOP_SSH_PARALLEL=10 - -# Filename which contains all of the hosts for any remote execution -# helper scripts # such as workers.sh, start-dfs.sh, etc. -# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers" - -### -# Options for all daemons -### -# - -# -# Many options may also be specified as Java properties. It is -# very common, and in many cases, desirable, to hard-set these -# in daemon _OPTS variables. Where applicable, the appropriate -# Java property is also identified. Note that many are re-used -# or set differently in certain contexts (e.g., secure vs -# non-secure) -# - -# Where (primarily) daemon log files are stored. -# ${HADOOP_HOME}/logs by default. -# Java property: hadoop.log.dir -# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs - -# A string representing this instance of hadoop. $USER by default. -# This is used in writing log and pid files, so keep that in mind! -# Java property: hadoop.id.str -# export HADOOP_IDENT_STRING=$USER - -# How many seconds to pause after stopping a daemon -# export HADOOP_STOP_TIMEOUT=5 - -# Where pid files are stored. 
/tmp by default. -# export HADOOP_PID_DIR=/tmp - -# Default log4j setting for interactive commands -# Java property: hadoop.root.logger -# export HADOOP_ROOT_LOGGER=INFO,console - -# Default log4j setting for daemons spawned explicitly by -# --daemon option of hadoop, hdfs, mapred and yarn command. -# Java property: hadoop.root.logger -# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA - -# Default log level and output location for security-related messages. -# You will almost certainly want to change this on a per-daemon basis via -# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the -# defaults for the NN and 2NN override this by default.) -# Java property: hadoop.security.logger -# export HADOOP_SECURITY_LOGGER=INFO,NullAppender - -# Default process priority level -# Note that sub-processes will also run at this level! -# export HADOOP_NICENESS=0 - -# Default name for the service level authorization file -# Java property: hadoop.policy.file -# export HADOOP_POLICYFILE="hadoop-policy.xml" - -# -# NOTE: this is not used by default! <----- -# You can define variables right here and then re-use them later on. -# For example, it is common to use the same garbage collection settings -# for all the daemons. So one could define: -# -# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps" -# -# .. and then use it as per the b option under the namenode. - -### -# Secure/privileged execution -### - -# -# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons -# on privileged ports. This functionality can be replaced by providing -# custom functions. See hadoop-functions.sh for more information. -# - -# The jsvc implementation to use. Jsvc is required to run secure datanodes -# that bind to privileged ports to provide authentication of data transfer -# protocol. Jsvc is not required if SASL is configured for authentication of -# data transfer protocol using non-privileged ports. -# export JSVC_HOME=/usr/bin - -# -# This directory contains pids for secure and privileged processes. -#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR} - -# -# This directory contains the logs for secure and privileged processes. -# Java property: hadoop.log.dir -# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR} - -# -# When running a secure daemon, the default value of HADOOP_IDENT_STRING -# ends up being a bit bogus. Therefore, by default, the code will -# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER. If one wants -# to keep HADOOP_IDENT_STRING untouched, then uncomment this line. -# export HADOOP_SECURE_IDENT_PRESERVE="true" - -### -# NameNode specific parameters -### - -# Default log level and output location for file system related change -# messages. For non-namenode daemons, the Java property must be set in -# the appropriate _OPTS if one wants something other than INFO,NullAppender -# Java property: hdfs.audit.logger -# export HDFS_AUDIT_LOGGER=INFO,NullAppender - -# Specify the JVM options to be used when starting the NameNode. 
-# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# a) Set JMX options -# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026" -# -# b) Set garbage collection logs -# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')" -# -# c) ... or set them directly -# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')" - -# this is the default: -# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS" - -### -# SecondaryNameNode specific parameters -### -# Specify the JVM options to be used when starting the SecondaryNameNode. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# This is the default: -# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS" - -### -# DataNode specific parameters -### -# Specify the JVM options to be used when starting the DataNode. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# This is the default: -# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS" - -# On secure datanodes, user to run the datanode as after dropping privileges. -# This **MUST** be uncommented to enable secure HDFS if using privileged ports -# to provide authentication of data transfer protocol. This **MUST NOT** be -# defined if SASL is configured for authentication of data transfer protocol -# using non-privileged ports. -# This will replace the hadoop.id.str Java property in secure mode. -# export HDFS_DATANODE_SECURE_USER=hdfs - -# Supplemental options for secure datanodes -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server" - -### -# NFS3 Gateway specific parameters -### -# Specify the JVM options to be used when starting the NFS3 Gateway. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_NFS3_OPTS="" - -# Specify the JVM options to be used when starting the Hadoop portmapper. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_PORTMAP_OPTS="-Xmx512m" - -# Supplemental options for priviliged gateways -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server" - -# On privileged gateways, user to run the gateway as after dropping privileges -# This will replace the hadoop.id.str Java property in secure mode. -# export HDFS_NFS3_SECURE_USER=nfsserver - -### -# ZKFailoverController specific parameters -### -# Specify the JVM options to be used when starting the ZKFailoverController. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_ZKFC_OPTS="" - -### -# QuorumJournalNode specific parameters -### -# Specify the JVM options to be used when starting the QuorumJournalNode. 
-# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_JOURNALNODE_OPTS="" - -### -# HDFS Balancer specific parameters -### -# Specify the JVM options to be used when starting the HDFS Balancer. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_BALANCER_OPTS="" - -### -# HDFS Mover specific parameters -### -# Specify the JVM options to be used when starting the HDFS Mover. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_MOVER_OPTS="" - -### -# Router-based HDFS Federation specific parameters -# Specify the JVM options to be used when starting the RBF Routers. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_DFSROUTER_OPTS="" - -### -# Ozone Manager specific parameters -### -# Specify the JVM options to be used when starting the Ozone Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_OM_OPTS="" - -### -# HDFS StorageContainerManager specific parameters -### -# Specify the JVM options to be used when starting the HDFS Storage Container Manager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -# -# export HDFS_STORAGECONTAINERMANAGER_OPTS="" - -### -# Advanced Users Only! -### - -# -# When building Hadoop, one can add the class paths to the commands -# via this special env var: -# export HADOOP_ENABLE_BUILD_PATHS="true" - -# -# To prevent accidents, shell commands be (superficially) locked -# to only allow certain users to execute certain subcommands. -# It uses the format of (command)_(subcommand)_USER. -# -# For example, to limit who can execute the namenode command, -# export HDFS_NAMENODE_USER=hdfs - - -### -# Registry DNS specific parameters -### -# For privileged registry DNS, user to run as after dropping privileges -# This will replace the hadoop.id.str Java property in secure mode. -# export HADOOP_REGISTRYDNS_SECURE_USER=yarn - -# Supplemental options for privileged registry DNS -# By default, Hadoop uses jsvc which needs to know to launch a -# server jvm. -# export HADOOP_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server" diff --git a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties b/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties deleted file mode 100644 index f67bf8e4c5b..00000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-metrics2.properties +++ /dev/null @@ -1,99 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# syntax: [prefix].[source|sink].[instance].[options] -# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details - -*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink -# default sampling period, in seconds -*.period=10 - -# The namenode-metrics.out will contain metrics from all context -#namenode.sink.file.filename=namenode-metrics.out -# Specifying a special sampling period for namenode: -#namenode.sink.*.period=8 - -#datanode.sink.file.filename=datanode-metrics.out - -#resourcemanager.sink.file.filename=resourcemanager-metrics.out - -#nodemanager.sink.file.filename=nodemanager-metrics.out - -#mrappmaster.sink.file.filename=mrappmaster-metrics.out - -#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out - -# the following example split metrics of different -# context to different sinks (in this case files) -#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink -#nodemanager.sink.file_jvm.context=jvm -#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out -#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink -#nodemanager.sink.file_mapred.context=mapred -#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out - -# -# Below are for sending metrics to Ganglia -# -# for Ganglia 3.0 support -# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30 -# -# for Ganglia 3.1 support -# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 - -# *.sink.ganglia.period=10 - -# default for supportsparse is false -# *.sink.ganglia.supportsparse=true - -#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both -#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 - -# Tag values to use for the ganglia prefix. If not defined no tags are used. -# If '*' all tags are used. If specifying multiple tags separate them with -# commas. Note that the last segment of the property name is the context name. -# -# A typical use of tags is separating the metrics by the HDFS rpc port -# and HDFS service rpc port. 
-# For example: -# With following HDFS configuration: -# dfs.namenode.rpc-address is set as namenodeAddress:9110 -# dfs.namenode.servicerpc-address is set as namenodeAddress:9111 -# If no tags are used, following metric would be gathered: -# rpc.rpc.NumOpenConnections -# If using "*.sink.ganglia.tagsForPrefix.rpc=port", -# following metrics would be gathered: -# rpc.rpc.port=9110.NumOpenConnections -# rpc.rpc.port=9111.NumOpenConnections -# -#*.sink.ganglia.tagsForPrefix.jvm=ProcessName -#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync -#*.sink.ganglia.tagsForPrefix.rpc=port -#*.sink.ganglia.tagsForPrefix.rpcdetailed=port -#*.sink.ganglia.tagsForPrefix.metricssystem=* -#*.sink.ganglia.tagsForPrefix.ugi=* -#*.sink.ganglia.tagsForPrefix.mapred= - -#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 - -#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649 diff --git a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml b/hadoop-hdds/common/src/main/conf/hadoop-policy.xml deleted file mode 100644 index 85e4975a786..00000000000 --- a/hadoop-hdds/common/src/main/conf/hadoop-policy.xml +++ /dev/null @@ -1,275 +0,0 @@ - - - - - - - - - security.client.protocol.acl - * - ACL for ClientProtocol, which is used by user code - via the DistributedFileSystem. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.client.datanode.protocol.acl - * - ACL for ClientDatanodeProtocol, the client-to-datanode protocol - for block recovery. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.datanode.protocol.acl - * - ACL for DatanodeProtocol, which is used by datanodes to - communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.inter.datanode.protocol.acl - * - ACL for InterDatanodeProtocol, the inter-datanode protocol - for updating generation timestamp. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.namenode.protocol.acl - * - ACL for NamenodeProtocol, the protocol used by the secondary - namenode to communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.admin.operations.protocol.acl - * - ACL for AdminOperationsProtocol. Used for admin commands. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. 
- - - - security.refresh.user.mappings.protocol.acl - * - ACL for RefreshUserMappingsProtocol. Used to refresh - users mappings. The ACL is a comma-separated list of user and - group names. The user and group list is separated by a blank. For - e.g. "alice,bob users,wheel". A special value of "*" means all - users are allowed. - - - - security.refresh.policy.protocol.acl - * - ACL for RefreshAuthorizationPolicyProtocol, used by the - dfsadmin and mradmin commands to refresh the security policy in-effect. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.ha.service.protocol.acl - * - ACL for HAService protocol used by HAAdmin to manage the - active and stand-by states of namenode. - - - - security.router.admin.protocol.acl - * - ACL for RouterAdmin Protocol. The ACL is a comma-separated - list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - - security.zkfc.protocol.acl - * - ACL for access to the ZK Failover Controller - - - - - security.qjournal.service.protocol.acl - * - ACL for QJournalProtocol, used by the NN to communicate with - JNs when using the QuorumJournalManager for edit logs. - - - - security.interqjournal.service.protocol.acl - * - ACL for InterQJournalProtocol, used by the JN to - communicate with other JN - - - - - security.mrhs.client.protocol.acl - * - ACL for HSClientProtocol, used by job clients to - communciate with the MR History Server job status etc. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - - - security.resourcetracker.protocol.acl - * - ACL for ResourceTrackerProtocol, used by the - ResourceManager and NodeManager to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.resourcemanager-administration.protocol.acl - * - ACL for ResourceManagerAdministrationProtocol, for admin commands. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationclient.protocol.acl - * - ACL for ApplicationClientProtocol, used by the ResourceManager - and applications submission clients to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationmaster.protocol.acl - * - ACL for ApplicationMasterProtocol, used by the ResourceManager - and ApplicationMasters to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.containermanagement.protocol.acl - * - ACL for ContainerManagementProtocol protocol, used by the NodeManager - and ApplicationMasters to communicate with each other. 
- The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.resourcelocalizer.protocol.acl - * - ACL for ResourceLocalizer protocol, used by the NodeManager - and ResourceLocalizer to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.job.task.protocol.acl - * - ACL for TaskUmbilicalProtocol, used by the map and reduce - tasks to communicate with the parent tasktracker. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.job.client.protocol.acl - * - ACL for MRClientProtocol, used by job clients to - communciate with the MR ApplicationMaster to query job status etc. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationhistory.protocol.acl - * - ACL for ApplicationHistoryProtocol, used by the timeline - server and the generic history service client to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.collector-nodemanager.protocol.acl - * - ACL for CollectorNodemanagerProtocol, used by nodemanager - if timeline service v2 is enabled, for the timeline collector and nodemanager - to communicate with each other. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.applicationmaster-nodemanager.applicationmaster.protocol.acl - * - ACL for ApplicationMasterProtocol, used by the Nodemanager - and ApplicationMasters to communicate. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.distributedscheduling.protocol.acl - * - ACL for DistributedSchedulingAMProtocol, used by the Nodemanager - and Resourcemanager to communicate. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java deleted file mode 100644 index 99972ae9003..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ /dev/null @@ -1,252 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds; - -import org.apache.hadoop.hdds.utils.db.DBProfile; - -/** - * This class contains constants for configuration keys and default values - * used in hdds. - */ -public final class HddsConfigKeys { - - public static final String HDDS_HEARTBEAT_INTERVAL = - "hdds.heartbeat.interval"; - public static final String HDDS_HEARTBEAT_INTERVAL_DEFAULT = - "30s"; - public static final String HDDS_NODE_REPORT_INTERVAL = - "hdds.node.report.interval"; - public static final String HDDS_NODE_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_CONTAINER_REPORT_INTERVAL = - "hdds.container.report.interval"; - public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_PIPELINE_REPORT_INTERVAL = - "hdds.pipeline.report.interval"; - public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL = - "hdds.command.status.report.interval"; - public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT = - "60s"; - public static final String HDDS_CONTAINER_ACTION_MAX_LIMIT = - "hdds.container.action.max.limit"; - public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT = - 20; - public static final String HDDS_PIPELINE_ACTION_MAX_LIMIT = - "hdds.pipeline.action.max.limit"; - public static final int HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT = - 20; - // Configuration to allow volume choosing policy. - public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY = - "hdds.datanode.volume.choosing.policy"; - // DB PKIProfile used by ROCKDB instances. - public static final String HDDS_DB_PROFILE = "hdds.db.profile"; - public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK; - // Once a container usage crosses this threshold, it is eligible for - // closing. - public static final String HDDS_CONTAINER_CLOSE_THRESHOLD = - "hdds.container.close.threshold"; - public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f; - public static final String HDDS_SCM_SAFEMODE_ENABLED = - "hdds.scm.safemode.enabled"; - - public static final boolean HDDS_SCM_SAFEMODE_ENABLED_DEFAULT = true; - public static final String HDDS_SCM_SAFEMODE_MIN_DATANODE = - "hdds.scm.safemode.min.datanode"; - public static final int HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT = 1; - - public static final String - HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT = - "hdds.scm.wait.time.after.safemode.exit"; - - public static final String - HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT = "5m"; - - public static final String HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK = - "hdds.scm.safemode.pipeline-availability.check"; - public static final boolean - HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false; - - // % of containers which should have at least one reported replica - // before SCM comes out of safe mode. - public static final String HDDS_SCM_SAFEMODE_THRESHOLD_PCT = - "hdds.scm.safemode.threshold.pct"; - public static final double HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT = 0.99; - - - // percentage of healthy pipelines, where all 3 datanodes are reported in the - // pipeline. 
- public static final String HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT = - "hdds.scm.safemode.healthy.pipelie.pct"; - public static final double - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT = 0.10; - - public static final String HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT = - "hdds.scm.safemode.atleast.one.node.reported.pipeline.pct"; - public static final double - HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT = 0.90; - - public static final String HDDS_LOCK_MAX_CONCURRENCY = - "hdds.lock.max.concurrency"; - public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100; - // This configuration setting is used as a fallback location by all - // Ozone/HDDS services for their metadata. It is useful as a single - // config point for test/PoC clusters. - // - // In any real cluster where performance matters, the SCM, OM and DN - // metadata locations must be configured explicitly. - public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; - - public static final String HDDS_PROMETHEUS_ENABLED = - "hdds.prometheus.endpoint.enabled"; - - public static final String HDDS_PROFILER_ENABLED = - "hdds.profiler.endpoint.enabled"; - - public static final String HDDS_KEY_LEN = "hdds.key.len"; - public static final int HDDS_DEFAULT_KEY_LEN = 2048; - public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo"; - public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA"; - public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider"; - public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC"; - public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name"; - public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys"; - // TODO : Talk to StorageIO classes and see if they can return a secure - // storage location for each node. - public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir"; - public static final String HDDS_PRIVATE_KEY_FILE_NAME = - "hdds.priv.key.file.name"; - public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem"; - public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file" - + ".name"; - public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem"; - - public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME = - "hdds.block.token.expiry.time"; - public static final String HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT = "1d"; - /** - * Maximum duration of certificates issued by SCM including Self-Signed Roots. - * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS - * Default value is 5 years and written as P1865D. - */ - public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration"; - // Limit Certificate duration to a max value of 5 years. - public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D"; - public static final String HDDS_X509_SIGNATURE_ALGO = - "hdds.x509.signature.algorithm"; - public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA"; - public static final String HDDS_BLOCK_TOKEN_ENABLED = - "hdds.block.token.enabled"; - public static final boolean HDDS_BLOCK_TOKEN_ENABLED_DEFAULT = false; - - public static final String HDDS_X509_DIR_NAME = "hdds.x509.dir.name"; - public static final String HDDS_X509_DIR_NAME_DEFAULT = "certs"; - public static final String HDDS_X509_FILE_NAME = "hdds.x509.file.name"; - public static final String HDDS_X509_FILE_NAME_DEFAULT = "certificate.crt"; - - /** - * Default duration of certificates issued by SCM CA. 
- * The formats accepted are based on the ISO-8601 duration format PnDTnHnMn.nS - * Default value is 5 years and written as P1865D. - */ - public static final String HDDS_X509_DEFAULT_DURATION = "hdds.x509.default" + - ".duration"; - // Default Certificate duration to one year. - public static final String HDDS_X509_DEFAULT_DURATION_DEFAULT = "P365D"; - - /** - * Do not instantiate. - */ - private HddsConfigKeys() { - } - - // Enable TLS for GRPC clients/server in ozone. - public static final String HDDS_GRPC_TLS_ENABLED = "hdds.grpc.tls.enabled"; - public static final boolean HDDS_GRPC_TLS_ENABLED_DEFAULT = false; - - // Choose TLS provider the default is set to OPENSSL for better performance. - public static final String HDDS_GRPC_TLS_PROVIDER = "hdds.grpc.tls.provider"; - public static final String HDDS_GRPC_TLS_PROVIDER_DEFAULT = "OPENSSL"; - - // Test only settings for using test signed certificate, authority assume to - // be localhost. - public static final String HDDS_GRPC_TLS_TEST_CERT = "hdds.grpc.tls" + - ".test.cert"; - public static final boolean HDDS_GRPC_TLS_TEST_CERT_DEFAULT = false; - - // Comma separated acls (users, groups) allowing clients accessing - // datanode container protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String - HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL = - "hdds.security.client.datanode.container.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm container protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL = - "hdds.security.client.scm.container.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm block protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. - public static final String HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL = - "hdds.security.client.scm.block.protocol.acl"; - - // Comma separated acls (users, groups) allowing clients accessing - // scm certificate protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. 
- public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL = - "hdds.security.client.scm.certificate.protocol.acl"; - - // Determines if the Container Chunk Manager will write user data to disk - // Set to false only for specific performance tests - public static final String HDDS_CONTAINER_PERSISTDATA = - "hdds.container.chunk.persistdata"; - public static final boolean HDDS_CONTAINER_PERSISTDATA_DEFAULT = true; - - public static final String HDDS_CONTAINER_SCRUB_ENABLED = - "hdds.container.scrub.enabled"; - public static final boolean HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT = false; - - public static final String HDDS_DATANODE_HTTP_ENABLED_KEY = - "hdds.datanode.http.enabled"; - public static final String HDDS_DATANODE_HTTP_BIND_HOST_KEY = - "hdds.datanode.http-bind-host"; - public static final String HDDS_DATANODE_HTTPS_BIND_HOST_KEY = - "hdds.datanode.https-bind-host"; - public static final String HDDS_DATANODE_HTTP_ADDRESS_KEY = - "hdds.datanode.http-address"; - public static final String HDDS_DATANODE_HTTPS_ADDRESS_KEY = - "hdds.datanode.https-address"; - - public static final String HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT = 9882; - public static final int HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT = 9883; - public static final String - HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY = - "hdds.datanode.http.kerberos.principal"; - public static final String - HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "hdds.datanode.http.kerberos.keytab"; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java deleted file mode 100644 index b244b8cf75d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds; - -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * HDDS Id generator. - */ -public final class HddsIdFactory { - private HddsIdFactory() { - } - - private static final AtomicLong LONG_COUNTER = new AtomicLong( - System.currentTimeMillis()); - - /** - * Returns an incrementing long. This class doesn't - * persist initial value for long Id's, so incremental id's after restart - * may collide with previously generated Id's. - * - * @return long - */ - public static long getLongId() { - return LONG_COUNTER.incrementAndGet(); - } - - /** - * Returns a uuid. - * - * @return UUID. - */ - public static UUID getUUId() { - return UUID.randomUUID(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java deleted file mode 100644 index d7b20fdd917..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds; - -import javax.management.ObjectName; -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.Calendar; -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; -import java.util.Optional; -import java.util.TimeZone; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.source.JvmMetrics; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.DNS; -import org.apache.hadoop.net.NetUtils; - -import com.google.common.net.HostAndPort; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT; - -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * HDDS specific stateless utility functions. - */ -@InterfaceAudience.Private -@InterfaceStability.Stable -public final class HddsUtils { - - - private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class); - - /** - * The service ID of the solitary Ozone SCM service. - */ - public static final String OZONE_SCM_SERVICE_ID = "OzoneScmService"; - public static final String OZONE_SCM_SERVICE_INSTANCE_ID = - "OzoneScmServiceInstance"; - private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC"); - - - private static final int NO_PORT = -1; - - private HddsUtils() { - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM client endpoint. 
- */ - public static InetSocketAddress getScmAddressForClients(Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration for " - + "details" - + " on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM for block service. If - * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined - * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither - * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - * @throws IllegalArgumentException if configuration is not defined. - */ - public static InetSocketAddress getScmAddressForBlockClients( - Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - } - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY - + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration" - + " for details on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT)); - } - - /** - * Create a scm security client. - * @param conf - Ozone configuration. 
- * - * @return {@link SCMSecurityProtocol} - * @throws IOException - */ - public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress address = - getScmAddressForSecurityProtocol(conf); - RetryPolicy retryPolicy = - RetryPolicies.retryForeverWithFixedSleep( - 1000, TimeUnit.MILLISECONDS); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProtocolProxy(SCMSecurityProtocolPB.class, scmVersion, - address, UserGroupInformation.getCurrentUser(), - conf, NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf), retryPolicy).getProxy()); - return scmSecurityClient; - } - - /** - * Retrieve the hostname, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). - * - * @param conf - Conf - * @param keys a list of configuration key names. - * - * @return first hostname component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. - */ - public static Optional getHostNameFromConfigKeys(Configuration conf, - String... keys) { - for (final String key : keys) { - final String value = conf.getTrimmed(key); - final Optional hostName = getHostName(value); - if (hostName.isPresent()) { - return hostName; - } - } - return Optional.empty(); - } - - /** - * Gets the hostname or Indicates that it is absent. - * @param value host or host:port - * @return hostname - */ - public static Optional getHostName(String value) { - if ((value == null) || value.isEmpty()) { - return Optional.empty(); - } - String hostname = value.replaceAll("\\:[0-9]+$", ""); - if (hostname.length() == 0) { - return Optional.empty(); - } else { - return Optional.of(hostname); - } - } - - /** - * Gets the port if there is one, throws otherwise. - * @param value String in host:port format. - * @return Port - */ - public static Optional getHostPort(String value) { - if ((value == null) || value.isEmpty()) { - return Optional.empty(); - } - int port = HostAndPort.fromString(value).getPortOrDefault(NO_PORT); - if (port == NO_PORT) { - return Optional.empty(); - } else { - return Optional.of(port); - } - } - - /** - * Retrieve the port number, trying the supplied config keys in order. - * Each config value may be absent, or if present in the format - * host:port (the :port part is optional). - * - * @param conf Conf - * @param keys a list of configuration key names. - * - * @return first port number component found from the given keys, or absent. - * @throws IllegalArgumentException if any values are not in the 'host' - * or host:port format. - */ - public static Optional getPortNumberFromConfigKeys( - Configuration conf, String... keys) { - for (final String key : keys) { - final String value = conf.getTrimmed(key); - final Optional hostPort = getHostPort(value); - if (hostPort.isPresent()) { - return hostPort; - } - } - return Optional.empty(); - } - - /** - * Retrieve the socket addresses of all storage container managers. 
- * - * @param conf - * @return A collection of SCM addresses - * @throws IllegalArgumentException If the configuration is invalid - */ - public static Collection getSCMAddresses( - Configuration conf) throws IllegalArgumentException { - Collection addresses = - new HashSet(); - Collection names = - conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES); - if (names == null || names.isEmpty()) { - throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_NAMES - + " need to be a set of valid DNS names or IP addresses." - + " Null or empty address list found."); - } - - final Optional defaultPort = Optional - .of(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT); - for (String address : names) { - Optional hostname = getHostName(address); - if (!hostname.isPresent()) { - throw new IllegalArgumentException("Invalid hostname for SCM: " - + hostname); - } - Optional port = getHostPort(address); - InetSocketAddress addr = NetUtils.createSocketAddr(hostname.get(), - port.orElse(defaultPort.get())); - addresses.add(addr); - } - return addresses; - } - - public static boolean isHddsEnabled(Configuration conf) { - return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT); - } - - - /** - * Returns the hostname for this datanode. If the hostname is not - * explicitly configured in the given config, then it is determined - * via the DNS class. - * - * @param conf Configuration - * - * @return the hostname (NB: may not be a FQDN) - * @throws UnknownHostException if the dfs.datanode.dns.interface - * option is used and the hostname can not be determined - */ - public static String getHostName(Configuration conf) - throws UnknownHostException { - String name = conf.get(DFS_DATANODE_HOST_NAME_KEY); - if (name == null) { - String dnsInterface = conf.get( - CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY); - String nameServer = conf.get( - CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY); - boolean fallbackToHosts = false; - - if (dnsInterface == null) { - // Try the legacy configuration keys. - dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY); - nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY); - } else { - // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file - // resolution if DNS fails. We will not use hosts file resolution - // by default to avoid breaking existing clusters. - fallbackToHosts = true; - } - - name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts); - } - return name; - } - - /** - * Checks if the container command is read only or not. - * @param proto ContainerCommand Request proto - * @return True if its readOnly , false otherwise. - */ - public static boolean isReadOnly( - ContainerProtos.ContainerCommandRequestProto proto) { - switch (proto.getCmdType()) { - case ReadContainer: - case ReadChunk: - case ListBlock: - case GetBlock: - case GetSmallFile: - case ListContainer: - case ListChunk: - case GetCommittedBlockLength: - return true; - case CloseContainer: - case WriteChunk: - case UpdateContainer: - case CompactChunk: - case CreateContainer: - case DeleteChunk: - case DeleteContainer: - case DeleteBlock: - case PutBlock: - case PutSmallFile: - default: - return false; - } - } - - /** - * Register the provided MBean with additional JMX ObjectName properties. - * If additional properties are not supported then fallback to registering - * without properties. 
- * - * @param serviceName - see {@link MBeans#register} - * @param mBeanName - see {@link MBeans#register} - * @param jmxProperties - additional JMX ObjectName properties. - * @param mBean - the MBean to register. - * @return the named used to register the MBean. - */ - public static ObjectName registerWithJmxProperties( - String serviceName, String mBeanName, Map jmxProperties, - Object mBean) { - try { - - // Check support for registering with additional properties. - final Method registerMethod = MBeans.class.getMethod( - "register", String.class, String.class, - Map.class, Object.class); - - return (ObjectName) registerMethod.invoke( - null, serviceName, mBeanName, jmxProperties, mBean); - - } catch (NoSuchMethodException | IllegalAccessException | - InvocationTargetException e) { - - // Fallback - if (LOG.isTraceEnabled()) { - LOG.trace("Registering MBean {} without additional properties {}", - mBeanName, jmxProperties); - } - return MBeans.register(serviceName, mBeanName, mBean); - } - } - - /** - * Get the current UTC time in milliseconds. - * @return the current UTC time in milliseconds. - */ - public static long getUtcTime() { - return Calendar.getInstance(UTC_ZONE).getTimeInMillis(); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM for - * {@link org.apache.hadoop.hdds.protocol.SCMSecurityProtocol}. If - * {@link ScmConfigKeys#OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY} is not defined - * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither - * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - * @throws IllegalArgumentException if configuration is not defined. - */ - public static InetSocketAddress getScmAddressForSecurityProtocol( - Configuration conf) { - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY); - - if (!host.isPresent()) { - host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - } - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY - + " must be defined. See" - + " https://wiki.apache.org/hadoop/Ozone#Configuration" - + " for details on configuring Ozone."); - } - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY); - - return NetUtils.createSocketAddr(host.get() + ":" + port - .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT)); - } - - /** - * Initialize hadoop metrics system for Ozone servers. - * @param configuration OzoneConfiguration to use. - * @param serverName The logical name of the server components. 
- * @return - */ - public static MetricsSystem initializeMetrics( - OzoneConfiguration configuration, String serverName) { - MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName); - JvmMetrics.create(serverName, - configuration.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY), - DefaultMetricsSystem.instance()); - return metricsSystem; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java deleted file mode 100644 index 372828b95ce..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.cli; - -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.fs.Path; - -import com.google.common.annotations.VisibleForTesting; -import picocli.CommandLine; -import picocli.CommandLine.ExecutionException; -import picocli.CommandLine.Option; -import picocli.CommandLine.RunLast; - -/** - * This is a generic parent class for all the ozone related cli tools. - */ -public class GenericCli implements Callable, GenericParentCommand { - - @Option(names = {"--verbose"}, - description = "More verbose output. Show the stack trace of the errors.") - private boolean verbose; - - @Option(names = {"-D", "--set"}) - private Map configurationOverrides = new HashMap<>(); - - @Option(names = {"-conf"}) - private String configurationPath; - - private final CommandLine cmd; - - public GenericCli() { - cmd = new CommandLine(this); - } - - public void run(String[] argv) { - try { - execute(argv); - } catch (ExecutionException ex) { - printError(ex.getCause() == null ? ex : ex.getCause()); - System.exit(-1); - } - } - - @VisibleForTesting - public void execute(String[] argv) { - cmd.parseWithHandler(new RunLast(), argv); - } - - protected void printError(Throwable error) { - //message could be null in case of NPE. This is unexpected so we can - //print out the stack trace. - if (verbose || error.getMessage() == null - || error.getMessage().length() == 0) { - error.printStackTrace(System.err); - } else { - System.err.println(error.getMessage().split("\n")[0]); - } - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException(cmd); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - if (configurationPath != null) { - ozoneConf.addResource(new Path(configurationPath)); - } - if (configurationOverrides != null) { - for (Entry entry : configurationOverrides.entrySet()) { - ozoneConf.set(entry.getKey(), entry.getValue()); - } - } - return ozoneConf; - } - - @VisibleForTesting - public picocli.CommandLine getCmd() { - return cmd; - } - - @Override - public boolean isVerbose() { - return verbose; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java deleted file mode 100644 index 6abad3e32b8..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.cli; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -/** - * Interface to access the higher level parameters. - */ -public interface GenericParentCommand { - - boolean isVerbose(); - - OzoneConfiguration createOzoneConfiguration(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java deleted file mode 100644 index 2f4ac4f170a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/HddsVersionProvider.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.cli; - -import org.apache.hadoop.hdds.utils.HddsVersionInfo; - -import picocli.CommandLine.IVersionProvider; - -/** - * Version provider for the CLI interface. - */ -public class HddsVersionProvider implements IVersionProvider { - @Override - public String[] getVersion() throws Exception { - String[] result = new String[] { - HddsVersionInfo.HDDS_VERSION_INFO.getBuildVersion() - }; - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java deleted file mode 100644 index 759476579e9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.cli;
-
-import picocli.CommandLine;
-
-/**
- * Exception to throw if subcommand is not selected but required.
- */
-public class MissingSubcommandException extends CommandLine.ParameterException {
-
-  public MissingSubcommandException(CommandLine cmd) {
-    super(cmd, "Incomplete command");
-  }
-
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
deleted file mode 100644
index 8dcc1d1a3c9..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Generic helper class to make instantiate picocli based cli tools.
- */
-package org.apache.hadoop.hdds.cli;
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
deleted file mode 100644
index 07aa536c4e5..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.Objects; - -/** - * BlockID of Ozone (containerID + localID + blockCommitSequenceId). - */ - -public class BlockID { - - private ContainerBlockID containerBlockID; - private long blockCommitSequenceId; - - public BlockID(long containerID, long localID) { - this(containerID, localID, 0); - } - - private BlockID(long containerID, long localID, long bcsID) { - containerBlockID = new ContainerBlockID(containerID, localID); - blockCommitSequenceId = bcsID; - } - - public BlockID(ContainerBlockID containerBlockID) { - this(containerBlockID, 0); - } - - private BlockID(ContainerBlockID containerBlockID, long bcsId) { - this.containerBlockID = containerBlockID; - blockCommitSequenceId = bcsId; - } - - public long getContainerID() { - return containerBlockID.getContainerID(); - } - - public long getLocalID() { - return containerBlockID.getLocalID(); - } - - public long getBlockCommitSequenceId() { - return blockCommitSequenceId; - } - - public void setBlockCommitSequenceId(long blockCommitSequenceId) { - this.blockCommitSequenceId = blockCommitSequenceId; - } - - public ContainerBlockID getContainerBlockID() { - return containerBlockID; - } - - public void setContainerBlockID(ContainerBlockID containerBlockID) { - this.containerBlockID = containerBlockID; - } - - @Override - public String toString() { - return new StringBuilder().append(getContainerBlockID().toString()) - .append(" bcsId: ") - .append(blockCommitSequenceId) - .toString(); - } - - public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() { - return ContainerProtos.DatanodeBlockID.newBuilder(). 
- setContainerID(containerBlockID.getContainerID()) - .setLocalID(containerBlockID.getLocalID()) - .setBlockCommitSequenceId(blockCommitSequenceId).build(); - } - - public static BlockID getFromProtobuf( - ContainerProtos.DatanodeBlockID blockID) { - return new BlockID(blockID.getContainerID(), - blockID.getLocalID(), blockID.getBlockCommitSequenceId()); - } - - public HddsProtos.BlockID getProtobuf() { - return HddsProtos.BlockID.newBuilder() - .setContainerBlockID(containerBlockID.getProtobuf()) - .setBlockCommitSequenceId(blockCommitSequenceId).build(); - } - - public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) { - return new BlockID( - ContainerBlockID.getFromProtobuf(blockID.getContainerBlockID()), - blockID.getBlockCommitSequenceId()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - BlockID blockID = (BlockID) o; - return containerBlockID.equals(blockID.getContainerBlockID()) - && blockCommitSequenceId == blockID.getBlockCommitSequenceId(); - } - - @Override - public int hashCode() { - return Objects - .hash(containerBlockID.getContainerID(), containerBlockID.getLocalID(), - blockCommitSequenceId); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java deleted file mode 100644 index 1e30cc351f9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.Objects; - -/** - * BlockID returned by SCM during allocation of block (containerID + localID). - */ -public class ContainerBlockID { - private long containerID; - private long localID; - - public ContainerBlockID(long containerID, long localID) { - this.containerID = containerID; - this.localID = localID; - } - - public long getContainerID() { - return containerID; - } - - public long getLocalID() { - return localID; - } - - @Override - public String toString() { - return new StringBuffer() - .append("conID: ") - .append(containerID) - .append(" locID: ") - .append(localID).toString(); - } - - public HddsProtos.ContainerBlockID getProtobuf() { - return HddsProtos.ContainerBlockID.newBuilder(). - setContainerID(containerID).setLocalID(localID).build(); - } - - public static ContainerBlockID getFromProtobuf( - HddsProtos.ContainerBlockID containerBlockID) { - return new ContainerBlockID(containerBlockID.getContainerID(), - containerBlockID.getLocalID()); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ContainerBlockID blockID = (ContainerBlockID) o; - return containerID == blockID.containerID && localID == blockID.localID; - } - - @Override - public int hashCode() { - return Objects.hash(containerID, localID); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java deleted file mode 100644 index 59708a956b9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.ozone.OzoneConsts; - - -/** - * represents an OzoneQuota Object that can be applied to - * a storage volume. - */ -public class OzoneQuota { - - public static final String OZONE_QUOTA_BYTES = "BYTES"; - public static final String OZONE_QUOTA_MB = "MB"; - public static final String OZONE_QUOTA_GB = "GB"; - public static final String OZONE_QUOTA_TB = "TB"; - - private Units unit; - private long size; - - /** Quota Units.*/ - public enum Units {UNDEFINED, BYTES, KB, MB, GB, TB} - - /** - * Returns size. 
- * - * @return long - */ - public long getSize() { - return size; - } - - /** - * Returns Units. - * - * @return Unit in MB, GB or TB - */ - public Units getUnit() { - return unit; - } - - /** - * Constructs a default Quota object. - */ - public OzoneQuota() { - this.size = 0; - this.unit = Units.UNDEFINED; - } - - /** - * Constructor for Ozone Quota. - * - * @param size Long Size - * @param unit MB, GB or TB - */ - public OzoneQuota(long size, Units unit) { - this.size = size; - this.unit = unit; - } - - /** - * Formats a quota as a string. - * - * @param quota the quota to format - * @return string representation of quota - */ - public static String formatQuota(OzoneQuota quota) { - return String.valueOf(quota.size) + quota.unit; - } - - /** - * Parses a user provided string and returns the - * Quota Object. - * - * @param quotaString Quota String - * - * @return OzoneQuota object - * - * @throws IllegalArgumentException - */ - public static OzoneQuota parseQuota(String quotaString) - throws IllegalArgumentException { - - if ((quotaString == null) || (quotaString.isEmpty())) { - throw new IllegalArgumentException( - "Quota string cannot be null or empty."); - } - - String uppercase = quotaString.toUpperCase().replaceAll("\\s+", ""); - String size = ""; - int nSize; - Units currUnit = Units.MB; - Boolean found = false; - if (uppercase.endsWith(OZONE_QUOTA_MB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_MB.length()); - currUnit = Units.MB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_GB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_GB.length()); - currUnit = Units.GB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_TB)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_TB.length()); - currUnit = Units.TB; - found = true; - } - - if (uppercase.endsWith(OZONE_QUOTA_BYTES)) { - size = uppercase - .substring(0, uppercase.length() - OZONE_QUOTA_BYTES.length()); - currUnit = Units.BYTES; - found = true; - } - - if (!found) { - throw new IllegalArgumentException( - "Quota unit not recognized. Supported values are BYTES, MB, GB and " + - "TB."); - } - - nSize = Integer.parseInt(size); - if (nSize < 0) { - throw new IllegalArgumentException("Quota cannot be negative."); - } - - return new OzoneQuota(nSize, currUnit); - } - - - /** - * Returns size in Bytes or -1 if there is no Quota. - */ - public long sizeInBytes() { - switch (this.unit) { - case BYTES: - return this.getSize(); - case MB: - return this.getSize() * OzoneConsts.MB; - case GB: - return this.getSize() * OzoneConsts.GB; - case TB: - return this.getSize() * OzoneConsts.TB; - case UNDEFINED: - default: - return -1; - } - } - - /** - * Returns OzoneQuota corresponding to size in bytes. 
- * - * @param sizeInBytes size in bytes to be converted - * - * @return OzoneQuota object - */ - public static OzoneQuota getOzoneQuota(long sizeInBytes) { - long size; - Units unit; - if (sizeInBytes % OzoneConsts.TB == 0) { - size = sizeInBytes / OzoneConsts.TB; - unit = Units.TB; - } else if (sizeInBytes % OzoneConsts.GB == 0) { - size = sizeInBytes / OzoneConsts.GB; - unit = Units.GB; - } else if (sizeInBytes % OzoneConsts.MB == 0) { - size = sizeInBytes / OzoneConsts.MB; - unit = Units.MB; - } else { - size = sizeInBytes; - unit = Units.BYTES; - } - return new OzoneQuota((int)size, unit); - } - - @Override - public String toString() { - return size + " " + unit; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java deleted file mode 100644 index 044bd6f8334..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * The replication factor to be used while writing key into ozone. - */ -public enum ReplicationFactor { - ONE(1), - THREE(3); - - /** - * Integer representation of replication. - */ - private int value; - - /** - * Initializes ReplicationFactor with value. - * @param value replication value - */ - ReplicationFactor(int value) { - this.value = value; - } - - /** - * Returns enum value corresponding to the int value. - * @param value replication value - * @return ReplicationFactor - */ - public static ReplicationFactor valueOf(int value) { - if(value == 1) { - return ONE; - } - if (value == 3) { - return THREE; - } - throw new IllegalArgumentException("Unsupported value: " + value); - } - - public static ReplicationFactor fromProto( - HddsProtos.ReplicationFactor replicationFactor) { - if (replicationFactor == null) { - return null; - } - switch (replicationFactor) { - case ONE: - return ReplicationFactor.ONE; - case THREE: - return ReplicationFactor.THREE; - default: - throw new IllegalArgumentException( - "Unsupported ProtoBuf replication factor: " + replicationFactor); - } - } - - /** - * Returns integer representation of ReplicationFactor. 
- * @return replication value - */ - public int getValue() { - return value; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java deleted file mode 100644 index c63896e9e1d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * The replication type to be used while writing key into ozone. - */ -public enum ReplicationType { - RATIS, - STAND_ALONE, - CHAINED; - - public static ReplicationType fromProto( - HddsProtos.ReplicationType replicationType) { - if (replicationType == null) { - return null; - } - switch (replicationType) { - case RATIS: - return ReplicationType.RATIS; - case STAND_ALONE: - return ReplicationType.STAND_ALONE; - case CHAINED: - return ReplicationType.CHAINED; - default: - throw new IllegalArgumentException( - "Unsupported ProtoBuf replication type: " + replicationType); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java deleted file mode 100644 index e81f134b259..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.client; - -/** - * Base property types for HDDS containers and replications. 
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java deleted file mode 100644 index 8beac1663b2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import com.google.gson.Gson; -import java.io.IOException; -import java.io.Writer; - -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.ws.rs.core.HttpHeaders; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.http.HttpServer2; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY; - -/** - * A servlet to print out the running configuration data. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) -@InterfaceStability.Unstable -public class HddsConfServlet extends HttpServlet { - - private static final long serialVersionUID = 1L; - - protected static final String FORMAT_JSON = "json"; - protected static final String FORMAT_XML = "xml"; - private static final String COMMAND = "cmd"; - private static final OzoneConfiguration OZONE_CONFIG = - new OzoneConfiguration(); - private static final transient Logger LOG = - LoggerFactory.getLogger(HddsConfServlet.class); - - - /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. 
- */ - private Configuration getConfFromContext() { - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer2.CONF_CONTEXT_ATTRIBUTE); - assert conf != null; - return conf; - } - - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - - if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { - return; - } - - String format = parseAcceptHeader(request); - if (FORMAT_XML.equals(format)) { - response.setContentType("text/xml; charset=utf-8"); - } else if (FORMAT_JSON.equals(format)) { - response.setContentType("application/json; charset=utf-8"); - } - - String name = request.getParameter("name"); - Writer out = response.getWriter(); - String cmd = request.getParameter(COMMAND); - - processCommand(cmd, format, request, response, out, name); - out.close(); - } - - private void processCommand(String cmd, String format, - HttpServletRequest request, HttpServletResponse response, Writer out, - String name) - throws IOException { - try { - if (cmd == null) { - if (FORMAT_XML.equals(format)) { - response.setContentType("text/xml; charset=utf-8"); - } else if (FORMAT_JSON.equals(format)) { - response.setContentType("application/json; charset=utf-8"); - } - - writeResponse(getConfFromContext(), out, format, name); - } else { - processConfigTagRequest(request, out); - } - } catch (BadFormatException bfe) { - response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage()); - } catch (IllegalArgumentException iae) { - response.sendError(HttpServletResponse.SC_NOT_FOUND, iae.getMessage()); - } - } - - @VisibleForTesting - static String parseAcceptHeader(HttpServletRequest request) { - String format = request.getHeader(HttpHeaders.ACCEPT); - return format != null && format.contains(FORMAT_JSON) ? - FORMAT_JSON : FORMAT_XML; - } - - /** - * Guts of the servlet - extracted for easy testing. - */ - static void writeResponse(Configuration conf, - Writer out, String format, String propertyName) - throws IOException, IllegalArgumentException, BadFormatException { - if (FORMAT_JSON.equals(format)) { - Configuration.dumpConfiguration(conf, propertyName, out); - } else if (FORMAT_XML.equals(format)) { - conf.writeXml(propertyName, out); - } else { - throw new BadFormatException("Bad format: " + format); - } - } - - /** - * Exception for signal bad content type. 
- */ - public static class BadFormatException extends Exception { - - private static final long serialVersionUID = 1L; - - public BadFormatException(String msg) { - super(msg); - } - } - - private void processConfigTagRequest(HttpServletRequest request, - Writer out) throws IOException { - String cmd = request.getParameter(COMMAND); - Gson gson = new Gson(); - Configuration config = getOzoneConfig(); - - switch (cmd) { - case "getOzoneTags": - out.write(gson.toJson(config.get(OZONE_TAGS_SYSTEM_KEY) - .split(","))); - break; - case "getPropertyByTag": - String tags = request.getParameter("tags"); - Map propMap = new HashMap<>(); - - for (String tag : tags.split(",")) { - if (config.isPropertyTag(tag)) { - Properties properties = config.getAllPropertiesByTag(tag); - propMap.put(tag, properties); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Not a valid tag" + tag); - } - } - } - out.write(gson.toJsonTree(propMap).toString()); - break; - default: - throw new IllegalArgumentException(cmd + " is not a valid command."); - } - - } - - private static Configuration getOzoneConfig() { - return OZONE_CONFIG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java deleted file mode 100644 index c0486335cdd..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ /dev/null @@ -1,328 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.conf; - -import javax.xml.bind.JAXBContext; -import javax.xml.bind.JAXBException; -import javax.xml.bind.Unmarshaller; -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.net.URL; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.List; -import java.util.Properties; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; - -/** - * Configuration for ozone. - */ -@InterfaceAudience.Private -public class OzoneConfiguration extends Configuration { - static { - activate(); - } - - public static OzoneConfiguration of(Configuration conf) { - Preconditions.checkNotNull(conf); - - return conf instanceof OzoneConfiguration - ? 
(OzoneConfiguration) conf - : new OzoneConfiguration(conf); - } - - public OzoneConfiguration() { - OzoneConfiguration.activate(); - loadDefaults(); - } - - public OzoneConfiguration(Configuration conf) { - super(conf); - //load the configuration from the classloader of the original conf. - setClassLoader(conf.getClassLoader()); - if (!(conf instanceof OzoneConfiguration)) { - loadDefaults(); - } - } - - private void loadDefaults() { - try { - //there could be multiple ozone-default-generated.xml files on the - // classpath, which are generated by the annotation processor. - // Here we add all of them to the list of the available configuration. - Enumeration generatedDefaults = - OzoneConfiguration.class.getClassLoader().getResources( - "ozone-default-generated.xml"); - while (generatedDefaults.hasMoreElements()) { - addResource(generatedDefaults.nextElement()); - } - } catch (IOException e) { - e.printStackTrace(); - } - addResource("ozone-site.xml"); - } - - public List readPropertyFromXml(URL url) throws JAXBException { - JAXBContext context = JAXBContext.newInstance(XMLConfiguration.class); - Unmarshaller um = context.createUnmarshaller(); - - XMLConfiguration config = (XMLConfiguration) um.unmarshal(url); - return config.getProperties(); - } - - /** - * Create a Configuration object and inject the required configuration values. - * - * @param configurationClass The class where the fields are annotated with - * the configuration. - * @return Initiated java object where the config fields are injected. - */ - public T getObject(Class configurationClass) { - - T configuration; - - try { - configuration = configurationClass.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - throw new ConfigurationException( - "Configuration class can't be created: " + configurationClass, e); - } - ConfigGroup configGroup = - configurationClass.getAnnotation(ConfigGroup.class); - String prefix = configGroup.prefix(); - - for (Method setterMethod : configurationClass.getMethods()) { - if (setterMethod.isAnnotationPresent(Config.class)) { - - String methodLocation = - configurationClass + "." + setterMethod.getName(); - - Config configAnnotation = setterMethod.getAnnotation(Config.class); - - String key = prefix + "." + configAnnotation.key(); - - Class[] parameterTypes = setterMethod.getParameterTypes(); - if (parameterTypes.length != 1) { - throw new ConfigurationException( - "@Config annotation should be used on simple setter: " - + methodLocation); - } - - ConfigType type = configAnnotation.type(); - - if (type == ConfigType.AUTO) { - type = detectConfigType(parameterTypes[0], methodLocation); - } - - //Note: default value is handled by ozone-default.xml. Here we can - //use any default. 
- try { - switch (type) { - case STRING: - setterMethod.invoke(configuration, get(key)); - break; - case INT: - setterMethod.invoke(configuration, - getInt(key, 0)); - break; - case BOOLEAN: - setterMethod.invoke(configuration, - getBoolean(key, false)); - break; - case LONG: - setterMethod.invoke(configuration, - getLong(key, 0)); - break; - case TIME: - setterMethod.invoke(configuration, - getTimeDuration(key, 0, configAnnotation.timeUnit())); - break; - default: - throw new ConfigurationException( - "Unsupported ConfigType " + type + " on " + methodLocation); - } - } catch (InvocationTargetException | IllegalAccessException e) { - throw new ConfigurationException( - "Can't inject configuration to " + methodLocation, e); - } - - } - } - return configuration; - - } - - private ConfigType detectConfigType(Class parameterType, - String methodLocation) { - ConfigType type; - if (parameterType == String.class) { - type = ConfigType.STRING; - } else if (parameterType == Integer.class || parameterType == int.class) { - type = ConfigType.INT; - } else if (parameterType == Long.class || parameterType == long.class) { - type = ConfigType.LONG; - } else if (parameterType == Boolean.class - || parameterType == boolean.class) { - type = ConfigType.BOOLEAN; - } else { - throw new ConfigurationException( - "Unsupported configuration type " + parameterType + " in " - + methodLocation); - } - return type; - } - - /** - * Class to marshall/un-marshall configuration from xml files. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "configuration") - public static class XMLConfiguration { - - @XmlElement(name = "property", type = Property.class) - private List properties = new ArrayList<>(); - - public XMLConfiguration() { - } - - public XMLConfiguration(List properties) { - this.properties = properties; - } - - public List getProperties() { - return properties; - } - - public void setProperties(List properties) { - this.properties = properties; - } - } - - /** - * Class to marshall/un-marshall configuration properties from xml files. 
- */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "property") - public static class Property implements Comparable { - - private String name; - private String value; - private String tag; - private String description; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - public String getTag() { - return tag; - } - - public void setTag(String tag) { - this.tag = tag; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public int compareTo(Property o) { - if (this == o) { - return 0; - } - return this.getName().compareTo(o.getName()); - } - - @Override - public String toString() { - return this.getName() + " " + this.getValue() + " " + this.getTag(); - } - - @Override - public int hashCode() { - return this.getName().hashCode(); - } - - @Override - public boolean equals(Object obj) { - return (obj instanceof Property) && (((Property) obj).getName()) - .equals(this.getName()); - } - } - - public static void activate() { - // adds the default resources - Configuration.addDefaultResource("hdfs-default.xml"); - Configuration.addDefaultResource("hdfs-site.xml"); - Configuration.addDefaultResource("ozone-default.xml"); - } - - /** - * The super class method getAllPropertiesByTag - * does not override values of properties - * if there is no tag present in the configs of - * newly added resources. - * - * @param tag - * @return Properties that belong to the tag - */ - @Override - public Properties getAllPropertiesByTag(String tag) { - // Call getProps first to load the newly added resources - // before calling super.getAllPropertiesByTag - Properties updatedProps = getProps(); - Properties propertiesByTag = super.getAllPropertiesByTag(tag); - Properties props = new Properties(); - Enumeration properties = propertiesByTag.propertyNames(); - while (properties.hasMoreElements()) { - Object propertyName = properties.nextElement(); - // get the current value of the property - Object value = updatedProps.getProperty(propertyName.toString()); - if (value != null) { - props.put(propertyName, value); - } - } - return props; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index 948057ebba7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java deleted file mode 100644 index b9d7bceb48f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/FunctionWithServiceException.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.function; - -import com.google.protobuf.ServiceException; - -/** - * Functional interface like java.util.function.Function but with - * checked exception. - */ -@FunctionalInterface -public interface FunctionWithServiceException { - - /** - * Applies this function to the given argument. - * - * @param t the function argument - * @return the function result - */ - R apply(T t) throws ServiceException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java deleted file mode 100644 index 915fe3557e2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/function/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Functional interfaces for ozone, similar to java.util.function. - */ -package org.apache.hadoop.hdds.function; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java deleted file mode 100644 index f8894e6a7e8..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds; - -/** - * Generic HDDS specific configurator and helper classes. 
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java deleted file mode 100644 index 698a443fc6b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ /dev/null @@ -1,493 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.protocol; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.net.NetConstants; -import org.apache.hadoop.hdds.scm.net.NodeImpl; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * DatanodeDetails class contains details about DataNode like: - * - UUID of the DataNode. - * - IP and Hostname details. - * - Port details to which the DataNode will be listening. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class DatanodeDetails extends NodeImpl implements - Comparable { -/** - * DataNode's unique identifier in the cluster. - */ - private final UUID uuid; - - private String ipAddress; - private String hostName; - private List ports; - private String certSerialId; - - /** - * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used - * for instantiating DatanodeDetails. - * @param uuid DataNode's UUID - * @param ipAddress IP Address of this DataNode - * @param hostName DataNode's hostname - * @param networkLocation DataNode's network location path - * @param ports Ports used by the DataNode - * @param certSerialId serial id from SCM issued certificate. - */ - private DatanodeDetails(String uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId) { - super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); - this.uuid = UUID.fromString(uuid); - this.ipAddress = ipAddress; - this.hostName = hostName; - this.ports = ports; - this.certSerialId = certSerialId; - } - - protected DatanodeDetails(DatanodeDetails datanodeDetails) { - super(datanodeDetails.getHostName(), datanodeDetails.getNetworkLocation(), - datanodeDetails.getCost()); - this.uuid = datanodeDetails.uuid; - this.ipAddress = datanodeDetails.ipAddress; - this.hostName = datanodeDetails.hostName; - this.ports = datanodeDetails.ports; - this.setNetworkName(datanodeDetails.getNetworkName()); - } - - /** - * Returns the DataNode UUID. 
- * - * @return UUID of DataNode - */ - public UUID getUuid() { - return uuid; - } - - /** - * Returns the string representation of DataNode UUID. - * - * @return UUID of DataNode - */ - public String getUuidString() { - return uuid.toString(); - } - - /** - * Sets the IP address of Datanode. - * - * @param ip IP Address - */ - public void setIpAddress(String ip) { - this.ipAddress = ip; - } - - /** - * Returns IP address of DataNode. - * - * @return IP address - */ - public String getIpAddress() { - return ipAddress; - } - - /** - * Sets the Datanode hostname. - * - * @param host hostname - */ - public void setHostName(String host) { - this.hostName = host; - } - - /** - * Returns Hostname of DataNode. - * - * @return Hostname - */ - public String getHostName() { - return hostName; - } - - /** - * Sets a DataNode Port. - * - * @param port DataNode port - */ - public void setPort(Port port) { - // If the port is already in the list remove it first and add the - // new/updated port value. - ports.remove(port); - ports.add(port); - } - - /** - * Returns all the Ports used by DataNode. - * - * @return DataNode Ports - */ - public List getPorts() { - return ports; - } - - /** - * Given the name returns port number, null if the asked port is not found. - * - * @param name Name of the port - * - * @return Port - */ - public Port getPort(Port.Name name) { - for (Port port : ports) { - if (port.getName().equals(name)) { - return port; - } - } - return null; - } - - /** - * Returns a DatanodeDetails from the protocol buffers. - * - * @param datanodeDetailsProto - protoBuf Message - * @return DatanodeDetails - */ - public static DatanodeDetails getFromProtoBuf( - HddsProtos.DatanodeDetailsProto datanodeDetailsProto) { - DatanodeDetails.Builder builder = newBuilder(); - builder.setUuid(datanodeDetailsProto.getUuid()); - if (datanodeDetailsProto.hasIpAddress()) { - builder.setIpAddress(datanodeDetailsProto.getIpAddress()); - } - if (datanodeDetailsProto.hasHostName()) { - builder.setHostName(datanodeDetailsProto.getHostName()); - } - if (datanodeDetailsProto.hasCertSerialId()) { - builder.setCertSerialId(datanodeDetailsProto.getCertSerialId()); - } - for (HddsProtos.Port port : datanodeDetailsProto.getPortsList()) { - builder.addPort(newPort( - Port.Name.valueOf(port.getName().toUpperCase()), port.getValue())); - } - if (datanodeDetailsProto.hasNetworkName()) { - builder.setNetworkName(datanodeDetailsProto.getNetworkName()); - } - if (datanodeDetailsProto.hasNetworkLocation()) { - builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); - } - return builder.build(); - } - - /** - * Returns a DatanodeDetails protobuf message from a datanode ID. 
- * @return HddsProtos.DatanodeDetailsProto - */ - public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { - HddsProtos.DatanodeDetailsProto.Builder builder = - HddsProtos.DatanodeDetailsProto.newBuilder() - .setUuid(getUuidString()); - if (ipAddress != null) { - builder.setIpAddress(ipAddress); - } - if (hostName != null) { - builder.setHostName(hostName); - } - if (certSerialId != null) { - builder.setCertSerialId(certSerialId); - } - if (!Strings.isNullOrEmpty(getNetworkName())) { - builder.setNetworkName(getNetworkName()); - } - if (!Strings.isNullOrEmpty(getNetworkLocation())) { - builder.setNetworkLocation(getNetworkLocation()); - } - - for (Port port : ports) { - builder.addPorts(HddsProtos.Port.newBuilder() - .setName(port.getName().toString()) - .setValue(port.getValue()) - .build()); - } - return builder.build(); - } - - @Override - public String toString() { - return uuid.toString() + "{" + - "ip: " + - ipAddress + - ", host: " + - hostName + - ", networkLocation: " + - getNetworkLocation() + - ", certSerialId: " + certSerialId + - "}"; - } - - @Override - public int compareTo(DatanodeDetails that) { - return this.getUuid().compareTo(that.getUuid()); - } - - @Override - public boolean equals(Object obj) { - return obj instanceof DatanodeDetails && - uuid.equals(((DatanodeDetails) obj).uuid); - } - - @Override - public int hashCode() { - return uuid.hashCode(); - } - - /** - * Returns DatanodeDetails.Builder instance. - * - * @return DatanodeDetails.Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for building DatanodeDetails. - */ - public static final class Builder { - private String id; - private String ipAddress; - private String hostName; - private String networkName; - private String networkLocation; - private List ports; - private String certSerialId; - - /** - * Default private constructor. To create Builder instance use - * DatanodeDetails#newBuilder. - */ - private Builder() { - ports = new ArrayList<>(); - } - - /** - * Sets the DatanodeUuid. - * - * @param uuid DatanodeUuid - * @return DatanodeDetails.Builder - */ - public Builder setUuid(String uuid) { - this.id = uuid; - return this; - } - - /** - * Sets the IP address of DataNode. - * - * @param ip address - * @return DatanodeDetails.Builder - */ - public Builder setIpAddress(String ip) { - this.ipAddress = ip; - return this; - } - - /** - * Sets the hostname of DataNode. - * - * @param host hostname - * @return DatanodeDetails.Builder - */ - public Builder setHostName(String host) { - this.hostName = host; - return this; - } - - /** - * Sets the network name of DataNode. - * - * @param name network name - * @return DatanodeDetails.Builder - */ - public Builder setNetworkName(String name) { - this.networkName = name; - return this; - } - - /** - * Sets the network location of DataNode. - * - * @param loc location - * @return DatanodeDetails.Builder - */ - public Builder setNetworkLocation(String loc) { - this.networkLocation = loc; - return this; - } - - /** - * Adds a DataNode Port. - * - * @param port DataNode port - * - * @return DatanodeDetails.Builder - */ - public Builder addPort(Port port) { - this.ports.add(port); - return this; - } - - /** - * Adds certificate serial id. - * - * @param certId Serial id of SCM issued certificate. - * - * @return DatanodeDetails.Builder - */ - public Builder setCertSerialId(String certId) { - this.certSerialId = certId; - return this; - } - - /** - * Builds and returns DatanodeDetails instance. 
- * - * @return DatanodeDetails - */ - public DatanodeDetails build() { - Preconditions.checkNotNull(id); - if (networkLocation == null) { - networkLocation = NetConstants.DEFAULT_RACK; - } - DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId); - if (networkName != null) { - dn.setNetworkName(networkName); - } - return dn; - } - } - - /** - * Constructs a new Port with name and value. - * - * @param name Name of the port - * @param value Port number - * - * @return {@code Port} instance - */ - public static Port newPort(Port.Name name, Integer value) { - return new Port(name, value); - } - - /** - * Container to hold DataNode Port details. - */ - public static final class Port { - - /** - * Ports that are supported in DataNode. - */ - public enum Name { - STANDALONE, RATIS, REST - } - - private Name name; - private Integer value; - - /** - * Private constructor for constructing Port object. Use - * DatanodeDetails#newPort to create a new Port object. - * - * @param name - * @param value - */ - private Port(Name name, Integer value) { - this.name = name; - this.value = value; - } - - /** - * Returns the name of the port. - * - * @return Port name - */ - public Name getName() { - return name; - } - - /** - * Returns the port number. - * - * @return Port number - */ - public Integer getValue() { - return value; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - /** - * Ports are considered equal if they have the same name. - * - * @param anObject - * The object to compare this {@code Port} against - * @return {@code true} if the given object represents a {@code Port} - and has the same name, {@code false} otherwise - */ - @Override - public boolean equals(Object anObject) { - if (this == anObject) { - return true; - } - if (anObject instanceof Port) { - return name.equals(((Port) anObject).name); - } - return false; - } - } - - /** - * Returns serial id of SCM issued certificate. - * - * @return certificate serial id - */ - public String getCertSerialId() { - return certSerialId; - } - - /** - * Set certificate serial id of SCM issued certificate. - * - */ - public void setCertSerialId(String certSerialId) { - this.certSerialId = certSerialId; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java deleted file mode 100644 index 4036cb17b84..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.protocol; - -import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.security.KerberosInfo; - -/** - * The protocol used to perform security related operations with SCM. - */ -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface SCMSecurityProtocol { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Get SCM signed certificate for DataNode. - * - * @param dataNodeDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - String getDataNodeCertificate( - DatanodeDetailsProto dataNodeDetails, - String certSignReq) throws IOException; - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return String - pem encoded SCM signed - * certificate. - */ - String getOMCertificate(OzoneManagerDetailsProto omDetails, - String certSignReq) throws IOException; - - /** - * Get SCM signed certificate for given certificate serial id if it exists. - * Throws exception if it's not found. - * - * @param certSerialId - Certificate serial id. - * @return String - pem encoded SCM signed - * certificate with given cert id if it - * exists. - */ - String getCertificate(String certSerialId) throws IOException; - - /** - * Get CA certificate. - * - * @return String - pem encoded CA certificate. - */ - String getCACertificate() throws IOException; - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java deleted file mode 100644 index 7dae0fce02c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains HDDS protocol related classes. 
- */ -package org.apache.hadoop.hdds.protocol; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java deleted file mode 100644 index efe79a76f31..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.function.Consumer; - -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest.Builder; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Type; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import static org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto; - -/** - * This class is the client-side translator that forwards requests for - * {@link SCMSecurityProtocol} to the {@link SCMSecurityProtocolPB} proxy. - */ -public class SCMSecurityProtocolClientSideTranslatorPB implements - SCMSecurityProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - private final SCMSecurityProtocolPB rpcProxy; - - public SCMSecurityProtocolClientSideTranslatorPB( - SCMSecurityProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private SCMSecurityResponse submitRequest( - SCMSecurityProtocolProtos.Type type, - Consumer builderConsumer) throws IOException { - final SCMSecurityResponse response; - try { - - Builder builder = SCMSecurityRequest.newBuilder() - .setCmdType(type) - .setTraceID(TracingUtil.exportCurrentSpan()); - builderConsumer.accept(builder); - SCMSecurityRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * - *
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - RPC.stopProxy(rpcProxy); - } - - /** - * Get SCM signed certificate for DataNode. - * - * @param dataNodeDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - @Override - public String getDataNodeCertificate(DatanodeDetailsProto dataNodeDetails, - String certSignReq) throws IOException { - return getDataNodeCertificateChain(dataNodeDetails, certSignReq) - .getX509Certificate(); - } - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - OzoneManager Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - @Override - public String getOMCertificate(OzoneManagerDetailsProto omDetails, - String certSignReq) throws IOException { - return getOMCertChain(omDetails, certSignReq).getX509Certificate(); - } - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - OzoneManager Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - public SCMGetCertResponseProto getOMCertChain( - OzoneManagerDetailsProto omDetails, String certSignReq) - throws IOException { - SCMGetOMCertRequestProto request = SCMGetOMCertRequestProto - .newBuilder() - .setCSR(certSignReq) - .setOmDetails(omDetails) - .build(); - return submitRequest(Type.GetOMCertificate, - builder -> builder.setGetOMCertRequest(request)) - .getGetCertResponseProto(); - } - - /** - * Get SCM signed certificate with given serial id. Throws exception if - * certificate is not found. - * - * @param certSerialId - Certificate serial id. - * @return string - pem encoded certificate. - */ - @Override - public String getCertificate(String certSerialId) throws IOException { - SCMGetCertificateRequestProto request = SCMGetCertificateRequestProto - .newBuilder() - .setCertSerialId(certSerialId) - .build(); - return submitRequest(Type.GetCertificate, - builder -> builder.setGetCertificateRequest(request)) - .getGetCertResponseProto() - .getX509Certificate(); - } - - /** - * Get SCM signed certificate for Datanode. - * - * @param dnDetails - Datanode Details. - * @param certSignReq - Certificate signing request. - * @return byte[] - SCM signed certificate. - */ - public SCMGetCertResponseProto getDataNodeCertificateChain( - DatanodeDetailsProto dnDetails, String certSignReq) - throws IOException { - - SCMGetDataNodeCertRequestProto request = - SCMGetDataNodeCertRequestProto.newBuilder() - .setCSR(certSignReq) - .setDatanodeDetails(dnDetails) - .build(); - return submitRequest(Type.GetDataNodeCertificate, - builder -> builder.setGetDataNodeCertRequest(request)) - .getGetCertResponseProto(); - } - - /** - * Get CA certificate. - * - * @return serial - Root certificate. 
- */ - @Override - public String getCACertificate() throws IOException { - SCMGetCACertificateRequestProto protoIns = SCMGetCACertificateRequestProto - .getDefaultInstance(); - return submitRequest(Type.GetCACertificate, - builder -> builder.setGetCACertificateRequest(protoIns)) - .getGetCertResponseProto().getX509Certificate(); - - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java deleted file mode 100644 index 41b0332d6d3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolPB.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.protocolPB; - -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol for security related operations on SCM. - */ - -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.protocol.SCMSecurityProtocol", - protocolVersion = 1) -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface SCMSecurityProtocolPB extends - SCMSecurityProtocolService.BlockingInterface { - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java deleted file mode 100644 index 44960194f07..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocolPB/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.protocolPB; -/** - * This package contains classes for wiring HDDS protobuf calls to rpc. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java deleted file mode 100644 index 07a886a0f9c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/ContainerCommandRequestMessage.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.ratis; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.ratis.util.JavaUtils; - -import java.util.Objects; -import java.util.function.Supplier; - -/** - * Implementing the {@link Message} interface - * for {@link ContainerCommandRequestProto}. - */ -public final class ContainerCommandRequestMessage implements Message { - public static ContainerCommandRequestMessage toMessage( - ContainerCommandRequestProto request, String traceId) { - final ContainerCommandRequestProto.Builder b - = ContainerCommandRequestProto.newBuilder(request); - if (traceId != null) { - b.setTraceID(traceId); - } - - ByteString data = ByteString.EMPTY; - if (request.getCmdType() == Type.WriteChunk) { - final WriteChunkRequestProto w = request.getWriteChunk(); - data = w.getData(); - b.setWriteChunk(w.toBuilder().clearData()); - } else if (request.getCmdType() == Type.PutSmallFile) { - final PutSmallFileRequestProto p = request.getPutSmallFile(); - data = p.getData(); - b.setPutSmallFile(p.toBuilder().setData(ByteString.EMPTY)); - } - return new ContainerCommandRequestMessage(b.build(), data); - } - - public static ContainerCommandRequestProto toProto( - ByteString bytes, RaftGroupId groupId) - throws InvalidProtocolBufferException { - final int i = 4 + bytes.asReadOnlyByteBuffer().getInt(); - final ContainerCommandRequestProto header - = ContainerCommandRequestProto.parseFrom(bytes.substring(4, i)); - // TODO: setting pipeline id can be avoided if the client is sending it. - // In such case, just have to validate the pipeline id. 
- final ContainerCommandRequestProto.Builder b = header.toBuilder(); - if (groupId != null) { - b.setPipelineID(groupId.getUuid().toString()); - } - final ByteString data = bytes.substring(i); - if (header.getCmdType() == Type.WriteChunk) { - b.setWriteChunk(b.getWriteChunkBuilder().setData(data)); - } else if (header.getCmdType() == Type.PutSmallFile) { - b.setPutSmallFile(b.getPutSmallFileBuilder().setData(data)); - } - return b.build(); - } - - private final ContainerCommandRequestProto header; - private final ByteString data; - private final Supplier contentSupplier - = JavaUtils.memoize(this::buildContent); - - private ContainerCommandRequestMessage( - ContainerCommandRequestProto header, ByteString data) { - this.header = Objects.requireNonNull(header, "header == null"); - this.data = Objects.requireNonNull(data, "data == null"); - } - - private ByteString buildContent() { - final ByteString headerBytes = header.toByteString(); - return RatisHelper.int2ByteString(headerBytes.size()) - .concat(headerBytes) - .concat(data); - } - - @Override - public ByteString getContent() { - return contentSupplier.get(); - } - - @Override - public String toString() { - return header + ", data.size=" + data.size(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java deleted file mode 100644 index 081b4fb766b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ /dev/null @@ -1,290 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
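Before the RatisHelper file, a round-trip sketch (not part of the patch) for the ContainerCommandRequestMessage framing that just ended: getContent() emits a 4-byte header length, the header bytes, then the raw chunk data, and toProto() reverses it. The request proto, trace id and Raft group id are assumed inputs:

    // Sketch only: encode and decode through the wire framing above.
    static ContainerCommandRequestProto roundTrip(
        ContainerCommandRequestProto request, String traceId,
        RaftGroupId groupId) throws InvalidProtocolBufferException {
      ContainerCommandRequestMessage msg =
          ContainerCommandRequestMessage.toMessage(request, traceId);
      ByteString wire = msg.getContent();  // [int32 header length][header][data]
      // toProto() reads the 4-byte length, re-parses the header and re-attaches
      // the trailing bytes as the WriteChunk/PutSmallFile data field.
      return ContainerCommandRequestMessage.toProto(wire, groupId);
    }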
- */ - -package org.apache.hadoop.hdds.ratis; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; - -import org.apache.ratis.RaftConfigKeys; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.client.RaftClientConfigKeys; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.grpc.GrpcFactory; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.retry.RetryPolicies; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.SizeInBytes; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ratis helper methods. - */ -public interface RatisHelper { - Logger LOG = LoggerFactory.getLogger(RatisHelper.class); - - static String toRaftPeerIdString(DatanodeDetails id) { - return id.getUuidString(); - } - - static UUID toDatanodeId(String peerIdString) { - return UUID.fromString(peerIdString); - } - - static UUID toDatanodeId(RaftPeerId peerId) { - return toDatanodeId(peerId.toString()); - } - - static UUID toDatanodeId(RaftProtos.RaftPeerProto peerId) { - return toDatanodeId(RaftPeerId.valueOf(peerId.getId())); - } - - static String toRaftPeerAddressString(DatanodeDetails id) { - return id.getIpAddress() + ":" + - id.getPort(DatanodeDetails.Port.Name.RATIS).getValue(); - } - - static RaftPeerId toRaftPeerId(DatanodeDetails id) { - return RaftPeerId.valueOf(toRaftPeerIdString(id)); - } - - static RaftPeer toRaftPeer(DatanodeDetails id) { - return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id)); - } - - static List toRaftPeers(Pipeline pipeline) { - return toRaftPeers(pipeline.getNodes()); - } - - static List toRaftPeers( - List datanodes) { - return datanodes.stream().map(RatisHelper::toRaftPeer) - .collect(Collectors.toList()); - } - - /* TODO: use a dummy id for all groups for the moment. - * It should be changed to a unique id for each group. 
- */ - RaftGroupId DUMMY_GROUP_ID = - RaftGroupId.valueOf(ByteString.copyFromUtf8("AOzoneRatisGroup")); - - RaftGroup EMPTY_GROUP = RaftGroup.valueOf(DUMMY_GROUP_ID, - Collections.emptyList()); - - static RaftGroup emptyRaftGroup() { - return EMPTY_GROUP; - } - - static RaftGroup newRaftGroup(Collection peers) { - return peers.isEmpty()? emptyRaftGroup() - : RaftGroup.valueOf(DUMMY_GROUP_ID, peers); - } - - static RaftGroup newRaftGroup(RaftGroupId groupId, - Collection peers) { - final List newPeers = peers.stream() - .map(RatisHelper::toRaftPeer) - .collect(Collectors.toList()); - return peers.isEmpty() ? RaftGroup.valueOf(groupId, Collections.emptyList()) - : RaftGroup.valueOf(groupId, newPeers); - } - - static RaftGroup newRaftGroup(Pipeline pipeline) { - return RaftGroup.valueOf(RaftGroupId.valueOf(pipeline.getId().getId()), - toRaftPeers(pipeline)); - } - - static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, - RetryPolicy retryPolicy, int maxOutStandingRequest, - GrpcTlsConfig tlsConfig, TimeDuration timeout) throws IOException { - return newRaftClient(rpcType, toRaftPeerId(pipeline.getFirstNode()), - newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()), - pipeline.getNodes()), retryPolicy, maxOutStandingRequest, tlsConfig, - timeout); - } - - static TimeDuration getClientRequestTimeout(Configuration conf) { - // Set the client requestTimeout - final TimeUnit timeUnit = - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - .getUnit(); - final long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration clientRequestTimeout = - TimeDuration.valueOf(duration, timeUnit); - return clientRequestTimeout; - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, - RetryPolicy retryPolicy, int maxOutstandingRequests, - GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) { - return newRaftClient(rpcType, leader.getId(), - newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy, - maxOutstandingRequests, tlsConfig, clientRequestTimeout); - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, - RetryPolicy retryPolicy, int maxOutstandingRequests, - TimeDuration clientRequestTimeout) { - return newRaftClient(rpcType, leader.getId(), - newRaftGroup(new ArrayList<>(Arrays.asList(leader))), retryPolicy, - maxOutstandingRequests, null, clientRequestTimeout); - } - - static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, - RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest, - GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) { - if (LOG.isTraceEnabled()) { - LOG.trace("newRaftClient: {}, leader={}, group={}", - rpcType, leader, group); - } - final RaftProperties properties = new RaftProperties(); - RaftConfigKeys.Rpc.setType(properties, rpcType); - RaftClientConfigKeys.Rpc - .setRequestTimeout(properties, clientRequestTimeout); - - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)); - GrpcConfigKeys.OutputStream.setOutstandingAppendsMax(properties, - maxOutStandingRequest); - - RaftClient.Builder builder = RaftClient.newBuilder() - .setRaftGroup(group) - .setLeaderId(leader) - .setProperties(properties) - .setRetryPolicy(retryPolicy); - - // TODO: GRPC TLS only for now, netty/hadoop RPC TLS support later. 
- if (tlsConfig != null && rpcType == SupportedRpcType.GRPC) { - builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig)); - } - return builder.build(); - } - - // For External gRPC client to server with gRPC TLS. - // No mTLS for external client as SCM CA does not issued certificates for them - static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf, - X509Certificate caCert) { - GrpcTlsConfig tlsConfig = null; - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - tlsConfig = new GrpcTlsConfig(null, null, - caCert, false); - } - return tlsConfig; - } - - // For Internal gRPC client from SCM to DN with gRPC TLS - static GrpcTlsConfig createTlsClientConfigForSCM(SecurityConfig conf, - CertificateServer certificateServer) throws IOException { - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - try { - X509Certificate caCert = - CertificateCodec.getX509Certificate( - certificateServer.getCACertificate()); - return new GrpcTlsConfig(null, null, - caCert, false); - } catch (CertificateException ex) { - throw new SCMSecurityException("Fail to find SCM CA certificate.", ex); - } - } - return null; - } - - // For gRPC server running DN container service with gPRC TLS - // No mTLS as the channel is shared for for external client, which - // does not have SCM CA issued certificates. - // In summary: - // authenticate from server to client is via TLS. - // authenticate from client to server is via block token (or container token). - static GrpcTlsConfig createTlsServerConfigForDN(SecurityConfig conf, - CertificateClient caClient) { - if (conf.isSecurityEnabled() && conf.isGrpcTlsEnabled()) { - return new GrpcTlsConfig( - caClient.getPrivateKey(), caClient.getCertificate(), - null, false); - } - return null; - } - - static RetryPolicy createRetryPolicy(Configuration conf) { - int maxRetryCount = - conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, - OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT); - long retryInterval = conf.getTimeDuration(OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, OzoneConfigKeys. - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT - .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS); - TimeDuration sleepDuration = - TimeDuration.valueOf(retryInterval, TimeUnit.MILLISECONDS); - RetryPolicy retryPolicy = RetryPolicies - .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration); - return retryPolicy; - } - - static Long getMinReplicatedIndex( - Collection commitInfos) { - return commitInfos.stream().map(RaftProtos.CommitInfoProto::getCommitIndex) - .min(Long::compareTo).orElse(null); - } - - static ByteString int2ByteString(int n) { - final ByteString.Output out = ByteString.newOutput(); - try(DataOutputStream dataOut = new DataOutputStream(out)) { - dataOut.writeInt(n); - } catch (IOException e) { - throw new IllegalStateException( - "Failed to write integer n = " + n + " to a ByteString.", e); - } - return out.toByteString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java deleted file mode 100644 index e52dc7ffc70..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
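A wiring sketch, not part of the patch, showing how the RatisHelper methods above fit together; the Configuration, Pipeline and outstanding-request limit are assumed to come from the caller, and TLS is left off:

    // Sketch only: build a RaftClient for a pipeline using the helpers above.
    static RaftClient newClientForPipeline(Configuration conf, Pipeline pipeline,
        int maxOutstanding) throws IOException {
      RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf);
      TimeDuration requestTimeout = RatisHelper.getClientRequestTimeout(conf);
      return RatisHelper.newRaftClient(SupportedRpcType.GRPC, pipeline,
          retryPolicy, maxOutstanding, null /* no GrpcTlsConfig */,
          requestTimeout);
    }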
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.ratis; - -/** - * This package contains classes related to Apache Ratis. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java deleted file mode 100644 index 4608df76122..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; - -import java.nio.ByteBuffer; -import java.util.function.Function; - -/** - * Helper class to create a conversion function from ByteBuffer to ByteString - * based on the property - * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} in the - * Ozone configuration. - */ -public final class ByteStringConversion { - private ByteStringConversion(){} // no instantiation. - - /** - * Creates the conversion function to be used to convert ByteBuffers to - * ByteString instances to be used in protobuf messages. - * - * @param config the Ozone configuration - * @return the conversion function defined by - * {@link OzoneConfigKeys#OZONE_UNSAFEBYTEOPERATIONS_ENABLED} - * @see
ByteBuffer
- */ - public static Function createByteBufferConversion( - Configuration config){ - boolean unsafeEnabled = - config!=null && config.getBoolean( - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, - OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT); - if (unsafeEnabled) { - return buffer -> UnsafeByteOperations.unsafeWrap(buffer); - } else { - return buffer -> { - ByteString retval = ByteString.copyFrom(buffer); - buffer.flip(); - return retval; - }; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java deleted file mode 100644 index 161780668ab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ /dev/null @@ -1,375 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.util.TimeDuration; - -import java.util.concurrent.TimeUnit; - -/** - * This class contains constants for configuration keys used in SCM. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class ScmConfigKeys { - - // Location of SCM DB files. For now we just support a single - // metadata dir but in future we may support multiple for redundancy or - // performance. 
- public static final String OZONE_SCM_DB_DIRS = "ozone.scm.db.dirs"; - - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = "GRPC"; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = "dfs.container.ratis.num.write.chunk.threads"; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = 60; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; - public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = - "1MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; - public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "16KB"; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; - public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - TimeDuration.valueOf(10, TimeUnit.SECONDS); - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; - public static final int - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = -1; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; - // The default value of maximum number of pending state machine apply - // transactions is kept same as default snapshot threshold. - public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = - 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - "4GB"; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; - public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; - // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - 1000000; - - public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS = - "dfs.container.ratis.leader.num.pending.requests"; - public static final int - DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096; - // expiry interval stateMachineData cache entry inside containerStateMachine - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL = - "dfs.container.ratis.statemachine.cache.expiry.interval"; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT = - "10s"; - public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY = - "dfs.ratis.client.request.timeout.duration"; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY = - "dfs.ratis.client.request.max.retries"; - public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180; - public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY = - "dfs.ratis.client.request.retry.interval"; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT = - TimeDuration.valueOf(1000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); - public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.request.timeout.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; - public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(5, TimeUnit.SECONDS); - - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; - - public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY = - "dfs.ratis.server.failure.duration"; - public static final TimeDuration - DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT = - TimeDuration.valueOf(120, TimeUnit.SECONDS); - - // TODO : this is copied from OzoneConsts, may need to move to a better place - public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; - // 16 MB by default - public static final String OZONE_SCM_CHUNK_SIZE_DEFAULT = "16MB"; - - public static final String OZONE_SCM_CLIENT_PORT_KEY = - "ozone.scm.client.port"; - public static final int OZONE_SCM_CLIENT_PORT_DEFAULT = 9860; - - public static final String OZONE_SCM_DATANODE_PORT_KEY = - "ozone.scm.datanode.port"; - public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861; - - // OZONE_OM_PORT_DEFAULT = 9862 - public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY = - "ozone.scm.block.client.port"; - public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863; - - public static final String OZONE_SCM_SECURITY_SERVICE_PORT_KEY = - "ozone.scm.security.service.port"; - public static final int OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT = 9961; - - 
// Container service client - public static final String OZONE_SCM_CLIENT_ADDRESS_KEY = - "ozone.scm.client.address"; - public static final String OZONE_SCM_CLIENT_BIND_HOST_KEY = - "ozone.scm.client.bind.host"; - public static final String OZONE_SCM_CLIENT_BIND_HOST_DEFAULT = - "0.0.0.0"; - - // Block service client - public static final String OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY = - "ozone.scm.block.client.address"; - public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY = - "ozone.scm.block.client.bind.host"; - public static final String OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT = - "0.0.0.0"; - - // SCM Security service address. - public static final String OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY = - "ozone.scm.security.service.address"; - public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY = - "ozone.scm.security.service.bind.host"; - public static final String OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT = - "0.0.0.0"; - - public static final String OZONE_SCM_DATANODE_ADDRESS_KEY = - "ozone.scm.datanode.address"; - public static final String OZONE_SCM_DATANODE_BIND_HOST_KEY = - "ozone.scm.datanode.bind.host"; - public static final String OZONE_SCM_DATANODE_BIND_HOST_DEFAULT = - "0.0.0.0"; - - public static final String OZONE_SCM_HTTP_ENABLED_KEY = - "ozone.scm.http.enabled"; - public static final String OZONE_SCM_HTTP_BIND_HOST_KEY = - "ozone.scm.http-bind-host"; - public static final String OZONE_SCM_HTTPS_BIND_HOST_KEY = - "ozone.scm.https-bind-host"; - public static final String OZONE_SCM_HTTP_ADDRESS_KEY = - "ozone.scm.http-address"; - public static final String OZONE_SCM_HTTPS_ADDRESS_KEY = - "ozone.scm.https-address"; - public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY = - "hdds.scm.kerberos.keytab.file"; - public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = - "hdds.scm.kerberos.principal"; - public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876; - public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877; - - public static final String HDDS_REST_HTTP_ADDRESS_KEY = - "hdds.rest.http-address"; - public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880"; - public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir"; - public static final String HDDS_REST_CSRF_ENABLED_KEY = - "hdds.rest.rest-csrf.enabled"; - public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false; - public static final String HDDS_REST_NETTY_HIGH_WATERMARK = - "hdds.rest.netty.high.watermark"; - public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; - public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; - public static final String HDDS_REST_NETTY_LOW_WATERMARK = - "hdds.rest.netty.low.watermark"; - - public static final String OZONE_SCM_HANDLER_COUNT_KEY = - "ozone.scm.handler.count.key"; - public static final int OZONE_SCM_HANDLER_COUNT_DEFAULT = 10; - - public static final String OZONE_SCM_SECURITY_HANDLER_COUNT_KEY = - "ozone.scm.security.handler.count.key"; - public static final int OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT = 2; - - public static final String OZONE_SCM_DEADNODE_INTERVAL = - "ozone.scm.dead.node.interval"; - public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT = - "10m"; - - public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL = - "ozone.scm.heartbeat.thread.interval"; - public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT = - "3s"; - - public static 
final String OZONE_SCM_STALENODE_INTERVAL = - "ozone.scm.stale.node.interval"; - public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT = - "5m"; - - public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT = - "ozone.scm.heartbeat.rpc-timeout"; - public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT = - "1s"; - - /** - * Defines how frequently we will log the missing of heartbeat to a specific - * SCM. In the default case we will write a warning message for each 10 - * sequential heart beats that we miss to a specific SCM. This is to avoid - * overrunning the log with lots of HB missed Log statements. - */ - public static final String OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT = - "ozone.scm.heartbeat.log.warn.interval.count"; - public static final int OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT = - 10; - - // ozone.scm.names key is a set of DNS | DNS:PORT | IP Address | IP:PORT. - // Written as a comma separated string. e.g. scm1, scm2:8020, 7.7.7.7:7777 - // - // If this key is not specified datanodes will not be able to find - // SCM. The SCM membership can be dynamic, so this key should contain - // all possible SCM names. Once the SCM leader is discovered datanodes will - // get the right list of SCMs to heartbeat to from the leader. - // While it is good for the datanodes to know the names of all SCM nodes, - // it is sufficient to actually know the name of on working SCM. That SCM - // will be able to return the information about other SCMs that are part of - // the SCM replicated Log. - // - //In case of a membership change, any one of the SCM machines will be - // able to send back a new list to the datanodes. - public static final String OZONE_SCM_NAMES = "ozone.scm.names"; - - public static final int OZONE_SCM_DEFAULT_PORT = - OZONE_SCM_DATANODE_PORT_DEFAULT; - // The path where datanode ID is to be written to. - // if this value is not set then container startup will fail. 
- public static final String OZONE_SCM_DATANODE_ID_DIR = - "ozone.scm.datanode.id.dir"; - - public static final String OZONE_SCM_DB_CACHE_SIZE_MB = - "ozone.scm.db.cache.size.mb"; - public static final int OZONE_SCM_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_SCM_CONTAINER_SIZE = - "ozone.scm.container.size"; - public static final String OZONE_SCM_CONTAINER_SIZE_DEFAULT = "5GB"; - - public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY = - "ozone.scm.container.placement.impl"; - - public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT = - "ozone.scm.pipeline.owner.container.count"; - public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3; - - public static final String - OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY = - "ozone.scm.keyvalue.container.deletion-choosing.policy"; - - public static final String OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT = - "ozone.scm.container.creation.lease.timeout"; - - public static final String - OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT_DEFAULT = "60s"; - - public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT = - "ozone.scm.pipeline.destroy.timeout"; - - public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT = - "66s"; - - public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL = - "ozone.scm.pipeline.creation.interval"; - public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT = - "120s"; - - public static final String OZONE_SCM_BLOCK_DELETION_MAX_RETRY = - "ozone.scm.block.deletion.max.retry"; - public static final int OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT = 4096; - - public static final String HDDS_SCM_WATCHER_TIMEOUT = - "hdds.scm.watcher.timeout"; - - public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT = - "10m"; - - public static final String - HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY = - "hdds.scm.http.kerberos.principal"; - public static final String - HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "hdds.scm.http.kerberos.keytab"; - - // Network topology - public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE = - "ozone.scm.network.topology.schema.file"; - public static final String OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT = - "network-topology-default.xml"; - - public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled"; - public static final boolean HDDS_TRACING_ENABLED_DEFAULT = true; - - /** - * Never constructed. - */ - private ScmConfigKeys() { - - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java deleted file mode 100644 index 6236febb7b1..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
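For illustration only, a sketch of how the ScmConfigKeys constants above were consumed through an ordinary Hadoop Configuration (in Ozone usually an OzoneConfiguration), with the supplied defaults used as fallbacks:

    // Sketch only: "conf" is assumed to be provided by the caller.
    static void readScmSettings(Configuration conf) {
      String scmNames = conf.get(ScmConfigKeys.OZONE_SCM_NAMES);
      int handlerCount = conf.getInt(
          ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY,
          ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT);
      boolean ratisEnabled = conf.getBoolean(
          ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
          ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
      // unset keys fall back to the *_DEFAULT constants passed above
    }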
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * ScmInfo wraps the result returned from SCM#getScmInfo which - * contains clusterId and the SCM Id. - */ -public final class ScmInfo { - private String clusterId; - private String scmId; - - /** - * Builder for ScmInfo. - */ - public static class Builder { - private String clusterId; - private String scmId; - - /** - * sets the cluster id. - * @param cid clusterId to be set - * @return Builder for ScmInfo - */ - public Builder setClusterId(String cid) { - this.clusterId = cid; - return this; - } - - /** - * sets the scmId. - * @param id scmId - * @return Builder for scmInfo - */ - public Builder setScmId(String id) { - this.scmId = id; - return this; - } - - public ScmInfo build() { - return new ScmInfo(clusterId, scmId); - } - } - - private ScmInfo(String clusterId, String scmId) { - this.clusterId = clusterId; - this.scmId = scmId; - } - - /** - * Gets the clusterId from the Version file. - * @return ClusterId - */ - public String getClusterId() { - return clusterId; - } - - /** - * Gets the SCM Id from the Version file. - * @return SCM Id - */ - public String getScmId() { - return scmId; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java deleted file mode 100644 index bae0758fddb..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientReply.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -/** - * This class represents the reply from XceiverClient. - */ -public class XceiverClientReply { - - private CompletableFuture response; - private Long logIndex; - - /** - * List of datanodes where the command got executed and reply is received. - * If there is an exception in the reply, these datanodes will inform - * about the servers where there is a failure. - */ - private List datanodes; - - public XceiverClientReply( - CompletableFuture response) { - this(response, null); - } - - public XceiverClientReply( - CompletableFuture response, - List datanodes) { - this.logIndex = (long) 0; - this.response = response; - this.datanodes = datanodes == null ? 
new ArrayList<>() : datanodes; - } - - public CompletableFuture getResponse() { - return response; - } - - public long getLogIndex() { - return logIndex; - } - - public void setLogIndex(Long logIndex) { - this.logIndex = logIndex; - } - - public List getDatanodes() { - return datanodes; - } - - public void addDatanode(DatanodeDetails dn) { - datanodes.add(dn); - } - - public void setResponse( - CompletableFuture response) { - this.response = response; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java deleted file mode 100644 index 5631badf44c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; - -/** - * A Client for the storageContainer protocol. - */ -public abstract class XceiverClientSpi implements Closeable { - - final private AtomicInteger referenceCount; - private boolean isEvicted; - - XceiverClientSpi() { - this.referenceCount = new AtomicInteger(0); - this.isEvicted = false; - } - - void incrementReference() { - this.referenceCount.incrementAndGet(); - } - - void decrementReference() { - this.referenceCount.decrementAndGet(); - cleanup(); - } - - void setEvicted() { - isEvicted = true; - cleanup(); - } - - // close the xceiverClient only if, - // 1) there is no refcount on the client - // 2) it has been evicted from the cache. - private void cleanup() { - if (referenceCount.get() == 0 && isEvicted) { - close(); - } - } - - @VisibleForTesting - public int getRefcount() { - return referenceCount.get(); - } - - /** - * Connects to the leader in the pipeline. - */ - public abstract void connect() throws Exception; - - /** - * Connects to the leader in the pipeline using encoded token. To be used - * in a secure cluster. 
- */ - public abstract void connect(String encodedToken) throws Exception; - - @Override - public abstract void close(); - - /** - * Returns the pipeline of machines that host the container used by this - * client. - * - * @return pipeline of machines that host the container - */ - public abstract Pipeline getPipeline(); - - /** - * Sends a given command to server and gets the reply back. - * @param request Request - * @return Response to the command - * @throws IOException - */ - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request) throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandAsync(request); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to command " + request, e); - } - } - - /** - * Sends a given command to server and gets the reply back along with - * the server associated info. - * @param request Request - * @param validators functions to validate the response - * @return Response to the command - * @throws IOException - */ - public ContainerCommandResponseProto sendCommand( - ContainerCommandRequestProto request, List validators) - throws IOException { - try { - XceiverClientReply reply; - reply = sendCommandAsync(request); - ContainerCommandResponseProto responseProto = reply.getResponse().get(); - for (CheckedBiFunction function : validators) { - function.apply(request, responseProto); - } - return responseProto; - } catch (ExecutionException | InterruptedException e) { - throw new IOException("Failed to command " + request, e); - } - } - - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - * @throws IOException - */ - public abstract XceiverClientReply - sendCommandAsync(ContainerCommandRequestProto request) - throws IOException, ExecutionException, InterruptedException; - - /** - * Returns pipeline Type. - * - * @return - {Stand_Alone, Ratis or Chained} - */ - public abstract HddsProtos.ReplicationType getPipelineType(); - - /** - * Check if an specfic commitIndex is replicated to majority/all servers. - * @param index index to watch for - * @param timeout timeout provided for the watch operation to complete - * @return reply containing the min commit index replicated to all or majority - * servers in case of a failure - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - * @throws IOException - */ - public abstract XceiverClientReply watchForCommit(long index, long timeout) - throws InterruptedException, ExecutionException, TimeoutException, - IOException; - - /** - * returns the min commit index replicated to all servers. - * @return min commit index replicated to all servers. - */ - public abstract long getReplicatedMinCommitIndex(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java deleted file mode 100644 index 226ceda9255..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
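A usage sketch, not taken from the patch, for the XceiverClientSpi abstraction above, covering the synchronous and asynchronous send paths; the concrete client and request are assumed to exist, exception handling is omitted, and the watch timeout is a placeholder:

    // Sketch only: send a command, then wait for replication of the log index.
    static void sendAndWatch(XceiverClientSpi client,
        ContainerCommandRequestProto request) throws Exception {
      client.connect();
      ContainerCommandResponseProto response = client.sendCommand(request);
      // Async path: the reply carries the response future plus the Ratis log
      // index, which can then be handed to watchForCommit().
      XceiverClientReply reply = client.sendCommandAsync(request);
      reply.getResponse().get();
      client.watchForCommit(reply.getLogIndex(), 10000);
    }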
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.client; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * The interface to call into underlying container layer. - * - * Written as interface to allow easy testing: implement a mock container layer - * for standalone testing of CBlock API without actually calling into remote - * containers. Actual container layer can simply re-implement this. - * - * NOTE this is temporarily needed class. When SCM containers are full-fledged, - * this interface will likely be removed. - */ -@InterfaceStability.Unstable -public interface ScmClient extends Closeable { - /** - * Creates a Container on SCM and returns the pipeline. - * @return ContainerInfo - * @throws IOException - */ - ContainerWithPipeline createContainer(String owner) throws IOException; - - /** - * Gets a container by Name -- Throws if the container does not exist. - * @param containerId - Container ID - * @return Pipeline - * @throws IOException - */ - ContainerInfo getContainer(long containerId) throws IOException; - - /** - * Gets a container by Name -- Throws if the container does not exist. - * @param containerId - Container ID - * @return ContainerWithPipeline - * @throws IOException - */ - ContainerWithPipeline getContainerWithPipeline(long containerId) - throws IOException; - - /** - * Close a container. - * - * @param containerId - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @throws IOException - */ - void closeContainer(long containerId, Pipeline pipeline) throws IOException; - - /** - * Close a container. - * - * @param containerId - ID of the container. - * @throws IOException - */ - void closeContainer(long containerId) throws IOException; - - /** - * Deletes an existing container. - * @param containerId - ID of the container. - * @param pipeline - Pipeline that represents the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - void deleteContainer(long containerId, Pipeline pipeline, boolean force) - throws IOException; - - /** - * Deletes an existing container. - * @param containerId - ID of the container. - * @param force - true to forcibly delete the container. - * @throws IOException - */ - void deleteContainer(long containerId, boolean force) throws IOException; - - /** - * Lists a range of containers and get their info. - * - * @param startContainerID start containerID. - * @param count count must be {@literal >} 0. - * - * @return a list of pipeline. 
- * @throws IOException - */ - List listContainer(long startContainerID, - int count) throws IOException; - - /** - * Read meta data from an existing container. - * @param containerID - ID of the container. - * @param pipeline - Pipeline where the container is located. - * @return ContainerInfo - * @throws IOException - */ - ContainerDataProto readContainer(long containerID, Pipeline pipeline) - throws IOException; - - /** - * Read meta data from an existing container. - * @param containerID - ID of the container. - * @return ContainerInfo - * @throws IOException - */ - ContainerDataProto readContainer(long containerID) - throws IOException; - - /** - * Gets the container size -- Computed by SCM from Container Reports. - * @param containerID - ID of the container. - * @return number of bytes used by this container. - * @throws IOException - */ - long getContainerSize(long containerID) throws IOException; - - /** - * Creates a Container on SCM and returns the pipeline. - * @param type - Replication Type. - * @param replicationFactor - Replication Factor - * @return ContainerInfo - * @throws IOException - in case of error. - */ - ContainerWithPipeline createContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, - String owner) throws IOException; - - /** - * Returns a set of Nodes that meet a query criteria. - * @param nodeStatuses - Criteria that we want the node to have. - * @param queryScope - Query scope - Cluster or pool. - * @param poolName - if it is pool, a pool name is required. - * @return A set of nodes that meet the requested criteria. - * @throws IOException - */ - List queryNode(HddsProtos.NodeState nodeStatuses, - HddsProtos.QueryScope queryScope, String poolName) throws IOException; - - /** - * Creates a specified replication pipeline. - * @param type - Type - * @param factor - Replication factor - * @param nodePool - Set of machines. - * @throws IOException - */ - Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException; - - /** - * Returns the list of active Pipelines. - * - * @return list of Pipeline - * @throws IOException in case of any exception - */ - List listPipelines() throws IOException; - - /** - * Activates the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to activate. - * @throws IOException In case of exception while activating the pipeline - */ - void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Deactivates the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to deactivate. - * @throws IOException In case of exception while deactivating the pipeline - */ - void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Closes the pipeline given a pipeline ID. - * - * @param pipelineID PipelineID to close. - * @throws IOException In case of exception while closing the pipeline - */ - void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - boolean inSafeMode() throws IOException; - - /** - * Force SCM out of safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - boolean forceExitSafeMode() throws IOException; - - /** - * Start ReplicationManager. 
- */ - void startReplicationManager() throws IOException; - - /** - * Stop ReplicationManager. - */ - void stopReplicationManager() throws IOException; - - /** - * Returns ReplicationManager status. - * - * @return True if ReplicationManager is running, false otherwise. - */ - boolean getReplicationManagerStatus() throws IOException; - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java deleted file mode 100644 index e2f7033d7fa..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.client; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java deleted file mode 100644 index 9d37dfb1f33..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; - -/** - * Signals that ContainerException of some sort has occurred. This is parent - * of all the exceptions thrown by ContainerManager. - */ -public class ContainerException extends IOException { - - /** - * Constructs an {@code ContainerException} with {@code null} - * as its error detail message. - */ - public ContainerException() { - super(); - } - - /** - * Constructs an {@code ContainerException} with the specified detail message. 
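Stepping back to the ScmClient interface removed just above: an administrative round trip over it looked roughly like the sketch below. How the ScmClient instance is obtained is omitted and the replication settings are arbitrary; the calls themselves are the signatures declared in the interface.

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

    final class ScmClientSketch {
      // Create a RATIS/THREE container, read it back, then close and force-delete it.
      static void containerRoundTrip(ScmClient scm, String owner) throws IOException {
        ContainerWithPipeline created = scm.createContainer(
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE, owner);
        long id = created.getContainerInfo().getContainerID();

        ContainerInfo info = scm.getContainer(id); // throws if the container does not exist
        scm.closeContainer(id);
        scm.deleteContainer(id, true);             // force == true
      }
    }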
- * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java deleted file mode 100644 index bb44da4e78e..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.builder.CompareToBuilder; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - -/** - * Container ID is an integer that is a value between 1..MAX_CONTAINER ID. - *

- * We are creating a specific type for this to avoid mixing this with - * normal integers in code. - */ -public final class ContainerID implements Comparable { - - private final long id; - - // TODO: make this private. - /** - * Constructs ContainerID. - * - * @param id int - */ - public ContainerID(long id) { - this.id = id; - } - - /** - * Factory method for creation of ContainerID. - * @param containerID long - * @return ContainerID. - */ - public static ContainerID valueof(final long containerID) { - Preconditions.checkState(containerID > 0, - "Container ID should be a positive long. "+ containerID); - return new ContainerID(containerID); - } - - /** - * Returns int representation of ID. - * - * @return int - */ - public long getId() { - return id; - } - - public byte[] getBytes() { - return Longs.toByteArray(id); - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - final ContainerID that = (ContainerID) o; - - return new EqualsBuilder() - .append(getId(), that.getId()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(61, 71) - .append(getId()) - .toHashCode(); - } - - @Override - public int compareTo(final ContainerID that) { - Preconditions.checkNotNull(that); - return new CompareToBuilder() - .append(this.getId(), that.getId()) - .build(); - } - - @Override - public String toString() { - return "#" + id; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java deleted file mode 100644 index 5c58e92d3c5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
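A few illustrative lines are enough to show the ContainerID contract above, i.e. value semantics over the numeric id alone; the asserts simply restate the equals/compareTo/toString behaviour defined in the class.

    import org.apache.hadoop.hdds.scm.container.ContainerID;

    final class ContainerIdSketch {
      static void demo() {
        ContainerID a = ContainerID.valueof(42L);  // valueof() rejects ids <= 0
        ContainerID b = new ContainerID(42L);
        assert a.equals(b) && a.compareTo(b) == 0; // identity is the numeric id only
        assert "#42".equals(a.toString());
      }
    }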
- */ -package org.apache.hadoop.hdds.scm.container; - -import static java.lang.Math.max; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.PropertyAccessor; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; -import java.io.Externalizable; -import java.io.IOException; -import java.io.ObjectInput; -import java.io.ObjectOutput; -import java.util.Arrays; -import java.util.Comparator; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.util.Time; - -/** - * Class wraps ozone container info. - */ -public class ContainerInfo implements Comparator, - Comparable, Externalizable { - - private static final ObjectWriter WRITER; - private static final String SERIALIZATION_ERROR_MSG = "Java serialization not" - + " supported. Use protobuf instead."; - - static { - ObjectMapper mapper = new ObjectMapper(); - mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY); - mapper - .setVisibility(PropertyAccessor.GETTER, JsonAutoDetect.Visibility.NONE); - WRITER = mapper.writerWithDefaultPrettyPrinter(); - } - - private HddsProtos.LifeCycleState state; - @JsonIgnore - private PipelineID pipelineID; - private ReplicationFactor replicationFactor; - private ReplicationType replicationType; - private long usedBytes; - private long numberOfKeys; - private long lastUsed; - // The wall-clock ms since the epoch at which the current state enters. - private long stateEnterTime; - private String owner; - private long containerID; - private long deleteTransactionId; - // The sequenceId of a close container cannot change, and all the - // container replica should have the same sequenceId. - private long sequenceId; - - /** - * Allows you to maintain private data on ContainerInfo. This is not - * serialized via protobuf, just allows us to maintain some private data. - */ - @JsonIgnore - private byte[] data; - - @SuppressWarnings("parameternumber") - ContainerInfo( - long containerID, - HddsProtos.LifeCycleState state, - PipelineID pipelineID, - long usedBytes, - long numberOfKeys, - long stateEnterTime, - String owner, - long deleteTransactionId, - long sequenceId, - ReplicationFactor replicationFactor, - ReplicationType repType) { - this.containerID = containerID; - this.pipelineID = pipelineID; - this.usedBytes = usedBytes; - this.numberOfKeys = numberOfKeys; - this.lastUsed = Time.monotonicNow(); - this.state = state; - this.stateEnterTime = stateEnterTime; - this.owner = owner; - this.deleteTransactionId = deleteTransactionId; - this.sequenceId = sequenceId; - this.replicationFactor = replicationFactor; - this.replicationType = repType; - } - - /** - * Needed for serialization findbugs. 
- */ - public ContainerInfo() { - } - - public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - return builder.setPipelineID( - PipelineID.getFromProtobuf(info.getPipelineID())) - .setUsedBytes(info.getUsedBytes()) - .setNumberOfKeys(info.getNumberOfKeys()) - .setState(info.getState()) - .setStateEnterTime(info.getStateEnterTime()) - .setOwner(info.getOwner()) - .setContainerID(info.getContainerID()) - .setDeleteTransactionId(info.getDeleteTransactionId()) - .setReplicationFactor(info.getReplicationFactor()) - .setReplicationType(info.getReplicationType()) - .build(); - } - - public long getContainerID() { - return containerID; - } - - public HddsProtos.LifeCycleState getState() { - return state; - } - - public void setState(HddsProtos.LifeCycleState state) { - this.state = state; - } - - public long getStateEnterTime() { - return stateEnterTime; - } - - public ReplicationFactor getReplicationFactor() { - return replicationFactor; - } - - public PipelineID getPipelineID() { - return pipelineID; - } - - public long getUsedBytes() { - return usedBytes; - } - - public void setUsedBytes(long value) { - usedBytes = value; - } - - public long getNumberOfKeys() { - return numberOfKeys; - } - - public void setNumberOfKeys(long value) { - numberOfKeys = value; - } - - public long getDeleteTransactionId() { - return deleteTransactionId; - } - - public long getSequenceId() { - return sequenceId; - } - - public void updateDeleteTransactionId(long transactionId) { - deleteTransactionId = max(transactionId, deleteTransactionId); - } - - public void updateSequenceId(long sequenceID) { - assert (isOpen() || state == HddsProtos.LifeCycleState.QUASI_CLOSED); - sequenceId = max(sequenceID, sequenceId); - } - - public ContainerID containerID() { - return new ContainerID(getContainerID()); - } - - /** - * Gets the last used time from SCM's perspective. - * - * @return time in milliseconds. - */ - public long getLastUsed() { - return lastUsed; - } - - public ReplicationType getReplicationType() { - return replicationType; - } - - public void updateLastUsedTime() { - lastUsed = Time.monotonicNow(); - } - - public HddsProtos.ContainerInfoProto getProtobuf() { - HddsProtos.ContainerInfoProto.Builder builder = - HddsProtos.ContainerInfoProto.newBuilder(); - Preconditions.checkState(containerID > 0); - return builder.setContainerID(getContainerID()) - .setUsedBytes(getUsedBytes()) - .setNumberOfKeys(getNumberOfKeys()).setState(getState()) - .setStateEnterTime(getStateEnterTime()).setContainerID(getContainerID()) - .setDeleteTransactionId(getDeleteTransactionId()) - .setPipelineID(getPipelineID().getProtobuf()) - .setReplicationFactor(getReplicationFactor()) - .setReplicationType(getReplicationType()) - .setOwner(getOwner()) - .build(); - } - - public String getOwner() { - return owner; - } - - public void setOwner(String owner) { - this.owner = owner; - } - - @Override - public String toString() { - return "ContainerInfo{" - + "id=" + containerID - + ", state=" + state - + ", pipelineID=" + pipelineID - + ", stateEnterTime=" + stateEnterTime - + ", owner=" + owner - + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerInfo that = (ContainerInfo) o; - - return new EqualsBuilder() - .append(getContainerID(), that.getContainerID()) - - // TODO : Fix this later. If we add these factors some tests fail. 
- // So Commenting this to continue and will enforce this with - // Changes in pipeline where we remove Container Name to - // SCMContainerinfo from Pipeline. - // .append(pipeline.getFactor(), that.pipeline.getFactor()) - // .append(pipeline.getType(), that.pipeline.getType()) - .append(owner, that.owner) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(11, 811) - .append(getContainerID()) - .append(getOwner()) - .toHashCode(); - } - - /** - * Compares its two arguments for order. Returns a negative integer, zero, or - * a positive integer as the first argument is less than, equal to, or greater - * than the second.

- * - * @param o1 the first object to be compared. - * @param o2 the second object to be compared. - * @return a negative integer, zero, or a positive integer as the first - * argument is less than, equal to, or greater than the second. - * @throws NullPointerException if an argument is null and this comparator - * does not permit null arguments - * @throws ClassCastException if the arguments' types prevent them from - * being compared by this comparator. - */ - @Override - public int compare(ContainerInfo o1, ContainerInfo o2) { - return Long.compare(o1.getLastUsed(), o2.getLastUsed()); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less than, - * equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(ContainerInfo o) { - return this.compare(this, o); - } - - /** - * Returns a JSON string of this object. - * - * @return String - json string - * @throws IOException - */ - public String toJsonString() throws IOException { - return WRITER.writeValueAsString(this); - } - - /** - * Returns private data that is set on this containerInfo. - * - * @return blob, the user can interpret it any way they like. - */ - public byte[] getData() { - if (this.data != null) { - return Arrays.copyOf(this.data, this.data.length); - } else { - return null; - } - } - - /** - * Set private data on ContainerInfo object. - * - * @param data -- private data. - */ - public void setData(byte[] data) { - if (data != null) { - this.data = Arrays.copyOf(data, data.length); - } - } - - /** - * Throws IOException as default java serialization is not supported. Use - * serialization via protobuf instead. - * - * @param out the stream to write the object to - * @throws IOException Includes any I/O exceptions that may occur - * @serialData Overriding methods should use this tag to describe - * the data layout of this Externalizable object. - * List the sequence of element types and, if possible, - * relate the element to a public/protected field and/or - * method of this Externalizable class. - */ - @Override - public void writeExternal(ObjectOutput out) throws IOException { - throw new IOException(SERIALIZATION_ERROR_MSG); - } - - /** - * Throws IOException as default java serialization is not supported. Use - * serialization via protobuf instead. - * - * @param in the stream to read data from in order to restore the object - * @throws IOException if I/O errors occur - * @throws ClassNotFoundException If the class for an object being - * restored cannot be found. - */ - @Override - public void readExternal(ObjectInput in) - throws IOException, ClassNotFoundException { - throw new IOException(SERIALIZATION_ERROR_MSG); - } - - /** - * Builder class for ContainerInfo. 
- */ - public static class Builder { - private HddsProtos.LifeCycleState state; - private long used; - private long keys; - private long stateEnterTime; - private String owner; - private long containerID; - private long deleteTransactionId; - private long sequenceId; - private PipelineID pipelineID; - private ReplicationFactor replicationFactor; - private ReplicationType replicationType; - - public Builder setReplicationType( - ReplicationType repType) { - this.replicationType = repType; - return this; - } - - public Builder setPipelineID(PipelineID pipelineId) { - this.pipelineID = pipelineId; - return this; - } - - public Builder setReplicationFactor(ReplicationFactor repFactor) { - this.replicationFactor = repFactor; - return this; - } - - public Builder setContainerID(long id) { - Preconditions.checkState(id >= 0); - this.containerID = id; - return this; - } - - public Builder setState(HddsProtos.LifeCycleState lifeCycleState) { - this.state = lifeCycleState; - return this; - } - - public Builder setUsedBytes(long bytesUsed) { - this.used = bytesUsed; - return this; - } - - public Builder setNumberOfKeys(long keyCount) { - this.keys = keyCount; - return this; - } - - public Builder setStateEnterTime(long time) { - this.stateEnterTime = time; - return this; - } - - public Builder setOwner(String containerOwner) { - this.owner = containerOwner; - return this; - } - - public Builder setDeleteTransactionId(long deleteTransactionID) { - this.deleteTransactionId = deleteTransactionID; - return this; - } - - public Builder setSequenceId(long sequenceID) { - this.sequenceId = sequenceID; - return this; - } - - public ContainerInfo build() { - return new ContainerInfo(containerID, state, pipelineID, - used, keys, stateEnterTime, owner, deleteTransactionId, - sequenceId, replicationFactor, replicationType); - } - } - - /** - * Check if a container is in open state, this will check if the - * container is either open or closing state. Any containers in these states - * is managed as an open container by SCM. - */ - public boolean isOpen() { - return state == HddsProtos.LifeCycleState.OPEN - || state == HddsProtos.LifeCycleState.CLOSING; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java deleted file mode 100644 index 3eebcce8403..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerNotFoundException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -/** - * Signals that a container is missing from ContainerManager. 
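To make the Builder contract above concrete, here is a sketch of constructing an OPEN ContainerInfo; the values are arbitrary and only setters that the Builder actually declares are used.

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
    import org.apache.hadoop.util.Time;

    final class ContainerInfoSketch {
      static ContainerInfo newOpenContainer(long id, PipelineID pipeline, String owner) {
        return new ContainerInfo.Builder()
            .setContainerID(id)                       // Preconditions: id must be >= 0
            .setState(HddsProtos.LifeCycleState.OPEN)
            .setPipelineID(pipeline)
            .setReplicationType(HddsProtos.ReplicationType.RATIS)
            .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
            .setUsedBytes(0)
            .setNumberOfKeys(0)
            .setStateEnterTime(Time.monotonicNow())
            .setOwner(owner)
            .build();
      }
    }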
- */ -public class ContainerNotFoundException extends ContainerException { - - /** - * Constructs an {@code ContainerNotFoundException} with {@code null} - * as its error detail message. - */ - public ContainerNotFoundException() { - super(); - } - - /** - * Constructs an {@code ContainerNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java deleted file mode 100644 index fdbc18b1191..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaNotFoundException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -/** - * Signals that a ContainerReplica is missing from the Container in - * ContainerManager. - */ -public class ContainerReplicaNotFoundException extends ContainerException { - - /** - * Constructs an {@code ContainerReplicaNotFoundException} with {@code null} - * as its error detail message. - */ - public ContainerReplicaNotFoundException() { - super(); - } - - /** - * Constructs an {@code ContainerReplicaNotFoundException} with the - * specified detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public ContainerReplicaNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java deleted file mode 100644 index 7ac0401af11..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - -/** - * Allocated block wraps the result returned from SCM#allocateBlock which - * contains a Pipeline and the key. - */ -public final class AllocatedBlock { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; - - /** - * Builder for AllocatedBlock. - */ - public static class Builder { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; - - public Builder setPipeline(Pipeline p) { - this.pipeline = p; - return this; - } - - public Builder setContainerBlockID(ContainerBlockID blockId) { - this.containerBlockID = blockId; - return this; - } - - public AllocatedBlock build() { - return new AllocatedBlock(pipeline, containerBlockID); - } - } - - private AllocatedBlock(Pipeline pipeline, ContainerBlockID containerBlockID) { - this.pipeline = pipeline; - this.containerBlockID = containerBlockID; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public ContainerBlockID getBlockID() { - return containerBlockID; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java deleted file mode 100644 index 86f5a66cf4c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/BlockNotCommittedException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a block is yet to be committed on the datanode. - */ -public class BlockNotCommittedException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. 
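The Builder shown above is the only way to obtain an AllocatedBlock; a minimal sketch, assuming the pipeline and the (containerID, localID) pair come from an SCM block allocation.

    import org.apache.hadoop.hdds.client.ContainerBlockID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    final class AllocatedBlockSketch {
      static AllocatedBlock wrap(Pipeline pipeline, long containerId, long localId) {
        return new AllocatedBlock.Builder()
            .setPipeline(pipeline)
            // ContainerBlockID(containerID, localID) constructor is assumed here
            .setContainerBlockID(new ContainerBlockID(containerId, localId))
            .build();
      }
    }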
- * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public BlockNotCommittedException(String message) { - super(message, ContainerProtos.Result.BLOCK_NOT_COMMITTED); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java deleted file mode 100644 index 4e406e6e97f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerNotOpenException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a write/update opearation is done on non-open - * container. - */ -public class ContainerNotOpenException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public ContainerNotOpenException(String message) { - super(message, ContainerProtos.Result.CONTAINER_NOT_OPEN); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java deleted file mode 100644 index 5b01bd2c652..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import java.util.Comparator; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException; - -/** - * Class wraps ozone container info. - */ -public class ContainerWithPipeline implements Comparator, - Comparable { - - private final ContainerInfo containerInfo; - private final Pipeline pipeline; - - public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) { - this.containerInfo = containerInfo; - this.pipeline = pipeline; - } - - public ContainerInfo getContainerInfo() { - return containerInfo; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public static ContainerWithPipeline fromProtobuf( - HddsProtos.ContainerWithPipeline allocatedContainer) - throws UnknownPipelineStateException { - return new ContainerWithPipeline( - ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()), - Pipeline.getFromProtobuf(allocatedContainer.getPipeline())); - } - - public HddsProtos.ContainerWithPipeline getProtobuf() - throws UnknownPipelineStateException { - HddsProtos.ContainerWithPipeline.Builder builder = - HddsProtos.ContainerWithPipeline.newBuilder(); - builder.setContainerInfo(getContainerInfo().getProtobuf()) - .setPipeline(getPipeline().getProtobufMessage()); - - return builder.build(); - } - - - @Override - public String toString() { - return containerInfo.toString() + " | " + pipeline.toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerWithPipeline that = (ContainerWithPipeline) o; - - return new EqualsBuilder() - .append(getContainerInfo(), that.getContainerInfo()) - .append(getPipeline(), that.getPipeline()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(11, 811) - .append(getContainerInfo()) - .append(getPipeline()) - .toHashCode(); - } - - /** - * Compares its two arguments for order. Returns a negative integer, zero, or - * a positive integer as the first argument is less than, equal to, or greater - * than the second.

- * - * @param o1 the first object to be compared. - * @param o2 the second object to be compared. - * @return a negative integer, zero, or a positive integer as the first - * argument is less than, equal to, or greater than the second. - * @throws NullPointerException if an argument is null and this comparator - * does not permit null arguments - * @throws ClassCastException if the arguments' types prevent them from - * being compared by this comparator. - */ - @Override - public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) { - return o1.getContainerInfo().compareTo(o2.getContainerInfo()); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less than, - * equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - @Override - public int compareTo(ContainerWithPipeline o) { - return this.compare(this, o); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java deleted file mode 100644 index 5f5aaceb16a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.client.BlockID; - -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult; - -/** - * Class wraps storage container manager block deletion results. - */ -public class DeleteBlockResult { - private BlockID blockID; - private DeleteScmBlockResult.Result result; - - public DeleteBlockResult(final BlockID blockID, - final DeleteScmBlockResult.Result result) { - this.blockID = blockID; - this.result = result; - } - - /** - * Get block id deleted. - * @return block id. - */ - public BlockID getBlockID() { - return blockID; - } - - /** - * Get key deletion result. - * @return key deletion result. 
- */ - public DeleteScmBlockResult.Result getResult() { - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java deleted file mode 100644 index eb215d63a46..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.common.helpers; - - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -import java.util.ArrayList; -import java.util.List; -import java.util.Collection; - -/** - * This class contains set of dns and containers which ozone client provides - * to be handed over to SCM when block allocation request comes. 
- */ -public class ExcludeList { - - private final List datanodes; - private final List containerIds; - private final List pipelineIds; - - - public ExcludeList() { - datanodes = new ArrayList<>(); - containerIds = new ArrayList<>(); - pipelineIds = new ArrayList<>(); - } - - public List getContainerIds() { - return containerIds; - } - - public List getDatanodes() { - return datanodes; - } - - public void addDatanodes(Collection dns) { - datanodes.addAll(dns); - } - - public void addDatanode(DatanodeDetails dn) { - datanodes.add(dn); - } - - public void addConatinerId(ContainerID containerId) { - containerIds.add(containerId); - } - - public void addPipeline(PipelineID pipelineId) { - pipelineIds.add(pipelineId); - } - - public List getPipelineIds() { - return pipelineIds; - } - - public HddsProtos.ExcludeListProto getProtoBuf() { - HddsProtos.ExcludeListProto.Builder builder = - HddsProtos.ExcludeListProto.newBuilder(); - containerIds - .forEach(id -> builder.addContainerIds(id.getId())); - datanodes.forEach(dn -> { - builder.addDatanodes(dn.getUuidString()); - }); - pipelineIds.forEach(pipelineID -> { - builder.addPipelineIds(pipelineID.getProtobuf()); - }); - return builder.build(); - } - - public static ExcludeList getFromProtoBuf( - HddsProtos.ExcludeListProto excludeListProto) { - ExcludeList excludeList = new ExcludeList(); - excludeListProto.getContainerIdsList().forEach(id -> { - excludeList.addConatinerId(ContainerID.valueof(id)); - }); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - excludeListProto.getDatanodesList().forEach(dn -> { - builder.setUuid(dn); - excludeList.addDatanode(builder.build()); - }); - excludeListProto.getPipelineIdsList().forEach(pipelineID -> { - excludeList.addPipeline(PipelineID.getFromProtobuf(pipelineID)); - }); - return excludeList; - } - - public void clear() { - datanodes.clear(); - containerIds.clear(); - pipelineIds.clear(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java deleted file mode 100644 index 1378d1ab70a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/InvalidContainerStateException.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** - * Exceptions thrown when a container is in invalid state while doing a I/O. 
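As a usage note on the ExcludeList just shown: a client that hit a bad container would record it and ship the hint to SCM with the next allocation request, roughly as below. Only methods defined in the class above are used; addConatinerId is the method's actual (misspelled) name.

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;

    final class ExcludeListSketch {
      // Build an exclusion hint for the next allocateBlock call and round-trip it through protobuf.
      static HddsProtos.ExcludeListProto excludeFailedContainer(long failedContainerId) {
        ExcludeList exclude = new ExcludeList();
        exclude.addConatinerId(ContainerID.valueof(failedContainerId));
        return exclude.getProtoBuf();
      }
    }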
- */ -public class InvalidContainerStateException extends StorageContainerException { - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - */ - public InvalidContainerStateException(String message) { - super(message, ContainerProtos.Result.INVALID_CONTAINER_STATE); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java deleted file mode 100644 index f1405fff946..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import java.io.IOException; - -/** - * Exceptions thrown from the Storage Container. - */ -public class StorageContainerException extends IOException { - private ContainerProtos.Result result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public StorageContainerException(ContainerProtos.Result result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - * @param result - The result code - */ - public StorageContainerException(String message, - ContainerProtos.Result result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *

- *

Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the {@link #getMessage()} method) - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * - * @param result - The result code - * @since 1.6 - */ - public StorageContainerException(String message, Throwable cause, - ContainerProtos.Result result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @param result - The result code - * @since 1.6 - */ - public StorageContainerException(Throwable cause, ContainerProtos.Result - result) { - super(cause); - this.result = result; - } - - /** - * Returns Result. - * - * @return Result. - */ - public ContainerProtos.Result getResult() { - return result; - } - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java deleted file mode 100644 index ffe0d3d4d99..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.common.helpers; -/** - Contains protocol buffer helper classes and utilites used in - impl. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java deleted file mode 100644 index d13dcb1f6c4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
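The Result code carried by StorageContainerException (and set by the subclasses above, e.g. BLOCK_NOT_COMMITTED and CONTAINER_NOT_OPEN) is what callers branched on. The classification below is purely an illustrative policy, not code from the removed tree.

    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;

    final class ResultHandlingSketch {
      // Example policy only: treat "not yet committed / not open" as retriable, everything else as fatal.
      static boolean isRetriable(StorageContainerException e) {
        switch (e.getResult()) {
        case BLOCK_NOT_COMMITTED:   // thrown as BlockNotCommittedException
        case CONTAINER_NOT_OPEN:    // thrown as ContainerNotOpenException
          return true;
        default:
          return false;
        }
      }
    }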
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java deleted file mode 100644 index 52ce7964b67..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.io.IOException; -import java.util.List; - -/** - * A ContainerPlacementPolicy support choosing datanodes to build replication - * pipeline with specified constraints. - */ -public interface ContainerPlacementPolicy { - - /** - * Given the replication factor and size required, return set of datanodes - * that satisfy the nodes and size requirement. - * - * @param excludedNodes - list of nodes to be excluded. - * @param favoredNodes - list of nodes preferred. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return list of datanodes chosen. - * @throws IOException - */ - List chooseDatanodes(List excludedNodes, - List favoredNodes, int nodesRequired, long sizeRequired) - throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java deleted file mode 100644 index dac4752fe66..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; -/** - Contains container placement policy interface definition. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java deleted file mode 100644 index db1f82ae411..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
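For readers skimming this hunk, a minimal sketch of what an implementation of the ContainerPlacementPolicy interface above could look like. Everything below is illustrative: the class name, the injected healthyNodes list and the two-pass selection are assumptions, not code from this tree; only the chooseDatanodes signature (the parameters are List<DatanodeDetails> in the original file) and the SCMException result code come from the code being removed.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
    import org.apache.hadoop.hdds.scm.exceptions.SCMException;

    // Illustrative only: a deliberately naive policy. A real policy would also
    // check free space against sizeRequired and consider topology.
    public class FavoredFirstPlacementPolicy implements ContainerPlacementPolicy {

      private final List<DatanodeDetails> healthyNodes; // assumed to be supplied by SCM

      public FavoredFirstPlacementPolicy(List<DatanodeDetails> healthyNodes) {
        this.healthyNodes = healthyNodes;
      }

      @Override
      public List<DatanodeDetails> chooseDatanodes(List<DatanodeDetails> excludedNodes,
          List<DatanodeDetails> favoredNodes, int nodesRequired, long sizeRequired)
          throws IOException {
        List<DatanodeDetails> chosen = new ArrayList<>();
        // Pass 1: honour favoredNodes as long as they are not excluded.
        if (favoredNodes != null) {
          for (DatanodeDetails dn : favoredNodes) {
            if (chosen.size() < nodesRequired
                && (excludedNodes == null || !excludedNodes.contains(dn))) {
              chosen.add(dn);
            }
          }
        }
        // Pass 2: fill the remainder from the healthy pool.
        for (DatanodeDetails dn : healthyNodes) {
          if (chosen.size() == nodesRequired) {
            break;
          }
          if (!chosen.contains(dn)
              && (excludedNodes == null || !excludedNodes.contains(dn))) {
            chosen.add(dn);
          }
        }
        if (chosen.size() < nodesRequired) {
          throw new SCMException("Could not find " + nodesRequired + " datanodes",
              SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
        }
        return chosen;
      }
    }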

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by SCM. - */ -public class SCMException extends IOException { - private final ResultCodes result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public SCMException(ResultCodes result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - */ - public SCMException(String message, ResultCodes result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *

- *

Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(String message, Throwable cause, ResultCodes result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public SCMException(Throwable cause, ResultCodes result) { - super(cause); - this.result = result; - } - - /** - * Returns resultCode. - * @return ResultCode - */ - public ResultCodes getResult() { - return result; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ResultCodes { - OK, - FAILED_TO_LOAD_NODEPOOL, - FAILED_TO_FIND_NODE_IN_POOL, - FAILED_TO_FIND_HEALTHY_NODES, - FAILED_TO_FIND_NODES_WITH_SPACE, - FAILED_TO_FIND_SUITABLE_NODE, - INVALID_CAPACITY, - INVALID_BLOCK_SIZE, - SAFE_MODE_EXCEPTION, - FAILED_TO_LOAD_OPEN_CONTAINER, - FAILED_TO_ALLOCATE_CONTAINER, - FAILED_TO_CHANGE_CONTAINER_STATE, - FAILED_TO_CHANGE_PIPELINE_STATE, - CONTAINER_EXISTS, - FAILED_TO_FIND_CONTAINER, - FAILED_TO_FIND_CONTAINER_WITH_SPACE, - BLOCK_EXISTS, - FAILED_TO_FIND_BLOCK, - IO_EXCEPTION, - UNEXPECTED_CONTAINER_STATE, - SCM_NOT_INITIALIZED, - DUPLICATE_DATANODE, - NO_SUCH_DATANODE, - NO_REPLICA_FOUND, - FAILED_TO_FIND_ACTIVE_PIPELINE, - FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY, - FAILED_TO_ALLOCATE_ENOUGH_BLOCKS, - INTERNAL_ERROR - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java deleted file mode 100644 index 721a32b48e2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; -/** - Exception objects for the SCM Server. 
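The ResultCodes above let callers branch on the failure reason instead of parsing exception messages. A small hypothetical sketch of that pattern follows; allocateContainer is a stand-in for any SCM-side call, and which codes a caller treats as retryable is an assumption, not something this hunk prescribes.

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.exceptions.SCMException;

    public final class SCMExceptionHandlingExample {

      static void allocateOrRetry() throws IOException {
        try {
          allocateContainer();
        } catch (SCMException e) {
          switch (e.getResult()) {
          case FAILED_TO_FIND_HEALTHY_NODES:
          case FAILED_TO_FIND_NODES_WITH_SPACE:
            // Transient cluster condition: a caller might retry later.
            System.err.println("Retryable failure: " + e.getMessage());
            break;
          case SAFE_MODE_EXCEPTION:
            // SCM is still in safe mode; writes are rejected until it leaves.
            System.err.println("SCM in safe mode: " + e.getMessage());
            break;
          default:
            // Everything else is treated as a hard failure.
            throw e;
          }
        }
      }

      // Stand-in for a real SCM call; always fails in this sketch.
      private static void allocateContainer() throws SCMException {
        throw new SCMException("no datanodes registered yet",
            SCMException.ResultCodes.FAILED_TO_FIND_HEALTHY_NODES);
      }

      public static void main(String[] args) throws IOException {
        allocateOrRetry();
      }
    }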
- */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java deleted file mode 100644 index 6cf73bf5480..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.List; - -/** - * The interface defines an inner node in a network topology. - * An inner node represents network topology entities, such as data center, - * rack, switch or logical group. - */ -public interface InnerNode extends Node { - /** A factory interface to get new InnerNode instance. */ - interface Factory { - /** Construct an InnerNode from name, location, parent, level and cost. */ - N newInnerNode(String name, String location, InnerNode parent, int level, - int cost); - } - - /** - * Add node n to the subtree of this node. - * @param n node to be added - * @return true if the node is added; false otherwise - */ - boolean add(Node n); - - /** - * Remove node n from the subtree of this node. - * @param n node to be deleted - */ - void remove(Node n); - - /** - * Given a node's string representation, return a reference to the node. - * @param loc string location of the format /dc/rack/nodegroup/node - * @return null if the node is not found - */ - Node getNode(String loc); - - /** - * @return number of its all nodes at level level. Here level is a - * relative level. If level is 1, means node itself. If level is 2, means its - * direct children, and so on. - **/ - int getNumOfNodes(int level); - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex an indexed leaf of the node - * @return the leaf node corresponding to the given index. - */ - Node getLeaf(int leafIndex); - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex ode's index, start from 0, skip the nodes in - * excludedScope and excludedNodes with ancestorGen - * @param excludedScopes the excluded scopes - * @param excludedNodes nodes to be excluded. 
If ancestorGen is not 0, - * the chosen node will not share same ancestor with - * those in excluded nodes at the specified generation - * @param ancestorGen ignored with value is 0 - * @return the leaf node corresponding to the given index - */ - Node getLeaf(int leafIndex, List excludedScopes, - Collection excludedNodes, int ancestorGen); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java deleted file mode 100644 index f2183fc9823..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR; - -/** - * A thread safe class that implements InnerNode interface. - */ -public class InnerNodeImpl extends NodeImpl implements InnerNode { - protected static class Factory implements InnerNode.Factory { - protected Factory() {} - - public InnerNodeImpl newInnerNode(String name, String location, - InnerNode parent, int level, int cost) { - return new InnerNodeImpl(name, location, parent, level, cost); - } - } - - static final Factory FACTORY = new Factory(); - // a map of node's network name to Node for quick search and keep - // the insert order - private final HashMap childrenMap = - new LinkedHashMap(); - // number of descendant leaves under this node - private int numOfLeaves; - // LOGGER - public static final Logger LOG = LoggerFactory.getLogger(InnerNodeImpl.class); - - /** - * Construct an InnerNode from its name, network location, parent, level and - * its cost. - **/ - protected InnerNodeImpl(String name, String location, InnerNode parent, - int level, int cost) { - super(name, location, parent, level, cost); - } - - /** @return the number of children this node has */ - private int getNumOfChildren() { - return childrenMap.size(); - } - - /** @return its leaf nodes number */ - @Override - public int getNumOfLeaves() { - return numOfLeaves; - } - - /** - * @return number of its all nodes at level level. Here level is a - * relative level. If level is 1, means node itself. If level is 2, means its - * direct children, and so on. 
- **/ - public int getNumOfNodes(int level) { - Preconditions.checkArgument(level > 0); - int count = 0; - if (level == 1) { - count += 1; - } else if (level == 2) { - count += getNumOfChildren(); - } else { - for (Node node: childrenMap.values()) { - if (node instanceof InnerNode) { - count += ((InnerNode)node).getNumOfNodes(level -1); - } else { - throw new RuntimeException("Cannot support Level:" + level + - " on this node " + this.toString()); - } - } - } - return count; - } - - /** - * Judge if this node is the parent of a leave node n. - * @return true if this node is the parent of n - */ - private boolean isLeafParent() { - if (childrenMap.isEmpty()) { - return true; - } - Node child = childrenMap.values().iterator().next(); - return child instanceof InnerNode ? false : true; - } - - /** - * Judge if this node is the parent of node node. - * @param node a node - * @return true if this node is the parent of n - */ - private boolean isParent(Node node) { - return node.getNetworkLocation().equals(this.getNetworkFullPath()); - } - - /** - * Add node node to the subtree of this node. - * @param node node to be added - * @return true if the node is added, false if is only updated - */ - public boolean add(Node node) { - if (!isAncestor(node)) { - throw new IllegalArgumentException(node.getNetworkName() - + ", which is located at " + node.getNetworkLocation() - + ", is not a descendant of " + this.getNetworkFullPath()); - } - if (isParent(node)) { - // this node is the parent, then add it directly - node.setParent(this); - node.setLevel(this.getLevel() + 1); - Node current = childrenMap.put(node.getNetworkName(), node); - if (current != null) { - return false; - } - } else { - // find the next level ancestor node - String ancestorName = getNextLevelAncestorName(node); - InnerNode childNode = (InnerNode)childrenMap.get(ancestorName); - if (childNode == null) { - // create a new InnerNode for this ancestor node - childNode = createChildNode(ancestorName); - childrenMap.put(childNode.getNetworkName(), childNode); - } - // add node to the subtree of the next ancestor node - if (!childNode.add(node)) { - return false; - } - } - numOfLeaves++; - return true; - } - - /** - * Remove node node from the subtree of this node. - * @param node node to be deleted - */ - public void remove(Node node) { - if (!isAncestor(node)) { - throw new IllegalArgumentException(node.getNetworkName() - + ", which is located at " + node.getNetworkLocation() - + ", is not a descendant of " + this.getNetworkFullPath()); - } - if (isParent(node)) { - // this node is the parent, remove it directly - if (childrenMap.containsKey(node.getNetworkName())) { - childrenMap.remove(node.getNetworkName()); - node.setParent(null); - } else { - throw new RuntimeException("Should not come to here. Node:" + - node.getNetworkFullPath() + ", Parent:" + - this.getNetworkFullPath()); - } - } else { - // find the next ancestor node - String ancestorName = getNextLevelAncestorName(node); - InnerNodeImpl childNode = (InnerNodeImpl)childrenMap.get(ancestorName); - Preconditions.checkNotNull(childNode, "InnerNode is deleted before leaf"); - // remove node from the parent node - childNode.remove(node); - // if the parent node has no children, remove the parent node too - if (childNode.getNumOfChildren() == 0) { - childrenMap.remove(ancestorName); - } - } - numOfLeaves--; - } - - /** - * Given a node's string representation, return a reference to the node. - * Node can be leaf node or inner node. - * - * @param loc string location of a node. 
If loc starts with "/", it's a - * absolute path, otherwise a relative path. Following examples - * are all accepted, - * 1. /dc1/rm1/rack1 -> an inner node - * 2. /dc1/rm1/rack1/node1 -> a leaf node - * 3. rack1/node1 -> a relative path to this node - * - * @return null if the node is not found - */ - public Node getNode(String loc) { - if (loc == null) { - return null; - } - - String fullPath = this.getNetworkFullPath(); - if (loc.equalsIgnoreCase(fullPath)) { - return this; - } - - // remove current node's location from loc when it's a absolute path - if (fullPath.equals(NetConstants.PATH_SEPARATOR_STR)) { - // current node is ROOT - if (loc.startsWith(PATH_SEPARATOR_STR)) { - loc = loc.substring(1); - } - } else if (loc.startsWith(fullPath)) { - loc = loc.substring(fullPath.length()); - // skip the separator "/" - loc = loc.substring(1); - } - - String[] path = loc.split(PATH_SEPARATOR_STR, 2); - Node child = childrenMap.get(path[0]); - if (child == null) { - return null; - } - if (path.length == 1){ - return child; - } - if (child instanceof InnerNode) { - return ((InnerNode)child).getNode(path[1]); - } else { - return null; - } - } - - /** - * get leafIndex leaf of this subtree. - * - * @param leafIndex an indexed leaf of the node - * @return the leaf node corresponding to the given index. - */ - public Node getLeaf(int leafIndex) { - Preconditions.checkArgument(leafIndex >= 0); - // children are leaves - if (isLeafParent()) { - // range check - if (leafIndex >= getNumOfChildren()) { - return null; - } - return getChildNode(leafIndex); - } else { - for(Node node : childrenMap.values()) { - InnerNodeImpl child = (InnerNodeImpl)node; - int leafCount = child.getNumOfLeaves(); - if (leafIndex < leafCount) { - return child.getLeaf(leafIndex); - } else { - leafIndex -= leafCount; - } - } - return null; - } - } - - /** - * Get leafIndex leaf of this subtree. - * - * @param leafIndex node's index, start from 0, skip the nodes in - * excludedScope and excludedNodes with ancestorGen - * @param excludedScopes the exclude scopes - * @param excludedNodes nodes to be excluded from. If ancestorGen is not 0, - * the chosen node will not share same ancestor with - * those in excluded nodes at the specified generation - * @param ancestorGen apply to excludeNodes, when value is 0, then no same - * ancestor enforcement on excludedNodes - * @return the leaf node corresponding to the given index. - * Example: - * - * / --- root - * / \ - * / \ - * / \ - * / \ - * dc1 dc2 - * / \ / \ - * / \ / \ - * / \ / \ - * rack1 rack2 rack1 rack2 - * / \ / \ / \ / \ - * n1 n2 n3 n4 n5 n6 n7 n8 - * - * Input: - * leafIndex = 2 - * excludedScope = /dc2/rack2 - * excludedNodes = {/dc1/rack1/n1} - * ancestorGen = 1 - * - * Output: - * node /dc1/rack2/n5 - * - * Explanation: - * Since excludedNodes is n1 and ancestorGen is 1, it means nodes under - * /root/dc1/rack1 are excluded. Given leafIndex start from 0, LeafIndex 2 - * means picking the 3th available node, which is n5. 
- * - */ - public Node getLeaf(int leafIndex, List excludedScopes, - Collection excludedNodes, int ancestorGen) { - Preconditions.checkArgument(leafIndex >= 0 && ancestorGen >= 0); - // come to leaf parent layer - if (isLeafParent()) { - return getLeafOnLeafParent(leafIndex, excludedScopes, excludedNodes); - } - - int maxLevel = NodeSchemaManager.getInstance().getMaxLevel(); - // this node's children, it's generation as the ancestor of the leaf node - int currentGen = maxLevel - this.getLevel() - 1; - // build an ancestor(children) to exclude node count map - Map countMap = - getAncestorCountMap(excludedNodes, ancestorGen, currentGen); - // nodes covered by excluded scope - Map excludedNodeCount = - getExcludedScopeNodeCount(excludedScopes); - - for (Node child : childrenMap.values()) { - int leafCount = child.getNumOfLeaves(); - // skip nodes covered by excluded scopes - for (Map.Entry entry: excludedNodeCount.entrySet()) { - if (entry.getKey().startsWith(child.getNetworkFullPath())) { - leafCount -= entry.getValue(); - } - } - // skip nodes covered by excluded nodes and ancestorGen - Integer count = countMap.get(child); - if (count != null) { - leafCount -= count; - } - if (leafIndex < leafCount) { - return ((InnerNode)child).getLeaf(leafIndex, excludedScopes, - excludedNodes, ancestorGen); - } else { - leafIndex -= leafCount; - } - } - return null; - } - - @Override - public boolean equals(Object to) { - if (to == null) { - return false; - } - if (this == to) { - return true; - } - return this.toString().equals(to.toString()); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - /** - * Get a ancestor to its excluded node count map. - * - * @param nodes a collection of leaf nodes to exclude - * @param genToExclude the ancestor generation to exclude - * @param genToReturn the ancestor generation to return the count map - * @return the map. - * example: - * - * * --- root - * / \ - * * * -- genToReturn =2 - * / \ / \ - * * * * * -- genToExclude = 1 - * /\ /\ /\ /\ - * * * * * * * * * -- nodes - */ - private Map getAncestorCountMap(Collection nodes, - int genToExclude, int genToReturn) { - Preconditions.checkState(genToExclude >= 0); - Preconditions.checkState(genToReturn >= 0); - - if (nodes == null || nodes.size() == 0) { - return Collections.emptyMap(); - } - // with the recursive call, genToReturn can be smaller than genToExclude - if (genToReturn < genToExclude) { - genToExclude = genToReturn; - } - // ancestorToExclude to ancestorToReturn map - HashMap ancestorMap = new HashMap<>(); - for (Node node: nodes) { - Node ancestorToExclude = node.getAncestor(genToExclude); - Node ancestorToReturn = node.getAncestor(genToReturn); - if (ancestorToExclude == null || ancestorToReturn == null) { - LOG.warn("Ancestor not found, node: " + node.getNetworkFullPath() + - ", generation to exclude: " + genToExclude + - ", generation to return:" + genToReturn); - continue; - } - ancestorMap.put(ancestorToExclude, ancestorToReturn); - } - // ancestorToReturn to exclude node count map - HashMap countMap = new HashMap<>(); - for (Map.Entry entry : ancestorMap.entrySet()) { - countMap.compute(entry.getValue(), - (key, n) -> (n == null ? 0 : n) + entry.getKey().getNumOfLeaves()); - } - - return countMap; - } - - /** - * Get the node with leafIndex, considering skip nodes in excludedScope - * and in excludeNodes list. 
- */ - private Node getLeafOnLeafParent(int leafIndex, List excludedScopes, - Collection excludedNodes) { - Preconditions.checkArgument(isLeafParent() && leafIndex >= 0); - if (leafIndex >= getNumOfChildren()) { - return null; - } - for(Node node : childrenMap.values()) { - if (excludedNodes != null && excludedNodes.contains(node)) { - continue; - } - if (excludedScopes != null && excludedScopes.size() > 0) { - if (excludedScopes.stream().anyMatch(scope -> - node.getNetworkFullPath().startsWith(scope))) { - continue; - } - } - if (leafIndex == 0) { - return node; - } - leafIndex--; - } - return null; - } - - /** - * Return child's name of this node which is an ancestor of node n. - */ - private String getNextLevelAncestorName(Node n) { - int parentPathLen = this.getNetworkFullPath().length(); - String name = n.getNetworkLocation().substring(parentPathLen); - if (name.charAt(0) == PATH_SEPARATOR) { - name = name.substring(1); - } - int index = name.indexOf(PATH_SEPARATOR); - if (index != -1) { - name = name.substring(0, index); - } - return name; - } - - /** - * Creates a child node to be added to the list of children. - * @param name The name of the child node - * @return A new inner node - * @see InnerNodeImpl(String, String, InnerNode, int) - */ - private InnerNodeImpl createChildNode(String name) { - int childLevel = this.getLevel() + 1; - int cost = NodeSchemaManager.getInstance().getCost(childLevel); - return new InnerNodeImpl(name, this.getNetworkFullPath(), this, childLevel, - cost); - } - - /** Get node with index index. */ - private Node getChildNode(int index) { - Iterator iterator = childrenMap.values().iterator(); - Node node = null; - while(index >= 0 && iterator.hasNext()) { - node = (Node)iterator.next(); - index--; - } - return node; - } - - /** Get how many leaf nodes are covered by the excludedScopes(no overlap). */ - private Map getExcludedScopeNodeCount( - List excludedScopes) { - HashMap nodeCounts = new HashMap<>(); - if (excludedScopes == null || excludedScopes.isEmpty()) { - return nodeCounts; - } - - for (String scope: excludedScopes) { - Node excludedScopeNode = getNode(scope); - nodeCounts.put(scope, excludedScopeNode == null ? 0 : - excludedScopeNode.getNumOfLeaves()); - } - return nodeCounts; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java deleted file mode 100644 index 0e1b0769446..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetConstants.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType; - -/** - * Class to hold network topology related constants and configurations. - */ -public final class NetConstants { - private NetConstants() { - // Prevent instantiation - } - public final static char PATH_SEPARATOR = '/'; - /** Path separator as a string. */ - public final static String PATH_SEPARATOR_STR = "/"; - public final static String SCOPE_REVERSE_STR = "~"; - /** string representation of root. */ - public final static String ROOT = ""; - public final static int INNER_NODE_COST_DEFAULT = 1; - public final static int NODE_COST_DEFAULT = 0; - public final static int ANCESTOR_GENERATION_DEFAULT = 0; - public final static int ROOT_LEVEL = 1; - public final static String NODE_COST_PREFIX = "$"; - public final static String DEFAULT_RACK = "/default-rack"; - public final static String DEFAULT_NODEGROUP = "/default-nodegroup"; - public final static String DEFAULT_DATACENTER = "/default-datacenter"; - public final static String DEFAULT_REGION = "/default-dataregion"; - - // Build-in network topology node schema - public static final NodeSchema ROOT_SCHEMA = - new NodeSchema.Builder().setType(LayerType.ROOT).build(); - - public static final NodeSchema REGION_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_REGION).build(); - - public static final NodeSchema DATACENTER_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_DATACENTER).build(); - - public static final NodeSchema RACK_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_RACK).build(); - - public static final NodeSchema NODEGROUP_SCHEMA = - new NodeSchema.Builder().setType(LayerType.INNER_NODE) - .setDefaultName(DEFAULT_NODEGROUP).build(); - - public static final NodeSchema LEAF_SCHEMA = - new NodeSchema.Builder().setType(LayerType.LEAF_NODE).build(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java deleted file mode 100644 index 4019b1305f6..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.commons.collections.CollectionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Utility class to facilitate network topology functions. 
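The NetConstants values above are the vocabulary every network location string is built from. A trivial sketch of how they compose (the datanode name is made up; treating DEFAULT_RACK as the fallback location is the conventional use of that constant, not something this hunk shows):

    import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK;
    import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
    import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;

    public final class LocationStringExample {
      public static void main(String[] args) {
        // The root scope is the empty string, not "/".
        System.out.println("root scope = '" + ROOT + "'");
        // A datanode placed on the fallback rack.
        String fullPath = DEFAULT_RACK + PATH_SEPARATOR_STR + "datanode-1";
        System.out.println(fullPath); // /default-rack/datanode-1
      }
    }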
- */ -public final class NetUtils { - public static final Logger LOG = LoggerFactory.getLogger(NetUtils.class); - private NetUtils() { - // Prevent instantiation - } - /** - * Normalize a path by stripping off any trailing. - * {@link NetConstants#PATH_SEPARATOR} - * @param path path to normalize. - * @return the normalised path - * If pathis empty or null, then {@link NetConstants#ROOT} is returned - */ - public static String normalize(String path) { - if (path == null || path.length() == 0) { - return NetConstants.ROOT; - } - - if (path.charAt(0) != NetConstants.PATH_SEPARATOR) { - throw new IllegalArgumentException( - "Network Location path does not start with " - + NetConstants.PATH_SEPARATOR_STR + ": " + path); - } - - // Remove any trailing NetConstants.PATH_SEPARATOR - return path.length() == 1 ? path : - path.replaceAll(NetConstants.PATH_SEPARATOR_STR + "+$", ""); - } - - /** - * Given a network topology location string, return its network topology - * depth, E.g. the depth of /dc1/rack1/ng1/node1 is 5. - */ - public static int locationToDepth(String location) { - String newLocation = normalize(location); - return newLocation.equals(NetConstants.PATH_SEPARATOR_STR) ? 1 : - newLocation.split(NetConstants.PATH_SEPARATOR_STR).length; - } - - - /** - * Remove node from mutableExcludedNodes if it's covered by excludedScope. - * Please noted that mutableExcludedNodes content might be changed after the - * function call. - */ - public static void removeDuplicate(NetworkTopology topology, - Collection mutableExcludedNodes, List mutableExcludedScopes, - int ancestorGen) { - if (CollectionUtils.isEmpty(mutableExcludedNodes) || - CollectionUtils.isEmpty(mutableExcludedScopes) || topology == null) { - return; - } - - Iterator iterator = mutableExcludedNodes.iterator(); - while (iterator.hasNext() && (!mutableExcludedScopes.isEmpty())) { - Node node = iterator.next(); - Node ancestor = topology.getAncestor(node, ancestorGen); - if (ancestor == null) { - LOG.warn("Fail to get ancestor generation " + ancestorGen + - " of node :" + node); - continue; - } - // excludedScope is child of ancestor - List duplicateList = mutableExcludedScopes.stream() - .filter(scope -> scope.startsWith(ancestor.getNetworkFullPath())) - .collect(Collectors.toList()); - mutableExcludedScopes.removeAll(duplicateList); - - // ancestor is covered by excludedScope - mutableExcludedScopes.stream().forEach(scope -> { - if (ancestor.getNetworkFullPath().startsWith(scope)) { - // remove exclude node if it's covered by excludedScope - iterator.remove(); - } - }); - } - } - - /** - * Remove node from mutableExcludedNodes if it's not part of scope - * Please noted that mutableExcludedNodes content might be changed after the - * function call. - */ - public static void removeOutscope(Collection mutableExcludedNodes, - String scope) { - if (CollectionUtils.isEmpty(mutableExcludedNodes) || scope == null) { - return; - } - synchronized (mutableExcludedNodes) { - Iterator iterator = mutableExcludedNodes.iterator(); - while (iterator.hasNext()) { - Node next = iterator.next(); - if (!next.getNetworkFullPath().startsWith(scope)) { - iterator.remove(); - } - } - } - } - - /** - * Get a ancestor list for nodes on generation generation. - * - * @param nodes a collection of leaf nodes - * @param generation the ancestor generation - * @return the ancestor list. If no ancestor is found, then a empty list is - * returned. 
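The two path helpers above (normalize and locationToDepth) are easiest to read from their expected outputs. A tiny sketch whose values mirror the javadoc; run with -ea so the asserts fire:

    import org.apache.hadoop.hdds.scm.net.NetUtils;

    public final class NetUtilsExample {
      public static void main(String[] args) {
        // Trailing separators are stripped.
        assert NetUtils.normalize("/dc1/rack1/").equals("/dc1/rack1");
        // Null or empty input normalizes to the root scope "".
        assert NetUtils.normalize(null).isEmpty();
        // Depth counts every level including the implicit root.
        assert NetUtils.locationToDepth("/dc1/rack1/ng1/node1") == 5;
        assert NetUtils.locationToDepth("/") == 1;
        // A location that does not start with "/" is rejected.
        try {
          NetUtils.normalize("dc1/rack1");
          assert false : "expected IllegalArgumentException";
        } catch (IllegalArgumentException expected) {
          // thrown because the path must start with the path separator
        }
      }
    }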
- */ - public static List getAncestorList(NetworkTopology topology, - Collection nodes, int generation) { - List ancestorList = new ArrayList<>(); - if (topology == null || CollectionUtils.isEmpty(nodes) || - generation == 0) { - return ancestorList; - } - Iterator iterator = nodes.iterator(); - while (iterator.hasNext()) { - Node node = iterator.next(); - Node ancestor = topology.getAncestor(node, generation); - if (ancestor == null) { - LOG.warn("Fail to get ancestor generation " + generation + - " of node :" + node); - continue; - } - if (!ancestorList.contains(ancestor)) { - ancestorList.add(ancestor); - } - } - return ancestorList; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java deleted file mode 100644 index 3a2c7c0f1a5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java +++ /dev/null @@ -1,229 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import java.util.Collection; -import java.util.List; - -/** - * The interface defines a network topology. - */ -public interface NetworkTopology { - /** Exception for invalid network topology detection. */ - class InvalidTopologyException extends RuntimeException { - private static final long serialVersionUID = 1L; - public InvalidTopologyException(String msg) { - super(msg); - } - } - /** - * Add a leaf node. This will be called when a new datanode is added. - * @param node node to be added; can be null - * @exception IllegalArgumentException if add a node to a leave or node to be - * added is not a leaf - */ - void add(Node node); - - /** - * Remove a node from the network topology. This will be called when a - * existing datanode is removed from the system. - * @param node node to be removed; cannot be null - */ - void remove(Node node); - - /** - * Check if the tree already contains node node. - * @param node a node - * @return true if node is already in the tree; false otherwise - */ - boolean contains(Node node); - - /** - * Compare the direct parent of each node for equality. - * @return true if their parent are the same - */ - boolean isSameParent(Node node1, Node node2); - - /** - * Compare the specified ancestor generation of each node for equality. - * ancestorGen 1 means parent. - * @return true if their specified generation ancestor are equal - */ - boolean isSameAncestor(Node node1, Node node2, int ancestorGen); - - /** - * Get the ancestor for node on generation ancestorGen. - * - * @param node the node to get ancestor - * @param ancestorGen the ancestor generation - * @return the ancestor. 
If no ancestor is found, then null is returned. - */ - Node getAncestor(Node node, int ancestorGen); - - /** - * Return the max level of this topology, start from 1 for ROOT. For example, - * topology like "/rack/node" has the max level '3'. - */ - int getMaxLevel(); - - /** - * Given a string representation of a node, return its reference. - * @param loc a path string representing a node, can be leaf or inner node - * @return a reference to the node; null if the node is not in the tree - */ - Node getNode(String loc); - - /** - * Given a string representation of a InnerNode, return its leaf nodes count. - * @param loc a path-like string representation of a InnerNode - * @return the number of leaf nodes, 0 if it's not an InnerNode or the node - * doesn't exist - */ - int getNumOfLeafNode(String loc); - - /** - * Return the node numbers at level level. - * @param level topology level, start from 1, which means ROOT - * @return the number of nodes on the level - */ - int getNumOfNodes(int level); - - /** - * Randomly choose a node in the scope. - * @param scope range of nodes from which a node will be chosen. If scope - * starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose one from scope. - * @return the chosen node - */ - Node chooseRandom(String scope); - - /** - * Randomly choose a node in the scope, ano not in the exclude scope. - * @param scope range of nodes from which a node will be chosen. cannot start - * with ~ - * @param excludedScopes the chosen nodes cannot be in these ranges. cannot - * starts with ~ - * @return the chosen node - */ - Node chooseRandom(String scope, List excludedScopes); - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded - * - * @return the chosen node - */ - Node chooseRandom(String scope, Collection excludedNodes); - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded from. - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. - * - * @return the chosen node - */ - Node chooseRandom(String scope, Collection excludedNodes, - int ancestorGen); - - /** - * Randomly choose one node from scope, share the same generation - * ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. 
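Putting the selection rules above together, a hypothetical call site is sketched below. The topology is assumed to be an already-populated implementation containing leaves /rack1/dn1, /rack1/dn2, /rack2/dn3 and /rack2/dn4 (datanode registration is not shown); in the original signatures the excluded-node parameters are Collection<Node> and the excluded-scope parameters List<String>.

    import java.util.Arrays;
    import java.util.Collections;

    import org.apache.hadoop.hdds.scm.net.NetworkTopology;
    import org.apache.hadoop.hdds.scm.net.Node;

    public final class TopologySelectionExample {
      static void pickNodes(NetworkTopology topology) {
        // Any leaf in the cluster (null scope means the whole tree).
        Node any = topology.chooseRandom(null);

        // A leading "~" inverts the scope: any leaf outside /rack1.
        Node outsideRack1 = topology.chooseRandom("~/rack1");

        // A leaf inside /rack2 that is not dn3.
        Node dn3 = topology.getNode("/rack2/dn3");
        Node otherInRack2 =
            topology.chooseRandom("/rack2", Collections.singletonList(dn3));

        // A leaf that does not share a parent with dn3 (ancestorGen = 1),
        // i.e. anything outside /rack2 in this layout.
        Node differentRack = topology.chooseRandom(null, Arrays.asList(dn3), 1);

        System.out.println(any + " " + outsideRack1 + " "
            + otherInRack2 + " " + differentRack);
      }
    }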
- * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - Node chooseRandom(String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen); - - /** - * Choose the node at index index from scope, share the same - * generation ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param leafIndex node index, exclude nodes in excludedScope and - * excludedNodes - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - Node getNode(int leafIndex, String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen); - - /** Return the distance cost between two nodes - * The distance cost from one node to its parent is it's parent's cost - * The distance cost between two nodes is calculated by summing up their - * distances cost to their closest common ancestor. - * @param node1 one node - * @param node2 another node - * @return the distance cost between node1 and node2 which is zero if they - * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong - * to the cluster - */ - int getDistanceCost(Node node1, Node node2); - - /** - * Sort nodes array by network distance to reader to reduces network - * traffic and improves performance. - * - * As an additional twist, we also randomize the nodes at each network - * distance. This helps with load balancing when there is data skew. - * - * @param reader Node where need the data - * @param nodes Available replicas with the requested data - * @param activeLen Number of active nodes at the front of the array - */ - List sortByDistanceCost(Node reader, - List nodes, int activeLen); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java deleted file mode 100644 index 579e5f71c79..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ /dev/null @@ -1,798 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.commons.collections.CollectionUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.TreeMap; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.SCOPE_REVERSE_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DEFAULT; - -/** - * The class represents a cluster of computers with a tree hierarchical - * network topology. In the network topology, leaves represent data nodes - * (computers) and inner nodes represent datacenter/core-switches/routers that - * manages traffic in/out of data centers or racks. - */ -public class NetworkTopologyImpl implements NetworkTopology{ - public static final Logger LOG = - LoggerFactory.getLogger(NetworkTopology.class); - - /** The Inner node crate factory. */ - private final InnerNode.Factory factory; - /** The root cluster tree. */ - private final InnerNode clusterTree; - /** Depth of all leaf nodes. */ - private final int maxLevel; - /** Schema manager. */ - private final NodeSchemaManager schemaManager; - /** Lock to coordinate cluster tree access. */ - private ReadWriteLock netlock = new ReentrantReadWriteLock(true); - - public NetworkTopologyImpl(Configuration conf) { - schemaManager = NodeSchemaManager.getInstance(); - schemaManager.init(conf); - maxLevel = schemaManager.getMaxLevel(); - factory = InnerNodeImpl.FACTORY; - clusterTree = factory.newInnerNode(ROOT, null, null, - NetConstants.ROOT_LEVEL, - schemaManager.getCost(NetConstants.ROOT_LEVEL)); - } - - @VisibleForTesting - public NetworkTopologyImpl(NodeSchemaManager manager) { - schemaManager = manager; - maxLevel = schemaManager.getMaxLevel(); - factory = InnerNodeImpl.FACTORY; - clusterTree = factory.newInnerNode(ROOT, null, null, - NetConstants.ROOT_LEVEL, - schemaManager.getCost(NetConstants.ROOT_LEVEL)); - } - - /** - * Add a leaf node. This will be called when a new datanode is added. 
- * @param node node to be added; can be null - * @exception IllegalArgumentException if add a node to a leave or node to be - * added is not a leaf - */ - public void add(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - if (node instanceof InnerNode) { - throw new IllegalArgumentException( - "Not allowed to add an inner node: "+ node.getNetworkFullPath()); - } - int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1; - - // Check depth - if (maxLevel != newDepth) { - throw new InvalidTopologyException("Failed to add " + - node.getNetworkFullPath() + ": Its path depth is not " + maxLevel); - } - netlock.writeLock().lock(); - boolean add; - try { - add = clusterTree.add(node); - }finally { - netlock.writeLock().unlock(); - } - - if (add) { - LOG.info("Added a new node: " + node.getNetworkFullPath()); - if (LOG.isDebugEnabled()) { - LOG.debug("NetworkTopology became:\n{}", this); - } - } - } - - /** - * Remove a node from the network topology. This will be called when a - * existing datanode is removed from the system. - * @param node node to be removed; cannot be null - */ - public void remove(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - if (node instanceof InnerNode) { - throw new IllegalArgumentException( - "Not allowed to remove an inner node: "+ node.getNetworkFullPath()); - } - netlock.writeLock().lock(); - try { - clusterTree.remove(node); - }finally { - netlock.writeLock().unlock(); - } - LOG.info("Removed a node: " + node.getNetworkFullPath()); - if (LOG.isDebugEnabled()) { - LOG.debug("NetworkTopology became:\n{}", this); - } - } - - /** - * Check if the tree already contains node node. - * @param node a node - * @return true if node is already in the tree; false otherwise - */ - public boolean contains(Node node) { - Preconditions.checkArgument(node != null, "node cannot be null"); - netlock.readLock().lock(); - try { - Node parent = node.getParent(); - while (parent != null && parent != clusterTree) { - parent = parent.getParent(); - } - if (parent == clusterTree) { - return true; - } - } finally { - netlock.readLock().unlock(); - } - return false; - } - - /** - * Compare the specified ancestor generation of each node for equality. - * @return true if their specified generation ancestor are equal - */ - public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) { - if (node1 == null || node2 == null || ancestorGen <= 0) { - return false; - } - netlock.readLock().lock(); - try { - return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Compare the direct parent of each node for equality. - * @return true if their parent are the same - */ - public boolean isSameParent(Node node1, Node node2) { - if (node1 == null || node2 == null) { - return false; - } - netlock.readLock().lock(); - try { - node1 = node1.getParent(); - node2 = node2.getParent(); - return node1 == node2; - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Get the ancestor for node on generation ancestorGen. - * - * @param node the node to get ancestor - * @param ancestorGen the ancestor generation - * @return the ancestor. If no ancestor is found, then null is returned. 
- */ - public Node getAncestor(Node node, int ancestorGen) { - if (node == null) { - return null; - } - netlock.readLock().lock(); - try { - return node.getAncestor(ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Given a string representation of a node(leaf or inner), return its - * reference. - * @param loc a path string representing a node, can be leaf or inner node - * @return a reference to the node, null if the node is not in the tree - */ - public Node getNode(String loc) { - loc = NetUtils.normalize(loc); - netlock.readLock().lock(); - try { - if (!ROOT.equals(loc)) { - return clusterTree.getNode(loc); - } else { - return clusterTree; - } - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Given a string representation of Node, return its leaf nodes count. - * @param loc a path-like string representation of Node - * @return the number of leaf nodes for InnerNode, 1 for leaf node, 0 if node - * doesn't exist - */ - public int getNumOfLeafNode(String loc) { - netlock.readLock().lock(); - try { - Node node = getNode(loc); - if (node != null) { - return node.getNumOfLeaves(); - } - } finally { - netlock.readLock().unlock(); - } - return 0; - } - - /** - * Return the max level of this tree, start from 1 for ROOT. For example, - * topology like "/rack/node" has the max level '3'. - */ - public int getMaxLevel() { - return maxLevel; - } - - /** - * Return the node numbers at level level. - * @param level topology level, start from 1, which means ROOT - * @return the number of nodes on the level - */ - public int getNumOfNodes(int level) { - Preconditions.checkArgument(level > 0 && level <= maxLevel, - "Invalid level"); - netlock.readLock().lock(); - try { - return clusterTree.getNumOfNodes(level); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Randomly choose a node in the scope. - * @param scope range of nodes from which a node will be chosen. If scope - * starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose one from scope. - * @return the chosen node - */ - public Node chooseRandom(String scope) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, null, null, - ANCESTOR_GENERATION_DEFAULT); - } else { - return chooseRandom(scope, null, null, null, ANCESTOR_GENERATION_DEFAULT); - } - } - - /** - * Randomly choose a node in the scope, ano not in the exclude scope. - * @param scope range of nodes from which a node will be chosen. cannot start - * with ~ - * @param excludedScopes the chosen node cannot be in these ranges. cannot - * starts with ~ - * @return the chosen node - */ - public Node chooseRandom(String scope, List excludedScopes) { - return chooseRandom(scope, excludedScopes, null, null, - ANCESTOR_GENERATION_DEFAULT); - } - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. 
- * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded - * - * @return the chosen node - */ - public Node chooseRandom(String scope, Collection excludedNodes) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, excludedNodes, null, - ANCESTOR_GENERATION_DEFAULT); - } else { - return chooseRandom(scope, null, excludedNodes, null, - ANCESTOR_GENERATION_DEFAULT); - } - } - - /** - * Randomly choose a leaf node from scope. - * - * If scope starts with ~, choose one from the all nodes except for the - * ones in scope; otherwise, choose nodes from scope. - * If excludedNodes is given, choose a node that's not in excludedNodes. - * - * @param scope range of nodes from which a node will be chosen - * @param excludedNodes nodes to be excluded from. - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. - * - * @return the chosen node - */ - public Node chooseRandom(String scope, Collection excludedNodes, - int ancestorGen) { - if (scope == null) { - scope = ROOT; - } - if (scope.startsWith(SCOPE_REVERSE_STR)) { - ArrayList excludedScopes = new ArrayList(); - excludedScopes.add(scope.substring(1)); - return chooseRandom(ROOT, excludedScopes, excludedNodes, null, - ancestorGen); - } else { - return chooseRandom(scope, null, excludedNodes, null, ancestorGen); - } - } - - /** - * Randomly choose one leaf node from scope, share the same generation - * ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. - * - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - */ - public Node chooseRandom(String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen) { - if (scope == null) { - scope = ROOT; - } - - checkScope(scope); - checkExcludedScopes(excludedScopes); - checkAffinityNode(affinityNode); - checkAncestorGen(ancestorGen); - - netlock.readLock().lock(); - try { - return chooseNodeInternal(scope, -1, excludedScopes, - excludedNodes, affinityNode, ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Choose the leaf node at index index from scope, share the - * same generation ancestor with affinityNode, and exclude nodes in - * excludeScope and excludeNodes. 
- * - * @param leafIndex node index, exclude nodes in excludedScope and - * excludedNodes - * @param scope range of nodes from which a node will be chosen, cannot start - * with ~ - * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param affinityNode when not null, the chosen node should share the same - * ancestor with this node at generation ancestorGen. - * Ignored when value is null - * @param ancestorGen If 0, then no same generation ancestor enforcement on - * both excludedNodes and affinityNode. If greater than 0, - * then apply to affinityNode(if not null), or apply to - * excludedNodes if affinityNode is null - * @return the chosen node - * Example: - * - * / --- root - * / \ - * / \ - * / \ - * / \ - * dc1 dc2 - * / \ / \ - * / \ / \ - * / \ / \ - * rack1 rack2 rack1 rack2 - * / \ / \ / \ / \ - * n1 n2 n3 n4 n5 n6 n7 n8 - * - * Input: - * leafIndex = 1 - * excludedScope = /dc2 - * excludedNodes = {/dc1/rack1/n1} - * affinityNode = /dc1/rack2/n2 - * ancestorGen = 2 - * - * Output: - * node /dc1/rack2/n4 - * - * Explanation: - * With affinityNode n2 and ancestorGen 2, it means we can only pick node - * from subtree /dc1. LeafIndex 1, so we pick the 2nd available node n4. - * - */ - public Node getNode(int leafIndex, String scope, List excludedScopes, - Collection excludedNodes, Node affinityNode, int ancestorGen) { - Preconditions.checkArgument(leafIndex >= 0); - if (scope == null) { - scope = ROOT; - } - checkScope(scope); - checkExcludedScopes(excludedScopes); - checkAffinityNode(affinityNode); - checkAncestorGen(ancestorGen); - - netlock.readLock().lock(); - try { - return chooseNodeInternal(scope, leafIndex, excludedScopes, - excludedNodes, affinityNode, ancestorGen); - } finally { - netlock.readLock().unlock(); - } - } - - private Node chooseNodeInternal(String scope, int leafIndex, - List excludedScopes, Collection excludedNodes, - Node affinityNode, int ancestorGen) { - Preconditions.checkArgument(scope != null); - - String finalScope = scope; - if (affinityNode != null && ancestorGen > 0) { - Node affinityAncestor = affinityNode.getAncestor(ancestorGen); - if (affinityAncestor == null) { - throw new IllegalArgumentException("affinityNode " + - affinityNode.getNetworkFullPath() + " doesn't have ancestor on" + - " generation " + ancestorGen); - } - // affinity ancestor should has overlap with scope - if (affinityAncestor.getNetworkFullPath().startsWith(scope)){ - finalScope = affinityAncestor.getNetworkFullPath(); - } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) { - return null; - } - // reset ancestor generation since the new scope is identified now - ancestorGen = 0; - } - - // check overlap of excludedScopes and finalScope - List mutableExcludedScopes = null; - if (excludedScopes != null && !excludedScopes.isEmpty()) { - mutableExcludedScopes = new ArrayList<>(); - for (String s: excludedScopes) { - // excludeScope covers finalScope - if (finalScope.startsWith(s)) { - return null; - } - // excludeScope and finalScope share nothing case - if (s.startsWith(finalScope)) { - if (!mutableExcludedScopes.stream().anyMatch( - e -> s.startsWith(e))) { - mutableExcludedScopes.add(s); - } - } - } - } - - // clone excludedNodes before remove duplicate in it - Collection mutableExNodes = null; - - // Remove duplicate in excludedNodes - if (excludedNodes != null) { - mutableExNodes = - excludedNodes.stream().distinct().collect(Collectors.toList()); - } - - // remove duplicate in 
mutableExNodes and mutableExcludedScopes - NetUtils.removeDuplicate(this, mutableExNodes, mutableExcludedScopes, - ancestorGen); - - // calculate available node count - Node scopeNode = getNode(finalScope); - int availableNodes = getAvailableNodesCount( - scopeNode.getNetworkFullPath(), mutableExcludedScopes, mutableExNodes, - ancestorGen); - - if (availableNodes <= 0) { - LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " + - "excludedNodes=\"{}\" ancestorGen=\"{}\").", - scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes, - ancestorGen); - return null; - } - - // scope is a Leaf node - if (!(scopeNode instanceof InnerNode)) { - return scopeNode; - } - - Node ret; - int nodeIndex; - if (leafIndex >= 0) { - nodeIndex = leafIndex % availableNodes; - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, - mutableExNodes, ancestorGen); - } else { - nodeIndex = ThreadLocalRandom.current().nextInt(availableNodes); - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, - mutableExNodes, ancestorGen); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Choosing node[index={},random={}] from \"{}\" available " + - "nodes, scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".", - nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes, - scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes); - LOG.debug("Chosen node = {}", (ret == null ? "not found" : - ret.toString())); - } - return ret; - } - - /** Return the distance cost between two nodes - * The distance cost from one node to its parent is it's parent's cost - * The distance cost between two nodes is calculated by summing up their - * distances cost to their closest common ancestor. - * @param node1 one node - * @param node2 another node - * @return the distance cost between node1 and node2 which is zero if they - * are the same or {@link Integer#MAX_VALUE} if node1 or node2 do not belong - * to the cluster - */ - public int getDistanceCost(Node node1, Node node2) { - if ((node1 != null && node2 != null && node1.equals(node2)) || - (node1 == null && node2 == null)) { - return 0; - } - if (node1 == null || node2 == null) { - LOG.warn("One of the nodes is a null pointer"); - return Integer.MAX_VALUE; - } - int cost = 0; - netlock.readLock().lock(); - try { - if ((node1.getAncestor(maxLevel - 1) != clusterTree) || - (node2.getAncestor(maxLevel - 1) != clusterTree)) { - LOG.debug("One of the nodes is outside of network topology"); - return Integer.MAX_VALUE; - } - int level1 = node1.getLevel(); - int level2 = node2.getLevel(); - if (level1 > maxLevel || level2 > maxLevel) { - return Integer.MAX_VALUE; - } - while(level1 > level2 && node1 != null) { - node1 = node1.getParent(); - level1--; - cost += node1 == null? 0 : node1.getCost(); - } - while(level2 > level1 && node2 != null) { - node2 = node2.getParent(); - level2--; - cost += node2 == null? 0 : node2.getCost(); - } - while(node1 != null && node2 != null && node1 != node2) { - node1 = node1.getParent(); - node2 = node2.getParent(); - cost += node1 == null? 0 : node1.getCost(); - cost += node2 == null? 0 : node2.getCost(); - } - return cost; - } finally { - netlock.readLock().unlock(); - } - } - - /** - * Sort nodes array by network distance to reader to reduces network - * traffic and improves performance. - * - * As an additional twist, we also randomize the nodes at each network - * distance. This helps with load balancing when there is data skew. 
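- *
- * A minimal usage sketch (the topology, reader and replica list are assumed
- * to exist already; the names are illustrative):
- * <pre>
- *   List&lt;Node&gt; ordered =
- *       topology.sortByDistanceCost(reader, replicas, replicas.size());
- * </pre>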
- * - * @param reader Node where need the data - * @param nodes Available replicas with the requested data - * @param activeLen Number of active nodes at the front of the array - */ - public List sortByDistanceCost(Node reader, - List nodes, int activeLen) { - /** Sort weights for the nodes array */ - if (reader == null) { - return nodes; - } - int[] costs = new int[activeLen]; - for (int i = 0; i < activeLen; i++) { - costs[i] = getDistanceCost(reader, nodes.get(i)); - } - // Add cost/node pairs to a TreeMap to sort - TreeMap> tree = new TreeMap>(); - for (int i = 0; i < activeLen; i++) { - int cost = costs[i]; - Node node = nodes.get(i); - List list = tree.get(cost); - if (list == null) { - list = Lists.newArrayListWithExpectedSize(1); - tree.put(cost, list); - } - list.add(node); - } - - List ret = new ArrayList<>(); - for (List list: tree.values()) { - if (list != null) { - Collections.shuffle(list); - for (Node n: list) { - ret.add(n); - } - } - } - - Preconditions.checkState(ret.size() == activeLen, - "Wrong number of nodes sorted!"); - return ret; - } - - /** - * Return the number of leaves in scope but not in - * excludedNodes and excludeScope. - * @param scope the scope - * @param excludedScopes excluded scopes - * @param mutableExcludedNodes a list of excluded nodes, content might be - * changed after the call - * @param ancestorGen same generation ancestor prohibit on excludedNodes - * @return number of available nodes - */ - private int getAvailableNodesCount(String scope, List excludedScopes, - Collection mutableExcludedNodes, int ancestorGen) { - Preconditions.checkArgument(scope != null); - - Node scopeNode = getNode(scope); - if (scopeNode == null) { - return 0; - } - NetUtils.removeOutscope(mutableExcludedNodes, scope); - List excludedAncestorList = - NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen); - for (Node ancestor : excludedAncestorList) { - if (scope.startsWith(ancestor.getNetworkFullPath())){ - return 0; - } - } - // number of nodes to exclude - int excludedCount = 0; - if (excludedScopes != null) { - for (String excludedScope: excludedScopes) { - Node excludedScopeNode = getNode(excludedScope); - if (excludedScopeNode != null) { - if (excludedScope.startsWith(scope)) { - excludedCount += excludedScopeNode.getNumOfLeaves(); - } else if (scope.startsWith(excludedScope)) { - return 0; - } - } - } - } - // excludedNodes is not null case - if (mutableExcludedNodes != null && (!mutableExcludedNodes.isEmpty())) { - if (ancestorGen == 0) { - for (Node node: mutableExcludedNodes) { - if (contains(node)) { - excludedCount++; - } - } - } else { - for (Node ancestor : excludedAncestorList) { - if (ancestor.getNetworkFullPath().startsWith(scope)) { - excludedCount += ancestor.getNumOfLeaves(); - } - } - } - } - - int availableCount = scopeNode.getNumOfLeaves() - excludedCount; - Preconditions.checkState(availableCount >= 0); - return availableCount; - } - - @Override - public String toString() { - // print max level - StringBuilder tree = new StringBuilder(); - tree.append("Level: "); - tree.append(maxLevel); - tree.append("\n"); - netlock.readLock().lock(); - try { - // print the number of leaves - int numOfLeaves = clusterTree.getNumOfLeaves(); - tree.append("Number of leaves:"); - tree.append(numOfLeaves); - tree.append("\n"); - // print all nodes - for (int i = 0; i < numOfLeaves; i++) { - tree.append(clusterTree.getLeaf(i).getNetworkFullPath()); - tree.append("\n"); - } - } finally { - netlock.readLock().unlock(); - } - return tree.toString(); - } - - 
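The distance model implemented by getDistanceCost above is easiest to see with a small worked example. The sketch below is illustrative only: the topology instance, the datanode variables and their /datacenter/rack/node locations are assumptions made for this example, and the concrete values assume the default layer costs (1 for the root and inner layers, 0 for leaves) defined in NodeSchema.LayerType.

    // Hypothetical layout: dnA and dnB under /dc1/rack1, dnC under /dc1/rack2,
    // dnD under /dc2/rack1; all four are assumed to be registered in "topology".
    int d0 = topology.getDistanceCost(dnA, dnA); // 0 - identical nodes
    int d2 = topology.getDistanceCost(dnA, dnB); // 2 - each side pays rack1's cost of 1
    int d4 = topology.getDistanceCost(dnA, dnC); // 4 - up through the racks, then to /dc1
    int d6 = topology.getDistanceCost(dnA, dnD); // 6 - the two paths only meet at the root

sortByDistanceCost then orders a replica list by exactly these values, shuffling replicas that tie so equally distant copies share the read load.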
private void checkScope(String scope) { - if (scope != null && scope.startsWith(SCOPE_REVERSE_STR)) { - throw new IllegalArgumentException("scope " + scope + - " should not start with " + SCOPE_REVERSE_STR); - } - } - - private void checkExcludedScopes(List excludedScopes) { - if (!CollectionUtils.isEmpty(excludedScopes)) { - excludedScopes.stream().forEach(scope -> { - if (scope.startsWith(SCOPE_REVERSE_STR)) { - throw new IllegalArgumentException("excludedScope " + scope + - " cannot start with " + SCOPE_REVERSE_STR); - } - }); - } - } - - private void checkAffinityNode(Node affinityNode) { - if (affinityNode != null && (!contains(affinityNode))) { - throw new IllegalArgumentException("Affinity node " + - affinityNode.getNetworkFullPath() + " is not a member of topology"); - } - } - - private void checkAncestorGen(int ancestorGen) { - if (ancestorGen > (maxLevel - 1) || ancestorGen < 0) { - throw new IllegalArgumentException("ancestorGen " + ancestorGen + - " exceeds this network topology acceptable level [0, " + - (maxLevel - 1) + "]"); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java deleted file mode 100644 index 0007e546770..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -/** - * The interface defines a node in a network topology. - * A node may be a leave representing a data node or an inner - * node representing a data center or rack. - * Each node has a name and its location in the network is - * decided by a string with syntax similar to a file name. - * For example, a data node's name is hostname:port# and if it's located at - * rack "orange" in data center "dog", the string representation of its - * network location will be /dog/orange. - */ -public interface Node { - /** @return the string representation of this node's network location path, - * exclude itself. In another words, its parent's full network location */ - String getNetworkLocation(); - - /** - * Set this node's network location. - * @param location it's network location - */ - void setNetworkLocation(String location); - - /** @return this node's self name in network topology. This should be node's - * IP or hostname. - * */ - String getNetworkName(); - - /** - * Set this node's name, can be hostname or Ipaddress. - * @param name it's network name - */ - void setNetworkName(String name); - - /** @return this node's full path in network topology. It's the concatenation - * of location and name. 
- * */ - String getNetworkFullPath(); - - /** @return this node's parent */ - InnerNode getParent(); - - /** - * Set this node's parent. - * @param parent the parent - */ - void setParent(InnerNode parent); - - /** @return this node's ancestor, generation 0 is itself, generation 1 is - * node's parent, and so on.*/ - Node getAncestor(int generation); - - /** - * @return this node's level in the tree. - * E.g. the root of a tree returns 1 and root's children return 2 - */ - int getLevel(); - - /** - * Set this node's level in the tree. - * @param i the level - */ - void setLevel(int i); - - /** - * @return this node's cost when network traffic go through it. - * E.g. the cost of going cross a switch is 1, and cost of going through a - * datacenter can be 5. - * Be default the cost of leaf datanode is 0, all other node is 1. - */ - int getCost(); - - /** @return the leaf nodes number under this node. */ - int getNumOfLeaves(); - - /** - * Judge if this node is an ancestor of node n. - * Ancestor includes itself and parents case. - * - * @param n a node - * @return true if this node is an ancestor of n - */ - boolean isAncestor(Node n); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java deleted file mode 100644 index 53b05ea2941..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.base.Preconditions; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; - -/** - * A thread safe class that implements interface Node. - */ -public class NodeImpl implements Node { - // host:port# - private String name; - // string representation of this node's location, such as /dc1/rack1 - private String location; - // location + "/" + name - private String path; - // which level of the tree the node resides, start from 1 for root - private int level; - // node's parent - private InnerNode parent; - // the cost to go through this node - private final int cost; - - /** - * Construct a node from its name and its location. - * @param name this node's name (can be null, must not contain - * {@link NetConstants#PATH_SEPARATOR}) - * @param location this node's location - */ - public NodeImpl(String name, String location, int cost) { - if (name != null && name.contains(PATH_SEPARATOR_STR)) { - throw new IllegalArgumentException( - "Network location name:" + name + " should not contain " + - PATH_SEPARATOR_STR); - } - this.name = (name == null) ? 
ROOT : name; - this.location = NetUtils.normalize(location); - this.path = getPath(); - this.cost = cost; - } - - /** - * Construct a node from its name and its location. - * - * @param name this node's name (can be null, must not contain - * {@link NetConstants#PATH_SEPARATOR}) - * @param location this node's location - * @param parent this node's parent node - * @param level this node's level in the tree - * @param cost this node's cost if traffic goes through it - */ - public NodeImpl(String name, String location, InnerNode parent, int level, - int cost) { - this(name, location, cost); - this.parent = parent; - this.level = level; - } - - /** - * @return this node's name - */ - public String getNetworkName() { - return name; - } - - /** - * Set this node's name, can be hostname or Ipaddress. - * @param networkName it's network name - */ - public void setNetworkName(String networkName) { - this.name = networkName; - this.path = getPath(); - } - - /** - * @return this node's network location - */ - public String getNetworkLocation() { - return location; - } - - /** - * Set this node's network location. - * @param networkLocation it's network location - */ - @Override - public void setNetworkLocation(String networkLocation) { - this.location = networkLocation; - this.path = getPath(); - } - - /** - * @return this node's full path in network topology. It's the concatenation - * of location and name. - */ - public String getNetworkFullPath() { - return path; - } - - /** - * @return this node's parent - */ - public InnerNode getParent() { - return parent; - } - - /** - * @return this node's ancestor, generation 0 is itself, generation 1 is - * node's parent, and so on. - */ - public Node getAncestor(int generation) { - Preconditions.checkArgument(generation >= 0); - Node current = this; - while (generation > 0 && current != null) { - current = current.getParent(); - generation--; - } - return current; - } - - /** - * Set this node's parent. - * - * @param parent the parent - */ - public void setParent(InnerNode parent) { - this.parent = parent; - } - - /** - * @return this node's level in the tree. - * E.g. the root of a tree returns 0 and its children return 1 - */ - public int getLevel() { - return this.level; - } - - /** - * Set this node's level in the tree. - * - * @param level the level - */ - public void setLevel(int level) { - this.level = level; - } - - /** - * @return this node's cost when network traffic go through it. - * E.g. the cost of going cross a switch is 1, and cost of going through a - * datacenter is 5. - * Be default the cost of leaf datanode is 0, all other inner node is 1. - */ - public int getCost() { - return this.cost; - } - - /** @return the leaf nodes number under this node. */ - public int getNumOfLeaves() { - return 1; - } - - /** - * Check if this node is an ancestor of node node. 
Ancestor includes - * itself and parents case; - * @param node a node - * @return true if this node is an ancestor of node - */ - public boolean isAncestor(Node node) { - return this.getNetworkFullPath().equals(PATH_SEPARATOR_STR) || - node.getNetworkLocation().startsWith(this.getNetworkFullPath()) || - node.getNetworkFullPath().equalsIgnoreCase( - this.getNetworkFullPath()); - } - - @Override - public boolean equals(Object to) { - if (to == null) { - return false; - } - if (this == to) { - return true; - } - return this.toString().equals(to.toString()); - } - - @Override - public int hashCode() { - return toString().hashCode(); - } - - /** - * @return this node's path as its string representation - */ - @Override - public String toString() { - return getNetworkFullPath(); - } - - private String getPath() { - return this.location.equals(PATH_SEPARATOR_STR) ? - this.location + this.name : - this.location + PATH_SEPARATOR_STR + this.name; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java deleted file mode 100644 index 47e5de880d6..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.HadoopIllegalArgumentException; - -import java.util.List; - -/** - * Network topology schema to housekeeper relevant information. - */ -public final class NodeSchema { - /** - * Network topology layer type enum definition. - */ - public enum LayerType{ - ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT), - INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT), - LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT); - - private final String description; - // default cost - private final int cost; - - LayerType(String description, int cost) { - this.description = description; - this.cost = cost; - } - - @Override - public String toString() { - return description; - } - - public int getCost(){ - return cost; - } - public static LayerType getType(String typeStr) { - for (LayerType type: LayerType.values()) { - if (typeStr.equalsIgnoreCase(type.toString())) { - return type; - } - } - return null; - } - } - - // default cost - private int cost; - // layer Type, mandatory property - private LayerType type; - // default name, can be null or "" - private String defaultName; - // layer prefix, can be null or "" - private String prefix; - // sublayer - private List sublayer; - - /** - * Builder for NodeSchema. 
- */ - public static class Builder { - private int cost = -1; - private LayerType type; - private String defaultName; - private String prefix; - - public Builder setCost(int nodeCost) { - this.cost = nodeCost; - return this; - } - - public Builder setPrefix(String nodePrefix) { - this.prefix = nodePrefix; - return this; - } - - public Builder setType(LayerType nodeType) { - this.type = nodeType; - return this; - } - - public Builder setDefaultName(String nodeDefaultName) { - this.defaultName = nodeDefaultName; - return this; - } - - public NodeSchema build() { - if (type == null) { - throw new HadoopIllegalArgumentException("Type is mandatory for a " + - "network topology node layer definition"); - } - if (cost == -1) { - cost = type.getCost(); - } - return new NodeSchema(type, cost, prefix, defaultName); - } - } - - /** - * Constructor. - * @param type layer type - * @param cost layer's default cost - * @param prefix layer's prefix - * @param defaultName layer's default name is if specified - */ - public NodeSchema(LayerType type, int cost, String prefix, - String defaultName) { - this.type = type; - this.cost = cost; - this.prefix = prefix; - this.defaultName = defaultName; - } - - /** - * Constructor. This constructor is only used when build NodeSchema from - * YAML file. - */ - public NodeSchema() { - this.type = LayerType.INNER_NODE; - } - - public boolean matchPrefix(String name) { - if (name == null || name.isEmpty() || prefix == null || prefix.isEmpty()) { - return false; - } - return name.trim().toLowerCase().startsWith(prefix.toLowerCase()); - } - - public LayerType getType() { - return this.type; - } - - public void setType(LayerType type) { - this.type = type; - } - - public String getPrefix() { - return this.prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - public String getDefaultName() { - return this.defaultName; - } - - public void setDefaultName(String name) { - this.defaultName = name; - } - - public int getCost() { - return this.cost; - } - public void setCost(int cost) { - this.cost = cost; - } - - public void setSublayer(List sublayer) { - this.sublayer = sublayer; - } - - public List getSublayer() { - return sublayer; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java deleted file mode 100644 index 8d7abedf2e7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java +++ /dev/null @@ -1,489 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.commons.io.FilenameUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.w3c.dom.Text; -import org.xml.sax.SAXException; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.scm.net.NodeSchema.LayerType; -import org.yaml.snakeyaml.Yaml; - -/** - * A Network topology layer schema loading tool that loads user defined network - * layer schema data from a XML configuration file. - */ -public final class NodeSchemaLoader { - private static final Logger LOG - = LoggerFactory.getLogger(NodeSchemaLoader.class); - private static final String CONFIGURATION_TAG = "configuration"; - private static final String LAYOUT_VERSION_TAG = "layoutversion"; - private static final String TOPOLOGY_TAG = "topology"; - private static final String TOPOLOGY_PATH = "path"; - private static final String TOPOLOGY_ENFORCE_PREFIX = "enforceprefix"; - private static final String LAYERS_TAG = "layers"; - private static final String LAYER_TAG = "layer"; - private static final String LAYER_ID = "id"; - private static final String LAYER_TYPE = "type"; - private static final String LAYER_COST = "cost"; - private static final String LAYER_PREFIX = "prefix"; - private static final String LAYER_DEFAULT_NAME = "default"; - - private static final int LAYOUT_VERSION = 1; - private volatile static NodeSchemaLoader instance = null; - private NodeSchemaLoader() {} - - public static NodeSchemaLoader getInstance() { - if (instance == null) { - instance = new NodeSchemaLoader(); - } - return instance; - } - - /** - * Class to house keep the result of parsing a network topology schema file. - */ - public static class NodeSchemaLoadResult { - private List schemaList; - private boolean enforcePrefix; - - NodeSchemaLoadResult(List schemaList, boolean enforcePrefix) { - this.schemaList = schemaList; - this.enforcePrefix = enforcePrefix; - } - - public boolean isEnforePrefix() { - return enforcePrefix; - } - - public List getSchemaList() { - return schemaList; - } - } - - /** - * Load user defined network layer schemas from a XML/YAML configuration file. 
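- *
- * Hypothetical usage (the file name below is only an example and exception
- * handling is omitted):
- * <pre>
- *   NodeSchemaLoadResult result = NodeSchemaLoader.getInstance()
- *       .loadSchemaFromFile("network-topology-default.xml");
- *   List&lt;NodeSchema&gt; layers = result.getSchemaList();
- * </pre>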
- * @param schemaFilePath path of schema file - * @return all valid node schemas defined in schema file - */ - public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath) - throws IllegalArgumentException, FileNotFoundException { - try { - File schemaFile = new File(schemaFilePath); - - if (schemaFile.exists()) { - LOG.info("Load network topology schema file " + - schemaFile.getAbsolutePath()); - try (FileInputStream inputStream = new FileInputStream(schemaFile)) { - return loadSchemaFromStream(schemaFilePath, inputStream); - } - } else { - // try to load with classloader - ClassLoader classloader = - Thread.currentThread().getContextClassLoader(); - if (classloader == null) { - classloader = NodeSchemaLoader.class.getClassLoader(); - } - if (classloader != null) { - try (InputStream stream = classloader - .getResourceAsStream(schemaFilePath)) { - if (stream != null) { - LOG.info("Loading file from " + classloader - .getResources(schemaFilePath)); - return loadSchemaFromStream(schemaFilePath, stream); - } - } - } - - } - - String msg = "Network topology layer schema file " + - schemaFilePath + "[" + schemaFile.getAbsolutePath() + - "] is not found."; - LOG.warn(msg); - throw new FileNotFoundException(msg); - - } catch (FileNotFoundException e) { - throw e; - } catch (ParserConfigurationException | IOException | SAXException e) { - throw new IllegalArgumentException("Failed to load network topology node" - + " schema file: " + schemaFilePath + " , error:" + e.getMessage(), - e); - } - } - - private NodeSchemaLoadResult loadSchemaFromStream(String schemaFilePath, - InputStream stream) - throws ParserConfigurationException, SAXException, IOException { - if (FilenameUtils.getExtension(schemaFilePath).toLowerCase() - .compareTo("yaml") == 0) { - return loadSchemaFromYaml(stream); - } else { - return loadSchema(stream); - } - } - - /** - * Load network topology layer schemas from a XML configuration file. - * @param inputStream schema file as an inputStream - * @return all valid node schemas defined in schema file - * @throws ParserConfigurationException ParserConfigurationException happen - * @throws IOException no such schema file - * @throws SAXException xml file has some invalid elements - * @throws IllegalArgumentException xml file content is logically invalid - */ - private NodeSchemaLoadResult loadSchema(InputStream inputStream) throws - ParserConfigurationException, SAXException, IOException { - LOG.info("Loading network topology layer schema file"); - // Read and parse the schema file. 
- DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); - dbf.setIgnoringComments(true); - DocumentBuilder builder = dbf.newDocumentBuilder(); - Document doc = builder.parse(inputStream); - Element root = doc.getDocumentElement(); - - if (!CONFIGURATION_TAG.equals(root.getTagName())) { - throw new IllegalArgumentException("Bad network topology layer schema " + - "configuration file: top-level element not <" + CONFIGURATION_TAG + - ">"); - } - NodeSchemaLoadResult schemaList; - if (root.getElementsByTagName(LAYOUT_VERSION_TAG).getLength() == 1) { - if (loadLayoutVersion(root) == LAYOUT_VERSION) { - if (root.getElementsByTagName(LAYERS_TAG).getLength() == 1) { - Map schemas = loadLayersSection(root); - if (root.getElementsByTagName(TOPOLOGY_TAG).getLength() == 1) { - schemaList = loadTopologySection(root, schemas); - } else { - throw new IllegalArgumentException("Bad network topology layer " + - "schema configuration file: no or multiple <" + TOPOLOGY_TAG + - "> element"); - } - } else { - throw new IllegalArgumentException("Bad network topology layer schema" - + " configuration file: no or multiple <" + LAYERS_TAG + - ">element"); - } - } else { - throw new IllegalArgumentException("The parse failed because of bad " - + LAYOUT_VERSION_TAG + " value, expected:" + LAYOUT_VERSION); - } - } else { - throw new IllegalArgumentException("Bad network topology layer schema " + - "configuration file: no or multiple <" + LAYOUT_VERSION_TAG + - "> elements"); - } - return schemaList; - } - - /** - * Load network topology layer schemas from a YAML configuration file. - * @param schemaFile as inputStream - * @return all valid node schemas defined in schema file - * @throws ParserConfigurationException ParserConfigurationException happen - * @throws IOException no such schema file - * @throws SAXException xml file has some invalid elements - * @throws IllegalArgumentException xml file content is logically invalid - */ - private NodeSchemaLoadResult loadSchemaFromYaml(InputStream schemaFile) { - LOG.info("Loading network topology layer schema file {}", schemaFile); - NodeSchemaLoadResult finalSchema; - - try { - Yaml yaml = new Yaml(); - NodeSchema nodeTree; - - nodeTree = yaml.loadAs(schemaFile, NodeSchema.class); - - List schemaList = new ArrayList<>(); - if (nodeTree.getType() != LayerType.ROOT) { - throw new IllegalArgumentException("First layer is not a ROOT node." - + " schema file."); - } - schemaList.add(nodeTree); - if (nodeTree.getSublayer() != null) { - nodeTree = nodeTree.getSublayer().get(0); - } - - while (nodeTree != null) { - if (nodeTree.getType() == LayerType.LEAF_NODE - && nodeTree.getSublayer() != null) { - throw new IllegalArgumentException("Leaf node in the middle of path." - + " schema file."); - } - if (nodeTree.getType() == LayerType.ROOT) { - throw new IllegalArgumentException("Multiple root nodes are defined." - + " schema file."); - } - schemaList.add(nodeTree); - if (nodeTree.getSublayer() != null) { - nodeTree = nodeTree.getSublayer().get(0); - } else { - break; - } - } - finalSchema = new NodeSchemaLoadResult(schemaList, true); - } catch (Exception e) { - throw new IllegalArgumentException("Fail to load network topology node" - + " schema file: " + schemaFile + " , error:" - + e.getMessage(), e); - } - - return finalSchema; - } - - /** - * Load layoutVersion from root element in the XML configuration file. 
- * @param root root element - * @return layout version - */ - private int loadLayoutVersion(Element root) { - int layoutVersion; - Text text = (Text) root.getElementsByTagName(LAYOUT_VERSION_TAG) - .item(0).getFirstChild(); - if (text != null) { - String value = text.getData().trim(); - try { - layoutVersion = Integer.parseInt(value); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Bad " + LAYOUT_VERSION_TAG + - " value " + value + " is found. It should be an integer."); - } - } else { - throw new IllegalArgumentException("Value of <" + LAYOUT_VERSION_TAG + - "> is null"); - } - return layoutVersion; - } - - /** - * Load layers from root element in the XML configuration file. - * @param root root element - * @return A map of node schemas with layer ID and layer schema - */ - private Map loadLayersSection(Element root) { - NodeList elements = root.getElementsByTagName(LAYER_TAG); - Map schemas = new HashMap(); - for (int i = 0; i < elements.getLength(); i++) { - Node node = elements.item(i); - if (node instanceof Element) { - Element element = (Element) node; - if (LAYER_TAG.equals(element.getTagName())) { - String layerId = element.getAttribute(LAYER_ID); - NodeSchema schema = parseLayerElement(element); - if (!schemas.containsValue(schema)) { - schemas.put(layerId, schema); - } else { - throw new IllegalArgumentException("Repetitive layer in network " + - "topology node schema configuration file: " + layerId); - } - } else { - throw new IllegalArgumentException("Bad element in network topology " - + "node schema configuration file: " + element.getTagName()); - } - } - } - - // Integrity check, only one ROOT and one LEAF is allowed - boolean foundRoot = false; - boolean foundLeaf = false; - for(NodeSchema schema: schemas.values()) { - if (schema.getType() == LayerType.ROOT) { - if (foundRoot) { - throw new IllegalArgumentException("Multiple ROOT layers are found" + - " in network topology schema configuration file"); - } else { - foundRoot = true; - } - } - if (schema.getType() == LayerType.LEAF_NODE) { - if (foundLeaf) { - throw new IllegalArgumentException("Multiple LEAF layers are found" + - " in network topology schema configuration file"); - } else { - foundLeaf = true; - } - } - } - if (!foundRoot) { - throw new IllegalArgumentException("No ROOT layer is found" + - " in network topology schema configuration file"); - } - if (!foundLeaf) { - throw new IllegalArgumentException("No LEAF layer is found" + - " in network topology schema configuration file"); - } - return schemas; - } - - /** - * Load network topology from root element in the XML configuration file and - * sort node schemas according to the topology path. - * @param root root element - * @param schemas schema map - * @return all valid node schemas defined in schema file - */ - private NodeSchemaLoadResult loadTopologySection(Element root, - Map schemas) { - NodeList elements = root.getElementsByTagName(TOPOLOGY_TAG) - .item(0).getChildNodes(); - List schemaList = new ArrayList(); - boolean enforecePrefix = false; - for (int i = 0; i < elements.getLength(); i++) { - Node node = elements.item(i); - if (node instanceof Element) { - Element element = (Element) node; - String tagName = element.getTagName(); - // Get the nonnull text value. 
- Text text = (Text) element.getFirstChild(); - String value; - if (text != null) { - value = text.getData().trim(); - if (value.isEmpty()) { - // Element with empty value is ignored - continue; - } - } else { - throw new IllegalArgumentException("Value of <" + tagName - + "> is null"); - } - if (TOPOLOGY_PATH.equals(tagName)) { - if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) { - value = value.substring(1, value.length()); - } - String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR); - if (layerIDs == null || layerIDs.length != schemas.size()) { - throw new IllegalArgumentException("Topology path depth doesn't " - + "match layer element numbers"); - } - for (int j = 0; j < layerIDs.length; j++) { - if (schemas.get(layerIDs[j]) == null) { - throw new IllegalArgumentException("No layer found for id " + - layerIDs[j]); - } - } - if (schemas.get(layerIDs[0]).getType() != LayerType.ROOT) { - throw new IllegalArgumentException("Topology path doesn't start " - + "with ROOT layer"); - } - if (schemas.get(layerIDs[layerIDs.length -1]).getType() != - LayerType.LEAF_NODE) { - throw new IllegalArgumentException("Topology path doesn't end " - + "with LEAF layer"); - } - for (int j = 0; j < layerIDs.length; j++) { - schemaList.add(schemas.get(layerIDs[j])); - } - } else if (TOPOLOGY_ENFORCE_PREFIX.equalsIgnoreCase(tagName)) { - enforecePrefix = Boolean.parseBoolean(value); - } else { - throw new IllegalArgumentException("Unsupported Element <" + - tagName + ">"); - } - } - } - // Integrity check - if (enforecePrefix) { - // Every InnerNode should have prefix defined - for (NodeSchema schema: schemas.values()) { - if (schema.getType() == LayerType.INNER_NODE && - schema.getPrefix() == null) { - throw new IllegalArgumentException("There is layer without prefix " + - "defined while prefix is enforced."); - } - } - } - return new NodeSchemaLoadResult(schemaList, enforecePrefix); - } - - /** - * Load a layer from a layer element in the XML configuration file. - * @param element network topology node layer element - * @return ECSchema - */ - private NodeSchema parseLayerElement(Element element) { - NodeList fields = element.getChildNodes(); - LayerType type = null; - int cost = 0; - String prefix = null; - String defaultName = null; - for (int i = 0; i < fields.getLength(); i++) { - Node fieldNode = fields.item(i); - if (fieldNode instanceof Element) { - Element field = (Element) fieldNode; - String tagName = field.getTagName(); - // Get the nonnull text value. 
- Text text = (Text) field.getFirstChild(); - String value; - if (text != null) { - value = text.getData().trim(); - if (value.isEmpty()) { - // Element with empty value is ignored - continue; - } - } else { - continue; - } - if (LAYER_COST.equalsIgnoreCase(tagName)) { - cost = Integer.parseInt(value); - if (cost < 0) { - throw new IllegalArgumentException( - "Cost should be positive number or 0"); - } - } else if (LAYER_TYPE.equalsIgnoreCase(tagName)) { - type = NodeSchema.LayerType.getType(value); - if (type == null) { - throw new IllegalArgumentException( - "Unsupported layer type:" + value); - } - } else if (LAYER_PREFIX.equalsIgnoreCase(tagName)) { - prefix = value; - } else if (LAYER_DEFAULT_NAME.equalsIgnoreCase(tagName)) { - defaultName = value; - } else { - throw new IllegalArgumentException("Unsupported Element <" + tagName - + ">"); - } - } - } - // type is a mandatory property - if (type == null) { - throw new IllegalArgumentException("Missing type Element"); - } - return new NodeSchema(type, cost, prefix, defaultName); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java deleted file mode 100644 index c60c2c80aa9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** The class manages all network topology schemas. 
*/ - -public final class NodeSchemaManager { - private static final Logger LOG = LoggerFactory.getLogger( - NodeSchemaManager.class); - - // All schema saved and sorted from ROOT to LEAF node - private List allSchema; - // enforcePrefix only applies to INNER_NODE - private boolean enforcePrefix; - // max level, includes ROOT level - private int maxLevel = -1; - - private volatile static NodeSchemaManager instance = null; - - private NodeSchemaManager() { - } - - public static NodeSchemaManager getInstance() { - if (instance == null) { - instance = new NodeSchemaManager(); - } - return instance; - } - - public void init(Configuration conf) { - /** - * Load schemas from network topology schema configuration file - */ - String schemaFile = conf.get( - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); - NodeSchemaLoadResult result; - try { - result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); - allSchema = result.getSchemaList(); - enforcePrefix = result.isEnforePrefix(); - maxLevel = allSchema.size(); - } catch (Throwable e) { - String msg = "Failed to load schema file:" + schemaFile - + ", error: " + e.getMessage(); - LOG.error(msg, e); - throw new RuntimeException(msg, e); - } - } - - @VisibleForTesting - public void init(NodeSchema[] schemas, boolean enforce) { - allSchema = new ArrayList<>(); - allSchema.addAll(Arrays.asList(schemas)); - enforcePrefix = enforce; - maxLevel = schemas.length; - } - - public int getMaxLevel() { - return maxLevel; - } - - public int getCost(int level) { - Preconditions.checkArgument(level <= maxLevel && - level >= (NetConstants.ROOT_LEVEL)); - return allSchema.get(level - NetConstants.ROOT_LEVEL).getCost(); - } - - /** - * Given a incomplete network path, return its complete network path if - * possible. E.g. input is 'node1', output is '/rack-default/node1' if this - * schema manages ROOT, RACK and LEAF, with prefix defined and enforce prefix - * enabled. - * - * @param path the incomplete input path - * @return complete path, null if cannot carry out complete action or action - * failed - */ - public String complete(String path) { - if (!enforcePrefix) { - return null; - } - String normalizedPath = NetUtils.normalize(path); - String[] subPath = normalizedPath.split(NetConstants.PATH_SEPARATOR_STR); - if ((subPath.length) == maxLevel) { - return path; - } - StringBuffer newPath = new StringBuffer(NetConstants.ROOT); - // skip the ROOT and LEAF layer - int i, j; - for (i = 1, j = 1; i < subPath.length && j < (allSchema.size() - 1);) { - if (allSchema.get(j).matchPrefix(subPath[i])) { - newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]); - i++; - j++; - } else { - newPath.append(allSchema.get(j).getDefaultName()); - j++; - } - } - if (i == (subPath.length - 1)) { - newPath.append(NetConstants.PATH_SEPARATOR_STR + subPath[i]); - return newPath.toString(); - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java deleted file mode 100644 index 375af7f0ea0..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; -/** - The network topology supported by Ozone. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 3c544db3ab9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java deleted file mode 100644 index 2828f6ea41c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Represents a group of datanodes which store a container. - */ -public final class Pipeline { - - private static final Logger LOG = LoggerFactory.getLogger(Pipeline.class); - private final PipelineID id; - private final ReplicationType type; - private final ReplicationFactor factor; - - private PipelineState state; - private Map nodeStatus; - // nodes with ordered distance to client - private ThreadLocal> nodesInOrder = new ThreadLocal<>(); - - /** - * The immutable properties of pipeline object is used in - * ContainerStateManager#getMatchingContainerByPipeline to take a lock on - * the container allocations for a particular pipeline. - */ - private Pipeline(PipelineID id, ReplicationType type, - ReplicationFactor factor, PipelineState state, - Map nodeStatus) { - this.id = id; - this.type = type; - this.factor = factor; - this.state = state; - this.nodeStatus = nodeStatus; - } - - /** - * Returns the ID of this pipeline. - * - * @return PipelineID - */ - public PipelineID getId() { - return id; - } - - /** - * Returns the type. - * - * @return type - Simple or Ratis. - */ - public ReplicationType getType() { - return type; - } - - /** - * Returns the factor. - * - * @return type - Simple or Ratis. - */ - public ReplicationFactor getFactor() { - return factor; - } - - /** - * Returns the State of the pipeline. - * - * @return - LifeCycleStates. - */ - public PipelineState getPipelineState() { - return state; - } - - /** - * Returns the list of nodes which form this pipeline. 
- * - * @return List of DatanodeDetails - */ - public List getNodes() { - return new ArrayList<>(nodeStatus.keySet()); - } - - public DatanodeDetails getFirstNode() throws IOException { - if (nodeStatus.isEmpty()) { - throw new IOException(String.format("Pipeline=%s is empty", id)); - } - return nodeStatus.keySet().iterator().next(); - } - - public DatanodeDetails getClosestNode() throws IOException { - if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) { - LOG.debug("Nodes in order is empty, delegate to getFirstNode"); - return getFirstNode(); - } - return nodesInOrder.get().get(0); - } - - public boolean isClosed() { - return state == PipelineState.CLOSED; - } - - public boolean isOpen() { - return state == PipelineState.OPEN; - } - - public void setNodesInOrder(List nodes) { - nodesInOrder.set(nodes); - } - - public List getNodesInOrder() { - if (nodesInOrder.get() == null || nodesInOrder.get().isEmpty()) { - LOG.debug("Nodes in order is empty, delegate to getNodes"); - return getNodes(); - } - return nodesInOrder.get(); - } - - void reportDatanode(DatanodeDetails dn) throws IOException { - if (nodeStatus.get(dn) == null) { - throw new IOException( - String.format("Datanode=%s not part of pipeline=%s", dn, id)); - } - nodeStatus.put(dn, System.currentTimeMillis()); - } - - boolean isHealthy() { - for (Long reportedTime : nodeStatus.values()) { - if (reportedTime < 0) { - return false; - } - } - return true; - } - - public boolean isEmpty() { - return nodeStatus.isEmpty(); - } - - public HddsProtos.Pipeline getProtobufMessage() - throws UnknownPipelineStateException { - HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder() - .setId(id.getProtobuf()) - .setType(type) - .setFactor(factor) - .setState(PipelineState.getProtobuf(state)) - .setLeaderID("") - .addAllMembers(nodeStatus.keySet().stream() - .map(DatanodeDetails::getProtoBufMessage) - .collect(Collectors.toList())); - // To save the message size on wire, only transfer the node order based on - // network topology - List nodes = nodesInOrder.get(); - if (nodes != null && !nodes.isEmpty()) { - for (int i = 0; i < nodes.size(); i++) { - Iterator it = nodeStatus.keySet().iterator(); - for (int j = 0; j < nodeStatus.keySet().size(); j++) { - if (it.next().equals(nodes.get(i))) { - builder.addMemberOrders(j); - break; - } - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Serialize pipeline {} with nodesInOrder{ }", id.toString(), - nodes); - } - } - return builder.build(); - } - - public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(pipeline, "Pipeline is null"); - return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId())) - .setFactor(pipeline.getFactor()) - .setType(pipeline.getType()) - .setState(PipelineState.fromProtobuf(pipeline.getState())) - .setNodes(pipeline.getMembersList().stream() - .map(DatanodeDetails::getFromProtoBuf).collect(Collectors.toList())) - .setNodesInOrder(pipeline.getMemberOrdersList()) - .build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - Pipeline that = (Pipeline) o; - - return new EqualsBuilder() - .append(id, that.id) - .append(type, that.type) - .append(factor, that.factor) - .append(getNodes(), that.getNodes()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder() - .append(id) - .append(type) - .append(factor) - 
.append(nodeStatus) - .toHashCode(); - } - - @Override - public String toString() { - final StringBuilder b = - new StringBuilder(getClass().getSimpleName()).append("["); - b.append(" Id: ").append(id.getId()); - b.append(", Nodes: "); - nodeStatus.keySet().forEach(b::append); - b.append(", Type:").append(getType()); - b.append(", Factor:").append(getFactor()); - b.append(", State:").append(getPipelineState()); - b.append("]"); - return b.toString(); - } - - public static Builder newBuilder() { - return new Builder(); - } - - public static Builder newBuilder(Pipeline pipeline) { - return new Builder(pipeline); - } - - /** - * Builder class for Pipeline. - */ - public static class Builder { - private PipelineID id = null; - private ReplicationType type = null; - private ReplicationFactor factor = null; - private PipelineState state = null; - private Map nodeStatus = null; - private List nodeOrder = null; - private List nodesInOrder = null; - - public Builder() {} - - public Builder(Pipeline pipeline) { - this.id = pipeline.id; - this.type = pipeline.type; - this.factor = pipeline.factor; - this.state = pipeline.state; - this.nodeStatus = pipeline.nodeStatus; - this.nodesInOrder = pipeline.nodesInOrder.get(); - } - - public Builder setId(PipelineID id1) { - this.id = id1; - return this; - } - - public Builder setType(ReplicationType type1) { - this.type = type1; - return this; - } - - public Builder setFactor(ReplicationFactor factor1) { - this.factor = factor1; - return this; - } - - public Builder setState(PipelineState state1) { - this.state = state1; - return this; - } - - public Builder setNodes(List nodes) { - this.nodeStatus = new LinkedHashMap<>(); - nodes.forEach(node -> nodeStatus.put(node, -1L)); - return this; - } - - public Builder setNodesInOrder(List orders) { - this.nodeOrder = orders; - return this; - } - - public Pipeline build() { - Preconditions.checkNotNull(id); - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(factor); - Preconditions.checkNotNull(state); - Preconditions.checkNotNull(nodeStatus); - Pipeline pipeline = new Pipeline(id, type, factor, state, nodeStatus); - - if (nodeOrder != null && !nodeOrder.isEmpty()) { - // This branch is for build from ProtoBuf - List nodesWithOrder = new ArrayList<>(); - for(int i = 0; i < nodeOrder.size(); i++) { - int nodeIndex = nodeOrder.get(i); - Iterator it = nodeStatus.keySet().iterator(); - while(it.hasNext() && nodeIndex >= 0) { - DatanodeDetails node = it.next(); - if (nodeIndex == 0) { - nodesWithOrder.add(node); - break; - } - nodeIndex--; - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Deserialize nodesInOrder {} in pipeline {}", - nodesWithOrder, id.toString()); - } - pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != null){ - // This branch is for pipeline clone - pipeline.setNodesInOrder(nodesInOrder); - } - return pipeline; - } - } - - /** - * Possible Pipeline states in SCM. 
- */ - public enum PipelineState { - ALLOCATED, OPEN, DORMANT, CLOSED; - - public static PipelineState fromProtobuf(HddsProtos.PipelineState state) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(state, "Pipeline state is null"); - switch (state) { - case PIPELINE_ALLOCATED: return ALLOCATED; - case PIPELINE_OPEN: return OPEN; - case PIPELINE_DORMANT: return DORMANT; - case PIPELINE_CLOSED: return CLOSED; - default: - throw new UnknownPipelineStateException( - "Pipeline state: " + state + " is not recognized."); - } - } - - public static HddsProtos.PipelineState getProtobuf(PipelineState state) - throws UnknownPipelineStateException { - Preconditions.checkNotNull(state, "Pipeline state is null"); - switch (state) { - case ALLOCATED: return HddsProtos.PipelineState.PIPELINE_ALLOCATED; - case OPEN: return HddsProtos.PipelineState.PIPELINE_OPEN; - case DORMANT: return HddsProtos.PipelineState.PIPELINE_DORMANT; - case CLOSED: return HddsProtos.PipelineState.PIPELINE_CLOSED; - default: - throw new UnknownPipelineStateException( - "Pipeline state: " + state + " is not recognized."); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java deleted file mode 100644 index 76cf55e8b12..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -import java.util.UUID; - -/** - * ID for the pipeline, the ID is based on UUID. 
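For context on the Pipeline class removed above: it was normally created through its Builder and shipped over the wire as an HddsProtos.Pipeline message. The following is a minimal, hypothetical sketch of that usage; the class name, replication settings, and the empty node list are illustrative assumptions made only to keep the sketch self-contained (a real pipeline carries DatanodeDetails entries).

    import java.util.Collections;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
    import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;

    public final class PipelineUsageSketch {
      public static void main(String[] args) throws UnknownPipelineStateException {
        // Build an OPEN RATIS/THREE pipeline; the empty node list keeps the
        // sketch self-contained (a real pipeline carries DatanodeDetails).
        Pipeline pipeline = Pipeline.newBuilder()
            .setId(PipelineID.randomId())
            .setType(HddsProtos.ReplicationType.RATIS)
            .setFactor(HddsProtos.ReplicationFactor.THREE)
            .setState(Pipeline.PipelineState.OPEN)
            .setNodes(Collections.emptyList())
            .build();

        // Round-trip through the protobuf form, as the SCM RPC layer does.
        HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
        Pipeline restored = Pipeline.getFromProtobuf(proto);
        System.out.println(restored.getPipelineState() + " " + restored.getId());
      }
    }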
- */ -public final class PipelineID { - - private UUID id; - - private PipelineID(UUID id) { - this.id = id; - } - - public static PipelineID randomId() { - return new PipelineID(UUID.randomUUID()); - } - - public static PipelineID valueOf(UUID id) { - return new PipelineID(id); - } - - public UUID getId() { - return id; - } - - public HddsProtos.PipelineID getProtobuf() { - return HddsProtos.PipelineID.newBuilder().setId(id.toString()).build(); - } - - public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) { - return new PipelineID(UUID.fromString(protos.getId())); - } - - @Override - public String toString() { - return "PipelineID=" + id; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - PipelineID that = (PipelineID) o; - - return id.equals(that.id); - } - - @Override - public int hashCode() { - return id.hashCode(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java deleted file mode 100644 index 2a89aab5288..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -/** - * Signals that a pipeline is missing from PipelineManager. - */ -public class PipelineNotFoundException extends IOException{ - /** - * Constructs an {@code PipelineNotFoundException} with {@code null} - * as its error detail message. - */ - public PipelineNotFoundException() { - super(); - } - - /** - * Constructs an {@code PipelineNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public PipelineNotFoundException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java deleted file mode 100644 index 7c75fc0a139..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/UnknownPipelineStateException.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -/** - * Signals that a pipeline state is not recognized. - */ -public class UnknownPipelineStateException extends IOException { - /** - * Constructs an {@code UnknownPipelineStateException} with {@code null} - * as its error detail message. - */ - public UnknownPipelineStateException() { - super(); - } - - /** - * Constructs an {@code UnknownPipelineStateException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public UnknownPipelineStateException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java deleted file mode 100644 index 51adc888661..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; -/** - Ozone supports the notion of different kind of pipelines. - That means that we can have a replication pipeline build on - Ratis, Simple or some other protocol. All Pipeline managers - the entities in charge of pipelines reside in the package. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java deleted file mode 100644 index 10a9b1b5de3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; - -import java.util.Set; - -/** - * Holds the nodes that currently host the container for an object key hash. - */ -@InterfaceAudience.Private -public final class LocatedContainer { - private final String key; - private final String matchedKeyPrefix; - private final String containerName; - private final Set locations; - private final DatanodeInfo leader; - - /** - * Creates a LocatedContainer. - * - * @param key object key - * @param matchedKeyPrefix prefix of key that was used to find the location - * @param containerName container name - * @param locations nodes that currently host the container - * @param leader node that currently acts as pipeline leader - */ - public LocatedContainer(String key, String matchedKeyPrefix, - String containerName, Set locations, DatanodeInfo leader) { - this.key = key; - this.matchedKeyPrefix = matchedKeyPrefix; - this.containerName = containerName; - this.locations = locations; - this.leader = leader; - } - - /** - * Returns the container name. - * - * @return container name - */ - public String getContainerName() { - return this.containerName; - } - - /** - * Returns the object key. - * - * @return object key - */ - public String getKey() { - return this.key; - } - - /** - * Returns the node that currently acts as pipeline leader. - * - * @return node that currently acts as pipeline leader - */ - public DatanodeInfo getLeader() { - return this.leader; - } - - /** - * Returns the nodes that currently host the container. - * - * @return {@code Set} nodes that currently host the container - */ - public Set getLocations() { - return this.locations; - } - - /** - * Returns the prefix of the key that was used to find the location. - * - * @return prefix of the key that was used to find the location - */ - public String getMatchedKeyPrefix() { - return this.matchedKeyPrefix; - } - - @Override - public boolean equals(Object otherObj) { - if (otherObj == null) { - return false; - } - if (!(otherObj instanceof LocatedContainer)) { - return false; - } - LocatedContainer other = (LocatedContainer)otherObj; - return this.key == null ? 
other.key == null : this.key.equals(other.key); - } - - @Override - public int hashCode() { - return key.hashCode(); - } - - @Override - public String toString() { - return getClass().getSimpleName() - + "{key=" + key - + "; matchedKeyPrefix=" + matchedKeyPrefix - + "; containerName=" + containerName - + "; locations=" + locations - + "; leader=" + leader - + "}"; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java deleted file mode 100644 index 18045f88cbd..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes - * to read/write a block. - */ -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface ScmBlockLocationProtocol extends Closeable { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Asks SCM where a block should be allocated. SCM responds with the - * set of datanodes that should be used creating this block. - * @param size - size of the block. - * @param numBlocks - number of blocks. - * @param type - replication type of the blocks. - * @param factor - replication factor of the blocks. - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return allocated block accessing info (key, pipeline). - * @throws IOException - */ - List allocateBlock(long size, int numBlocks, - ReplicationType type, ReplicationFactor factor, String owner, - ExcludeList excludeList) throws IOException; - - /** - * Delete blocks for a set of object keys. - * - * @param keyBlocksInfoList Map of object key and its blocks. - * @return list of block deletion results. 
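A minimal caller-side sketch of the allocateBlock contract declared above, assuming an already-wired ScmBlockLocationProtocol instance is passed in; the class name, block size, owner string, and the no-arg ExcludeList constructor are illustrative assumptions rather than anything mandated by this interface.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
    import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

    public final class BlockAllocationSketch {
      // scm is an already-wired ScmBlockLocationProtocol (for example the
      // ScmBlockLocationProtocolClientSideTranslatorPB removed later in this patch).
      static void allocateOne(ScmBlockLocationProtocol scm) throws IOException {
        List<AllocatedBlock> blocks = scm.allocateBlock(
            256L * 1024 * 1024,                 // requested block size in bytes
            1,                                  // number of blocks
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            "ozone-client",                     // owner, illustrative
            new ExcludeList());                 // assumed no-arg constructor
        System.out.println("SCM returned " + blocks.size() + " allocated block(s)");
      }
    }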
- * @throws IOException if there is any failure. - */ - List - deleteKeyBlocks(List keyBlocksInfoList) throws IOException; - - /** - * Gets the Clusterid and SCM Id from SCM. - */ - ScmInfo getScmInfo() throws IOException; - - /** - * Sort datanodes with distance to client. - * @param nodes list of network name of each node. - * @param clientMachine client address, depends, can be hostname or ipaddress. - */ - List sortDatanodes(List nodes, - String clientMachine) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java deleted file mode 100644 index 0d2ecf7ac16..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; - -import java.util.List; -import java.util.stream.Collectors; - -/** - * Holds the nodes that currently host the block for a block key. - */ -@InterfaceAudience.Private -public final class ScmLocatedBlock { - private final String key; - private final List locations; - private final DatanodeInfo leader; - - /** - * Creates a ScmLocatedBlock. - * - * @param key object key - * @param locations nodes that currently host the block - * @param leader node that currently acts as pipeline leader - */ - public ScmLocatedBlock(final String key, final List locations, - final DatanodeInfo leader) { - this.key = key; - this.locations = locations; - this.leader = leader; - } - - /** - * Returns the object key. - * - * @return object key - */ - public String getKey() { - return this.key; - } - - /** - * Returns the node that currently acts as pipeline leader. - * - * @return node that currently acts as pipeline leader - */ - public DatanodeInfo getLeader() { - return this.leader; - } - - /** - * Returns the nodes that currently host the block. - * - * @return {@literal List} nodes that currently host the block - */ - public List getLocations() { - return this.locations; - } - - @Override - public boolean equals(Object otherObj) { - if (otherObj == null) { - return false; - } - if (!(otherObj instanceof ScmLocatedBlock)) { - return false; - } - ScmLocatedBlock other = (ScmLocatedBlock)otherObj; - return this.key == null ? 
other.key == null : this.key.equals(other.key); - } - - @Override - public int hashCode() { - return key.hashCode(); - } - - @Override - public String toString() { - return getClass().getSimpleName() + "{key=" + key + "; locations=" - + locations.stream().map(loc -> loc.toString()).collect(Collectors - .joining(",")) + "; leader=" + leader + "}"; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java deleted file mode 100644 index 88db8205a40..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import org.apache.hadoop.security.KerberosInfo; - -/** - * ContainerLocationProtocol is used by an HDFS node to find the set of nodes - * that currently host a container. - */ -@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface StorageContainerLocationProtocol extends Closeable { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Asks SCM where a container should be allocated. SCM responds with the - * set of datanodes that should be used creating this container. - * - */ - ContainerWithPipeline allocateContainer( - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor factor, String owner) - throws IOException; - - /** - * Ask SCM the location of the container. SCM responds with a group of - * nodes where this container and its replicas are located. - * - * @param containerID - ID of the container. - * @return ContainerInfo - the container info such as where the pipeline - * is located. - * @throws IOException - */ - ContainerInfo getContainer(long containerID) throws IOException; - - /** - * Ask SCM the location of the container. SCM responds with a group of - * nodes where this container and its replicas are located. - * - * @param containerID - ID of the container. - * @return ContainerWithPipeline - the container info with the pipeline. - * @throws IOException - */ - ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException; - - /** - * Ask SCM a list of containers with a range of container names - * and the limit of count. - * Search container names between start name(exclusive), and - * use prefix name to filter the result. the max size of the - * searching range cannot exceed the value of count. - * - * @param startContainerID start container ID. - * @param count count, if count {@literal <} 0, the max size is unlimited.( - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big) - * - * @return a list of container. - * @throws IOException - */ - List listContainer(long startContainerID, int count) - throws IOException; - - /** - * Deletes a container in SCM. - * - * @param containerID - * @throws IOException - * if failed to delete the container mapping from db store - * or container doesn't exist. - */ - void deleteContainer(long containerID) throws IOException; - - /** - * Queries a list of Node Statuses. - * @param state - * @return List of Datanodes. 
- */ - List queryNode(HddsProtos.NodeState state, - HddsProtos.QueryScope queryScope, String poolName) throws IOException; - - /** - * Notify from client when begin or finish creating objects like pipeline - * or containers on datanodes. - * Container will be in Operational state after that. - * @param type object type - * @param id object id - * @param op operation type (e.g., create, close, delete) - * @param stage creation stage - */ - void notifyObjectStageChange( - ObjectStageChangeRequestProto.Type type, long id, - ObjectStageChangeRequestProto.Op op, - ObjectStageChangeRequestProto.Stage stage) throws IOException; - - /** - * Creates a replication pipeline of a specified type. - * @param type - replication type - * @param factor - factor 1 or 3 - * @param nodePool - optional machine list to build a pipeline. - * @throws IOException - */ - Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException; - - /** - * Returns the list of active Pipelines. - * - * @return list of Pipeline - * - * @throws IOException in case of any exception - */ - List listPipelines() throws IOException; - - /** - * Activates a dormant pipeline. - * - * @param pipelineID ID of the pipeline to activate. - * @throws IOException in case of any Exception - */ - void activatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Deactivates an active pipeline. - * - * @param pipelineID ID of the pipeline to deactivate. - * @throws IOException in case of any Exception - */ - void deactivatePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Closes a pipeline given the pipelineID. - * - * @param pipelineID ID of the pipeline to demolish - * @throws IOException - */ - void closePipeline(HddsProtos.PipelineID pipelineID) throws IOException; - - /** - * Returns information about SCM. - * - * @return {@link ScmInfo} - * @throws IOException - */ - ScmInfo getScmInfo() throws IOException; - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - boolean inSafeMode() throws IOException; - - /** - * Force SCM out of Safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - boolean forceExitSafeMode() throws IOException; - - /** - * Start ReplicationManager. - */ - void startReplicationManager() throws IOException; - - /** - * Stop ReplicationManager. - */ - void stopReplicationManager() throws IOException; - - /** - * Returns ReplicationManager status. - * - * @return True if ReplicationManager is running, false otherwise. - */ - boolean getReplicationManagerStatus() throws IOException; - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java deleted file mode 100644 index b56a749453f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
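A hypothetical caller-side sketch of the container and pipeline operations declared by the StorageContainerLocationProtocol interface above; scmClient is assumed to be an already-created instance, and the class name, owner string, and paging values are illustrative.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.container.ContainerInfo;
    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class ScmAdminSketch {
      static void run(StorageContainerLocationProtocol scmClient) throws IOException {
        // Allocate a new container; SCM picks the datanodes and the pipeline.
        ContainerWithPipeline allocated = scmClient.allocateContainer(
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            "admin");                            // owner, illustrative
        System.out.println("Allocated: " + allocated);

        // Page through up to 100 containers starting from container ID 0.
        List<ContainerInfo> containers = scmClient.listContainer(0, 100);
        System.out.println("SCM tracks " + containers.size() + " container(s)");

        // List active pipelines and report safe-mode status.
        List<Pipeline> pipelines = scmClient.listPipelines();
        System.out.println(pipelines.size() + " pipeline(s), safe mode: "
            + scmClient.inSafeMode());
      }
    }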
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocol; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java deleted file mode 100644 index a262bb5bdbd..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .SortDatanodesResponseProto; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; - -import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -import static org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status.OK; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link ScmBlockLocationProtocol} interface to the RPC server - * implementing {@link ScmBlockLocationProtocolPB}. - */ -@InterfaceAudience.Private -public final class ScmBlockLocationProtocolClientSideTranslatorPB - implements ScmBlockLocationProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final ScmBlockLocationProtocolPB rpcProxy; - - /** - * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. 
- * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy - */ - public ScmBlockLocationProtocolClientSideTranslatorPB( - ScmBlockLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Returns a SCMBlockLocationRequest builder with specified type. - * @param cmdType type of the request - */ - private SCMBlockLocationRequest.Builder createSCMBlockRequest(Type cmdType) { - return SCMBlockLocationRequest.newBuilder() - .setCmdType(cmdType) - .setTraceID(TracingUtil.exportCurrentSpan()); - } - - /** - * Submits client request to SCM server. - * @param req client request - * @return response from SCM - * @throws IOException thrown if any Protobuf service exception occurs - */ - private SCMBlockLocationResponse submitRequest( - SCMBlockLocationRequest req) throws IOException { - try { - SCMBlockLocationResponse response = - rpcProxy.send(NULL_RPC_CONTROLLER, req); - return response; - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - } - - private SCMBlockLocationResponse handleError(SCMBlockLocationResponse resp) - throws SCMException { - if (resp.getStatus() != OK) { - throw new SCMException(resp.getMessage(), - SCMException.ResultCodes.values()[resp.getStatus().ordinal()]); - } - return resp; - } - - /** - * Asks SCM where a block should be allocated. SCM responds with the - * set of datanodes that should be used creating this block. - * @param size - size of the block. - * @param num - number of blocks. - * @param type - replication type of the blocks. - * @param factor - replication factor of the blocks. - * @param excludeList - exclude list while allocating blocks. - * @return allocated block accessing info (key, pipeline). - * @throws IOException - */ - @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { - Preconditions.checkArgument(size > 0, "block size must be greater than 0"); - - AllocateScmBlockRequestProto request = - AllocateScmBlockRequestProto.newBuilder() - .setSize(size) - .setNumBlocks(num) - .setType(type) - .setFactor(factor) - .setOwner(owner) - .setExcludeList(excludeList.getProtoBuf()) - .build(); - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.AllocateScmBlock) - .setAllocateScmBlockRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - final AllocateScmBlockResponseProto response = - wrappedResponse.getAllocateScmBlockResponse(); - - List blocks = new ArrayList<>(response.getBlocksCount()); - for (AllocateBlockResponse resp : response.getBlocksList()) { - AllocatedBlock.Builder builder = new AllocatedBlock.Builder() - .setContainerBlockID( - ContainerBlockID.getFromProtobuf(resp.getContainerBlockID())) - .setPipeline(Pipeline.getFromProtobuf(resp.getPipeline())); - blocks.add(builder.build()); - } - - return blocks; - } - - /** - * Delete the set of keys specified. - * - * @param keyBlocksInfoList batch of block keys to delete. - * @return list of block deletion results. - * @throws IOException if there is any failure. 
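A short sketch of how this translator was typically used, assuming an ScmBlockLocationProtocolPB proxy has already been obtained from Hadoop RPC (proxy creation is out of scope here) and assuming the usual getClusterId()/getScmId() accessors on ScmInfo; the class and method names are made up for illustration.

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.ScmInfo;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
    import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;

    public final class TranslatorSketch {
      // rpcProxy is assumed to have been created elsewhere via Hadoop RPC;
      // wiring up the proxy is out of scope for this sketch.
      static void printScmInfo(ScmBlockLocationProtocolPB rpcProxy) throws IOException {
        ScmBlockLocationProtocolClientSideTranslatorPB client =
            new ScmBlockLocationProtocolClientSideTranslatorPB(rpcProxy);
        try {
          ScmInfo info = client.getScmInfo();
          // getClusterId()/getScmId() are assumed accessors on ScmInfo.
          System.out.println("clusterId=" + info.getClusterId()
              + " scmId=" + info.getScmId());
        } finally {
          client.close();   // stops the underlying RPC proxy
        }
      }
    }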
- * - */ - @Override - public List deleteKeyBlocks( - List keyBlocksInfoList) throws IOException { - List keyBlocksProto = keyBlocksInfoList.stream() - .map(BlockGroup::getProto).collect(Collectors.toList()); - DeleteScmKeyBlocksRequestProto request = DeleteScmKeyBlocksRequestProto - .newBuilder() - .addAllKeyBlocks(keyBlocksProto) - .build(); - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.DeleteScmKeyBlocks) - .setDeleteScmKeyBlocksRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - final DeleteScmKeyBlocksResponseProto resp = - wrappedResponse.getDeleteScmKeyBlocksResponse(); - - List results = - new ArrayList<>(resp.getResultsCount()); - results.addAll(resp.getResultsList().stream().map( - result -> new DeleteBlockGroupResult(result.getObjectKey(), - DeleteBlockGroupResult - .convertBlockResultProto(result.getBlockResultsList()))) - .collect(Collectors.toList())); - return results; - } - - /** - * Gets the cluster Id and Scm Id from SCM. - * @return ScmInfo - * @throws IOException - */ - @Override - public ScmInfo getScmInfo() throws IOException { - HddsProtos.GetScmInfoRequestProto request = - HddsProtos.GetScmInfoRequestProto.getDefaultInstance(); - HddsProtos.GetScmInfoResponseProto resp; - - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.GetScmInfo) - .setGetScmInfoRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - resp = wrappedResponse.getGetScmInfoResponse(); - ScmInfo.Builder builder = new ScmInfo.Builder() - .setClusterId(resp.getClusterId()) - .setScmId(resp.getScmId()); - return builder.build(); - } - - /** - * Sort the datanodes based on distance from client. - * @return List - * @throws IOException - */ - @Override - public List sortDatanodes(List nodes, - String clientMachine) throws IOException { - SortDatanodesRequestProto request = SortDatanodesRequestProto - .newBuilder() - .addAllNodeNetworkName(nodes) - .setClient(clientMachine) - .build(); - SCMBlockLocationRequest wrapper = createSCMBlockRequest( - Type.SortDatanodes) - .setSortDatanodesRequest(request) - .build(); - - final SCMBlockLocationResponse wrappedResponse = - handleError(submitRequest(wrapper)); - SortDatanodesResponseProto resp = - wrappedResponse.getSortDatanodesResponse(); - List results = new ArrayList<>(resp.getNodeCount()); - results.addAll(resp.getNodeList().stream() - .map(node -> DatanodeDetails.getFromProtoBuf(node)) - .collect(Collectors.toList())); - return results; - } - - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - @Override - public void close() { - RPC.stopProxy(rpcProxy); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java deleted file mode 100644 index 1ba698bf0e3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .ScmBlockLocationProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from an HDFS node to StorageContainerManager. This extends the - * Protocol Buffers service interface to add Hadoop-specific annotations. - */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol", - protocolVersion = 1) -@InterfaceAudience.Private -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -public interface ScmBlockLocationProtocolPB - extends ScmBlockLocationProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java deleted file mode 100644 index 01db597dfae..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,475 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Consumer; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.GetScmInfoResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest.Builder; 
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; - -import com.google.common.base.Preconditions; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link StorageContainerLocationProtocol} interface to the RPC server - * implementing {@link StorageContainerLocationProtocolPB}. - */ -@InterfaceAudience.Private -public final class StorageContainerLocationProtocolClientSideTranslatorPB - implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final StorageContainerLocationProtocolPB rpcProxy; - - /** - * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. - * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy - */ - public StorageContainerLocationProtocolClientSideTranslatorPB( - StorageContainerLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private ScmContainerLocationResponse submitRequest( - StorageContainerLocationProtocolProtos.Type type, - Consumer builderConsumer) throws IOException { - final ScmContainerLocationResponse response; - try { - - Builder builder = ScmContainerLocationRequest.newBuilder() - .setCmdType(type) - .setTraceID(TracingUtil.exportCurrentSpan()); - builderConsumer.accept(builder); - ScmContainerLocationRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Asks SCM where a container should be allocated. SCM responds with the set - * of datanodes that should be used creating this container. Ozone/SCM only - * supports replication factor of either 1 or 3. 
- * - * @param type - Replication Type - * @param factor - Replication Count - */ - @Override - public ContainerWithPipeline allocateContainer( - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner) throws IOException { - - ContainerRequestProto request = ContainerRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setReplicationFactor(factor) - .setReplicationType(type) - .setOwner(owner) - .build(); - - ContainerResponseProto response = - submitRequest(Type.AllocateContainer, - builder -> builder.setContainerRequest(request)) - .getContainerResponse(); - //TODO should be migrated to use the top level status structure. - if (response.getErrorCode() != ContainerResponseProto.Error.success) { - throw new IOException(response.hasErrorMessage() ? - response.getErrorMessage() : "Allocate container failed."); - } - return ContainerWithPipeline.fromProtobuf( - response.getContainerWithPipeline()); - } - - public ContainerInfo getContainer(long containerID) throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - GetContainerRequestProto request = GetContainerRequestProto - .newBuilder() - .setContainerID(containerID) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - ScmContainerLocationResponse response = - submitRequest(Type.GetContainer, - (builder) -> builder.setGetContainerRequest(request)); - return ContainerInfo - .fromProtobuf(response.getGetContainerResponse().getContainerInfo()); - - } - - /** - * {@inheritDoc} - */ - public ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - GetContainerWithPipelineRequestProto request = - GetContainerWithPipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setContainerID(containerID).build(); - - ScmContainerLocationResponse response = - submitRequest(Type.GetContainerWithPipeline, - (builder) -> builder.setGetContainerWithPipelineRequest(request)); - - return ContainerWithPipeline.fromProtobuf( - response.getGetContainerWithPipelineResponse() - .getContainerWithPipeline()); - - } - - /** - * {@inheritDoc} - */ - @Override - public List listContainer(long startContainerID, int count) - throws IOException { - Preconditions.checkState(startContainerID >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(count > 0, - "Container count must be greater than 0."); - SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto - .newBuilder(); - builder.setStartContainerID(startContainerID); - builder.setCount(count); - builder.setTraceID(TracingUtil.exportCurrentSpan()); - SCMListContainerRequestProto request = builder.build(); - - SCMListContainerResponseProto response = - submitRequest(Type.ListContainer, - builder1 -> builder1.setScmListContainerRequest(request)) - .getScmListContainerResponse(); - List containerList = new ArrayList<>(); - for (HddsProtos.ContainerInfoProto containerInfoProto : response - .getContainersList()) { - containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); - } - return containerList; - - } - - /** - * Ask SCM to delete a container by name. SCM will remove - * the container mapping in its database. 
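A caller-side sketch of getContainerWithPipeline as exposed above, assuming an existing StorageContainerLocationProtocol instance; the getPipeline() accessor on ContainerWithPipeline is an assumption here, not shown in this patch, and the class name is illustrative.

    import java.io.IOException;

    import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

    public final class ContainerLookupSketch {
      static void locate(StorageContainerLocationProtocol scmClient, long containerID)
          throws IOException {
        ContainerWithPipeline cwp = scmClient.getContainerWithPipeline(containerID);
        // getPipeline() is an assumed accessor on ContainerWithPipeline.
        Pipeline pipeline = cwp.getPipeline();
        System.out.println("Container " + containerID + " is served by pipeline "
            + pipeline.getId() + " on nodes " + pipeline.getNodes());
      }
    }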
- */ - @Override - public void deleteContainer(long containerID) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto - .newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setContainerID(containerID) - .build(); - submitRequest(Type.DeleteContainer, - builder -> builder.setScmDeleteContainerRequest(request)); - - } - - /** - * Queries a list of Node Statuses. - */ - @Override - public List queryNode(HddsProtos.NodeState - nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) - throws IOException { - // TODO : We support only cluster wide query right now. So ignoring checking - // queryScope and poolName - Preconditions.checkNotNull(nodeStatuses); - NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder() - .setState(nodeStatuses) - .setTraceID(TracingUtil.exportCurrentSpan()) - .setScope(queryScope).setPoolName(poolName).build(); - NodeQueryResponseProto response = submitRequest(Type.QueryNode, - builder -> builder.setNodeQueryRequest(request)).getNodeQueryResponse(); - return response.getDatanodesList(); - - } - - /** - * Notify from client that creates object on datanodes. - * - * @param type object type - * @param id object id - * @param op operation type (e.g., create, close, delete) - * @param stage object creation stage : begin/complete - */ - @Override - public void notifyObjectStageChange( - ObjectStageChangeRequestProto.Type type, long id, - ObjectStageChangeRequestProto.Op op, - ObjectStageChangeRequestProto.Stage stage) throws IOException { - Preconditions.checkState(id >= 0, - "Object id cannot be negative."); - ObjectStageChangeRequestProto request = - ObjectStageChangeRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setType(type) - .setId(id) - .setOp(op) - .setStage(stage) - .build(); - submitRequest(Type.NotifyObjectStageChange, - builder -> builder.setObjectStageChangeRequest(request)); - - } - - /** - * Creates a replication pipeline of a specified type. - * - * @param replicationType - replication type - * @param factor - factor 1 or 3 - * @param nodePool - optional machine list to build a pipeline. - */ - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType - replicationType, HddsProtos.ReplicationFactor factor, HddsProtos - .NodePool nodePool) throws IOException { - PipelineRequestProto request = PipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setNodePool(nodePool) - .setReplicationFactor(factor) - .setReplicationType(replicationType) - .build(); - - PipelineResponseProto response = - submitRequest(Type.AllocatePipeline, - builder -> builder.setPipelineRequest(request)) - .getPipelineResponse(); - if (response.getErrorCode() == - PipelineResponseProto.Error.success) { - Preconditions.checkState(response.hasPipeline(), "With success, " + - "must come a pipeline"); - return Pipeline.getFromProtobuf(response.getPipeline()); - } else { - String errorMessage = String.format("create replication pipeline " + - "failed. code : %s Message: %s", response.getErrorCode(), - response.hasErrorMessage() ? 
response.getErrorMessage() : ""); - throw new IOException(errorMessage); - } - - } - - @Override - public List listPipelines() throws IOException { - ListPipelineRequestProto request = ListPipelineRequestProto - .newBuilder().setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - - ListPipelineResponseProto response = submitRequest(Type.ListPipelines, - builder -> builder.setListPipelineRequest(request)) - .getListPipelineResponse(); - - List list = new ArrayList<>(); - for (HddsProtos.Pipeline pipeline : response.getPipelinesList()) { - Pipeline fromProtobuf = Pipeline.getFromProtobuf(pipeline); - list.add(fromProtobuf); - } - return list; - - } - - @Override - public void activatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - ActivatePipelineRequestProto request = - ActivatePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.ActivatePipeline, - builder -> builder.setActivatePipelineRequest(request)); - - } - - @Override - public void deactivatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - - DeactivatePipelineRequestProto request = - DeactivatePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.DeactivatePipeline, - builder -> builder.setDeactivatePipelineRequest(request)); - } - - @Override - public void closePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - - ClosePipelineRequestProto request = - ClosePipelineRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .setPipelineID(pipelineID) - .build(); - submitRequest(Type.ClosePipeline, - builder -> builder.setClosePipelineRequest(request)); - - } - - @Override - public ScmInfo getScmInfo() throws IOException { - HddsProtos.GetScmInfoRequestProto request = - HddsProtos.GetScmInfoRequestProto.newBuilder() - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - - GetScmInfoResponseProto resp = submitRequest(Type.GetScmInfo, - builder -> builder.setGetScmInfoRequest(request)) - .getGetScmInfoResponse(); - ScmInfo.Builder builder = new ScmInfo.Builder() - .setClusterId(resp.getClusterId()) - .setScmId(resp.getScmId()); - return builder.build(); - - } - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - */ - @Override - public boolean inSafeMode() throws IOException { - InSafeModeRequestProto request = - InSafeModeRequestProto.getDefaultInstance(); - - return submitRequest(Type.InSafeMode, - builder -> builder.setInSafeModeRequest(request)) - .getInSafeModeResponse().getInSafeMode(); - - } - - /** - * Force SCM out of Safe mode. - * - * @return returns true if operation is successful. 
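Each RPC above follows the same shape: build a request proto carrying the exported trace span, submit it, then either unwrap the typed response or turn an error code into an IOException, as allocateContainer and createReplicationPipeline do explicitly. A minimal, dependency-free sketch of that build/submit/unwrap pattern; Envelope, transport and unwrap are illustrative stand-ins, not the actual Ozone types:

    import java.io.IOException;
    import java.util.function.Function;

    final class RpcCallPattern {
        /** Stand-in for the protobuf response envelope and its status fields. */
        interface Envelope {
            boolean success();
            String error();
        }

        static <REQ, RESP> RESP call(REQ request,
                Function<REQ, Envelope> transport,   // plays the role of submitRequest(...)
                Function<Envelope, RESP> unwrap) throws IOException {
            Envelope reply = transport.apply(request);
            if (!reply.success()) {                  // explicit error-code check
                throw new IOException(reply.error());
            }
            return unwrap.apply(reply);              // e.g. convert the proto to a domain object
        }
    }
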
- */ - @Override - public boolean forceExitSafeMode() throws IOException { - ForceExitSafeModeRequestProto request = - ForceExitSafeModeRequestProto.getDefaultInstance(); - ForceExitSafeModeResponseProto resp = - submitRequest(Type.ForceExitSafeMode, - builder -> builder.setForceExitSafeModeRequest(request)) - .getForceExitSafeModeResponse(); - - return resp.getExitedSafeMode(); - - } - - @Override - public void startReplicationManager() throws IOException { - - StartReplicationManagerRequestProto request = - StartReplicationManagerRequestProto.getDefaultInstance(); - submitRequest(Type.StartReplicationManager, - builder -> builder.setStartReplicationManagerRequest(request)); - - } - - @Override - public void stopReplicationManager() throws IOException { - - StopReplicationManagerRequestProto request = - StopReplicationManagerRequestProto.getDefaultInstance(); - submitRequest(Type.StopReplicationManager, - builder -> builder.setStopReplicationManagerRequest(request)); - - } - - @Override - public boolean getReplicationManagerStatus() throws IOException { - - ReplicationManagerStatusRequestProto request = - ReplicationManagerStatusRequestProto.getDefaultInstance(); - ReplicationManagerStatusResponseProto response = - submitRequest(Type.GetReplicationManagerStatus, - builder -> builder.setSeplicationManagerStatusRequest(request)) - .getReplicationManagerStatusResponse(); - return response.getIsRunning(); - - } - - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - @Override - public void close() { - RPC.stopProxy(rpcProxy); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java deleted file mode 100644 index f0af7aaed87..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos - .StorageContainerLocationProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from an HDFS node to StorageContainerManager. This extends the - * Protocol Buffers service interface to add Hadoop-specific annotations. 
- */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol", - protocolVersion = 1) -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface StorageContainerLocationProtocolPB - extends StorageContainerLocationProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java deleted file mode 100644 index 652ae60973c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.protocolPB; - -/** - * This package contains classes for the client of the storage container - * protocol. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java deleted file mode 100644 index df84859ab02..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/CheckedBiFunction.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - - -import java.io.IOException; - -/** - * Defines a functional interface having two inputs which throws IOException. 
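The functional interface declared just below lost its type parameters in this rendering (the angle brackets were stripped). Reconstructed from the javadoc and from its use in getValidatorList() further down, the shape is roughly the following; the exact bound on THROWABLE is an assumption:

    import java.io.IOException;

    @FunctionalInterface
    interface CheckedBiFunction<LEFT, RIGHT, THROWABLE extends Throwable> {
        void apply(LEFT left, RIGHT right) throws THROWABLE;
    }

    /** Example instantiation, mirroring the response validator built in getValidatorList(). */
    final class ValidatorExample {
        static final CheckedBiFunction<String, String, IOException> VALIDATOR =
            (request, response) -> {
                if (response == null) {
                    throw new IOException("empty response");
                }
            };
    }
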
- */ -@FunctionalInterface -public interface CheckedBiFunction { - void apply(LEFT left, RIGHT right) throws THROWABLE; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java deleted file mode 100644 index d0ba60d9ad7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ /dev/null @@ -1,573 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.container.common.helpers - .BlockNotCommittedException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSelector; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CloseContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .DatanodeBlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkRequestProto; -import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - PutSmallFileResponseProto; -import org.apache.hadoop.hdds.client.BlockID; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; - -/** - * Implementation of all container protocol calls performed by Container - * clients. - */ -public final class ContainerProtocolCalls { - - /** - * There is no need to instantiate this class. - */ - private ContainerProtocolCalls() { - } - - /** - * Calls the container protocol to get a container block. - * - * @param xceiverClient client to perform call - * @param datanodeBlockID blockID to identify container - * @return container protocol get block response - * @throws IOException if there is an I/O error while performing the call - */ - public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, - DatanodeBlockID datanodeBlockID) throws IOException { - GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto - .newBuilder() - .setBlockID(datanodeBlockID); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.GetBlock) - .setContainerID(datanodeBlockID.getContainerID()) - .setDatanodeUuid(id) - .setGetBlock(readBlockRequest); - String encodedToken = getEncodedBlockToken(getService(datanodeBlockID)); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getGetBlock(); - } - - /** - * Calls the container protocol to get the length of a committed block. - * - * @param xceiverClient client to perform call - * @param blockID blockId for the Block - * @return container protocol getLastCommittedBlockLength response - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.GetCommittedBlockLengthResponseProto - getCommittedBlockLength( - XceiverClientSpi xceiverClient, BlockID blockID) - throws IOException { - ContainerProtos.GetCommittedBlockLengthRequestProto.Builder - getBlockLengthRequestBuilder = - ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder(). - setBlockID(blockID.getDatanodeBlockIDProtobuf()); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.GetCommittedBlockLength) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setGetCommittedBlockLength(getBlockLengthRequestBuilder); - String encodedToken = getEncodedBlockToken(new Text(blockID. 
- getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getGetCommittedBlockLength(); - } - - /** - * Calls the container protocol to put a container block. - * - * @param xceiverClient client to perform call - * @param containerBlockData block data to identify container - * @return putBlockResponse - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.PutBlockResponseProto putBlock( - XceiverClientSpi xceiverClient, BlockData containerBlockData) - throws IOException { - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) - .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setDatanodeUuid(id) - .setPutBlock(createBlockRequest); - String encodedToken = - getEncodedBlockToken(getService(containerBlockData.getBlockID())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - xceiverClient.sendCommand(request, getValidatorList()); - return response.getPutBlock(); - } - - /** - * Calls the container protocol to put a container block. - * - * @param xceiverClient client to perform call - * @param containerBlockData block data to identify container - * @return putBlockResponse - * @throws IOException if there is an error while performing the call - * @throws InterruptedException - * @throws ExecutionException - */ - public static XceiverClientReply putBlockAsync( - XceiverClientSpi xceiverClient, BlockData containerBlockData) - throws IOException, InterruptedException, ExecutionException { - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder().setBlockData(containerBlockData); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock) - .setContainerID(containerBlockData.getBlockID().getContainerID()) - .setDatanodeUuid(id) - .setPutBlock(createBlockRequest); - String encodedToken = - getEncodedBlockToken(getService(containerBlockData.getBlockID())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - return xceiverClient.sendCommandAsync(request); - } - - /** - * Calls the container protocol to read a chunk. 
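putBlock and putBlockAsync assemble the identical ContainerCommandRequestProto; they differ only in submission, a blocking sendCommand that runs the validator list versus sendCommandAsync, which returns a reply whose completion the caller awaits. One common way to layer a blocking call over an asynchronous one is sketched below with made-up types; this is not necessarily how XceiverClientSpi implements it:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;

    final class SyncOverAsyncSketch {
        /** Stand-in for an asynchronous command submission. */
        static CompletableFuture<String> sendCommandAsync(String request) {
            return CompletableFuture.supplyAsync(() -> "reply-to-" + request);
        }

        /** The blocking variant simply waits for the asynchronous reply. */
        static String sendCommand(String request)
                throws InterruptedException, ExecutionException {
            return sendCommandAsync(request).get();
        }
    }
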
- * - * @param xceiverClient client to perform call - * @param chunk information about chunk to read - * @param blockID ID of the block - * @param validators functions to validate the response - * @return container protocol read chunk response - * @throws IOException if there is an I/O error while performing the call - */ - public static ContainerProtos.ReadChunkResponseProto readChunk( - XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - List validators) throws IOException { - ReadChunkRequestProto.Builder readChunkRequest = - ReadChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk); - String id = xceiverClient.getPipeline().getClosestNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.ReadChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id).setReadChunk(readChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto reply = - xceiverClient.sendCommand(request, validators); - return reply.getReadChunk(); - } - - /** - * Calls the container protocol to write a chunk. - * - * @param xceiverClient client to perform call - * @param chunk information about chunk to write - * @param blockID ID of the block - * @param data the data of the chunk to write - * @throws IOException if there is an error while performing the call - */ - public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, - BlockID blockID, ByteString data) - throws IOException { - WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setWriteChunk(writeChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - xceiverClient.sendCommand(request, getValidatorList()); - } - - /** - * Calls the container protocol to write a chunk. 
- * - * @param xceiverClient client to perform call - * @param chunk information about chunk to write - * @param blockID ID of the block - * @param data the data of the chunk to write - * @throws IOException if there is an I/O error while performing the call - */ - public static XceiverClientReply writeChunkAsync( - XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data) - throws IOException, ExecutionException, InterruptedException { - WriteChunkRequestProto.Builder writeChunkRequest = - WriteChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk).setData(data); - String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder().setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id).setWriteChunk(writeChunkRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - return xceiverClient.sendCommandAsync(request); - } - - /** - * Allows writing a small file using single RPC. This takes the container - * name, block name and data to write sends all that data to the container - * using a single RPC. This API is designed to be used for files which are - * smaller than 1 MB. - * - * @param client - client that communicates with the container. - * @param blockID - ID of the block - * @param data - Data to be written into the container. - * @return container protocol writeSmallFile response - * @throws IOException - */ - public static PutSmallFileResponseProto writeSmallFile( - XceiverClientSpi client, BlockID blockID, byte[] data) - throws IOException { - - BlockData containerBlockData = - BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .build(); - PutBlockRequestProto.Builder createBlockRequest = - PutBlockRequestProto.newBuilder() - .setBlockData(containerBlockData); - - KeyValue keyValue = - KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true") - .build(); - Checksum checksum = new Checksum(); - ChecksumData checksumData = checksum.computeChecksum(data, 0, data.length); - ChunkInfo chunk = - ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk") - .setOffset(0) - .setLen(data.length) - .addMetadata(keyValue) - .setChecksumData(checksumData.getProtoBufMessage()) - .build(); - - PutSmallFileRequestProto putSmallFileRequest = - PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk) - .setBlock(createBlockRequest).setData(ByteString.copyFrom(data)) - .build(); - - String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.PutSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setPutSmallFile(putSmallFileRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); - return response.getPutSmallFile(); - } - - /** - * createContainer call that creates a container on the datanode. 
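writeSmallFile packs the block metadata, a single chunk descriptor and the payload bytes into one PutSmallFile RPC, computing a checksum over the data first. The checksum step alone is illustrated below with the JDK's CRC32; Ozone's Checksum class supports several algorithms and a configurable bytes-per-checksum, which this sketch ignores:

    import java.nio.charset.StandardCharsets;
    import java.util.zip.CRC32;

    final class SmallFileChecksumSketch {
        /** Same (data, offset, length) shape as computeChecksum above, with CRC32 standing in. */
        static long checksumOf(byte[] data) {
            CRC32 crc = new CRC32();
            crc.update(data, 0, data.length);
            return crc.getValue();
        }

        public static void main(String[] args) {
            byte[] payload = "small file payload".getBytes(StandardCharsets.UTF_8);
            System.out.println("CRC32 = " + checksumOf(payload));
        }
    }
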
- * @param client - client - * @param containerID - ID of container - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void createContainer(XceiverClientSpi client, long containerID, - String encodedToken) throws IOException { - ContainerProtos.CreateContainerRequestProto.Builder createRequest = - ContainerProtos.CreateContainerRequestProto - .newBuilder(); - createRequest.setContainerType(ContainerProtos.ContainerType - .KeyValueContainer); - - String id = client.getPipeline().getFirstNode().getUuidString(); - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - request.setCmdType(ContainerProtos.Type.CreateContainer); - request.setContainerID(containerID); - request.setCreateContainer(createRequest.build()); - request.setDatanodeUuid(id); - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * Deletes a container from a pipeline. - * - * @param client - * @param force whether or not to forcibly delete the container. - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void deleteContainer(XceiverClientSpi client, long containerID, - boolean force, String encodedToken) throws IOException { - ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest = - ContainerProtos.DeleteContainerRequestProto.newBuilder(); - deleteRequest.setForceDelete(force); - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.DeleteContainer); - request.setContainerID(containerID); - request.setDeleteContainer(deleteRequest); - request.setDatanodeUuid(id); - if (encodedToken != null) { - request.setEncodedToken(encodedToken); - } - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * Close a container. - * - * @param client - * @param containerID - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static void closeContainer(XceiverClientSpi client, - long containerID, String encodedToken) - throws IOException { - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if(encodedToken != null) { - request.setEncodedToken(encodedToken); - } - client.sendCommand(request.build(), getValidatorList()); - } - - /** - * readContainer call that gets meta data from an existing container. 
- * - * @param client - client - * @param encodedToken - encodedToken if security is enabled - * @throws IOException - */ - public static ReadContainerResponseProto readContainer( - XceiverClientSpi client, long containerID, String encodedToken) - throws IOException { - String id = client.getPipeline().getFirstNode().getUuidString(); - - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(Type.ReadContainer); - request.setContainerID(containerID); - request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(id); - if(encodedToken != null) { - request.setEncodedToken(encodedToken); - } - ContainerCommandResponseProto response = - client.sendCommand(request.build(), getValidatorList()); - - return response.getReadContainer(); - } - - /** - * Reads the data given the blockID. - * - * @param client - * @param blockID - ID of the block - * @return GetSmallFileResponseProto - * @throws IOException - */ - public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, - BlockID blockID) throws IOException { - GetBlockRequestProto.Builder getBlock = GetBlockRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - ContainerProtos.GetSmallFileRequestProto getSmallFileRequest = - GetSmallFileRequestProto - .newBuilder().setBlock(getBlock) - .build(); - String id = client.getPipeline().getClosestNode().getUuidString(); - - ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto - .newBuilder() - .setCmdType(Type.GetSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(id) - .setGetSmallFile(getSmallFileRequest); - String encodedToken = getEncodedBlockToken(new Text(blockID. - getContainerBlockID().toString())); - if (encodedToken != null) { - builder.setEncodedToken(encodedToken); - } - ContainerCommandRequestProto request = builder.build(); - ContainerCommandResponseProto response = - client.sendCommand(request, getValidatorList()); - return response.getGetSmallFile(); - } - - /** - * Validates a response from a container protocol call. Any non-successful - * return code is mapped to a corresponding exception and thrown. - * - * @param response container protocol call response - * @throws StorageContainerException if the container protocol call failed - */ - public static void validateContainerResponse( - ContainerCommandResponseProto response - ) throws StorageContainerException { - if (response.getResult() == ContainerProtos.Result.SUCCESS) { - return; - } else if (response.getResult() - == ContainerProtos.Result.BLOCK_NOT_COMMITTED) { - throw new BlockNotCommittedException(response.getMessage()); - } else if (response.getResult() - == ContainerProtos.Result.CLOSED_CONTAINER_IO) { - throw new ContainerNotOpenException(response.getMessage()); - } - throw new StorageContainerException( - response.getMessage(), response.getResult()); - } - - /** - * Returns a url encoded block token. Service param should match the service - * field of token. 
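validateContainerResponse translates the datanode's result code into a typed exception so callers can react to specific failures (block not yet committed, container already closed) rather than parsing messages. Stripped of the protobuf and exception types, the mapping reduces to the switch below; plain IOExceptions stand in for BlockNotCommittedException, ContainerNotOpenException and StorageContainerException:

    import java.io.IOException;

    final class ResponseValidationSketch {
        enum Result { SUCCESS, BLOCK_NOT_COMMITTED, CLOSED_CONTAINER_IO, OTHER }

        static void validate(Result result, String message) throws IOException {
            switch (result) {
            case SUCCESS:
                return;
            case BLOCK_NOT_COMMITTED:
                throw new IOException("block not committed: " + message);
            case CLOSED_CONTAINER_IO:
                throw new IOException("container not open: " + message);
            default:
                throw new IOException("storage container error: " + message);
            }
        }
    }
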
- * @param service - * - * */ - private static String getEncodedBlockToken(Text service) - throws IOException { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - Token token = - OzoneBlockTokenSelector.selectBlockToken(service, ugi.getTokens()); - if (token != null) { - return token.encodeToUrlString(); - } - return null; - } - - private static Text getService(DatanodeBlockID blockId) { - return new Text(new StringBuffer() - .append("conID: ") - .append(blockId.getContainerID()) - .append(" locID: ") - .append(blockId.getLocalID()) - .toString()); - } - - public static List getValidatorList() { - List validators = new ArrayList<>(1); - CheckedBiFunction - validator = (request, response) -> validateContainerResponse(response); - validators.add(validator); - return validators; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java deleted file mode 100644 index 8e981586bd6..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.storage; - -/** - * This package contains StorageContainerManager classes. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java deleted file mode 100644 index bbe25a9d840..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecurityException.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.exception; - -import java.io.IOException; - -/** - * Root Security Exception call for all Certificate related Execptions. 
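The block token for a request is selected by its service name, which getService() derives from the container and local IDs of the block. The naming convention is small enough to show standalone; String replaces Hadoop's Text here, and the class name is made up:

    final class BlockTokenServiceName {
        /** Mirrors getService(): "conID: <containerID> locID: <localID>". */
        static String serviceFor(long containerID, long localID) {
            return "conID: " + containerID + " locID: " + localID;
        }

        public static void main(String[] args) {
            // A cached token whose service field equals this string is the one selected.
            System.out.println(serviceFor(42L, 7L));
        }
    }

The original builds the string with a StringBuffer; plain concatenation is equivalent for this single-threaded, single-use case.
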
- */ -public class SCMSecurityException extends IOException { - private final ErrorCode errorCode; - - /** - * Ctor. - * @param message - Error Message. - */ - public SCMSecurityException(String message) { - super(message); - this.errorCode = ErrorCode.DEFAULT; - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public SCMSecurityException(String message, Throwable cause) { - super(message, cause); - this.errorCode = ErrorCode.DEFAULT; - } - - /** - * Ctor. - * @param message - Message. - * @param error - error code. - */ - public SCMSecurityException(String message, ErrorCode error) { - super(message); - this.errorCode = error; - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public SCMSecurityException(Throwable cause) { - super(cause); - this.errorCode = ErrorCode.DEFAULT; - } - - public ErrorCode getErrorCode() { - return errorCode; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ErrorCode { - DEFAULT, - MISSING_BLOCK_TOKEN, - BLOCK_TOKEN_VERIFICATION_FAILED - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java deleted file mode 100644 index b9805925adf..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Exceptions thrown by SCM security classes. - */ -package org.apache.hadoop.hdds.security.exception; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java deleted file mode 100644 index 7ea0ebcf21e..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenException.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; - -/** - * Block Token Exceptions from the SCM Security layer. - */ -public class BlockTokenException extends SCMSecurityException { - - /** - * Ctor. - * @param message - Error Message. - */ - public BlockTokenException(String message) { - super(message); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public BlockTokenException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public BlockTokenException(Throwable cause) { - super(cause); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java deleted file mode 100644 index e94808ac9d7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.token; - -import com.google.common.base.Strings; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.security.cert.X509Certificate; - - -/** - * Verify token and return a UGI with token if authenticated. - */ -public class BlockTokenVerifier implements TokenVerifier { - - private final CertificateClient caClient; - private final SecurityConfig conf; - private static boolean testStub = false; - private final static Logger LOGGER = - LoggerFactory.getLogger(BlockTokenVerifier.class); - - public BlockTokenVerifier(SecurityConfig conf, CertificateClient caClient) { - this.conf = conf; - this.caClient = caClient; - } - - private boolean isExpired(long expiryDate) { - return Time.now() > expiryDate; - } - - @Override - public UserGroupInformation verify(String user, String tokenStr) - throws SCMSecurityException { - if (conf.isBlockTokenEnabled()) { - // TODO: add audit logs. 
- - if (Strings.isNullOrEmpty(tokenStr)) { - throw new BlockTokenException("Fail to find any token (empty or " + - "null.)"); - } - final Token token = new Token(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); - try { - token.decodeFromUrlString(tokenStr); - if (LOGGER.isDebugEnabled()) { - LOGGER.debug("Verifying token:{} for user:{} ", token, user); - } - ByteArrayInputStream buf = new ByteArrayInputStream( - token.getIdentifier()); - DataInputStream in = new DataInputStream(buf); - tokenId.readFields(in); - - } catch (IOException ex) { - throw new BlockTokenException("Failed to decode token : " + tokenStr); - } - - if (caClient == null) { - throw new SCMSecurityException("Certificate client not available " + - "to validate token"); - } - - X509Certificate singerCert; - singerCert = caClient.getCertificate(tokenId.getOmCertSerialId()); - - if (singerCert == null) { - throw new BlockTokenException("Can't find signer certificate " + - "(OmCertSerialId: " + tokenId.getOmCertSerialId() + - ") of the block token for user: " + tokenId.getUser()); - } - boolean validToken = caClient.verifySignature(tokenId.getBytes(), - token.getPassword(), singerCert); - if (!validToken) { - throw new BlockTokenException("Invalid block token for user: " + - tokenId.getUser()); - } - - // check expiration - if (isExpired(tokenId.getExpiryDate())) { - UserGroupInformation tokenUser = tokenId.getUser(); - tokenUser.setAuthenticationMethod( - UserGroupInformation.AuthenticationMethod.TOKEN); - throw new BlockTokenException("Expired block token for user: " + - tokenUser); - } - // defer access mode, bcsid and maxLength check to container dispatcher - UserGroupInformation ugi = tokenId.getUser(); - ugi.addToken(token); - ugi.setAuthenticationMethod(UserGroupInformation - .AuthenticationMethod.TOKEN); - return ugi; - } else { - return UserGroupInformation.createRemoteUser(user); - } - } - - public static boolean isTestStub() { - return testStub; - } - - // For testing purpose only. - public static void setTestStub(boolean isTestStub) { - BlockTokenVerifier.testStub = isTestStub; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java deleted file mode 100644 index 54cf18002c3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
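verify() above proceeds in a fixed order: reject empty tokens, decode the token from its URL string, look up the signer certificate by the OM certificate serial id, verify the signature, then check expiry before handing back a UGI that carries the token. The skeleton of that ordering is sketched below with stand-ins for the Token, certificate client and UGI types (illustration only, not the real API):

    import java.util.Base64;
    import java.util.function.Predicate;

    final class VerifyOrderSketch {
        static String verify(String user, String tokenStr,
                Predicate<byte[]> signatureOk, long expiryDateMillis) throws Exception {
            if (tokenStr == null || tokenStr.isEmpty()) {
                throw new Exception("Fail to find any token (empty or null.)");
            }
            byte[] identifier = Base64.getUrlDecoder().decode(tokenStr); // decodeFromUrlString stand-in
            if (!signatureOk.test(identifier)) {                         // caClient.verifySignature stand-in
                throw new Exception("Invalid block token for user: " + user);
            }
            if (System.currentTimeMillis() > expiryDateMillis) {         // isExpired(), with Time.now() replaced
                throw new Exception("Expired block token for user: " + user);
            }
            return user;  // the real method returns a UGI with the token and TOKEN auth method attached
        }
    }
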
- */ - -package org.apache.hadoop.hdds.security.token; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.Builder; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.Token.TrivialRenewer; - -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; -import java.util.EnumSet; - -/** - * Block token identifier for Ozone/HDDS. Ozone block access token is similar - * to HDFS block access token, which is meant to be lightweight and - * short-lived. No need to renew or revoke a block access token. when a - * cached block access token expires, the client simply get a new one. - * Block access token should be cached only in memory and never write to disk. - */ -@InterfaceAudience.Private -public class OzoneBlockTokenIdentifier extends TokenIdentifier { - - static final Text KIND_NAME = new Text("HDDS_BLOCK_TOKEN"); - private long expiryDate; - private String ownerId; - private String blockId; - private EnumSet modes; - private String omCertSerialId; - private long maxLength; - - public OzoneBlockTokenIdentifier() { - } - - public OzoneBlockTokenIdentifier(String ownerId, String blockId, - EnumSet modes, long expiryDate, String omCertSerialId, - long maxLength) { - this.ownerId = ownerId; - this.blockId = blockId; - this.expiryDate = expiryDate; - this.modes = modes == null ? EnumSet.noneOf(AccessModeProto.class) : modes; - this.omCertSerialId = omCertSerialId; - this.maxLength = maxLength; - } - - @Override - public UserGroupInformation getUser() { - if (this.getOwnerId() == null || "".equals(this.getOwnerId())) { - return UserGroupInformation.createRemoteUser(blockId); - } - return UserGroupInformation.createRemoteUser(ownerId); - } - - public long getExpiryDate() { - return expiryDate; - } - - public String getOwnerId() { - return ownerId; - } - - public String getBlockId() { - return blockId; - } - - public EnumSet getAccessModes() { - return modes; - } - - public String getOmCertSerialId(){ - return omCertSerialId; - } - - public long getMaxLength() { - return maxLength; - } - - @Override - public Text getKind() { - return KIND_NAME; - } - - @Override - public String toString() { - return "block_token_identifier (expiryDate=" + this.getExpiryDate() - + ", ownerId=" + this.getOwnerId() - + ", omCertSerialId=" + this.getOmCertSerialId() - + ", blockId=" + this.getBlockId() + ", access modes=" - + this.getAccessModes() + ", maxLength=" + this.getMaxLength() + ")"; - } - - static boolean isEqual(Object a, Object b) { - return a == null ? 
b == null : a.equals(b); - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (obj instanceof OzoneBlockTokenIdentifier) { - OzoneBlockTokenIdentifier that = (OzoneBlockTokenIdentifier) obj; - return new EqualsBuilder() - .append(this.expiryDate, that.expiryDate) - .append(this.ownerId, that.ownerId) - .append(this.blockId, that.blockId) - .append(this.modes, that.modes) - .append(this.omCertSerialId, that.omCertSerialId) - .append(this.maxLength, that.maxLength) - .build(); - } - return false; - } - - @Override - public int hashCode() { - return new HashCodeBuilder(133, 567) - .append(this.expiryDate) - .append(this.blockId) - .append(this.ownerId) - .append(this.modes) - .append(this.omCertSerialId) - .append(this.maxLength) - .build(); - } - - @Override - public void readFields(DataInput in) throws IOException { - final DataInputStream dis = (DataInputStream) in; - if (!dis.markSupported()) { - throw new IOException("Could not peek first byte."); - } - BlockTokenSecretProto tokenPtoto = - BlockTokenSecretProto.parseFrom((DataInputStream) in); - this.ownerId = tokenPtoto.getOwnerId(); - this.blockId = tokenPtoto.getBlockId(); - this.modes = EnumSet.copyOf(tokenPtoto.getModesList()); - this.expiryDate = tokenPtoto.getExpiryDate(); - this.omCertSerialId = tokenPtoto.getOmCertSerialId(); - this.maxLength = tokenPtoto.getMaxLength(); - } - - @VisibleForTesting - public static OzoneBlockTokenIdentifier readFieldsProtobuf(DataInput in) - throws IOException { - BlockTokenSecretProto tokenPtoto = - BlockTokenSecretProto.parseFrom((DataInputStream) in); - return new OzoneBlockTokenIdentifier(tokenPtoto.getOwnerId(), - tokenPtoto.getBlockId(), EnumSet.copyOf(tokenPtoto.getModesList()), - tokenPtoto.getExpiryDate(), tokenPtoto.getOmCertSerialId(), - tokenPtoto.getMaxLength()); - } - - @Override - public void write(DataOutput out) throws IOException { - writeProtobuf(out); - } - - @VisibleForTesting - void writeProtobuf(DataOutput out) throws IOException { - Builder builder = BlockTokenSecretProto.newBuilder() - .setBlockId(this.getBlockId()) - .setOwnerId(this.getOwnerId()) - .setOmCertSerialId(this.getOmCertSerialId()) - .setExpiryDate(this.getExpiryDate()) - .setMaxLength(this.getMaxLength()); - // Add access mode allowed - for (AccessModeProto mode : this.getAccessModes()) { - builder.addModes(AccessModeProto.valueOf(mode.name())); - } - out.write(builder.build().toByteArray()); - } - - /** - * Default TrivialRenewer. - */ - @InterfaceAudience.Private - public static class Renewer extends TrivialRenewer { - - @Override - protected Text getKind() { - return KIND_NAME; - } - } -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java deleted file mode 100644 index 9acc75ae170..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSelector.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.TokenSelector; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; - -/** - * A block token selector for Ozone. - */ -@InterfaceAudience.Private -public class OzoneBlockTokenSelector implements - TokenSelector { - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneBlockTokenSelector.class); - - @Override - @SuppressWarnings("unchecked") - public Token selectToken(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().equals(service)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service:{}", service); - } - return (Token) token; - } - } - return null; - } - - /** - * Static method to avoid instantiation. - * */ - @SuppressWarnings("unchecked") - public static Token selectBlockToken(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneBlockTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().equals(service)) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service:{}", service); - } - return (Token) token; - } - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java deleted file mode 100644 index d8170abe817..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.security.token; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Ozone GRPC token header verifier. - */ -public interface TokenVerifier { - /** - * Given a user and tokenStr header, return a UGI object with token if - * verified. 
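Both selector methods in OzoneBlockTokenSelector perform the same linear scan: keep the first token whose kind is HDDS_BLOCK_TOKEN and whose service matches the requested one, returning null when no service is given or nothing matches. A generic, dependency-free rendering of that selection logic (SimpleToken is a made-up stand-in for Hadoop's Token):

    import java.util.Collection;
    import java.util.List;

    final class TokenSelectionSketch {
        /** Minimal stand-in for a Hadoop token: just a kind and a service string. */
        record SimpleToken(String kind, String service) { }

        static SimpleToken select(String kind, String service, Collection<SimpleToken> tokens) {
            if (service == null) {
                return null;                             // mirrors the null-service early return
            }
            for (SimpleToken t : tokens) {
                if (kind.equals(t.kind()) && service.equals(t.service())) {
                    return t;
                }
            }
            return null;
        }

        public static void main(String[] args) {
            SimpleToken hit = select("HDDS_BLOCK_TOKEN", "conID: 1 locID: 2",
                List.of(new SimpleToken("HDDS_BLOCK_TOKEN", "conID: 1 locID: 2")));
            System.out.println(hit != null);             // true
        }
    }
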
- * @param user user of the request - * @param tokenStr token str of the request - * @return UGI - * @throws SCMSecurityException - */ - UserGroupInformation verify(String user, String tokenStr) - throws SCMSecurityException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java deleted file mode 100644 index 885bed580c0..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the block token related test classes. - */ -package org.apache.hadoop.hdds.security.token; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java deleted file mode 100644 index 8aaba5df999..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java +++ /dev/null @@ -1,371 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.Provider; -import java.security.Security; -import java.time.Duration; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_LEN; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_SECURITY_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT_DEFAULT; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_ALGORITHM; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_LEN; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_PROVIDER; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DIR_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_FILE_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_SIGNATURE_ALGO_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * A class that deals with all Security related configs in HDDS. - *

- * This class allows security configs to be read and used consistently across - * all of security related code base. - */ -public class SecurityConfig { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityConfig.class); - private static volatile Provider provider; - private final Configuration configuration; - private final int size; - private final String keyAlgo; - private final String providerString; - private final String metadatDir; - private final String keyDir; - private final String privateKeyFileName; - private final String publicKeyFileName; - private final Duration certDuration; - private final String x509SignatureAlgo; - private final boolean blockTokenEnabled; - private final String certificateDir; - private final String certificateFileName; - private final boolean grpcTlsEnabled; - private boolean grpcTlsUseTestCert; - private final Duration defaultCertDuration; - private final boolean isSecurityEnabled; - - /** - * Constructs a SecurityConfig. - * - * @param configuration - HDDS Configuration - */ - public SecurityConfig(Configuration configuration) { - Preconditions.checkNotNull(configuration, "Configuration cannot be null"); - this.configuration = configuration; - this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN); - this.keyAlgo = this.configuration.get(HDDS_KEY_ALGORITHM, - HDDS_DEFAULT_KEY_ALGORITHM); - this.providerString = this.configuration.get(HDDS_SECURITY_PROVIDER, - HDDS_DEFAULT_SECURITY_PROVIDER); - - // Please Note: To make it easy for our customers we will attempt to read - // HDDS metadata dir and if that is not set, we will use Ozone directory. - // TODO: We might want to fix this later. - this.metadatDir = this.configuration.get(HDDS_METADATA_DIR_NAME, - configuration.get(OZONE_METADATA_DIRS, - configuration.get(HDDS_DATANODE_DIR_KEY))); - this.keyDir = this.configuration.get(HDDS_KEY_DIR_NAME, - HDDS_KEY_DIR_NAME_DEFAULT); - this.privateKeyFileName = this.configuration.get(HDDS_PRIVATE_KEY_FILE_NAME, - HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT); - this.publicKeyFileName = this.configuration.get(HDDS_PUBLIC_KEY_FILE_NAME, - HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT); - - String durationString = this.configuration.get(HDDS_X509_MAX_DURATION, - HDDS_X509_MAX_DURATION_DEFAULT); - this.certDuration = Duration.parse(durationString); - this.x509SignatureAlgo = this.configuration.get(HDDS_X509_SIGNATURE_ALGO, - HDDS_X509_SIGNATURE_ALGO_DEFAULT); - this.certificateDir = this.configuration.get(HDDS_X509_DIR_NAME, - HDDS_X509_DIR_NAME_DEFAULT); - this.certificateFileName = this.configuration.get(HDDS_X509_FILE_NAME, - HDDS_X509_FILE_NAME_DEFAULT); - - this.blockTokenEnabled = this.configuration.getBoolean( - HDDS_BLOCK_TOKEN_ENABLED, - HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - - this.grpcTlsEnabled = this.configuration.getBoolean(HDDS_GRPC_TLS_ENABLED, - HDDS_GRPC_TLS_ENABLED_DEFAULT); - - if (grpcTlsEnabled) { - this.grpcTlsUseTestCert = this.configuration.getBoolean( - HDDS_GRPC_TLS_TEST_CERT, HDDS_GRPC_TLS_TEST_CERT_DEFAULT); - } - - this.isSecurityEnabled = this.configuration.getBoolean( - OZONE_SECURITY_ENABLED_KEY, - OZONE_SECURITY_ENABLED_DEFAULT); - - String certDurationString = - this.configuration.get(HDDS_X509_DEFAULT_DURATION, - HDDS_X509_DEFAULT_DURATION_DEFAULT); - defaultCertDuration = Duration.parse(certDurationString); - - - // First Startup -- if the provider is null, check for the provider. 
- if (SecurityConfig.provider == null) { - synchronized (SecurityConfig.class) { - provider = Security.getProvider(this.providerString); - if (SecurityConfig.provider == null) { - // Provider not found, let us try to Dynamically initialize the - // provider. - provider = initSecurityProvider(this.providerString); - } - } - } - } - - /** - * Returns true if security is enabled for OzoneCluster. This is determined - * by value of OZONE_SECURITY_ENABLED_KEY. - * - * @return true if security is enabled for OzoneCluster. - */ - public boolean isSecurityEnabled() { - return isSecurityEnabled; - } - - /** - * Returns the Default Certificate Duration. - * - * @return Duration for the default certificate issue. - */ - public Duration getDefaultCertDuration() { - return defaultCertDuration; - } - - /** - * Returns the Standard Certificate file name. - * - * @return String - Name of the Certificate File. - */ - public String getCertificateFileName() { - return certificateFileName; - } - - /** - * Returns the public key file name, This is used for storing the public keys - * on disk. - * - * @return String, File name used for public keys. - */ - public String getPublicKeyFileName() { - return publicKeyFileName; - } - - /** - * Returns the private key file name.This is used for storing the private keys - * on disk. - * - * @return String, File name used for private keys. - */ - public String getPrivateKeyFileName() { - return privateKeyFileName; - } - - /** - * Returns the File path to where keys are stored with an additional component - * name inserted in between. - * - * @param component - Component Name - String. - * @return Path Key location. - */ - public Path getKeyLocation(String component) { - Preconditions.checkNotNull(this.metadatDir, "Metadata directory can't be" - + " null. Please check configs."); - return Paths.get(metadatDir, component, keyDir); - } - - /** - * Returns the File path to where certificates are stored with an addition - * component - * name inserted in between. - * - * @param component - Component Name - String. - * @return Path location. - */ - public Path getCertificateLocation(String component) { - Preconditions.checkNotNull(this.metadatDir, "Metadata directory can't be" - + " null. Please check configs."); - return Paths.get(metadatDir, component, certificateDir); - } - - /** - * Gets the Key Size, The default key size is 2048, since the default - * algorithm used is RSA. User can change this by setting the "hdds.key.len" - * in configuration. - * - * @return key size. - */ - public int getSize() { - return size; - } - - /** - * Returns the Provider name. SCM defaults to using Bouncy Castle and will - * return "BC". - * - * @return String Provider name. - */ - public String getProvider() { - return providerString; - } - - /** - * Returns the Key generation Algorithm used. User can change this by setting - * the "hdds.key.algo" in configuration. - * - * @return String Algo. - */ - public String getKeyAlgo() { - return keyAlgo; - } - - /** - * Returns the X.509 Signature Algorithm used. This can be changed by setting - * "hdds.x509.signature.algorithm" to the new name. The default algorithm is - * SHA256withRSA. - * - * @return String - */ - public String getSignatureAlgo() { - return x509SignatureAlgo; - } - - /** - * Returns the Configuration used for initializing this SecurityConfig. - * - * @return Configuration - */ - public Configuration getConfiguration() { - return configuration; - } - - /** - * Returns the maximum length a certificate can be valid in SCM. 
The default - * value is 5 years. This can be changed by setting "hdds.x509.max.duration" - * in configuration. The formats accepted are based on the ISO-8601 duration - * format PnDTnHnMn.nS - *

- * Default value is 5 years and written as P1865D. - * - * @return Duration. - */ - public Duration getMaxCertificateDuration() { - return this.certDuration; - } - - public boolean isBlockTokenEnabled() { - return this.blockTokenEnabled; - } - - /** - * Returns true if TLS is enabled for gRPC services. - * @return true if TLS is enabled for gRPC services. - */ - public boolean isGrpcTlsEnabled() { - return this.grpcTlsEnabled; - } - - /** - * Get the gRPC TLS provider. - * @return the gRPC TLS Provider. - */ - public SslProvider getGrpcSslProvider() { - return SslProvider.valueOf(configuration.get(HDDS_GRPC_TLS_PROVIDER, - HDDS_GRPC_TLS_PROVIDER_DEFAULT)); - } - - /** - * Return true if using test certificates with authority as localhost. - * This should be used only for unit test where certificates are generated - * by openssl with localhost as DN and should never use for production as it - * will bypass the hostname/ip matching verification. - * @return true if using test certificates. - */ - public boolean useTestCert() { - return grpcTlsUseTestCert; - } - - /** - * Adds a security provider dynamically if it is not loaded already. - * - * @param providerName - name of the provider. - */ - private Provider initSecurityProvider(String providerName) { - switch (providerName) { - case "BC": - Security.addProvider(new BouncyCastleProvider()); - return Security.getProvider(providerName); - default: - LOG.error("Security Provider:{} is unknown", provider); - throw new SecurityException("Unknown security provider:" + provider); - } - } - - /** - * Returns max date for which S3 tokens will be valid. - */ - public long getS3TokenMaxDate() { - return getConfiguration().getTimeDuration( - OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY, - OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT, - TimeUnit.MICROSECONDS); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java deleted file mode 100644 index 12ececd8d4b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
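The SecurityConfig class removed above is essentially a typed view over a Hadoop Configuration: it resolves the metadata directory through a fallback chain of keys and parses certificate lifetimes with java.time.Duration from ISO-8601 strings such as P1865D. A small standalone sketch of those two patterns follows; only "hdds.x509.max.duration" and its P1865D default are quoted from the code above, the other key strings are assumptions for illustration.

    import java.time.Duration;
    import org.apache.hadoop.conf.Configuration;

    public final class SecurityConfigParsingSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Fallback chain, in the spirit of the deleted constructor: prefer the
        // HDDS metadata dir, then the Ozone metadata dirs, then the datanode dir.
        String metadataDir = conf.get("hdds.metadata.dir",          // assumed key
            conf.get("ozone.metadata.dirs",                         // assumed key
                conf.get("hdds.datanode.dir")));                    // assumed key

        // ISO-8601 duration parsing; P1865D is the documented default (~5 years).
        Duration maxCertLife =
            Duration.parse(conf.get("hdds.x509.max.duration", "P1865D"));

        System.out.println("metadataDir=" + metadataDir
            + ", maxCertLife=" + maxCertLife.toDays() + " days");
      }
    }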
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.bouncycastle.asn1.ASN1Encodable; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.pkcs.Attribute; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.ContentVerifierProvider; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCSException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; - -/** - * A base approver class for certificate approvals. - */ -public abstract class BaseApprover implements CertificateApprover { - private static final Logger LOG = - LoggerFactory.getLogger(CertificateApprover.class); - private final PKIProfile profile; - private final SecurityConfig securityConfig; - - public BaseApprover(PKIProfile pkiProfile, SecurityConfig config) { - this.profile = Objects.requireNonNull(pkiProfile); - this.securityConfig = Objects.requireNonNull(config); - } - - /** - * Returns the Security config. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * Returns the Attribute array that encodes extensions. - * - * @param request - Certificate Request - * @return - An Array of Attributes that encode various extensions requested - * in this certificate. - */ - Attribute[] getAttributes(PKCS10CertificationRequest request) { - Objects.requireNonNull(request); - return - request.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest); - } - - /** - * Returns a list of Extensions encoded in a given attribute. - * - * @param attribute - Attribute to decode. - * @return - List of Extensions. - */ - List getExtensionsList(Attribute attribute) { - Objects.requireNonNull(attribute); - List extensionsList = new ArrayList<>(); - for (ASN1Encodable value : attribute.getAttributeValues()) { - if(value != null) { - Extensions extensions = Extensions.getInstance(value); - extensionsList.add(extensions); - } - } - return extensionsList; - } - - /** - * Returns the Extension decoded into a Java Collection. - * @param extensions - A set of Extensions in ASN.1. - * @return List of Decoded Extensions. - */ - List getIndividualExtension(Extensions extensions) { - Objects.requireNonNull(extensions); - List extenList = new ArrayList<>(); - for (ASN1ObjectIdentifier id : extensions.getExtensionOIDs()) { - if (id != null) { - Extension ext = extensions.getExtension(id); - if (ext != null) { - extenList.add(ext); - } - } - } - return extenList; - } - - - - /** - * This function verifies all extensions in the certificate. - * - * @param request - CSR - * @return - true if the extensions are acceptable by the profile, false - * otherwise. 
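The attribute-walking helpers above (getAttributes, getExtensionsList, getIndividualExtension) unpack the pkcs-9-at-extensionRequest attribute of a CSR into individual Extension objects. A compact BouncyCastle sketch of the same traversal, assuming a PKCS10CertificationRequest has already been parsed:

    import java.util.ArrayList;
    import java.util.List;
    import org.bouncycastle.asn1.ASN1Encodable;
    import org.bouncycastle.asn1.ASN1ObjectIdentifier;
    import org.bouncycastle.asn1.pkcs.Attribute;
    import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
    import org.bouncycastle.asn1.x509.Extension;
    import org.bouncycastle.asn1.x509.Extensions;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;

    public final class CsrExtensionDumpSketch {
      // Lists the OIDs of all extensions the requester asked for in the CSR.
      public static List<String> requestedExtensionOids(PKCS10CertificationRequest csr) {
        List<String> oids = new ArrayList<>();
        for (Attribute attr :
            csr.getAttributes(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) {
          for (ASN1Encodable value : attr.getAttributeValues()) {
            Extensions extensions = Extensions.getInstance(value);
            for (ASN1ObjectIdentifier oid : extensions.getExtensionOIDs()) {
              Extension ext = extensions.getExtension(oid);
              oids.add(oid.getId() + (ext.isCritical() ? " (critical)" : ""));
            }
          }
        }
        return oids;
      }
    }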
- */ - boolean verfiyExtensions(PKCS10CertificationRequest request) { - Objects.requireNonNull(request); - /* - * Inside a CSR we have - * 1. A list of Attributes - * 2. Inside each attribute a list of extensions. - * 3. We need to walk thru the each extension and verify they - * are expected and we can put that into a certificate. - */ - - for (Attribute attr : getAttributes(request)) { - for (Extensions extensionsList : getExtensionsList(attr)) { - for (Extension extension : getIndividualExtension(extensionsList)) { - if (!profile.validateExtension(extension)) { - LOG.error("Failed to verify extension. {}", - extension.getExtnId().getId()); - return false; - } - } - } - } - return true; - } - - /** - * Verifies the Signature on the CSR is valid. - * - * @param pkcs10Request - PCKS10 Request. - * @return True if it is valid, false otherwise. - * @throws OperatorCreationException - On Error. - * @throws PKCSException - on Error. - */ - boolean verifyPkcs10Request(PKCS10CertificationRequest pkcs10Request) - throws OperatorCreationException, PKCSException { - ContentVerifierProvider verifierProvider = new - JcaContentVerifierProviderBuilder() - .setProvider(this.securityConfig.getProvider()) - .build(pkcs10Request.getSubjectPublicKeyInfo()); - return - pkcs10Request.isSignatureValid(verifierProvider); - } - - /** - * {@inheritDoc} - */ - @Override - public CompletableFuture inspectCSR(String csr) - throws IOException { - return inspectCSR(CertificateSignRequest.getCertificationRequest(csr)); - } - - /** - * {@inheritDoc} - */ - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - /** - * The base approver executes the following algorithm to verify that a - * CSR meets the PKI Profile criteria. - * - * 0. For time being (Until we have SCM HA) we will deny all request to - * become an intermediary CA. So we will not need to verify using CA - * profile, right now. - * - * 1. We verify the proof of possession. That is we verify the entity - * that sends us the CSR indeed has the private key for the said public key. - * - * 2. Then we will verify the RDNs meet the format and the Syntax that - * PKI profile dictates. - * - * 3. Then we decode each and every extension and ask if the PKI profile - * approves of these extension requests. - * - * 4. If all of these pass, We will return a Future which will point to - * the Certificate when finished. - */ - - CompletableFuture response = - new CompletableFuture<>(); - try { - // Step 0: Verify this is not a CA Certificate. - // Will be done by the Ozone PKI profile for time being. - // If there are any basicConstraints, they will flagged as not - // supported for time being. - - // Step 1: Let us verify that Certificate is indeed signed by someone - // who has access to the private key. - if (!verifyPkcs10Request(csr)) { - LOG.error("Failed to verify the signature in CSR."); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify the CSR.")); - } - - // Step 2: Verify the RDNs are in the correct format. - // TODO: Ozone Profile does not verify RDN now, so this call will pass. - for (RDN rdn : csr.getSubject().getRDNs()) { - if (!profile.validateRDN(rdn)) { - LOG.error("Failed in verifying RDNs"); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify the RDNs. Please check the subject name.")); - } - } - - // Step 3: Verify the Extensions. 
- if (!verfiyExtensions(csr)) { - LOG.error("failed in verification of extensions."); - response.completeExceptionally(new SCMSecurityException("Failed to " + - "verify extensions.")); - } - - } catch (OperatorCreationException | PKCSException e) { - LOG.error("Approval Failure.", e); - response.completeExceptionally(new SCMSecurityException(e)); - } - return response; - } - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java deleted file mode 100644 index 31d0aeaddc5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateApprover.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * Certificate Approver interface is used to inspectCSR a certificate. - */ -public interface CertificateApprover { - /** - * Approves a Certificate Request based on the policies of this approver. - * - * @param csr - Certificate Signing Request. - * @return - Future that will be contain the certificate or exception. - */ - CompletableFuture - inspectCSR(PKCS10CertificationRequest csr); - - /** - * Approves a Certificate Request based on the policies of this approver. - * - * @param csr - Certificate Signing Request. - * @return - Future that will be contain the certificate or exception. - * @throws IOException - On Error. - */ - CompletableFuture - inspectCSR(String csr) throws IOException; - - /** - * Sign function signs a Certificate. - * @param config - Security Config. - * @param caPrivate - CAs private Key. - * @param caCertificate - CA Certificate. - * @param validFrom - Begin Date - * @param validTill - End Date - * @param certificationRequest - Certification Request. - * @param scmId - SCM id. - * @param clusterId - Cluster id. - * @return Signed Certificate. - * @throws IOException - On Error - * @throws OperatorCreationException - on Error. 
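verifyPkcs10Request() above performs the proof-of-possession check: the CSR must verify against the public key it carries. A standalone BouncyCastle sketch of that check, assuming csrPem holds a PEM-encoded CSR and the "BC" provider is registered:

    import java.io.StringReader;
    import org.bouncycastle.openssl.PEMParser;
    import org.bouncycastle.operator.ContentVerifierProvider;
    import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;

    public final class CsrSignatureCheckSketch {
      public static boolean hasValidSignature(String csrPem) throws Exception {
        try (PEMParser parser = new PEMParser(new StringReader(csrPem))) {
          PKCS10CertificationRequest csr =
              (PKCS10CertificationRequest) parser.readObject();
          ContentVerifierProvider verifier = new JcaContentVerifierProviderBuilder()
              .setProvider("BC") // provider name, as SecurityConfig defaults to Bouncy Castle
              .build(csr.getSubjectPublicKeyInfo());
          // True only if the CSR was signed with the private key matching its public key.
          return csr.isSignatureValid(verifier);
        }
      }
    }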
- */ - @SuppressWarnings("ParameterNumber") - X509CertificateHolder sign( - SecurityConfig config, - PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, - Date validTill, - PKCS10CertificationRequest certificationRequest, - String scmId, - String clusterId) - throws IOException, OperatorCreationException; - - - /** - * Approval Types for a certificate request. - */ - enum ApprovalType { - KERBEROS_TRUSTED, /* The Request came from a DN using Kerberos Identity*/ - MANUAL, /* Wait for a Human being to inspect CSR of this certificate */ - TESTING_AUTOMATIC /* For testing purpose, Automatic Approval. */ - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java deleted file mode 100644 index b1d7d6b0844..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.concurrent.Future; - -/** - * Interface for Certificate Authority. This can be extended to talk to - * external CAs later or HSMs later. - */ -public interface CertificateServer { - /** - * Initialize the Certificate Authority. - * - * @param securityConfig - Security Configuration. - * @param type - The Type of CertificateServer we are creating, we make this - * explicit so that when we read code it is visible to the users. - * @throws SCMSecurityException - Throws if the init fails. - */ - void init(SecurityConfig securityConfig, CAType type) - throws SCMSecurityException; - - /** - * Returns the CA Certificate for this CA. - * - * @return X509CertificateHolder - Certificate for this CA. - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. - */ - X509CertificateHolder getCACertificate() - throws CertificateException, IOException; - - /** - * Returns the Certificate corresponding to given certificate serial id if - * exist. Return null if it doesn't exist. 
- * - * @return certSerialId - Certificate serial id. - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. - */ - X509Certificate getCertificate(String certSerialId) - throws CertificateException, IOException; - - /** - * Request a Certificate based on Certificate Signing Request. - * - * @param csr - Certificate Signing Request. - * @param type - An Enum which says what kind of approval process to follow. - * @return A future that will have this certificate when this request is - * approved. - * @throws SCMSecurityException - on Error. - */ - Future requestCertificate( - PKCS10CertificationRequest csr, - CertificateApprover.ApprovalType type) - throws SCMSecurityException; - - - /** - * Request a Certificate based on Certificate Signing Request. - * - * @param csr - Certificate Signing Request as a PEM encoded String. - * @param type - An Enum which says what kind of approval process to follow. - * @return A future that will have this certificate when this request is - * approved. - * @throws SCMSecurityException - on Error. - */ - Future requestCertificate(String csr, - ApprovalType type) throws IOException; - - /** - * Revokes a Certificate issued by this CertificateServer. - * - * @param certificate - Certificate to revoke - * @param approver - Approval process to follow. - * @return Future that tells us what happened. - * @throws SCMSecurityException - on Error. - */ - Future revokeCertificate(X509Certificate certificate, - ApprovalType approver) throws SCMSecurityException; - - /** - * TODO : CRL, OCSP etc. Later. This is the start of a CertificateServer - * framework. - */ - - - /** - * Make it explicit what type of CertificateServer we are creating here. - */ - enum CAType { - SELF_SIGNED_CA, - INTERMEDIARY_CA - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java deleted file mode 100644 index 961d048c51c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.cert.X509Certificate; - -/** - * This interface allows the DefaultCA to be portable and use different DB - * interfaces later. It also allows us define this interface in the SCM layer - * by which we don't have to take a circular dependency between hdds-common - * and the SCM. 
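Both requestCertificate overloads in the CertificateServer interface above hand back a Future, so a caller typically blocks with a timeout and treats a rejected CSR as an ExecutionException. A minimal caller-side sketch (the timeout value is illustrative):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.bouncycastle.cert.X509CertificateHolder;

    public final class AwaitCertificateSketch {
      public static X509CertificateHolder await(Future<X509CertificateHolder> pending)
          throws InterruptedException, TimeoutException {
        try {
          // Blocks until the approver completes (or rejects) the request.
          return pending.get(30, TimeUnit.SECONDS);
        } catch (ExecutionException e) {
          // A failed approval (bad signature, RDNs, or extensions) surfaces here.
          throw new IllegalStateException("CSR was rejected", e.getCause());
        }
      }
    }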
- * - * With this interface, DefaultCA server read and write DB or persistence - * layer and we can write to SCM's Metadata DB. - */ -public interface CertificateStore { - - /** - * Writes a new certificate that was issued to the persistent store. - * @param serialID - Certificate Serial Number. - * @param certificate - Certificate to persist. - * @throws IOException - on Failure. - */ - void storeValidCertificate(BigInteger serialID, - X509Certificate certificate) throws IOException; - - /** - * Moves a certificate in a transactional manner from valid certificate to - * revoked certificate state. - * @param serialID - Serial ID of the certificate. - * @throws IOException - */ - void revokeCertificate(BigInteger serialID) throws IOException; - - /** - * Deletes an expired certificate from the store. Please note: We don't - * remove revoked certificates, we need that information to generate the - * CRLs. - * @param serialID - Certificate ID. - */ - void removeExpiredCertificate(BigInteger serialID) throws IOException; - - /** - * Retrieves a Certificate based on the Serial number of that certificate. - * @param serialID - ID of the certificate. - * @param certType - * @return X509Certificate - * @throws IOException - */ - X509Certificate getCertificateByID(BigInteger serialID, CertType certType) - throws IOException; - - /** - * Different kind of Certificate stores. - */ - enum CertType { - VALID_CERTS, - REVOKED_CERTS - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java deleted file mode 100644 index c7f37c18063..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultApprover.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
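The CertificateStore contract above keeps valid and revoked certificates in separate buckets and retains revoked entries because they are still needed for CRL generation. A toy in-memory analogue of that contract is sketched below purely to illustrate the state transitions; the real store persists to SCM's metadata DB.

    import java.math.BigInteger;
    import java.security.cert.X509Certificate;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class InMemoryCertStoreSketch {
      private final Map<BigInteger, X509Certificate> validCerts = new ConcurrentHashMap<>();
      private final Map<BigInteger, X509Certificate> revokedCerts = new ConcurrentHashMap<>();

      public void storeValidCertificate(BigInteger serialID, X509Certificate cert) {
        validCerts.put(serialID, cert);
      }

      // Moves the certificate from the valid to the revoked bucket; revoked
      // entries are kept around so CRLs can still be generated from them.
      public synchronized void revokeCertificate(BigInteger serialID) {
        X509Certificate cert = validCerts.remove(serialID);
        if (cert != null) {
          revokedCerts.put(serialID, cert);
        }
      }

      public X509Certificate getValidCertificate(BigInteger serialID) {
        return validCerts.get(serialID);
      }
    }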
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.hadoop.util.Time; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x500.style.BCStyle; -import org.bouncycastle.asn1.x509.AlgorithmIdentifier; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.X509v3CertificateBuilder; -import org.bouncycastle.crypto.params.AsymmetricKeyParameter; -import org.bouncycastle.crypto.params.RSAKeyParameters; -import org.bouncycastle.crypto.util.PrivateKeyFactory; -import org.bouncycastle.crypto.util.PublicKeyFactory; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder; -import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * Default Approver used the by the DefaultCA. - */ -public class DefaultApprover extends BaseApprover { - - /** - * Constructs the Default Approver. - * - * @param pkiProfile - PKI Profile to use. - * @param config - Security Config - */ - public DefaultApprover(PKIProfile pkiProfile, SecurityConfig config) { - super(pkiProfile, config); - } - - /** - * Sign function signs a Certificate. - * @param config - Security Config. - * @param caPrivate - CAs private Key. - * @param caCertificate - CA Certificate. - * @param validFrom - Begin Da te - * @param validTill - End Date - * @param certificationRequest - Certification Request. - * @param scmId - SCM id. - * @param clusterId - Cluster id. - * @return Signed Certificate. - * @throws IOException - On Error - * @throws OperatorCreationException - on Error. - */ - @SuppressWarnings("ParameterNumber") - public X509CertificateHolder sign( - SecurityConfig config, - PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, - Date validTill, - PKCS10CertificationRequest certificationRequest, - String scmId, - String clusterId) throws IOException, OperatorCreationException { - - AlgorithmIdentifier sigAlgId = new - DefaultSignatureAlgorithmIdentifierFinder().find( - config.getSignatureAlgo()); - AlgorithmIdentifier digAlgId = new DefaultDigestAlgorithmIdentifierFinder() - .find(sigAlgId); - - AsymmetricKeyParameter asymmetricKP = PrivateKeyFactory.createKey(caPrivate - .getEncoded()); - SubjectPublicKeyInfo keyInfo = - certificationRequest.getSubjectPublicKeyInfo(); - - // Get scmId and cluster Id from subject name. - X500Name x500Name = certificationRequest.getSubject(); - String csrScmId = x500Name.getRDNs(BCStyle.OU)[0].getFirst().getValue(). - toASN1Primitive().toString(); - String csrClusterId = x500Name.getRDNs(BCStyle.O)[0].getFirst().getValue(). 
- toASN1Primitive().toString(); - - if (!scmId.equals(csrScmId) || !clusterId.equals(csrClusterId)) { - if (csrScmId.equalsIgnoreCase("null") && - csrClusterId.equalsIgnoreCase("null")) { - // Special case to handle DN certificate generation as DN might not know - // scmId and clusterId before registration. In secure mode registration - // will succeed only after datanode has a valid certificate. - String cn = x500Name.getRDNs(BCStyle.CN)[0].getFirst().getValue() - .toASN1Primitive().toString(); - x500Name = SecurityUtil.getDistinguishedName(cn, scmId, clusterId); - } else { - // Throw exception if scmId and clusterId doesn't match. - throw new SCMSecurityException("ScmId and ClusterId in CSR subject" + - " are incorrect."); - } - } - - RSAKeyParameters rsa = - (RSAKeyParameters) PublicKeyFactory.createKey(keyInfo); - if (rsa.getModulus().bitLength() < config.getSize()) { - throw new SCMSecurityException("Key size is too small in certificate " + - "signing request"); - } - X509v3CertificateBuilder certificateGenerator = - new X509v3CertificateBuilder( - caCertificate.getSubject(), - // Serial is not sequential but it is monotonically increasing. - BigInteger.valueOf(Time.monotonicNowNanos()), - validFrom, - validTill, - x500Name, keyInfo); - - ContentSigner sigGen = new BcRSAContentSignerBuilder(sigAlgId, digAlgId) - .build(asymmetricKP); - - return certificateGenerator.build(sigGen); - - } - - @Override - public CompletableFuture inspectCSR(String csr) - throws IOException { - return super.inspectCSR(csr); - } - - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - return super.inspectCSR(csr); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java deleted file mode 100644 index a5147b34e2f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ /dev/null @@ -1,491 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
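DefaultApprover.sign() above boils down to: take the CSR's subject and public key, pick a validity window, and build an X.509 v3 certificate signed with the CA key. A condensed BouncyCastle sketch of that core step follows; it omits the scmId/clusterId subject rewriting and the RSA key-length check, and assumes SHA256withRSA rather than reading the algorithm from SecurityConfig.

    import java.math.BigInteger;
    import java.security.PrivateKey;
    import java.util.Date;
    import org.bouncycastle.asn1.x500.X500Name;
    import org.bouncycastle.cert.X509CertificateHolder;
    import org.bouncycastle.cert.X509v3CertificateBuilder;
    import org.bouncycastle.operator.ContentSigner;
    import org.bouncycastle.operator.OperatorCreationException;
    import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
    import org.bouncycastle.pkcs.PKCS10CertificationRequest;

    public final class SignCsrSketch {
      public static X509CertificateHolder sign(X500Name issuer, PrivateKey caKey,
          PKCS10CertificationRequest csr, Date notBefore, Date notAfter)
          throws OperatorCreationException {
        X509v3CertificateBuilder builder = new X509v3CertificateBuilder(
            issuer,
            BigInteger.valueOf(System.nanoTime()), // increasing, not sequential, serial
            notBefore, notAfter,
            csr.getSubject(),
            csr.getSubjectPublicKeyInfo());
        ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA").build(caKey);
        return builder.build(signer);
      }
    }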
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.math.BigInteger; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Future; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.*; - -/** - * The default CertificateServer used by SCM. This has no dependencies on any - * external system, this allows us to bootstrap a CertificateServer from - * Scratch. - *

- * Details ======= - *

- * The Default CA server is one of the many possible implementations of an SCM - * Certificate Authority. - *

- * A certificate authority needs the Root Certificates and its private key to - * operate. The init function of the DefaultCA Server detects four possible - * states the System can be in. - *

- * 1. Success - This means that the expected Certificates and Keys are in - * place, and the CA was able to read those files into memory. - *

- * 2. Missing Keys - This means that private keys are missing. This is an error - * state which the SCM CA cannot recover from. The cluster might have been - * initialized earlier and for some reason we are not able to find the private - * keys for the CA. Eventually there will be two ways to recover from this state: - * the first is to copy the SCM CA private keys from a backup; the second is to - * rekey the whole cluster. Both of these are improvements we will support in - * the future. - *

- * 3. Missing Certificate - Similar to Missing Keys, but the root certificates - * are missing. - *

- * 4. Initialize - We don't have keys or certificates. DefaultCA assumes that - * this is a system bootup and will generate the keys and certificates - * automatically. - *

- * The init() method proceeds as follows (see the standalone state-check sketch further below): - *

- * 1. Compute the Verification Status -- Success, Missing Keys, Missing Certs or - * Initialize. - *

- * 2. ProcessVerificationStatus - Returns a Lambda, based on the Verification - * Status. - *

- * 3. Invoke the Lambda function. - *

- * At the end of the init function, we have functional CA. This function can be - * invoked as many times since we will regenerate the keys and certs only if - * both of them are missing. - */ -public class DefaultCAServer implements CertificateServer { - private static final Logger LOG = - LoggerFactory.getLogger(DefaultCAServer.class); - private final String subject; - private final String clusterID; - private final String scmID; - private String componentName = Paths.get("scm", "ca").toString(); - private Path caKeysPath; - private Path caRootX509Path; - private SecurityConfig config; - /** - * TODO: We will make these configurable in the future. - */ - private PKIProfile profile; - private CertificateApprover approver; - private CertificateStore store; - - /** - * Create an Instance of DefaultCAServer. - * @param subject - String Subject - * @param clusterID - String ClusterID - * @param scmID - String SCMID. - * @param certificateStore - A store used to persist Certificates. - */ - public DefaultCAServer(String subject, String clusterID, String scmID, - CertificateStore certificateStore) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.store = certificateStore; - } - - @Override - public void init(SecurityConfig securityConfig, CAType type) - throws SCMSecurityException { - caKeysPath = securityConfig.getKeyLocation(componentName); - caRootX509Path = securityConfig.getCertificateLocation(componentName); - this.config = securityConfig; - - // TODO: Make these configurable and load different profiles based on - // config. - profile = new DefaultProfile(); - this.approver = new DefaultApprover(profile, this.config); - - /* In future we will spilt this code to have different kind of CAs. - * Right now, we have only self-signed CertificateServer. - */ - - if (type == CAType.SELF_SIGNED_CA) { - VerificationStatus status = verifySelfSignedCA(securityConfig); - Consumer caInitializer = - processVerificationStatus(status); - caInitializer.accept(securityConfig); - return; - } - - LOG.error("We support only Self-Signed CAs for now."); - throw new IllegalStateException("Not implemented functionality requested."); - } - - @Override - public X509CertificateHolder getCACertificate() throws IOException { - CertificateCodec certificateCodec = - new CertificateCodec(config, componentName); - try { - return certificateCodec.readCertificate(); - } catch (CertificateException e) { - throw new IOException(e); - } - } - - /** - * Returns the Certificate corresponding to given certificate serial id if - * exist. Return null if it doesn't exist. - * - * @param certSerialId - Certificate for this CA. - * @return X509CertificateHolder - * @throws CertificateException - usually thrown if this CA is not - * initialized. - * @throws IOException - on Error. 
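The four init states described in the class Javadoc above reduce to a single comparison: verification succeeds or triggers initialization when certificate and key presence agree, and names whichever artifact is missing when they disagree. The standalone sketch referenced earlier, mirroring the truth table documented in verifySelfSignedCA():

    public final class CaInitStateSketch {
      enum VerificationStatus { SUCCESS, MISSING_KEYS, MISSING_CERTIFICATE, INITIALIZE }

      // SUCCESS or INITIALIZE exactly when certStatus == keyStatus; otherwise
      // the result names the missing artifact.
      static VerificationStatus decide(boolean certExists, boolean keyExists) {
        if (certExists == keyExists) {
          return certExists ? VerificationStatus.SUCCESS : VerificationStatus.INITIALIZE;
        }
        return certExists ? VerificationStatus.MISSING_KEYS
            : VerificationStatus.MISSING_CERTIFICATE;
      }

      public static void main(String[] args) {
        System.out.println(decide(true, true));    // SUCCESS
        System.out.println(decide(false, false));  // INITIALIZE
        System.out.println(decide(true, false));   // MISSING_KEYS
        System.out.println(decide(false, true));   // MISSING_CERTIFICATE
      }
    }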
- */ - @Override - public X509Certificate getCertificate(String certSerialId) throws - IOException { - return store.getCertificateByID(new BigInteger(certSerialId), - CertificateStore.CertType.VALID_CERTS); - } - - private KeyPair getCAKeys() throws IOException { - KeyCodec keyCodec = new KeyCodec(config, componentName); - try { - return new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey()); - } catch (InvalidKeySpecException | NoSuchAlgorithmException e) { - throw new IOException(e); - } - } - - @Override - public Future requestCertificate( - PKCS10CertificationRequest csr, - CertificateApprover.ApprovalType approverType) { - LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate(); - LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT); - LocalDate endDate = - temp.plus(config.getDefaultCertDuration()).toLocalDate(); - - CompletableFuture xcertHolder = - approver.inspectCSR(csr); - - if(xcertHolder.isCompletedExceptionally()) { - // This means that approver told us there are things which it disagrees - // with in this Certificate Request. Since the first set of sanity - // checks failed, we just return the future object right here. - return xcertHolder; - } - try { - switch (approverType) { - case MANUAL: - xcertHolder.completeExceptionally(new SCMSecurityException("Manual " + - "approval is not yet implemented.")); - break; - case KERBEROS_TRUSTED: - case TESTING_AUTOMATIC: - X509CertificateHolder xcert; - try { - xcert = signAndStoreCertificate(beginDate, endDate, csr); - } catch (SCMSecurityException e) { - // Certificate with conflicting serial id, retry again may resolve - // this issue. - LOG.error("Certificate storage failed, retrying one more time.", e); - xcert = signAndStoreCertificate(beginDate, endDate, csr); - } - - xcertHolder.complete(xcert); - break; - default: - return null; // cannot happen, keeping checkstyle happy. - } - } catch (CertificateException | IOException | OperatorCreationException e) { - LOG.error("Unable to issue a certificate. {}", e); - xcertHolder.completeExceptionally(new SCMSecurityException(e)); - } - return xcertHolder; - } - - private X509CertificateHolder signAndStoreCertificate(LocalDate beginDate, - LocalDate endDate, PKCS10CertificationRequest csr) throws IOException, - OperatorCreationException, CertificateException { - X509CertificateHolder xcert = approver.sign(config, - getCAKeys().getPrivate(), - getCACertificate(), java.sql.Date.valueOf(beginDate), - java.sql.Date.valueOf(endDate), csr, scmID, clusterID); - store.storeValidCertificate(xcert.getSerialNumber(), - CertificateCodec.getX509Certificate(xcert)); - return xcert; - } - - @Override - public Future requestCertificate(String csr, - CertificateApprover.ApprovalType type) throws IOException { - PKCS10CertificationRequest request = - getCertificationRequest(csr); - return requestCertificate(request, type); - } - - @Override - public Future revokeCertificate(X509Certificate certificate, - CertificateApprover.ApprovalType approverType) - throws SCMSecurityException { - CompletableFuture revoked = new CompletableFuture<>(); - if (certificate == null) { - revoked.completeExceptionally(new SCMSecurityException( - "Certificate cannot be null")); - return revoked; - } - try { - store.revokeCertificate(certificate.getSerialNumber()); - } catch (IOException ex) { - LOG.error("Revoking the certificate failed. {}", ex.getCause()); - throw new SCMSecurityException(ex); - } - return revoked; - } - - /** - * Generates a Self Signed CertificateServer. 
These are the steps in - * generating a Self-Signed CertificateServer. - *

- * 1. Generate a Private/Public Key Pair. 2. Persist to a protected location. - * 3. Generate a SelfSigned Root CertificateServer certificate. - * - * @param securityConfig - Config. - */ - private void generateSelfSignedCA(SecurityConfig securityConfig) throws - NoSuchAlgorithmException, NoSuchProviderException, IOException { - KeyPair keyPair = generateKeys(securityConfig); - generateRootCertificate(securityConfig, keyPair); - } - - /** - * Verify Self-Signed CertificateServer. 1. Check if the Certificate exist. 2. - * Check if the key pair exists. - * - * @param securityConfig -- Config - * @return Verification Status - */ - private VerificationStatus verifySelfSignedCA(SecurityConfig securityConfig) { - /* - The following is the truth table for the States. - True means we have that file False means it is missing. - +--------------+--------+--------+--------------+ - | Certificates | Keys | Result | Function | - +--------------+--------+--------+--------------+ - | True | True | True | Success | - | False | False | True | Initialize | - | True | False | False | Missing Key | - | False | True | False | Missing Cert | - +--------------+--------+--------+--------------+ - - This truth table maps to ~(certs xor keys) or certs == keys - */ - boolean keyStatus = checkIfKeysExist(); - boolean certStatus = checkIfCertificatesExist(); - - if ((certStatus == keyStatus) && (certStatus)) { - return VerificationStatus.SUCCESS; - } - - if ((certStatus == keyStatus) && (!certStatus)) { - return VerificationStatus.INITIALIZE; - } - - // At this point certStatus is not equal to keyStatus. - if (certStatus) { - return VerificationStatus.MISSING_KEYS; - } - - return VerificationStatus.MISSING_CERTIFICATE; - } - - /** - * Returns Keys status. - * - * @return True if the key files exist. - */ - private boolean checkIfKeysExist() { - if (!Files.exists(caKeysPath)) { - return false; - } - - return Files.exists(Paths.get(caKeysPath.toString(), - this.config.getPrivateKeyFileName())); - } - - /** - * Returns certificate Status. - * - * @return True if the Certificate files exist. - */ - private boolean checkIfCertificatesExist() { - if (!Files.exists(caRootX509Path)) { - return false; - } - return Files.exists(Paths.get(caRootX509Path.toString(), - this.config.getCertificateFileName())); - } - - /** - * Based on the Status of the verification, we return a lambda that gets - * executed by the init function of the CA. - * - * @param status - Verification Status. - */ - @VisibleForTesting - Consumer processVerificationStatus( - VerificationStatus status) { - Consumer consumer = null; - switch (status) { - case SUCCESS: - consumer = (arg) -> LOG.info("CertificateServer validation is " + - "successful"); - break; - case MISSING_KEYS: - consumer = (arg) -> { - LOG.error("We have found the Certificate for this CertificateServer, " + - "but keys used by this CertificateServer is missing. This is a " + - "non-recoverable error. Please restart the system after locating " + - "the Keys used by the CertificateServer."); - LOG.error("Exiting due to unrecoverable CertificateServer error."); - throw new IllegalStateException("Missing Keys, cannot continue."); - }; - break; - case MISSING_CERTIFICATE: - consumer = (arg) -> { - LOG.error("We found the keys, but the root certificate for this " + - "CertificateServer is missing. 
Please restart SCM after locating " + - "the " + - "Certificates."); - LOG.error("Exiting due to unrecoverable CertificateServer error."); - throw new IllegalStateException("Missing Root Certs, cannot continue."); - }; - break; - case INITIALIZE: - consumer = (arg) -> { - try { - generateSelfSignedCA(arg); - } catch (NoSuchProviderException | NoSuchAlgorithmException - | IOException e) { - LOG.error("Unable to initialize CertificateServer.", e); - } - VerificationStatus newStatus = verifySelfSignedCA(arg); - if (newStatus != VerificationStatus.SUCCESS) { - LOG.error("Unable to initialize CertificateServer, failed in " + - "verification."); - } - }; - break; - default: - /* Make CheckStyle happy */ - break; - } - return consumer; - } - - /** - * Generates a KeyPair for the Certificate. - * - * @param securityConfig - SecurityConfig. - * @return Key Pair. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - */ - private KeyPair generateKeys(SecurityConfig securityConfig) - throws NoSuchProviderException, NoSuchAlgorithmException, IOException { - HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); - KeyPair keys = keyGenerator.generateKey(); - KeyCodec keyPEMWriter = new KeyCodec(securityConfig, - componentName); - keyPEMWriter.writeKey(keys); - return keys; - } - - /** - * Generates a self-signed Root Certificate for CA. - * - * @param securityConfig - SecurityConfig - * @param key - KeyPair. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - */ - private void generateRootCertificate(SecurityConfig securityConfig, - KeyPair key) throws IOException, SCMSecurityException { - Preconditions.checkNotNull(this.config); - LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate(); - LocalDateTime temp = LocalDateTime.of(beginDate, LocalTime.MIDNIGHT); - LocalDate endDate = - temp.plus(securityConfig.getMaxCertificateDuration()).toLocalDate(); - X509CertificateHolder selfSignedCertificate = - SelfSignedCertificate - .newBuilder() - .setSubject(this.subject) - .setScmID(this.scmID) - .setClusterID(this.clusterID) - .setBeginDate(beginDate) - .setEndDate(endDate) - .makeCA() - .setConfiguration(securityConfig.getConfiguration()) - .setKey(key) - .build(); - - CertificateCodec certCodec = - new CertificateCodec(config, componentName); - certCodec.writeCertificate(selfSignedCertificate); - } - - /** - * This represents the verification status of the CA. Based on this enum - * appropriate action is taken in the Init. - */ - @VisibleForTesting - enum VerificationStatus { - SUCCESS, /* All artifacts needed by CertificateServer is present */ - MISSING_KEYS, /* Private key is missing, certificate Exists.*/ - MISSING_CERTIFICATE, /* Keys exist, but root certificate missing.*/ - INITIALIZE /* All artifacts are missing, we should init the system. */ - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java deleted file mode 100644 index 53eb98fbdc2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import org.bouncycastle.asn1.x509.Extension; - -import java.util.function.BiFunction; - -import static java.lang.Boolean.TRUE; - -/** - * CA Profile, this is needed when SCM does HA. - * A place holder class indicating what we need to do when we support issuing - * CA certificates to other SCMs in HA mode. - */ -public class DefaultCAProfile extends DefaultProfile { - static final BiFunction - VALIDATE_BASIC_CONSTRAINTS = (e, b) -> TRUE; - static final BiFunction - VALIDATE_CRL_NUMBER = (e, b) -> TRUE; - static final BiFunction - VALIDATE_REASON_CODE = (e, b) -> TRUE; - static final BiFunction - VALIDATE_DELTA_CRL_INDICATOR = (e, b) -> TRUE; - static final BiFunction - VALIDATE_NAME_CONSTRAINTS = (e, b) -> TRUE; - static final BiFunction - VALIDATE_CRL_DISTRIBUTION_POINTS = (e, b) -> TRUE; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java deleted file mode 100644 index 5fdb6f7d966..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultProfile.java +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.apache.commons.validator.routines.DomainValidator; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.ExtendedKeyUsage; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.AbstractMap.SimpleEntry; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.BiFunction; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.lang.Boolean.TRUE; -import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_clientAuth; -import static org.bouncycastle.asn1.x509.KeyPurposeId.id_kp_serverAuth; - -/** - * Ozone PKI profile. - *

- * This PKI profile is invoked by SCM CA to make sure that certificates issued - * by SCM CA are constrained - */ -public class DefaultProfile implements PKIProfile { - static final BiFunction - VALIDATE_KEY_USAGE = DefaultProfile::validateKeyUsage; - static final BiFunction - VALIDATE_AUTHORITY_KEY_IDENTIFIER = (e, b) -> TRUE; - static final BiFunction - VALIDATE_LOGO_TYPE = (e, b) -> TRUE; - private static final Logger LOG = - LoggerFactory.getLogger(DefaultProfile.class); - static final BiFunction - VALIDATE_SAN = DefaultProfile::validateSubjectAlternativeName; - static final BiFunction - VALIDATE_EXTENDED_KEY_USAGE = DefaultProfile::validateExtendedKeyUsage; - // If we decide to add more General Names, we should add those here and - // also update the logic in validateGeneralName function. - private static final int[] GENERAL_NAMES = { - GeneralName.dNSName, - GeneralName.iPAddress, - }; - // Map that handles all the Extensions lookup and validations. - private static final Map> EXTENSIONS_MAP = Stream.of( - new SimpleEntry<>(Extension.keyUsage, VALIDATE_KEY_USAGE), - new SimpleEntry<>(Extension.subjectAlternativeName, VALIDATE_SAN), - new SimpleEntry<>(Extension.authorityKeyIdentifier, - VALIDATE_AUTHORITY_KEY_IDENTIFIER), - new SimpleEntry<>(Extension.extendedKeyUsage, - VALIDATE_EXTENDED_KEY_USAGE), - // Ozone certs are issued only for the use of Ozone. - // However, some users will discover that this is a full scale CA - // and decide to mis-use these certs for other purposes. - // To discourage usage of these certs for other purposes, we can leave - // the Ozone Logo inside these certs. So if a browser is used to - // connect these logos will show up. - // https://www.ietf.org/rfc/rfc3709.txt - new SimpleEntry<>(Extension.logoType, VALIDATE_LOGO_TYPE)) - .collect(Collectors.toMap(SimpleEntry::getKey, - SimpleEntry::getValue)); - // If we decide to add more General Names, we should add those here and - // also update the logic in validateGeneralName function. - private static final KeyPurposeId[] EXTENDED_KEY_USAGE = { - id_kp_serverAuth, // TLS Web server authentication - id_kp_clientAuth, // TLS Web client authentication - - }; - private final Set extendKeyPurposeSet; - private Set generalNameSet; - - /** - * Construct DefaultProfile. - */ - public DefaultProfile() { - generalNameSet = new HashSet<>(); - for (int val : GENERAL_NAMES) { - generalNameSet.add(val); - } - extendKeyPurposeSet = - new HashSet<>(Arrays.asList(EXTENDED_KEY_USAGE)); - - } - - /** - * This function validates that the KeyUsage Bits are subset of the Bits - * permitted by the ozone profile. - * - * @param ext - KeyUsage Extension. - * @param profile - PKI Profile - In this case this profile. - * @return True, if the request key usage is a subset, false otherwise. - */ - private static Boolean validateKeyUsage(Extension ext, PKIProfile profile) { - KeyUsage keyUsage = profile.getKeyUsage(); - KeyUsage requestedUsage = KeyUsage.getInstance(ext.getParsedValue()); - BitSet profileBitSet = BitSet.valueOf(keyUsage.getBytes()); - BitSet requestBitSet = BitSet.valueOf(requestedUsage.getBytes()); - // Check if the requestBitSet is a subset of profileBitSet - // p & r == r should be equal if it is a subset. - profileBitSet.and(requestBitSet); - return profileBitSet.equals(requestBitSet); - } - - /** - * Validates the SubjectAlternative names in the Certificate. - * - * @param ext - Extension - SAN, which allows us to get the SAN names. - * @param profile - This profile. 
- * @return - True if the request contains only SANs, General names that we - * support. False otherwise. - */ - private static Boolean validateSubjectAlternativeName(Extension ext, - PKIProfile profile) { - if (ext.isCritical()) { - // SAN extensions should not be marked as critical under ozone profile. - LOG.error("SAN extension marked as critical in the Extension. {}", - GeneralNames.getInstance(ext.getParsedValue()).toString()); - return false; - } - GeneralNames generalNames = GeneralNames.getInstance(ext.getParsedValue()); - for (GeneralName name : generalNames.getNames()) { - try { - if (!profile.validateGeneralName(name.getTagNo(), - name.getName().toString())) { - return false; - } - } catch (UnknownHostException e) { - LOG.error("IP address validation failed." - + name.getName().toString(), e); - return false; - } - } - return true; - } - - /** - * This function validates that the KeyUsage Bits are subset of the Bits - * permitted by the ozone profile. - * - * @param ext - KeyUsage Extension. - * @param profile - PKI Profile - In this case this profile. - * @return True, if the request key usage is a subset, false otherwise. - */ - private static Boolean validateExtendedKeyUsage(Extension ext, - PKIProfile profile) { - if (ext.isCritical()) { - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - // Ozone profile opts to mark this extension as non-critical. - LOG.error("Extended Key usage marked as critical."); - return false; - } - ExtendedKeyUsage extendedKeyUsage = - ExtendedKeyUsage.getInstance(ext.getParsedValue()); - for (KeyPurposeId id : extendedKeyUsage.getUsages()) { - if (!profile.validateExtendedKeyUsage(id)) { - return false; - } - } - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public int[] getGeneralNames() { - return Arrays.copyOfRange(GENERAL_NAMES, 0, GENERAL_NAMES.length); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isSupportedGeneralName(int generalName) { - return generalNameSet.contains(generalName); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateGeneralName(int type, String value) { - // TODO : We should add more validation for IP address, for example - // it matches the local network, and domain matches where the cluster - // exits. - if (!isSupportedGeneralName(type)) { - return false; - } - switch (type) { - case GeneralName.iPAddress: - - // We need DatatypeConverter conversion, since the original CSR encodes - // an IP address int a Hex String, for example 8.8.8.8 is encoded as - // #08080808. Value string is always preceded by "#", we will strip - // that before passing it on. - - // getByAddress call converts the IP address to hostname/ipAddress format. - // if the hostname cannot determined then it will be /ipAddress. - - // TODO: Fail? if we cannot resolve the Hostname? - try { - final InetAddress byAddress = InetAddress.getByAddress( - Hex.decodeHex(value.substring(1))); - if (LOG.isDebugEnabled()) { - LOG.debug("Host Name/IP Address : {}", byAddress.toString()); - } - return true; - } catch (UnknownHostException | DecoderException e) { - return false; - } - case GeneralName.dNSName: - return DomainValidator.getInstance().isValid(value); - default: - // This should not happen, since it guarded via isSupportedGeneralName. 
- LOG.error("Unexpected type in General Name (int value) : " + type); - return false; - } - } - - @Override - public boolean validateExtendedKeyUsage(KeyPurposeId id) { - return extendKeyPurposeSet.contains(id); - } - - /** - * {@inheritDoc} - */ - @Override - public ASN1ObjectIdentifier[] getSupportedExtensions() { - return EXTENSIONS_MAP.keySet().toArray(new ASN1ObjectIdentifier[0]); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isSupportedExtension(Extension extension) { - return EXTENSIONS_MAP.containsKey(extension.getExtnId()); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateExtension(Extension extension) { - Preconditions.checkNotNull(extension, "Extension cannot be null"); - - if (!isSupportedExtension(extension)) { - LOG.error("Unsupported Extension found: {} ", - extension.getExtnId().getId()); - return false; - } - - BiFunction func = - EXTENSIONS_MAP.get(extension.getExtnId()); - - if (func != null) { - return func.apply(extension, this); - } - return false; - } - - /** - * {@inheritDoc} - */ - @Override - public KeyUsage getKeyUsage() { - return new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment - | KeyUsage.dataEncipherment | KeyUsage.keyAgreement); - } - - /** - * {@inheritDoc} - */ - @Override - public RDN[] getRDNs() { - return new RDN[0]; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isValidRDN(RDN distinguishedName) { - // TODO: Right now we just approve all strings. - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean validateRDN(RDN name) { - return true; - } - - @Override - public boolean isCA() { - return false; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java deleted file mode 100644 index c3ff198cd70..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/PKIProfile.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; - -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.asn1.x509.KeyUsage; - -import java.net.UnknownHostException; - -/** - * Base class for profile rules. Generally profiles are documents that define - * the PKI policy. In HDDS/Ozone world, we have chosen to make PKIs - * executable code. 
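To make the two less obvious tricks in the DefaultProfile deletion above concrete: the key-usage check treats both masks as BitSets and accepts a request only when it is a subset of the profile mask (p & r == r), and the iPAddress branch strips the leading '#' from the hex-encoded SAN value before decoding it into raw address bytes. A minimal standalone sketch of both ideas, assuming only commons-codec and Bouncy Castle on the classpath; the names ProfileCheckSketch, isSubset and decodeGeneralNameIp are illustrative, not part of the deleted sources.

import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import org.bouncycastle.asn1.x509.KeyUsage;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.BitSet;

public final class ProfileCheckSketch {

  // True when every bit set in 'requested' is also set in 'allowed' (p & r == r).
  static boolean isSubset(KeyUsage allowed, KeyUsage requested) {
    BitSet profileBits = BitSet.valueOf(allowed.getBytes());
    BitSet requestBits = BitSet.valueOf(requested.getBytes());
    profileBits.and(requestBits);
    return profileBits.equals(requestBits);
  }

  // Decodes a SAN iPAddress value such as "#08080808" (8.8.8.8) into an InetAddress.
  static InetAddress decodeGeneralNameIp(String hexValue)
      throws DecoderException, UnknownHostException {
    return InetAddress.getByAddress(Hex.decodeHex(hexValue.substring(1)));
  }

  public static void main(String[] args) throws Exception {
    KeyUsage allowed = new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment);
    KeyUsage requested = new KeyUsage(KeyUsage.digitalSignature);
    System.out.println(isSubset(allowed, requested));      // true
    System.out.println(decodeGeneralNameIp("#08080808"));  // prints /8.8.8.8
  }
}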
So if an end-user wants to use a custom profile or one of - * the existing profile like the list below, they are free to implement a - * custom profile. - * - * PKIX - Internet PKI profile. - * FPKI - (US) Federal PKI profile. - * MISSI - US DoD profile. - * ISO 15782 - Banking - Certificate Management Part 1: Public Key - * Certificates. - * TeleTrust/MailTrusT - German MailTrusT profile for TeleTrusT (it - * really is - * capitalised that way). - * German SigG Profile - Profile to implement the German digital - * signature law - * ISIS Profile - Another German profile. - * Australian Profile - Profile for the Australian PKAF - * SS 61 43 31 Electronic ID Certificate - Swedish profile. - * FINEID S3 - Finnish profile. - * ANX Profile - Automotive Network Exchange profile. - * Microsoft Profile - This isn't a real profile, but windows uses this. - */ -public interface PKIProfile { - - /** - * Returns the list of General Names supported by this profile. - * @return - an Array of supported General Names by this certificate profile. - */ - int[] getGeneralNames(); - - /** - * Checks if a given General Name is permitted in this profile. - * @param generalName - General name. - * @return true if it is allowed, false otherwise. - */ - boolean isSupportedGeneralName(int generalName); - - /** - * Allows the profile to dictate what value ranges are valid. - * @param type - Type of the General Name. - * @param value - Value of the General Name. - * @return - true if the value is permitted, false otherwise. - * @throws UnknownHostException - on Error in IP validation. - */ - boolean validateGeneralName(int type, String value) - throws UnknownHostException; - - /** - * Returns an array of Object identifiers for extensions supported by this - * profile. - * @return an Array of ASN1ObjectIdentifier for the supported extensions. - */ - ASN1ObjectIdentifier[] getSupportedExtensions(); - - /** - * Checks if the this extension is permitted in this profile. - * @param extension - Extension to check for. - * @return - true if this extension is supported, false otherwise. - */ - boolean isSupportedExtension(Extension extension); - - /** - * Checks if the extension has the value which this profile approves. - * @param extension - Extension to validate. - * @return - True if the extension is acceptable, false otherwise. - */ - boolean validateExtension(Extension extension); - - /** - * Validate the Extended Key Usage. - * @param id - KeyPurpose ID - * @return true, if this is a supported Purpose, false otherwise. - */ - boolean validateExtendedKeyUsage(KeyPurposeId id); - - /** - * Returns the permitted Key usage mask while using this profile. - * @return KeyUsage - */ - KeyUsage getKeyUsage(); - - /** - * Gets the supported list of RDNs supported by this profile. - * @return Array of RDNs. - */ - RDN[] getRDNs(); - - /** - * Returns true if this Relative Distinguished Name component is allowed in - * this profile. - * @param distinguishedName - RDN to check. - * @return boolean, True if this RDN is allowed, false otherwise. - */ - boolean isValidRDN(RDN distinguishedName); - - /** - * Allows the profile to control the value set of the RDN. Profile can - * reject a RDN name if needed. - * @param name - RDN. - * @return true if the name is acceptable to this profile, false otherwise. - */ - boolean validateRDN(RDN name); - - /** - * True if the profile we are checking is for issuing a CA certificate. - * @return True, if the profile used is for CA, false otherwise. 
- */ - boolean isCA(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java deleted file mode 100644 index 36c885d3108..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/package-info.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * PKI PKIProfile package supports different kind of profiles that certificates - * can support. If you are not familiar with PKI profiles, there is an - * excellent introduction at - * - * https://www.cs.auckland.ac.nz/~pgut001/pubs/x509guide.txt - * - * At high level, the profiles in this directory define what kinds of - * Extensions, General names , Key usage and critical extensions are - * permitted when the CA is functional. - * - * An excellent example of a profile would be ozone profile if you would - * like to see a reference to create your own profiles. - */ -package org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java deleted file mode 100644 index af53904eeb6..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Classes related to Certificate Life Cycle or Certificate Authority Server. 
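Because the profiles in this package are plain Java classes, a deployment that wants a stricter policy than DefaultProfile can subclass it rather than edit it. A hypothetical example, assuming it sits in the same PKIProfiles package as DefaultProfile (the name DnsOnlyProfile is illustrative), that reuses the default Ozone profile but admits only dNSName entries in subject alternative names could look like this:

import org.bouncycastle.asn1.x509.GeneralName;

/** Hypothetical profile: the default Ozone profile narrowed to DNS names only. */
public class DnsOnlyProfile extends DefaultProfile {

  @Override
  public int[] getGeneralNames() {
    return new int[] {GeneralName.dNSName};
  }

  @Override
  public boolean isSupportedGeneralName(int generalName) {
    return generalName == GeneralName.dNSName;
  }
}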
- */ -package org.apache.hadoop.hdds.security.x509.certificate.authority; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java deleted file mode 100644 index 34b4930fa7d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/CertificateClient.java +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; - -import java.io.InputStream; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.CertStore; -import java.security.cert.X509Certificate; -import java.util.List; - -/** - * Certificate client provides and interface to certificate operations that - * needs to be performed by all clients in the Ozone eco-system. - */ -public interface CertificateClient { - - /** - * Returns the private key of the specified component if it exists on the - * local system. - * - * @return private key or Null if there is no data. - */ - PrivateKey getPrivateKey(); - - /** - * Returns the public key of the specified component if it exists on the local - * system. - * - * @return public key or Null if there is no data. - */ - PublicKey getPublicKey(); - - /** - * Returns the certificate of the specified component if it exists on the - * local system. - * @param certSerialId - * - * @return certificate or Null if there is no data. - */ - X509Certificate getCertificate(String certSerialId) - throws CertificateException; - - /** - * Returns the certificate of the specified component if it exists on the - * local system. - * - * @return certificate or Null if there is no data. - */ - X509Certificate getCertificate(); - - /** - * Return the latest CA certificate known to the client. - * @return latest ca certificate known to the client. - */ - X509Certificate getCACertificate(); - - /** - * Verifies if this certificate is part of a trusted chain. - * @param certificate - certificate. - * @return true if it trusted, false otherwise. - */ - boolean verifyCertificate(X509Certificate certificate); - - /** - * Creates digital signature over the data stream using the components private - * key. - * - * @param stream - Data stream to sign. - * @return byte array - containing the signature. - * @throws CertificateException - on Error. 
- */ - byte[] signDataStream(InputStream stream) - throws CertificateException; - - byte[] signData(byte[] data) throws CertificateException; - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param stream - Data Stream. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - boolean verifySignature(InputStream stream, byte[] signature, - X509Certificate cert) throws CertificateException; - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * @param data - Data in byte array. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - boolean verifySignature(byte[] data, byte[] signature, - X509Certificate cert) throws CertificateException; - - /** - * Returns a CSR builder that can be used to creates a Certificate sigining - * request. - * - * @return CertificateSignRequest.Builder - */ - CertificateSignRequest.Builder getCSRBuilder() throws CertificateException; - - /** - * Get the certificate of well-known entity from SCM. - * - * @param query - String Query, please see the implementation for the - * discussion on the query formats. - * @return X509Certificate or null if not found. - */ - X509Certificate queryCertificate(String query); - - /** - * Stores the Certificate for this client. Don't use this api to add - * trusted certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @throws CertificateException - on Error. - * - */ - void storeCertificate(String pemEncodedCert, boolean force) - throws CertificateException; - - /** - * Stores the Certificate for this client. Don't use this api to add - * trusted certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @param caCert - Is CA certificate. - * @throws CertificateException - on Error. - * - */ - void storeCertificate(String pemEncodedCert, boolean force, boolean caCert) - throws CertificateException; - - /** - * Stores the trusted chain of certificates. - * - * @param certStore - Cert Store. - * @throws CertificateException - on Error. - */ - void storeTrustChain(CertStore certStore) throws CertificateException; - - /** - * Stores the trusted chain of certificates. - * - * @param certificates - List of Certificates. - - * @throws CertificateException - on Error. - */ - void storeTrustChain(List certificates) - throws CertificateException; - - /** - * Initialize certificate client. - * - * */ - InitResponse init() throws CertificateException; - - /** - * Represents initialization response of client. - * 1. SUCCESS: Means client is initialized successfully and all required - * files are in expected state. - * 2. FAILURE: Initialization failed due to some unrecoverable error. - * 3. GETCERT: Bootstrap of keypair is successful but certificate is not - * found. Client should request SCM signed certificate. 
- * - */ - enum InitResponse { - SUCCESS, - FAILURE, - GETCERT, - RECOVER - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java deleted file mode 100644 index 76986586d34..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DNCertificateClient.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; - -/** - * Certificate client for DataNodes. - */ -public class DNCertificateClient extends DefaultCertificateClient { - - private static final Logger LOG = - LoggerFactory.getLogger(DNCertificateClient.class); - - public static final String COMPONENT_NAME = "dn"; - - public DNCertificateClient(SecurityConfig securityConfig, - String certSerialId) { - super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - } - - public DNCertificateClient(SecurityConfig securityConfig) { - super(securityConfig, LOG, null, COMPONENT_NAME); - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - return super.getCSRBuilder() - .setDigitalEncryption(false) - .setDigitalSignature(false); - } - - public Logger getLogger() { - return LOG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java deleted file mode 100644 index ff99e080c49..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ /dev/null @@ -1,828 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
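Taken together, the CertificateClient contract above is meant to be driven once at component start-up: call init(), and on GETCERT build a CSR with getCSRBuilder() for SCM to sign, on FAILURE abort, and on SUCCESS carry on with the material already on disk. A rough usage sketch against that interface; bootstrap() and the commented-out SCM call are placeholders, not methods from the deleted code.

import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;

final class CertClientBootstrapSketch {

  /** Illustrative start-up handling for any CertificateClient implementation. */
  static void bootstrap(CertificateClient client) throws CertificateException {
    switch (client.init()) {
    case SUCCESS:
      // Key pair and certificate are already on disk and consistent.
      break;
    case GETCERT:
      // Key pair is ready but there is no certificate yet: build a CSR for SCM to sign.
      CertificateSignRequest.Builder csr = client.getCSRBuilder();
      // submit 'csr' to SCM, then persist the returned PEM certificate, e.g.:
      // client.storeCertificate(pemFromScm, true);
      break;
    default:
      // FAILURE (or RECOVER): the on-disk key material is inconsistent.
      throw new IllegalStateException("Certificate client initialization failed");
    }
  }
}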
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FilenameUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.math.NumberUtils; -import org.apache.commons.validator.routines.DomainValidator; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.bouncycastle.cert.X509CertificateHolder; -import org.slf4j.Logger; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.CertStore; -import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; -import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.*; - -/** - * Default Certificate client implementation. It provides certificate - * operations that needs to be performed by certificate clients in the Ozone - * eco-system. 
- */ -public abstract class DefaultCertificateClient implements CertificateClient { - - private static final String CERT_FILE_NAME_FORMAT = "%s.crt"; - private static final String CA_CERT_PREFIX = "CA-"; - private static final int CA_CERT_PREFIX_LEN = 3; - private final Logger logger; - private final SecurityConfig securityConfig; - private final KeyCodec keyCodec; - private PrivateKey privateKey; - private PublicKey publicKey; - private X509Certificate x509Certificate; - private Map certificateMap; - private String certSerialId; - private String caCertId; - private String component; - - DefaultCertificateClient(SecurityConfig securityConfig, Logger log, - String certSerialId, String component) { - Objects.requireNonNull(securityConfig); - this.securityConfig = securityConfig; - keyCodec = new KeyCodec(securityConfig, component); - this.logger = log; - this.certificateMap = new ConcurrentHashMap<>(); - this.certSerialId = certSerialId; - this.component = component; - - loadAllCertificates(); - } - - /** - * Load all certificates from configured location. - * */ - private void loadAllCertificates() { - // See if certs directory exists in file system. - Path certPath = securityConfig.getCertificateLocation(component); - if (Files.exists(certPath) && Files.isDirectory(certPath)) { - getLogger().info("Loading certificate from location:{}.", - certPath); - File[] certFiles = certPath.toFile().listFiles(); - - if (certFiles != null) { - CertificateCodec certificateCodec = - new CertificateCodec(securityConfig, component); - long latestCaCertSerailId = -1L; - for (File file : certFiles) { - if (file.isFile()) { - try { - X509CertificateHolder x509CertificateHolder = certificateCodec - .readCertificate(certPath, file.getName()); - X509Certificate cert = - CertificateCodec.getX509Certificate(x509CertificateHolder); - if (cert != null && cert.getSerialNumber() != null) { - if (cert.getSerialNumber().toString().equals(certSerialId)) { - x509Certificate = cert; - } - certificateMap.putIfAbsent(cert.getSerialNumber().toString(), - cert); - if (file.getName().startsWith(CA_CERT_PREFIX)) { - String certFileName = FilenameUtils.getBaseName( - file.getName()); - long tmpCaCertSerailId = NumberUtils.toLong( - certFileName.substring(CA_CERT_PREFIX_LEN)); - if (tmpCaCertSerailId > latestCaCertSerailId) { - latestCaCertSerailId = tmpCaCertSerailId; - } - } - getLogger().info("Added certificate from file:{}.", - file.getAbsolutePath()); - } else { - getLogger().error("Error reading certificate from file:{}", - file); - } - } catch (java.security.cert.CertificateException | IOException e) { - getLogger().error("Error reading certificate from file:{}.", - file.getAbsolutePath(), e); - } - } - } - if (latestCaCertSerailId != -1) { - caCertId = Long.toString(latestCaCertSerailId); - } - } - } - } - - /** - * Returns the private key of the specified if it exists on the local - * system. - * - * @return private key or Null if there is no data. - */ - @Override - public PrivateKey getPrivateKey() { - if (privateKey != null) { - return privateKey; - } - - Path keyPath = securityConfig.getKeyLocation(component); - if (OzoneSecurityUtil.checkIfFileExist(keyPath, - securityConfig.getPrivateKeyFileName())) { - try { - privateKey = keyCodec.readPrivateKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while getting private key.", e); - } - } - return privateKey; - } - - /** - * Returns the public key of the specified if it exists on the local system. 
- * - * @return public key or Null if there is no data. - */ - @Override - public PublicKey getPublicKey() { - if (publicKey != null) { - return publicKey; - } - - Path keyPath = securityConfig.getKeyLocation(component); - if (OzoneSecurityUtil.checkIfFileExist(keyPath, - securityConfig.getPublicKeyFileName())) { - try { - publicKey = keyCodec.readPublicKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while getting public key.", e); - } - } - return publicKey; - } - - /** - * Returns the default certificate of given client if it exists. - * - * @return certificate or Null if there is no data. - */ - @Override - public X509Certificate getCertificate() { - if (x509Certificate != null) { - return x509Certificate; - } - - if (certSerialId == null) { - getLogger().error("Default certificate serial id is not set. Can't " + - "locate the default certificate for this client."); - return null; - } - // Refresh the cache from file system. - loadAllCertificates(); - if (certificateMap.containsKey(certSerialId)) { - x509Certificate = certificateMap.get(certSerialId); - } - return x509Certificate; - } - - /** - * Return the latest CA certificate known to the client. - * @return latest ca certificate known to the client. - */ - @Override - public X509Certificate getCACertificate() { - if (caCertId != null) { - return certificateMap.get(caCertId); - } - return null; - } - - /** - * Returns the certificate with the specified certificate serial id if it - * exists else try to get it from SCM. - * @param certId - * - * @return certificate or Null if there is no data. - */ - @Override - public X509Certificate getCertificate(String certId) - throws CertificateException { - // Check if it is in cache. - if (certificateMap.containsKey(certId)) { - return certificateMap.get(certId); - } - // Try to get it from SCM. - return this.getCertificateFromScm(certId); - } - - /** - * Get certificate from SCM and store it in local file system. - * @param certId - * @return certificate - */ - private X509Certificate getCertificateFromScm(String certId) - throws CertificateException { - - getLogger().info("Getting certificate with certSerialId:{}.", - certId); - try { - SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient( - (OzoneConfiguration) securityConfig.getConfiguration()); - String pemEncodedCert = - scmSecurityProtocolClient.getCertificate(certId); - this.storeCertificate(pemEncodedCert, true); - return CertificateCodec.getX509Certificate(pemEncodedCert); - } catch (Exception e) { - getLogger().error("Error while getting Certificate with " + - "certSerialId:{} from scm.", certId, e); - throw new CertificateException("Error while getting certificate for " + - "certSerialId:" + certId, e, CERTIFICATE_ERROR); - } - } - - /** - * Verifies if this certificate is part of a trusted chain. - * - * @param certificate - certificate. - * @return true if it trusted, false otherwise. - */ - @Override - public boolean verifyCertificate(X509Certificate certificate) { - throw new UnsupportedOperationException("Operation not supported."); - } - - /** - * Creates digital signature over the data stream using the s private key. - * - * @param stream - Data stream to sign. - * @throws CertificateException - on Error. 
- */ - @Override - public byte[] signDataStream(InputStream stream) - throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initSign(getPrivateKey()); - byte[] buffer = new byte[1024 * 4]; - - int len; - while (-1 != (len = stream.read(buffer))) { - sign.update(buffer, 0, len); - } - return sign.sign(); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException | IOException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGN_ERROR); - } - } - - /** - * Creates digital signature over the data stream using the s private key. - * - * @param data - Data to sign. - * @throws CertificateException - on Error. - */ - @Override - public byte[] signData(byte[] data) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - - sign.initSign(getPrivateKey()); - sign.update(data); - - return sign.sign(); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGN_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param stream - Data Stream. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - @Override - public boolean verifySignature(InputStream stream, byte[] signature, - X509Certificate cert) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(cert); - byte[] buffer = new byte[1024 * 4]; - - int len; - while (-1 != (len = stream.read(buffer))) { - sign.update(buffer, 0, len); - } - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException | IOException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param data - Data in byte array. - * @param signature - Byte Array containing the signature. - * @param cert - Certificate of the Signer. - * @return true if verified, false if not. - */ - @Override - public boolean verifySignature(byte[] data, byte[] signature, - X509Certificate cert) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(cert); - sign.update(data); - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Verifies a digital Signature, given the signature and the certificate of - * the signer. - * - * @param data - Data in byte array. 
- * @param signature - Byte Array containing the signature. - * @param pubKey - Certificate of the Signer. - * @return true if verified, false if not. - */ - private boolean verifySignature(byte[] data, byte[] signature, - PublicKey pubKey) throws CertificateException { - try { - Signature sign = Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - sign.initVerify(pubKey); - sign.update(data); - return sign.verify(signature); - } catch (NoSuchAlgorithmException | NoSuchProviderException - | InvalidKeyException | SignatureException e) { - getLogger().error("Error while signing the stream", e); - throw new CertificateException("Error while signing the stream", e, - CRYPTO_SIGNATURE_VERIFICATION_ERROR); - } - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setConfiguration(securityConfig.getConfiguration()); - try { - DomainValidator validator = DomainValidator.getInstance(); - // Add all valid ips. - OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( - ip -> { - builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { - builder.addDnsName(ip.getCanonicalHostName()); - } - }); - } catch (IOException e) { - throw new CertificateException("Error while adding ip to CSR builder", - e, CSR_ERROR); - } - return builder; - } - - /** - * Get the certificate of well-known entity from SCM. - * - * @param query - String Query, please see the implementation for the - * discussion on the query formats. - * @return X509Certificate or null if not found. - */ - @Override - public X509Certificate queryCertificate(String query) { - // TODO: - throw new UnsupportedOperationException("Operation not supported"); - } - - /** - * Stores the Certificate for this client. Don't use this api to add trusted - * certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @throws CertificateException - on Error. - * - */ - @Override - public void storeCertificate(String pemEncodedCert, boolean force) - throws CertificateException { - this.storeCertificate(pemEncodedCert, force, false); - } - - /** - * Stores the Certificate for this client. Don't use this api to add trusted - * certificates of others. - * - * @param pemEncodedCert - pem encoded X509 Certificate - * @param force - override any existing file - * @param caCert - Is CA certificate. - * @throws CertificateException - on Error. 
- * - */ - @Override - public void storeCertificate(String pemEncodedCert, boolean force, - boolean caCert) throws CertificateException { - CertificateCodec certificateCodec = new CertificateCodec(securityConfig, - component); - try { - Path basePath = securityConfig.getCertificateLocation(component); - - X509Certificate cert = - CertificateCodec.getX509Certificate(pemEncodedCert); - String certName = String.format(CERT_FILE_NAME_FORMAT, - cert.getSerialNumber().toString()); - - if(caCert) { - certName = CA_CERT_PREFIX + certName; - caCertId = cert.getSerialNumber().toString(); - } - - certificateCodec.writeCertificate(basePath, certName, - pemEncodedCert, force); - certificateMap.putIfAbsent(cert.getSerialNumber().toString(), cert); - } catch (IOException | java.security.cert.CertificateException e) { - throw new CertificateException("Error while storing certificate.", e, - CERTIFICATE_ERROR); - } - } - - /** - * Stores the trusted chain of certificates for a specific . - * - * @param ks - Key Store. - * @throws CertificateException - on Error. - */ - @Override - public synchronized void storeTrustChain(CertStore ks) - throws CertificateException { - throw new UnsupportedOperationException("Operation not supported."); - } - - - /** - * Stores the trusted chain of certificates for a specific . - * - * @param certificates - List of Certificates. - * @throws CertificateException - on Error. - */ - @Override - public synchronized void storeTrustChain(List certificates) - throws CertificateException { - throw new UnsupportedOperationException("Operation not supported."); - } - - /** - * Defines 8 cases of initialization. - * Each case specifies objects found. - * 0. NONE Keypair as well as certificate not found. - * 1. CERT Certificate found but keypair missing. - * 2. PUBLIC_KEY Public key found but private key and - * certificate is missing. - * 3. PUBLICKEY_CERT Only public key and certificate is present. - * 4. PRIVATE_KEY Only private key is present. - * 5. PRIVATEKEY_CERT Only private key and certificate is present. - * 6. PUBLICKEY_PRIVATEKEY indicates private and public key were read - * successfully from configured location but - * Certificate. - * 7. All Keypair as well as certificate is present. - * - * */ - protected enum InitCase { - NONE, - CERT, - PUBLIC_KEY, - PUBLICKEY_CERT, - PRIVATE_KEY, - PRIVATEKEY_CERT, - PUBLICKEY_PRIVATEKEY, - ALL - } - - /** - * - * Initializes client by performing following actions. - * 1. Create key dir if not created already. - * 2. Generates and stores a keypair. - * 3. Try to recover public key if private key and certificate is present - * but public key is missing. - * - * Truth table: - * +--------------+-----------------+--------------+----------------+ - * | Private Key | Public Keys | Certificate | Result | - * +--------------+-----------------+--------------+----------------+ - * | False (0) | False (0) | False (0) | GETCERT 000 | - * | False (0) | False (0) | True (1) | FAILURE 001 | - * | False (0) | True (1) | False (0) | FAILURE 010 | - * | False (0) | True (1) | True (1) | FAILURE 011 | - * | True (1) | False (0) | False (0) | FAILURE 100 | - * | True (1) | False (0) | True (1) | SUCCESS 101 | - * | True (1) | True (1) | False (0) | GETCERT 110 | - * | True (1) | True (1) | True (1) | SUCCESS 111 | - * +--------------+-----------------+--------------+----------------+ - * - * @return InitResponse - * Returns FAILURE in following cases: - * 1. If private key is missing but public key or certificate is available. - * 2. 
If public key and certificate is missing. - * - * Returns SUCCESS in following cases: - * 1. If keypair as well certificate is available. - * 2. If private key and certificate is available and public key is - * recovered successfully. - * - * Returns GETCERT in following cases: - * 1. First time when keypair and certificate is not available, keypair - * will be generated and stored at configured location. - * 2. When keypair (public/private key) is available but certificate is - * missing. - * - */ - @Override - public synchronized InitResponse init() throws CertificateException { - int initCase = 0; - PrivateKey pvtKey= getPrivateKey(); - PublicKey pubKey = getPublicKey(); - X509Certificate certificate = getCertificate(); - - if(pvtKey != null){ - initCase = initCase | 1<<2; - } - if(pubKey != null){ - initCase = initCase | 1<<1; - } - if(certificate != null){ - initCase = initCase | 1; - } - getLogger().info("Certificate client init case: {}", initCase); - Preconditions.checkArgument(initCase < 8, "Not a " + - "valid case."); - InitCase init = InitCase.values()[initCase]; - return handleCase(init); - } - - /** - * Default handling of each {@link InitCase}. - * */ - protected InitResponse handleCase(InitCase init) - throws CertificateException { - switch (init) { - case NONE: - getLogger().info("Creating keypair for client as keypair and " + - "certificate not found."); - bootstrapClientKeys(); - return GETCERT; - case CERT: - getLogger().error("Private key not found, while certificate is still" + - " present. Delete keypair and try again."); - return FAILURE; - case PUBLIC_KEY: - getLogger().error("Found public key but private key and certificate " + - "missing."); - return FAILURE; - case PRIVATE_KEY: - getLogger().info("Found private key but public key and certificate " + - "is missing."); - // TODO: Recovering public key from private might be possible in some - // cases. - return FAILURE; - case PUBLICKEY_CERT: - getLogger().error("Found public key and certificate but private " + - "key is missing."); - return FAILURE; - case PRIVATEKEY_CERT: - getLogger().info("Found private key and certificate but public key" + - " missing."); - if (recoverPublicKey()) { - return SUCCESS; - } else { - getLogger().error("Public key recovery failed."); - return FAILURE; - } - case PUBLICKEY_PRIVATEKEY: - getLogger().info("Found private and public key but certificate is" + - " missing."); - if (validateKeyPair(getPublicKey())) { - return GETCERT; - } else { - getLogger().info("Keypair validation failed."); - return FAILURE; - } - case ALL: - getLogger().info("Found certificate file along with KeyPair."); - if (validateKeyPairAndCertificate()) { - return SUCCESS; - } else { - return FAILURE; - } - default: - getLogger().error("Unexpected case: {} (private/public/cert)", - Integer.toBinaryString(init.ordinal())); - - return FAILURE; - } - } - - /** - * Validate keypair and certificate. - * */ - protected boolean validateKeyPairAndCertificate() throws - CertificateException { - if (validateKeyPair(getPublicKey())) { - getLogger().info("Keypair validated."); - // TODO: Certificates cryptographic validity can be checked as well. 
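The truth table above is exactly what the bit arithmetic in init() encodes: the private key contributes bit 2, the public key bit 1 and the certificate bit 0, and the resulting value 0-7 indexes straight into the InitCase enum. A tiny self-contained illustration of that mapping (the enum is repeated here only so the snippet compiles on its own):

public final class InitCaseSketch {

  enum InitCase { NONE, CERT, PUBLIC_KEY, PUBLICKEY_CERT,
                  PRIVATE_KEY, PRIVATEKEY_CERT, PUBLICKEY_PRIVATEKEY, ALL }

  // Mirrors the bit layout used by init(): private key = bit 2, public key = bit 1, cert = bit 0.
  static InitCase classify(boolean hasPrivateKey, boolean hasPublicKey, boolean hasCertificate) {
    int initCase = 0;
    if (hasPrivateKey)  { initCase |= 1 << 2; }
    if (hasPublicKey)   { initCase |= 1 << 1; }
    if (hasCertificate) { initCase |= 1;      }
    return InitCase.values()[initCase];
  }

  public static void main(String[] args) {
    System.out.println(classify(true, false, true));   // PRIVATEKEY_CERT (binary 101)
    System.out.println(classify(true, true, false));   // PUBLICKEY_PRIVATEKEY (binary 110)
  }
}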
- if (validateKeyPair(getCertificate().getPublicKey())) { - getLogger().info("Keypair validated with certificate."); - } else { - getLogger().error("Stored certificate is generated with different " + - "private key."); - return false; - } - } else { - getLogger().error("Keypair validation failed."); - return false; - } - return true; - } - - /** - * Tries to recover public key from certificate. Also validates recovered - * public key. - * */ - protected boolean recoverPublicKey() throws CertificateException { - PublicKey pubKey = getCertificate().getPublicKey(); - try { - - if(validateKeyPair(pubKey)){ - keyCodec.writePublicKey(pubKey); - publicKey = pubKey; - } else { - getLogger().error("Can't recover public key " + - "corresponding to private key.", BOOTSTRAP_ERROR); - return false; - } - } catch (IOException e) { - throw new CertificateException("Error while trying to recover " + - "public key.", e, BOOTSTRAP_ERROR); - } - return true; - } - - /** - * Validates public and private key of certificate client. - * - * @param pubKey - * */ - protected boolean validateKeyPair(PublicKey pubKey) - throws CertificateException { - byte[] challenge = RandomStringUtils.random(1000).getBytes( - StandardCharsets.UTF_8); - byte[] sign = signDataStream(new ByteArrayInputStream(challenge)); - return verifySignature(challenge, sign, pubKey); - } - - /** - * Bootstrap the client by creating keypair and storing it in configured - * location. - * */ - protected void bootstrapClientKeys() throws CertificateException { - Path keyPath = securityConfig.getKeyLocation(component); - if (Files.notExists(keyPath)) { - try { - Files.createDirectories(keyPath); - } catch (IOException e) { - throw new CertificateException("Error while creating directories " + - "for certificate storage.", BOOTSTRAP_ERROR); - } - } - KeyPair keyPair = createKeyPair(); - privateKey = keyPair.getPrivate(); - publicKey = keyPair.getPublic(); - } - - protected KeyPair createKeyPair() throws CertificateException { - HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); - KeyPair keyPair = null; - try { - keyPair = keyGenerator.generateKey(); - keyCodec.writePublicKey(keyPair.getPublic()); - keyCodec.writePrivateKey(keyPair.getPrivate()); - } catch (NoSuchProviderException | NoSuchAlgorithmException - | IOException e) { - getLogger().error("Error while bootstrapping certificate client.", e); - throw new CertificateException("Error while bootstrapping certificate.", - BOOTSTRAP_ERROR); - } - return keyPair; - } - - public Logger getLogger() { - return logger; - } - - /** - * Create a scm security client, used to get SCM signed certificate. 
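The validateKeyPair step above reduces to a sign/verify round trip: sign a random challenge with the private key and check the signature against the candidate public key. The same idea in plain JCA, independent of the Ozone helper classes; the "SHA256withRSA" algorithm and the KeyPairCheckSketch/matches names are assumptions for this sketch (the deleted code takes the algorithm and provider from SecurityConfig).

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.SecureRandom;
import java.security.Signature;

public final class KeyPairCheckSketch {

  /** Returns true only when 'publicKey' actually matches 'privateKey'. */
  static boolean matches(PrivateKey privateKey, PublicKey publicKey) throws Exception {
    byte[] challenge = new byte[1000];
    new SecureRandom().nextBytes(challenge);       // random challenge, as in validateKeyPair

    Signature signer = Signature.getInstance("SHA256withRSA");   // assumed algorithm
    signer.initSign(privateKey);
    signer.update(challenge);
    byte[] signature = signer.sign();

    Signature verifier = Signature.getInstance("SHA256withRSA");
    verifier.initVerify(publicKey);
    verifier.update(challenge);
    return verifier.verify(signature);
  }

  public static void main(String[] args) throws Exception {
    KeyPair pair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
    System.out.println(matches(pair.getPrivate(), pair.getPublic()));   // true
  }
}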
- * - * @return {@link SCMSecurityProtocol} - */ - private static SCMSecurityProtocol getScmSecurityClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmSecurityProtoAdd = - HddsUtils.getScmAddressForSecurityProtocol(conf); - SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient = - new SCMSecurityProtocolClientSideTranslatorPB( - RPC.getProxy(SCMSecurityProtocolPB.class, scmVersion, - scmSecurityProtoAdd, UserGroupInformation.getCurrentUser(), - conf, NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - return scmSecurityClient; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java deleted file mode 100644 index cb3ce7536e1..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; - -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; - -/** - * Certificate client for OzoneManager. 
- */ -public class OMCertificateClient extends DefaultCertificateClient { - - private static final Logger LOG = - LoggerFactory.getLogger(OMCertificateClient.class); - - public static final String COMPONENT_NAME = "om"; - - public OMCertificateClient(SecurityConfig securityConfig, - String certSerialId) { - super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - } - - public OMCertificateClient(SecurityConfig securityConfig) { - super(securityConfig, LOG, null, COMPONENT_NAME); - } - - protected InitResponse handleCase(InitCase init) throws - CertificateException { - switch (init) { - case NONE: - LOG.info("Creating keypair for client as keypair and certificate not " + - "found."); - bootstrapClientKeys(); - return GETCERT; - case CERT: - LOG.error("Private key not found, while certificate is still present." + - "Delete keypair and try again."); - return FAILURE; - case PUBLIC_KEY: - LOG.error("Found public key but private key and certificate missing."); - return FAILURE; - case PRIVATE_KEY: - LOG.info("Found private key but public key and certificate is missing."); - // TODO: Recovering public key from private might be possible in some - // cases. - return FAILURE; - case PUBLICKEY_CERT: - LOG.error("Found public key and certificate but private key is " + - "missing."); - return FAILURE; - case PRIVATEKEY_CERT: - LOG.info("Found private key and certificate but public key missing."); - if (recoverPublicKey()) { - return SUCCESS; - } else { - LOG.error("Public key recovery failed."); - return FAILURE; - } - case PUBLICKEY_PRIVATEKEY: - LOG.info("Found private and public key but certificate is missing."); - if (validateKeyPair(getPublicKey())) { - return RECOVER; - } else { - LOG.error("Keypair validation failed."); - return FAILURE; - } - case ALL: - LOG.info("Found certificate file along with KeyPair."); - if (validateKeyPairAndCertificate()) { - return SUCCESS; - } else { - return FAILURE; - } - default: - LOG.error("Unexpected case: {} (private/public/cert)", - Integer.toBinaryString(init.ordinal())); - return FAILURE; - } - } - - /** - * Returns a CSR builder that can be used to creates a Certificate signing - * request. - * - * @return CertificateSignRequest.Builder - */ - @Override - public CertificateSignRequest.Builder getCSRBuilder() - throws CertificateException { - return super.getCSRBuilder() - .setDigitalEncryption(true) - .setDigitalSignature(true); - } - - - public Logger getLogger() { - return LOG; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java deleted file mode 100644 index dea609bd249..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
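For orientation while reading the removal above, a minimal sketch of how the OM certificate client was typically constructed; securityConfig and certSerialId are assumed to be supplied by the caller, and the CSR handling shown is illustrative only (exception handling omitted).

    // Assumed in scope: SecurityConfig securityConfig; String certSerialId (may be null before a cert is issued).
    OMCertificateClient certClient = certSerialId == null
        ? new OMCertificateClient(securityConfig)
        : new OMCertificateClient(securityConfig, certSerialId);

    // getCSRBuilder() returns a builder pre-configured for digital signature and encryption.
    CertificateSignRequest.Builder csrBuilder = certClient.getCSRBuilder();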
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Classes related to creating and using certificates. - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java deleted file mode 100644 index 2c8721b199b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CertificateCodec.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.utils; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.bouncycastle.openssl.jcajce.JcaPEMWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.StringWriter; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.cert.CertificateEncodingException; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; -import java.security.cert.X509Certificate; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -/** - * A class used to read and write X.509 certificates PEM encoded Streams. 
- */ -public class CertificateCodec { - public static final String BEGIN_CERT = "-----BEGIN CERTIFICATE-----"; - public static final String END_CERT = "-----END CERTIFICATE-----"; - - private static final Logger LOG = - LoggerFactory.getLogger(CertificateCodec.class); - private static final JcaX509CertificateConverter CERTIFICATE_CONVERTER - = new JcaX509CertificateConverter(); - private final SecurityConfig securityConfig; - private final Path location; - private Set permissionSet = - Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE) - .collect(Collectors.toSet()); - /** - * Creates a CertificateCodec with component name. - * - * @param config - Security Config. - * @param component - Component String. - */ - public CertificateCodec(SecurityConfig config, String component) { - this.securityConfig = config; - this.location = securityConfig.getCertificateLocation(component); - } - - /** - * Returns a X509 Certificate from the Certificate Holder. - * - * @param holder - Holder - * @return X509Certificate. - * @throws CertificateException - on Error. - */ - public static X509Certificate getX509Certificate(X509CertificateHolder holder) - throws CertificateException { - return CERTIFICATE_CONVERTER.getCertificate(holder); - } - - /** - * Returns the Certificate as a PEM encoded String. - * - * @param x509CertHolder - X.509 Certificate Holder. - * @return PEM Encoded Certificate String. - * @throws SCMSecurityException - On failure to create a PEM String. - */ - public static String getPEMEncodedString(X509CertificateHolder x509CertHolder) - throws SCMSecurityException { - try { - return getPEMEncodedString(getX509Certificate(x509CertHolder)); - } catch (CertificateException exp) { - throw new SCMSecurityException(exp); - } - } - - /** - * Returns the Certificate as a PEM encoded String. - * - * @param certificate - X.509 Certificate. - * @return PEM Encoded Certificate String. - * @throws SCMSecurityException - On failure to create a PEM String. - */ - public static String getPEMEncodedString(X509Certificate certificate) - throws SCMSecurityException { - try { - StringWriter stringWriter = new StringWriter(); - try (JcaPEMWriter pemWriter = new JcaPEMWriter(stringWriter)) { - pemWriter.writeObject(certificate); - } - return stringWriter.toString(); - } catch (IOException e) { - LOG.error("Error in encoding certificate." + certificate - .getSubjectDN().toString(), e); - throw new SCMSecurityException("PEM Encoding failed for certificate." + - certificate.getSubjectDN().toString(), e); - } - } - - /** - * Gets the X.509 Certificate from PEM encoded String. - * - * @param pemEncodedString - PEM encoded String. - * @return X509Certificate - Certificate. - * @throws CertificateException - Thrown on Failure. - * @throws IOException - Thrown on Failure. - */ - public static X509Certificate getX509Certificate(String pemEncodedString) - throws CertificateException, IOException { - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) { - return (X509Certificate) fact.generateCertificate(input); - } - } - - /** - * Get Certificate location. - * - * @return Path - */ - public Path getLocation() { - return location; - } - - /** - * Gets the X.509 Certificate from PEM encoded String. - * - * @param pemEncodedString - PEM encoded String. - * @return X509Certificate - Certificate. - * @throws CertificateException - Thrown on Failure. - * @throws IOException - Thrown on Failure. 
- */ - public static X509Certificate getX509Cert(String pemEncodedString) - throws CertificateException, IOException { - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (InputStream input = IOUtils.toInputStream(pemEncodedString, UTF_8)) { - return (X509Certificate) fact.generateCertificate(input); - } - } - - /** - * Write the Certificate pointed to the location by the configs. - * - * @param xCertificate - Certificate to write. - * @throws SCMSecurityException - on Error. - * @throws IOException - on Error. - */ - public void writeCertificate(X509CertificateHolder xCertificate) - throws SCMSecurityException, IOException { - String pem = getPEMEncodedString(xCertificate); - writeCertificate(location.toAbsolutePath(), - this.securityConfig.getCertificateFileName(), pem, false); - } - - /** - * Write the Certificate to the specific file. - * - * @param xCertificate - Certificate to write. - * @param fileName - file name to write to. - * @param overwrite - boolean value, true means overwrite an existing - * certificate. - * @throws SCMSecurityException - On Error. - * @throws IOException - On Error. - */ - public void writeCertificate(X509CertificateHolder xCertificate, - String fileName, boolean overwrite) - throws SCMSecurityException, IOException { - String pem = getPEMEncodedString(xCertificate); - writeCertificate(location.toAbsolutePath(), fileName, pem, overwrite); - } - - /** - * Helper function that writes data to the file. - * - * @param basePath - Base Path where the file needs to written to. - * @param fileName - Certificate file name. - * @param pemEncodedCertificate - pemEncoded Certificate file. - * @param force - Overwrite if the file exists. - * @throws IOException - on Error. - */ - public synchronized void writeCertificate(Path basePath, String fileName, - String pemEncodedCertificate, boolean force) - throws IOException { - File certificateFile = - Paths.get(basePath.toString(), fileName).toFile(); - if (certificateFile.exists() && !force) { - throw new SCMSecurityException("Specified certificate file already " + - "exists.Please use force option if you want to overwrite it."); - } - if (!basePath.toFile().exists()) { - if (!basePath.toFile().mkdirs()) { - LOG.error("Unable to create file path. Path: {}", basePath); - throw new IOException("Creation of the directories failed." - + basePath.toString()); - } - } - try (FileOutputStream file = new FileOutputStream(certificateFile)) { - IOUtils.write(pemEncodedCertificate, file, UTF_8); - } - - Files.setPosixFilePermissions(certificateFile.toPath(), permissionSet); - } - - /** - * Rertuns a default certificate using the default paths for this component. - * - * @return X509CertificateHolder. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - * @throws IOException - on Error. - */ - public X509CertificateHolder readCertificate() throws - CertificateException, IOException { - return readCertificate(this.location.toAbsolutePath(), - this.securityConfig.getCertificateFileName()); - } - - /** - * Returns the certificate from the specific PEM encoded file. - * - * @param basePath - base path - * @param fileName - fileName - * @return X%09 Certificate - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. 
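A minimal write/read round trip with the codec above, sketched for orientation; securityConfig and certHolder are assumed to exist, the "om" component name follows the OM client shown earlier, and exception handling is omitted.

    // Assumed in scope: SecurityConfig securityConfig; X509CertificateHolder certHolder.
    CertificateCodec codec = new CertificateCodec(securityConfig, "om");
    codec.writeCertificate(certHolder);                      // PEM file under the component's certificate location
    X509CertificateHolder stored = codec.readCertificate();  // read back from the default location
    String pem = CertificateCodec.getPEMEncodedString(stored);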
- */ - public synchronized X509CertificateHolder readCertificate(Path basePath, - String fileName) throws IOException, CertificateException { - File certificateFile = Paths.get(basePath.toString(), fileName).toFile(); - return getX509CertificateHolder(certificateFile); - } - - /** - * Helper function to read certificate. - * - * @param certificateFile - Full path to certificate file. - * @return X509CertificateHolder - * @throws IOException - On Error. - * @throws CertificateException - On Error. - */ - private X509CertificateHolder getX509CertificateHolder(File certificateFile) - throws IOException, CertificateException { - if (!certificateFile.exists()) { - throw new IOException("Unable to find the requested certificate. Path: " - + certificateFile.toString()); - } - CertificateFactory fact = CertificateFactory.getInstance("X.509"); - try (FileInputStream is = new FileInputStream(certificateFile)) { - return getCertificateHolder( - (X509Certificate) fact.generateCertificate(is)); - } - } - - /** - * Returns the Certificate holder from X509Ceritificate class. - * - * @param x509cert - Certificate class. - * @return X509CertificateHolder - * @throws CertificateEncodingException - on Error. - * @throws IOException - on Error. - */ - public X509CertificateHolder getCertificateHolder(X509Certificate x509cert) - throws CertificateEncodingException, IOException { - return new X509CertificateHolder(x509cert.getEncoded()); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java deleted file mode 100644 index 4971d4ae14f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Certificate Utils. - */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java deleted file mode 100644 index 28f853a7f63..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificates.utils; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.logging.log4j.util.Strings; -import org.bouncycastle.asn1.DEROctetString; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.BasicConstraints; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.bouncycastle.openssl.jcajce.JcaPEMWriter; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder; -import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder; -import org.bouncycastle.util.io.pem.PemObject; -import org.bouncycastle.util.io.pem.PemReader; - -import java.io.IOException; -import java.io.StringReader; -import java.io.StringWriter; -import java.security.KeyPair; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - -/** - * A certificate sign request object that wraps operations to build a - * PKCS10CertificationRequest to CertificateServer. - */ -public final class CertificateSignRequest { - private final KeyPair keyPair; - private final SecurityConfig config; - private final Extensions extensions; - private String subject; - private String clusterID; - private String scmID; - - /** - * Private Ctor for CSR. 
- * - * @param subject - Subject - * @param scmID - SCM ID - * @param clusterID - Cluster ID - * @param keyPair - KeyPair - * @param config - SCM Config - * @param extensions - CSR extensions - */ - private CertificateSignRequest(String subject, String scmID, String clusterID, - KeyPair keyPair, SecurityConfig config, - Extensions extensions) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.keyPair = keyPair; - this.config = config; - this.extensions = extensions; - } - - private PKCS10CertificationRequest generateCSR() throws - OperatorCreationException { - X500Name dnName = SecurityUtil.getDistinguishedName(subject, scmID, - clusterID); - PKCS10CertificationRequestBuilder p10Builder = - new JcaPKCS10CertificationRequestBuilder(dnName, keyPair.getPublic()); - - ContentSigner contentSigner = - new JcaContentSignerBuilder(config.getSignatureAlgo()) - .setProvider(config.getProvider()) - .build(keyPair.getPrivate()); - - if (extensions != null) { - p10Builder.addAttribute( - PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, extensions); - } - return p10Builder.build(contentSigner); - } - public static String getEncodedString(PKCS10CertificationRequest request) - throws IOException { - PemObject pemObject = - new PemObject("CERTIFICATE REQUEST", request.getEncoded()); - StringWriter str = new StringWriter(); - try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { - pemWriter.writeObject(pemObject); - } - return str.toString(); - } - - - /** - * Gets a CertificateRequest Object from PEM encoded CSR. - * - * @param csr - PEM Encoded Certificate Request String. - * @return PKCS10CertificationRequest - * @throws IOException - On Error. - */ - public static PKCS10CertificationRequest getCertificationRequest(String csr) - throws IOException { - try (PemReader reader = new PemReader(new StringReader(csr))) { - PemObject pemObject = reader.readPemObject(); - if(pemObject.getContent() == null) { - throw new SCMSecurityException("Invalid Certificate signing request"); - } - return new PKCS10CertificationRequest(pemObject.getContent()); - } - } - - /** - * Builder class for Certificate Sign Request. - */ - public static class Builder { - private String subject; - private String clusterID; - private String scmID; - private KeyPair key; - private SecurityConfig config; - private List altNames; - private Boolean ca = false; - private boolean digitalSignature; - private boolean digitalEncryption; - - public CertificateSignRequest.Builder setConfiguration( - Configuration configuration) { - this.config = new SecurityConfig(configuration); - return this; - } - - public CertificateSignRequest.Builder setKey(KeyPair keyPair) { - this.key = keyPair; - return this; - } - - public CertificateSignRequest.Builder setSubject(String subjectString) { - this.subject = subjectString; - return this; - } - - public CertificateSignRequest.Builder setClusterID(String s) { - this.clusterID = s; - return this; - } - - public CertificateSignRequest.Builder setScmID(String s) { - this.scmID = s; - return this; - } - - public Builder setDigitalSignature(boolean dSign) { - this.digitalSignature = dSign; - return this; - } - - public Builder setDigitalEncryption(boolean dEncryption) { - this.digitalEncryption = dEncryption; - return this; - } - - // Support SAN extenion with DNS and RFC822 Name - // other name type will be added as needed. 
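As a quick illustration of the builder being removed here, a hedged sketch of assembling and PEM-encoding a CSR; conf, keyPair, scmId and clusterId are assumed inputs, and the subject/DNS name is a made-up example. The addDnsName() and build() calls appear just below.

    // Assumed in scope: Configuration conf; KeyPair keyPair; String scmId, clusterId.
    PKCS10CertificationRequest csr = new CertificateSignRequest.Builder()
        .setConfiguration(conf)
        .setKey(keyPair)
        .setSubject("om.example.com")        // hypothetical subject
        .setScmID(scmId)
        .setClusterID(clusterId)
        .setDigitalSignature(true)
        .setDigitalEncryption(true)
        .addDnsName("om.example.com")        // SAN entry, see addDnsName() below
        .build();
    String pemCsr = CertificateSignRequest.getEncodedString(csr);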
- public CertificateSignRequest.Builder addDnsName(String dnsName) { - Preconditions.checkNotNull(dnsName, "dnsName cannot be null"); - this.addAltName(GeneralName.dNSName, dnsName); - return this; - } - - // IP address is subject to change which is optional for now. - public CertificateSignRequest.Builder addIpAddress(String ip) { - Preconditions.checkNotNull(ip, "Ip address cannot be null"); - this.addAltName(GeneralName.iPAddress, ip); - return this; - } - - private CertificateSignRequest.Builder addAltName(int tag, String name) { - if (altNames == null) { - altNames = new ArrayList<>(); - } - altNames.add(new GeneralName(tag, name)); - return this; - } - - public CertificateSignRequest.Builder setCA(Boolean isCA) { - this.ca = isCA; - return this; - } - - private Extension getKeyUsageExtension() throws IOException { - int keyUsageFlag = KeyUsage.keyAgreement; - if(digitalEncryption){ - keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment; - } - if(digitalSignature) { - keyUsageFlag |= KeyUsage.digitalSignature; - } - - if (ca) { - keyUsageFlag |= KeyUsage.keyCertSign | KeyUsage.cRLSign; - } - KeyUsage keyUsage = new KeyUsage(keyUsageFlag); - return new Extension(Extension.keyUsage, true, - new DEROctetString(keyUsage)); - } - - private Optional getSubjectAltNameExtension() throws - IOException { - if (altNames != null) { - return Optional.of(new Extension(Extension.subjectAlternativeName, - false, new DEROctetString(new GeneralNames( - altNames.toArray(new GeneralName[altNames.size()]))))); - } - return Optional.empty(); - } - - private Extension getBasicExtension() throws IOException { - // We don't set pathLenConstraint means no limit is imposed. - return new Extension(Extension.basicConstraints, - true, new DEROctetString(new BasicConstraints(ca))); - } - - private Extensions createExtensions() throws IOException { - List extensions = new ArrayList<>(); - - // Add basic extension - if(ca) { - extensions.add(getBasicExtension()); - } - - // Add key usage extension - extensions.add(getKeyUsageExtension()); - - // Add subject alternate name extension - Optional san = getSubjectAltNameExtension(); - if (san.isPresent()) { - extensions.add(san.get()); - } - - return new Extensions( - extensions.toArray(new Extension[extensions.size()])); - } - - public PKCS10CertificationRequest build() throws SCMSecurityException { - Preconditions.checkNotNull(key, "KeyPair cannot be null"); - Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " + - "cannot be blank"); - - try { - CertificateSignRequest csr = new CertificateSignRequest(subject, scmID, - clusterID, key, config, createExtensions()); - return csr.generateCSR(); - } catch (IOException ioe) { - throw new CertificateException(String.format("Unable to create " + - "extension for certificate sign request for %s.", SecurityUtil - .getDistinguishedName(subject, scmID, clusterID)), ioe.getCause()); - } catch (OperatorCreationException ex) { - throw new CertificateException(String.format("Unable to create " + - "certificate sign request for %s.", SecurityUtil - .getDistinguishedName(subject, scmID, clusterID)), - ex.getCause()); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java deleted file mode 100644 index 1fd6d7c9af6..00000000000 --- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificates.utils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.util.Time; -import org.apache.logging.log4j.util.Strings; -import org.bouncycastle.asn1.DEROctetString; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.BasicConstraints; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.KeyUsage; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.cert.CertIOException; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.X509v3CertificateBuilder; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.KeyPair; -import java.time.Duration; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.Date; - -/** - * A Self Signed Certificate with CertificateServer basic constraint can be used - * to bootstrap a certificate infrastructure, if no external certificate is - * provided. - */ -public final class SelfSignedCertificate { - private static final String NAME_FORMAT = "CN=%s,OU=%s,O=%s"; - private String subject; - private String clusterID; - private String scmID; - private LocalDate beginDate; - private LocalDate endDate; - private KeyPair key; - private SecurityConfig config; - - /** - * Private Ctor invoked only via Builder Interface. 
- * - * @param subject - Subject - * @param scmID - SCM ID - * @param clusterID - Cluster ID - * @param beginDate - NotBefore - * @param endDate - Not After - * @param configuration - SCM Config - * @param keyPair - KeyPair - */ - private SelfSignedCertificate(String subject, String scmID, String clusterID, - LocalDate beginDate, LocalDate endDate, SecurityConfig configuration, - KeyPair keyPair) { - this.subject = subject; - this.clusterID = clusterID; - this.scmID = scmID; - this.beginDate = beginDate; - this.endDate = endDate; - config = configuration; - this.key = keyPair; - } - - @VisibleForTesting - public static String getNameFormat() { - return NAME_FORMAT; - } - - public static Builder newBuilder() { - return new Builder(); - } - - private X509CertificateHolder generateCertificate(boolean isCA) - throws OperatorCreationException, IOException { - // For the Root Certificate we form the name from Subject, SCM ID and - // Cluster ID. - String dnName = String.format(getNameFormat(), subject, scmID, clusterID); - X500Name name = new X500Name(dnName); - byte[] encoded = key.getPublic().getEncoded(); - SubjectPublicKeyInfo publicKeyInfo = - SubjectPublicKeyInfo.getInstance(encoded); - - - ContentSigner contentSigner = - new JcaContentSignerBuilder(config.getSignatureAlgo()) - .setProvider(config.getProvider()).build(key.getPrivate()); - - // Please note: Since this is a root certificate we use "ONE" as the - // serial number. Also note that skip enforcing locale or UTC. We are - // trying to operate at the Days level, hence Time zone is also skipped for - // now. - BigInteger serial = BigInteger.ONE; - if (!isCA) { - serial = new BigInteger(Long.toString(Time.monotonicNow())); - } - - ZoneOffset zoneOffset = - beginDate.atStartOfDay(ZoneOffset.systemDefault()).getOffset(); - - // Valid from the Start of the day when we generate this Certificate. - Date validFrom = - Date.from(beginDate.atTime(LocalTime.MIN).toInstant(zoneOffset)); - - // Valid till end day finishes. - Date validTill = - Date.from(endDate.atTime(LocalTime.MAX).toInstant(zoneOffset)); - - X509v3CertificateBuilder builder = new X509v3CertificateBuilder(name, - serial, validFrom, validTill, name, publicKeyInfo); - - if (isCA) { - builder.addExtension(Extension.basicConstraints, true, - new BasicConstraints(true)); - int keyUsageFlag = KeyUsage.keyCertSign | KeyUsage.cRLSign; - KeyUsage keyUsage = new KeyUsage(keyUsageFlag); - builder.addExtension(Extension.keyUsage, false, - new DEROctetString(keyUsage)); - } - return builder.build(contentSigner); - } - - /** - * Builder class for Root Certificates. 
- */ - public static class Builder { - private String subject; - private String clusterID; - private String scmID; - private LocalDate beginDate; - private LocalDate endDate; - private KeyPair key; - private SecurityConfig config; - private boolean isCA; - - public Builder setConfiguration(Configuration configuration) { - this.config = new SecurityConfig(configuration); - return this; - } - - public Builder setKey(KeyPair keyPair) { - this.key = keyPair; - return this; - } - - public Builder setSubject(String subjectString) { - this.subject = subjectString; - return this; - } - - public Builder setClusterID(String s) { - this.clusterID = s; - return this; - } - - public Builder setScmID(String s) { - this.scmID = s; - return this; - } - - public Builder setBeginDate(LocalDate date) { - this.beginDate = date; - return this; - } - - public Builder setEndDate(LocalDate date) { - this.endDate = date; - return this; - } - - public Builder makeCA() { - isCA = true; - return this; - } - - public X509CertificateHolder build() - throws SCMSecurityException, IOException { - Preconditions.checkNotNull(key, "Key cannot be null"); - Preconditions.checkArgument(Strings.isNotBlank(subject), "Subject " + - "cannot be blank"); - Preconditions.checkArgument(Strings.isNotBlank(clusterID), "Cluster ID " + - "cannot be blank"); - Preconditions.checkArgument(Strings.isNotBlank(scmID), "SCM ID cannot " + - "be blank"); - - Preconditions.checkArgument(beginDate.isBefore(endDate), "Certificate " + - "begin date should be before end date"); - - // We just read the beginDate and EndDate as Start of the Day and - // confirm that we do not violate the maxDuration Config. - Duration certDuration = Duration.between(beginDate.atStartOfDay(), - endDate.atStartOfDay()); - Duration maxDuration = config.getMaxCertificateDuration(); - if (certDuration.compareTo(maxDuration) > 0) { - throw new SCMSecurityException("The cert duration violates the " + - "maximum configured value. Please check the hdds.x509.max" + - ".duration config key. Current Value: " + certDuration + - " config: " + maxDuration); - } - - SelfSignedCertificate rootCertificate = - new SelfSignedCertificate(this.subject, - this.scmID, this.clusterID, this.beginDate, this.endDate, - this.config, key); - try { - return rootCertificate.generateCertificate(isCA); - } catch (OperatorCreationException | CertIOException e) { - throw new CertificateException("Unable to create root certificate.", - e.getCause()); - } - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java deleted file mode 100644 index e7110e31251..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
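A short sketch of the self-signed CA bootstrap flow the builder above supported; conf, keyPair, clusterId and scmId are assumed inputs, and the one-year validity is only an example that must stay within the configured hdds.x509 maximum duration.

    // Assumed in scope: Configuration conf; KeyPair keyPair; String clusterId, scmId.
    X509CertificateHolder caCert = SelfSignedCertificate.newBuilder()
        .setConfiguration(conf)
        .setKey(keyPair)
        .setSubject("scm-ca")                        // hypothetical subject
        .setClusterID(clusterId)
        .setScmID(scmId)
        .setBeginDate(LocalDate.now())
        .setEndDate(LocalDate.now().plusDays(365))   // must not exceed the configured maximum duration
        .makeCA()                                    // adds basicConstraints plus keyCertSign/cRLSign usage
        .build();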
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - Helpers for Certificates. - */ -package org.apache.hadoop.hdds.security.x509.certificates.utils; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java deleted file mode 100644 index b3121283b18..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/CertificateException.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.exceptions; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; - -/** - * Certificate Exceptions from the SCM Security layer. - */ -public class CertificateException extends SCMSecurityException { - - private ErrorCode errorCode; - /** - * Ctor. - * @param message - Error Message. - */ - public CertificateException(String message) { - super(message); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - */ - public CertificateException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Ctor. - * @param message - Message. - * @param cause - Actual cause. - * @param errorCode - */ - public CertificateException(String message, Throwable cause, - ErrorCode errorCode) { - super(message, cause); - this.errorCode = errorCode; - } - - /** - * Ctor. - * @param message - Message. - * @param errorCode - */ - public CertificateException(String message, ErrorCode errorCode) { - super(message); - this.errorCode = errorCode; - } - - /** - * Ctor. - * @param cause - Base Exception. - */ - public CertificateException(Throwable cause) { - super(cause); - } - - /** - * Error codes to make it easy to decode these exceptions. 
- */ - public enum ErrorCode { - KEYSTORE_ERROR, - CRYPTO_SIGN_ERROR, - CERTIFICATE_ERROR, - BOOTSTRAP_ERROR, - CSR_ERROR, - CRYPTO_SIGNATURE_VERIFICATION_ERROR, - CERTIFICATE_NOT_FOUND_ERROR - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java deleted file mode 100644 index afcc474ad11..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/exceptions/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Exceptions thrown by X.509 security classes. - */ -package org.apache.hadoop.hdds.security.x509.exceptions; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java deleted file mode 100644 index 640f5ca0b94..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.security.KeyPair; -import java.security.KeyPairGenerator; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; - -/** - * A class to generate Key Pair for use with Certificates. - */ -public class HDDSKeyGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(HDDSKeyGenerator.class); - private final SecurityConfig securityConfig; - - /** - * Constructor for HDDSKeyGenerator. 
- * - * @param configuration - config - */ - public HDDSKeyGenerator(Configuration configuration) { - this.securityConfig = new SecurityConfig(configuration); - } - - /** - * Constructor that takes a SecurityConfig as the Argument. - * - * @param config - SecurityConfig - */ - public HDDSKeyGenerator(SecurityConfig config) { - this.securityConfig = config; - } - - /** - * Returns the Security config used for this object. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * Use Config to generate key. - * - * @return KeyPair - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey() throws NoSuchProviderException, - NoSuchAlgorithmException { - return generateKey(securityConfig.getSize(), - securityConfig.getKeyAlgo(), securityConfig.getProvider()); - } - - /** - * Specify the size -- all other parameters are used from config. - * - * @param size - int, valid key sizes. - * @return KeyPair - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey(int size) throws - NoSuchProviderException, NoSuchAlgorithmException { - return generateKey(size, - securityConfig.getKeyAlgo(), securityConfig.getProvider()); - } - - /** - * Custom Key Generation, all values are user provided. - * - * @param size - Key Size - * @param algorithm - Algorithm to use - * @param provider - Security provider. - * @return KeyPair. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - public KeyPair generateKey(int size, String algorithm, String provider) - throws NoSuchProviderException, NoSuchAlgorithmException { - if (LOG.isDebugEnabled()) { - LOG.debug("Generating key pair using size:{}, Algorithm:{}, Provider:{}", - size, algorithm, provider); - } - KeyPairGenerator generator = KeyPairGenerator - .getInstance(algorithm, provider); - generator.initialize(size); - return generator.generateKeyPair(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java deleted file mode 100644 index 82873b06c71..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
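A brief sketch of the generator above, for orientation; securityConfig is assumed to exist, and the explicit 4096/RSA/BC values in the second call are illustrative overrides rather than required settings.

    // Assumed in scope: SecurityConfig securityConfig.
    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(securityConfig);
    KeyPair keyPair = keyGen.generateKey();                   // size, algorithm and provider come from config
    KeyPair custom  = keyGen.generateKey(4096, "RSA", "BC");  // explicit overrides, illustrative only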
- * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.output.FileWriterWithEncoding; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.bouncycastle.util.io.pem.PemObject; -import org.bouncycastle.util.io.pem.PemReader; -import org.bouncycastle.util.io.pem.PemWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.StringReader; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -/** - * We store all Key material in good old PEM files. This helps in avoiding - * dealing will persistent Java KeyStore issues. Also when debugging, general - * tools like OpenSSL can be used to read and decode these files. - */ -public class KeyCodec { - public final static String PRIVATE_KEY = "PRIVATE KEY"; - public final static String PUBLIC_KEY = "PUBLIC KEY"; - public final static Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; - private final static Logger LOG = - LoggerFactory.getLogger(KeyCodec.class); - private final Path location; - private final SecurityConfig securityConfig; - private Set permissionSet = - Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE) - .collect(Collectors.toSet()); - private Supplier isPosixFileSystem; - - /** - * Creates a KeyCodec with component name. - * - * @param config - Security Config. - * @param component - Component String. - */ - public KeyCodec(SecurityConfig config, String component) { - this.securityConfig = config; - isPosixFileSystem = KeyCodec::isPosix; - this.location = securityConfig.getKeyLocation(component); - } - - /** - * Checks if File System supports posix style security permissions. - * - * @return True if it supports posix. - */ - private static Boolean isPosix() { - return FileSystems.getDefault().supportedFileAttributeViews() - .contains("posix"); - } - - /** - * Returns the Permission set. - * - * @return Set - */ - @VisibleForTesting - public Set getPermissionSet() { - return permissionSet; - } - - /** - * Returns the Security config used for this object. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * This function is used only for testing. - * - * @param isPosixFileSystem - Sets a boolean function for mimicking files - * systems that are not posix. 
- */ - @VisibleForTesting - public void setIsPosixFileSystem(Supplier isPosixFileSystem) { - this.isPosixFileSystem = isPosixFileSystem; - } - - /** - * Writes a given key using the default config options. - * - * @param keyPair - Key Pair to write to file. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), false); - } - - /** - * Writes a given private key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePrivateKey(PrivateKey key) throws IOException { - File privateKeyFile = - Paths.get(location.toString(), - securityConfig.getPrivateKeyFileName()).toFile(); - - if (Files.exists(privateKeyFile.toPath())) { - throw new IOException("Private key already exist."); - } - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet); - } - - /** - * Writes a given public key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePublicKey(PublicKey key) throws IOException { - File publicKeyFile = Paths.get(location.toString(), - securityConfig.getPublicKeyFileName()).toFile(); - - if (Files.exists(publicKeyFile.toPath())) { - throw new IOException("Private key already exist."); - } - - try (PemWriter keyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - keyWriter.writeObject( - new PemObject(PUBLIC_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet); - } - - /** - * Writes a given key using default config options. - * - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Writes a given key using default config options. - * - * @param basePath - The location to write to, override the config values. - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite) - throws IOException { - writeKey(basePath, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Reads a Private Key from the PEM Encoded Store. - * - * @param basePath - Base Path, Directory where the Key is stored. - * @param keyFileName - File Name of the private key - * @return PrivateKey Object. - * @throws IOException - on Error. 
- */ - private PKCS8EncodedKeySpec readKey(Path basePath, String keyFileName) - throws IOException { - File fileName = Paths.get(basePath.toString(), keyFileName).toFile(); - String keyData = FileUtils.readFileToString(fileName, DEFAULT_CHARSET); - final byte[] pemContent; - try (PemReader pemReader = new PemReader(new StringReader(keyData))) { - PemObject keyObject = pemReader.readPemObject(); - pemContent = keyObject.getContent(); - } - return new PKCS8EncodedKeySpec(pemContent); - } - - /** - * Returns a Private Key from a PEM encoded file. - * - * @param basePath - base path - * @param privateKeyFileName - private key file name. - * @return PrivateKey - * @throws InvalidKeySpecException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - */ - public PrivateKey readPrivateKey(Path basePath, String privateKeyFileName) - throws InvalidKeySpecException, NoSuchAlgorithmException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, privateKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePrivate(encodedKeySpec); - } - - /** - * Read the Public Key using defaults. - * @return PublicKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PublicKey readPublicKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPublicKey(this.location.toAbsolutePath(), - securityConfig.getPublicKeyFileName()); - } - - /** - * Returns a public key from a PEM encoded file. - * - * @param basePath - base path. - * @param publicKeyFileName - public key file name. - * @return PublicKey - * @throws NoSuchAlgorithmException - on Error. - * @throws InvalidKeySpecException - on Error. - * @throws IOException - on Error. - */ - public PublicKey readPublicKey(Path basePath, String publicKeyFileName) - throws NoSuchAlgorithmException, InvalidKeySpecException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, publicKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePublic( - new X509EncodedKeySpec(encodedKeySpec.getEncoded())); - - } - - - /** - * Returns the private key using defaults. - * @return PrivateKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PrivateKey readPrivateKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPrivateKey(this.location.toAbsolutePath(), - securityConfig.getPrivateKeyFileName()); - } - - - /** - * Helper function that actually writes data to the files. - * - * @param basePath - base path to write key - * @param keyPair - Key pair to write to file. - * @param privateKeyFileName - private key file name. - * @param publicKeyFileName - public key file name. - * @param force - forces overwriting the keys. - * @throws IOException - On I/O failure. 
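To round out the picture of the codec above, a minimal sketch of writing a key pair and reading it back; securityConfig and keyPair are assumed, and "om" is used as the component name as in the earlier examples.

    // Assumed in scope: SecurityConfig securityConfig; KeyPair keyPair.
    KeyCodec keyCodec = new KeyCodec(securityConfig, "om");
    keyCodec.writeKey(keyPair);                      // PEM files under the component's key location
    PublicKey publicKey   = keyCodec.readPublicKey();
    PrivateKey privateKey = keyCodec.readPrivateKey();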
- */ - private synchronized void writeKey(Path basePath, KeyPair keyPair, - String privateKeyFileName, String publicKeyFileName, boolean force) - throws IOException { - checkPreconditions(basePath); - - File privateKeyFile = - Paths.get(location.toString(), privateKeyFileName).toFile(); - File publicKeyFile = - Paths.get(location.toString(), publicKeyFileName).toFile(); - checkKeyFile(privateKeyFile, force, publicKeyFile); - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded())); - } - - try (PemWriter publicKeyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - publicKeyWriter.writeObject( - new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet); - Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet); - } - - /** - * Checks if private and public key file already exists. Throws IOException if - * file exists and force flag is set to false, else will delete the existing - * file. - * - * @param privateKeyFile - Private key file. - * @param force - forces overwriting the keys. - * @param publicKeyFile - public key file. - * @throws IOException - On I/O failure. - */ - private void checkKeyFile(File privateKeyFile, boolean force, - File publicKeyFile) throws IOException { - if (privateKeyFile.exists() && force) { - if (!privateKeyFile.delete()) { - throw new IOException("Unable to delete private key file."); - } - } - - if (publicKeyFile.exists() && force) { - if (!publicKeyFile.delete()) { - throw new IOException("Unable to delete public key file."); - } - } - - if (privateKeyFile.exists()) { - throw new IOException("Private Key file already exists."); - } - - if (publicKeyFile.exists()) { - throw new IOException("Public Key file already exists."); - } - } - - /** - * Checks if base path exists and sets file permissions. - * - * @param basePath - base path to write key - * @throws IOException - On I/O failure. - */ - private void checkPreconditions(Path basePath) throws IOException { - Preconditions.checkNotNull(basePath, "Base path cannot be null"); - if (!isPosixFileSystem.get()) { - LOG.error("Keys cannot be stored securely without POSIX file system " - + "support for now."); - throw new IOException("Unsupported File System for pem file."); - } - - if (Files.exists(basePath)) { - // Not the end of the world if we reset the permissions on an existing - // directory. - Files.setPosixFilePermissions(basePath, permissionSet); - } else { - boolean success = basePath.toFile().mkdirs(); - if (!success) { - LOG.error("Unable to create the directory for the " - + "location. Location: {}", basePath); - throw new IOException("Unable to create the directory for the " - + "location. Location:" + basePath); - } - Files.setPosixFilePermissions(basePath, permissionSet); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java deleted file mode 100644 index 6147d3a9901..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import java.security.KeyFactory; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.bouncycastle.asn1.ASN1ObjectIdentifier; -import org.bouncycastle.asn1.ASN1Sequence; -import org.bouncycastle.asn1.ASN1Set; -import org.bouncycastle.asn1.pkcs.Attribute; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -/** - * Utility functions for Security modules for Ozone. - */ -public final class SecurityUtil { - - // Ozone Certificate distinguished format: (CN=Subject,OU=ScmID,O=ClusterID). - private static final String DISTINGUISHED_NAME_FORMAT = "CN=%s,OU=%s,O=%s"; - - private SecurityUtil() { - } - - public static String getDistinguishedNameFormat() { - return DISTINGUISHED_NAME_FORMAT; - } - - public static X500Name getDistinguishedName(String subject, String scmID, - String clusterID) { - return new X500Name(String.format(getDistinguishedNameFormat(), subject, - scmID, clusterID)); - } - - // TODO: move the PKCS10CSRValidator class - public static Extensions getPkcs9Extensions(PKCS10CertificationRequest csr) - throws CertificateException { - ASN1Set pkcs9ExtReq = getPkcs9ExtRequest(csr); - Object extReqElement = pkcs9ExtReq.getObjects().nextElement(); - if (extReqElement instanceof Extensions) { - return (Extensions) extReqElement; - } else { - if (extReqElement instanceof ASN1Sequence) { - return Extensions.getInstance((ASN1Sequence) extReqElement); - } else { - throw new CertificateException("Unknown element type :" + extReqElement - .getClass().getSimpleName()); - } - } - } - - public static ASN1Set getPkcs9ExtRequest(PKCS10CertificationRequest csr) - throws CertificateException { - for (Attribute attr : csr.getAttributes()) { - ASN1ObjectIdentifier oid = attr.getAttrType(); - if (oid.equals(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest)) { - return attr.getAttrValues(); - } - } - throw new CertificateException("No PKCS#9 extension found in CSR"); - } - - /* - * Returns private key created from encoded key. - * @return private key if successful else returns null. 
- */ - public static PrivateKey getPrivateKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PrivateKey pvtKey = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - pvtKey = kf.generatePrivate(new PKCS8EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return pvtKey; - } - - /* - * Returns public key created from encoded key. - * @return public key if successful else returns null. - */ - public static PublicKey getPublicKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PublicKey key = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - key = kf.generatePublic(new X509EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return key; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java deleted file mode 100644 index 37a04d6c084..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Utils for private and public keys. - */ -package org.apache.hadoop.hdds.security.x509.keys; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java deleted file mode 100644 index a6369c68308..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
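
For reference, a minimal sketch (not part of this patch) that round-trips a freshly generated key pair through the byte[] helpers above. The SecurityConfig constructor used here is an assumption (it is not shown in this hunk), as are the default RSA algorithm and the "BC" provider; note that both helpers return null rather than throwing when decoding fails.

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.Security;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public final class SecurityUtilRoundTrip {
  public static void main(String[] args) throws Exception {
    // The configured provider is typically BouncyCastle, so register it first.
    Security.addProvider(new BouncyCastleProvider());
    // Assumed constructor: SecurityConfig(org.apache.hadoop.conf.Configuration).
    SecurityConfig config = new SecurityConfig(new OzoneConfiguration());

    KeyPairGenerator generator =
        KeyPairGenerator.getInstance(config.getKeyAlgo());
    generator.initialize(2048); // illustrative key size
    KeyPair pair = generator.generateKeyPair();

    // getEncoded() yields PKCS#8 (private) and X.509 (public) bytes,
    // which is exactly what the two helpers expect.
    PrivateKey privateKey =
        SecurityUtil.getPrivateKey(pair.getPrivate().getEncoded(), config);
    PublicKey publicKey =
        SecurityUtil.getPublicKey(pair.getPublic().getEncoded(), config);
    System.out.println(privateKey != null && publicKey != null);
  }
}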
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - - -/** - * This package contains common routines used in creating an x509 based identity - * framework for HDDS. - */ -package org.apache.hadoop.hdds.security.x509; -/* - -Architecture of Certificate Infrastructure for SCM. -==================================================== - -The certificate infrastructure has two main parts: the certificate server, or -Certificate Authority (CA), and the clients who want certificates. The CA is -responsible for issuing certificates to participating entities. - -To issue a certificate, the CA has to verify the identity and the assertions -in the certificate. The client starts off by making a request to the CA for a -certificate. This request is called a Certificate Signing Request, or CSR -(PKCS#10). - -When a CSR arrives at the CA, the CA decodes the CSR and verifies that all the -fields in the CSR are in line with what the system expects. Since there are -lots of possible ways to construct an X.509 certificate, we rely on PKI -profiles. - -Generally, PKI profiles are policy documents or general guidelines that are -followed by the requester and the CA. However, most of the PKI profiles that are -commonly available are general purpose and offer too much surface area. - -SCM CA infrastructure supports the notion of a PKI profile class which can -codify the RDNs, Extensions and other certificate policies. The CA, when -issuing a certificate, will invoke a certificate approver class based on the -authentication method used. For example, out of the box, we support manual, -Kerberos, trusted network and testing authentication mechanisms. - -If there is no authentication mechanism in place, then when the CA receives the -CSR, it runs the standard PKI profile over it to verify that all the fields are -in expected ranges. Once that is done, the signing request is sent for human -review and approval. This form of certificate approval is called Manual. Of -all the certificate approval processes, this is the **most secure**. This -approval needs to be done once for each data node. - -For existing clusters, where data nodes already have a Kerberos keytab, we -can leverage the Kerberos identity mechanism to identify the data node that -is requesting the certificate. In this case, users can configure the system -to leverage Kerberos while issuing certificates, and the SCM CA will be able to -verify the data node's identity and issue certificates automatically. - -In environments like Kubernetes, we can leverage the base system services to -pass on a shared secret securely. In this model too, we can rely on these -secrets to make sure that it is the right data node that is talking to us. This -kind of approval is called Trusted network approval. In this process, each -data node not only sends the CSR but signs the request with a secret shared -with SCM. SCM can then issue a certificate without the intervention of a -human administrator. - -The last method, TESTING, which should never be used other than in development - and testing clusters, is merely a mechanism to bypass all identity checks. If -this flag is set, then the CA will issue a certificate if the base profile approves all fields.
- - * Please do not use this mechanism(TESTING) for any purpose other than - * testing. - -CA - Certificate Approval and Code Layout (as of Dec, 1st, 2018) -================================================================= -The CA implementation ( as of now it is called DefaultCA) receives a CSR from - the network layer. The network also tells the system what approver type to - use, that is if Kerberos or Shared secrets mechanism is used, it reports - that to Default CA. - -The default CA instantiates the approver based on the type of the approver -indicated by the network layer. This approver creates an instance of the PKI -profile and passes each field from the certificate signing request. The PKI -profile (as of today Dec 1st, 2018, we have one profile called Ozone profile) - verifies that each field in the CSR meets the approved set of values. - -Once the PKI Profile validates the request, it is either auto approved or -queued for manual review. - - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java deleted file mode 100644 index 58270baabcc..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcClientInterceptor.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
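
The following is an illustrative sketch (not part of the deleted sources) of the CSR flow described above: the client builds a PKCS#10 request that carries a PKCS#9 extensionRequest attribute, and the CA side pulls the requested extensions back out with the SecurityUtil helper removed in this patch. The key size, signature algorithm, and requested key usage are assumptions.

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.Extension;
import org.bouncycastle.asn1.x509.Extensions;
import org.bouncycastle.asn1.x509.ExtensionsGenerator;
import org.bouncycastle.asn1.x509.KeyUsage;
import org.bouncycastle.operator.ContentSigner;
import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;

public final class CsrFlowSketch {
  public static void main(String[] args) throws Exception {
    // Client side: subject follows the CN=Subject,OU=ScmID,O=ClusterID format.
    X500Name dn = SecurityUtil.getDistinguishedName("dn-1", "scm-1", "cluster-1");
    KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
    generator.initialize(2048);
    KeyPair keyPair = generator.generateKeyPair();

    // Requested extensions travel in the PKCS#9 extensionRequest attribute.
    ExtensionsGenerator extensions = new ExtensionsGenerator();
    extensions.addExtension(Extension.keyUsage, true,
        new KeyUsage(KeyUsage.digitalSignature | KeyUsage.keyEncipherment));

    ContentSigner signer =
        new JcaContentSignerBuilder("SHA256withRSA").build(keyPair.getPrivate());
    PKCS10CertificationRequest csr =
        new JcaPKCS10CertificationRequestBuilder(dn, keyPair.getPublic())
            .addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest,
                extensions.generate())
            .build(signer);

    // CA side: recover the requested extensions before running the PKI profile.
    Extensions requested = SecurityUtil.getPkcs9Extensions(csr);
    System.out.println(requested.getExtensionOIDs().length
        + " extension(s) requested");
  }
}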

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import org.apache.ratis.thirdparty.io.grpc.CallOptions; -import org.apache.ratis.thirdparty.io.grpc.Channel; -import org.apache.ratis.thirdparty.io.grpc.ClientCall; -import org.apache.ratis.thirdparty.io.grpc.ClientInterceptor; -import org.apache.ratis.thirdparty.io.grpc.ForwardingClientCall.SimpleForwardingClientCall; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.Metadata.Key; -import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; - -/** - * Interceptor to add the tracing id to the outgoing call header. - */ -public class GrpcClientInterceptor implements ClientInterceptor { - - public static final Key TRACING_HEADER = - Key.of("Tracing", Metadata.ASCII_STRING_MARSHALLER); - - @Override - public ClientCall interceptCall( - MethodDescriptor method, CallOptions callOptions, - Channel next) { - - return new SimpleForwardingClientCall( - next.newCall(method, callOptions)) { - - @Override - public void start(Listener responseListener, Metadata headers) { - - Metadata tracingHeaders = new Metadata(); - tracingHeaders.put(TRACING_HEADER, TracingUtil.exportCurrentSpan()); - - headers.merge(tracingHeaders); - - super.start(responseListener, headers); - } - }; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java deleted file mode 100644 index b63af12b3fa..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/GrpcServerInterceptor.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import io.opentracing.Scope; -import org.apache.ratis.thirdparty.io.grpc.ForwardingServerCallListener.SimpleForwardingServerCallListener; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.ServerCall; -import org.apache.ratis.thirdparty.io.grpc.ServerCall.Listener; -import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor; - -/** - * Interceptor to add the tracing id to the outgoing call header. - */ -public class GrpcServerInterceptor implements ServerInterceptor { - - @Override - public Listener interceptCall( - ServerCall call, Metadata headers, - ServerCallHandler next) { - - return new SimpleForwardingServerCallListener( - next.startCall(call, headers)) { - @Override - public void onMessage(ReqT message) { - try (Scope scope = TracingUtil - .importAndCreateScope( - call.getMethodDescriptor().getFullMethodName(), - headers.get(GrpcClientInterceptor.TRACING_HEADER))) { - super.onMessage(message); - } - } - }; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java deleted file mode 100644 index 56d59ea6f1a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
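
For reference, a minimal sketch (not part of this patch) of how the two interceptors above could be wired into the Ratis-shaded gRPC stack they are compiled against. The host, port, and service implementation are placeholders, and TracingUtil.initTracing(...) is assumed to have been called so the client actually has an active span to export.

import java.io.IOException;
import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor;
import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
import org.apache.ratis.thirdparty.io.grpc.BindableService;
import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
import org.apache.ratis.thirdparty.io.grpc.ManagedChannelBuilder;
import org.apache.ratis.thirdparty.io.grpc.Server;
import org.apache.ratis.thirdparty.io.grpc.ServerBuilder;

public final class TracingGrpcWiring {

  /** Client side: every outgoing call carries the active span in the Tracing header. */
  public static ManagedChannel newTracedChannel(String host, int port) {
    return ManagedChannelBuilder.forAddress(host, port)
        .usePlaintext()
        .intercept(new GrpcClientInterceptor())
        .build();
  }

  /** Server side: each incoming message is handled inside a scope imported from that header. */
  public static Server newTracedServer(int port, BindableService service)
      throws IOException {
    return ServerBuilder.forPort(port)
        .addService(service)
        .intercept(new GrpcServerInterceptor())
        .build()
        .start();
  }
}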

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.math.BigInteger; - -import io.jaegertracing.internal.JaegerSpanContext; -import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException; -import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException; -import io.jaegertracing.internal.exceptions.TraceIdOutOfBoundException; -import io.jaegertracing.spi.Codec; -import io.opentracing.propagation.Format; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A jaeger codec to save the current tracing context as a string. - */ -public class StringCodec implements Codec { - - public static final Logger LOG = LoggerFactory.getLogger(StringCodec.class); - public static final StringFormat FORMAT = new StringFormat(); - - @Override - public JaegerSpanContext extract(StringBuilder s) { - if (s == null) { - throw new EmptyTracerStateStringException(); - } - String value = s.toString(); - if (value != null && !value.equals("")) { - String[] parts = value.split(":"); - if (parts.length != 4) { - if (LOG.isDebugEnabled()) { - LOG.debug("MalformedTracerStateString: {}", value); - } - throw new MalformedTracerStateStringException(value); - } else { - String traceId = parts[0]; - if (traceId.length() <= 32 && traceId.length() >= 1) { - return new JaegerSpanContext(high(traceId), - (new BigInteger(traceId, 16)).longValue(), - (new BigInteger(parts[1], 16)).longValue(), - (new BigInteger(parts[2], 16)).longValue(), - (new BigInteger(parts[3], 16)).byteValue()); - } else { - throw new TraceIdOutOfBoundException( - "Trace id [" + traceId + "] length is not withing 1 and 32"); - } - } - } else { - throw new EmptyTracerStateStringException(); - } - } - - @Override - public void inject(JaegerSpanContext context, - StringBuilder string) { - int intFlag = context.getFlags() & 255; - string.append( - context.getTraceId() + ":" + Long.toHexString(context.getSpanId()) - + ":" + Long.toHexString(context.getParentId()) + ":" + Integer - .toHexString(intFlag)); - } - - private static long high(String hexString) { - if (hexString.length() > 16) { - int highLength = hexString.length() - 16; - String highString = hexString.substring(0, highLength); - return (new BigInteger(highString, 16)).longValue(); - } else { - return 0L; - } - } - - /** - * The format to save the context as text. - *

- * Using the mutable StringBuilder instead of plain String. - */ - public static final class StringFormat implements Format { - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java deleted file mode 100644 index 8bdf638acfc..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TraceAllMethod.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
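
A small round trip (not from the patch) through the codec above, showing the traceId:spanId:parentId:flags hex form it writes and reads. The span context values are arbitrary and chosen only for illustration.

import io.jaegertracing.internal.JaegerSpanContext;
import org.apache.hadoop.hdds.tracing.StringCodec;

public final class StringCodecRoundTrip {
  public static void main(String[] args) {
    StringCodec codec = new StringCodec();

    // inject() appends "traceId:spanId:parentId:flags" in hex to the carrier.
    JaegerSpanContext original = new JaegerSpanContext(0L, 42L, 7L, 3L, (byte) 1);
    StringBuilder carrier = new StringBuilder();
    codec.inject(original, carrier);

    // extract() parses the same four colon-separated fields back into a context.
    JaegerSpanContext restored = codec.extract(carrier);
    System.out.println(carrier + " -> spanId=" + restored.getSpanId());
  }
}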

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; - -/** - * A Java proxy invocation handler to trace all the methods of the delegate - * class. - * - * @param - */ -public class TraceAllMethod implements InvocationHandler { - - /** - * Cache for all the method objects of the delegate class. - */ - private final Map[], Method>> methods = new HashMap<>(); - - private T delegate; - - private String name; - - public TraceAllMethod(T delegate, String name) { - this.delegate = delegate; - this.name = name; - for (Method method : delegate.getClass().getDeclaredMethods()) { - if (!methods.containsKey(method.getName())) { - methods.put(method.getName(), new HashMap<>()); - } - methods.get(method.getName()).put(method.getParameterTypes(), method); - } - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - Method delegateMethod = findDelegatedMethod(method); - try (Scope scope = GlobalTracer.get().buildSpan( - name + "." + method.getName()) - .startActive(true)) { - try { - return delegateMethod.invoke(delegate, args); - } catch (Exception ex) { - if (ex.getCause() != null) { - throw ex.getCause(); - } else { - throw ex; - } - } - } - } - - private Method findDelegatedMethod(Method method) { - for (Entry[], Method> entry : methods.get(method.getName()) - .entrySet()) { - if (Arrays.equals(entry.getKey(), method.getParameterTypes())) { - return entry.getValue(); - } - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java deleted file mode 100644 index 8e82a375abb..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import java.lang.reflect.Proxy; - -import io.jaegertracing.Configuration; -import io.jaegertracing.internal.JaegerTracer; -import io.opentracing.Scope; -import io.opentracing.Span; -import io.opentracing.SpanContext; -import io.opentracing.Tracer; -import io.opentracing.util.GlobalTracer; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; - -/** - * Utility class to collect all the tracing helper methods. - */ -public final class TracingUtil { - - private static final String NULL_SPAN_AS_STRING = ""; - - private TracingUtil() { - } - - /** - * Initialize the tracing with the given service name. - * - * @param serviceName - */ - public static void initTracing(String serviceName) { - if (!GlobalTracer.isRegistered()) { - Configuration config = Configuration.fromEnv(serviceName); - JaegerTracer tracer = config.getTracerBuilder() - .registerExtractor(StringCodec.FORMAT, new StringCodec()) - .registerInjector(StringCodec.FORMAT, new StringCodec()) - .build(); - GlobalTracer.register(tracer); - } - } - - /** - * Export the active tracing span as a string. - * - * @return encoded tracing context. - */ - public static String exportCurrentSpan() { - if (GlobalTracer.get().activeSpan() != null) { - StringBuilder builder = new StringBuilder(); - GlobalTracer.get().inject(GlobalTracer.get().activeSpan().context(), - StringCodec.FORMAT, builder); - return builder.toString(); - } - return NULL_SPAN_AS_STRING; - } - - /** - * Export the specific span as a string. - * - * @return encoded tracing context. - */ - public static String exportSpan(Span span) { - if (span != null) { - StringBuilder builder = new StringBuilder(); - GlobalTracer.get().inject(span.context(), StringCodec.FORMAT, builder); - return builder.toString(); - } - return NULL_SPAN_AS_STRING; - } - - /** - * Create a new scope and use the imported span as the parent. - * - * @param name name of the newly created scope - * @param encodedParent Encoded parent span (could be null or empty) - * - * @return OpenTracing scope. - */ - public static Scope importAndCreateScope(String name, String encodedParent) { - Tracer.SpanBuilder spanBuilder; - Tracer tracer = GlobalTracer.get(); - SpanContext parentSpan = null; - if (encodedParent != null && encodedParent.length() > 0) { - StringBuilder builder = new StringBuilder(); - builder.append(encodedParent); - parentSpan = tracer.extract(StringCodec.FORMAT, builder); - - } - - if (parentSpan == null) { - spanBuilder = tracer.buildSpan(name); - } else { - spanBuilder = - tracer.buildSpan(name).asChildOf(parentSpan); - } - return spanBuilder.startActive(true); - } - - /** - * Creates a proxy of the implementation and trace all the method calls. - * - * @param delegate the original class instance - * @param interfce the interface which should be implemented by the proxy - * @param the type of the interface - * @param conf configuration - * - * @return A new interface which implements interfce but delegate all the - * calls to the delegate and also enables tracing. 
- */ - public static T createProxy(T delegate, Class interfce, - org.apache.hadoop.conf.Configuration conf) { - boolean isTracingEnabled = conf.getBoolean( - ScmConfigKeys.HDDS_TRACING_ENABLED, - ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); - if (!isTracingEnabled) { - return delegate; - } - Class aClass = delegate.getClass(); - return (T) Proxy.newProxyInstance(aClass.getClassLoader(), - new Class[] {interfce}, - new TraceAllMethod(delegate, interfce.getSimpleName())); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java deleted file mode 100644 index 3ead03b6f6f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
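
A usage sketch (not from the patch) of the utility above: initialize the tracer once, ship the active span as a string across an RPC boundary, and wrap a client object in a tracing proxy. VolumeClient and NoOpVolumeClient are hypothetical types invented only for this illustration.

import io.opentracing.Scope;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.tracing.TracingUtil;

public final class TracingUtilSketch {

  /** Hypothetical client interface, used only for this illustration. */
  public interface VolumeClient {
    void createVolume(String name);
  }

  /** Trivial public implementation so the reflective proxy can call into it. */
  public static class NoOpVolumeClient implements VolumeClient {
    @Override
    public void createVolume(String name) {
      // no-op for the sketch
    }
  }

  public static void main(String[] args) {
    TracingUtil.initTracing("ExampleService");

    // Export the current span on the sender ...
    try (Scope outer = TracingUtil.importAndCreateScope("outer", null)) {
      String encoded = TracingUtil.exportCurrentSpan();

      // ... and rebuild it as the parent on the receiver.
      try (Scope inner = TracingUtil.importAndCreateScope("inner", encoded)) {
        // RPC handler work would run here, attached to the parent span.
      }
    }

    // Wrap the implementation so every interface call gets its own span
    // (only when hdds.tracing.enabled resolves to true).
    VolumeClient traced = TracingUtil.createProxy(
        new NoOpVolumeClient(), VolumeClient.class, new OzoneConfiguration());
    traced.createVolume("volume1");
  }
}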

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.tracing; - -/** - * Helper classes to use distributed tracing in Ozone components. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java deleted file mode 100644 index ca8d87053f7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Lists; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * An abstract class for a background service in ozone. - * A background service schedules multiple child tasks in parallel - * in a certain period. In each interval, it waits until all the tasks - * finish execution and then schedule next interval. - */ -public abstract class BackgroundService { - - @VisibleForTesting - public static final Logger LOG = - LoggerFactory.getLogger(BackgroundService.class); - - // Executor to launch child tasks - private final ScheduledExecutorService exec; - private final ThreadGroup threadGroup; - private final ThreadFactory threadFactory; - private final String serviceName; - private final long interval; - private final long serviceTimeout; - private final TimeUnit unit; - private final PeriodicalTask service; - - public BackgroundService(String serviceName, long interval, - TimeUnit unit, int threadPoolSize, long serviceTimeout) { - this.interval = interval; - this.unit = unit; - this.serviceName = serviceName; - this.serviceTimeout = serviceTimeout; - threadGroup = new ThreadGroup(serviceName); - ThreadFactory tf = r -> new Thread(threadGroup, r); - threadFactory = new ThreadFactoryBuilder() - .setThreadFactory(tf) - .setDaemon(true) - .setNameFormat(serviceName + "#%d") - .build(); - exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory); - service = new PeriodicalTask(); - } - - protected ExecutorService getExecutorService() { - return this.exec; - } - - @VisibleForTesting - public int getThreadCount() { - return threadGroup.activeCount(); - } - - @VisibleForTesting - public void triggerBackgroundTaskForTesting() { - service.run(); - } - - // start service - public void start() { - exec.scheduleWithFixedDelay(service, 0, interval, unit); - } - - public abstract BackgroundTaskQueue getTasks(); - - /** - * Run one or more background tasks concurrently. - * Wait until all tasks to return the result. - */ - public class PeriodicalTask implements Runnable { - @Override - public synchronized void run() { - if (LOG.isDebugEnabled()) { - LOG.debug("Running background service : {}", serviceName); - } - BackgroundTaskQueue tasks = getTasks(); - if (tasks.isEmpty()) { - // No task found, or some problems to init tasks - // return and retry in next interval. 
- return; - } - if (LOG.isDebugEnabled()) { - LOG.debug("Number of background tasks to execute : {}", tasks.size()); - } - CompletionService taskCompletionService = - new ExecutorCompletionService<>(exec); - - List> results = Lists.newArrayList(); - while (tasks.size() > 0) { - BackgroundTask task = tasks.poll(); - Future result = - taskCompletionService.submit(task); - results.add(result); - } - - results.parallelStream().forEach(taskResultFuture -> { - try { - // Collect task results - BackgroundTaskResult result = serviceTimeout > 0 - ? taskResultFuture.get(serviceTimeout, unit) - : taskResultFuture.get(); - if (LOG.isDebugEnabled()) { - LOG.debug("task execution result size {}", result.getSize()); - } - } catch (InterruptedException | ExecutionException e) { - LOG.warn( - "Background task fails to execute, " - + "retrying in next interval", e); - } catch (TimeoutException e) { - LOG.warn("Background task executes timed out, " - + "retrying in next interval", e); - } - }); - } - } - - // shutdown and make sure all threads are properly released. - public void shutdown() { - LOG.info("Shutting down service {}", this.serviceName); - exec.shutdown(); - try { - if (!exec.awaitTermination(60, TimeUnit.SECONDS)) { - exec.shutdownNow(); - } - } catch (InterruptedException e) { - exec.shutdownNow(); - } - if (threadGroup.activeCount() == 0 && !threadGroup.isDestroyed()) { - threadGroup.destroy(); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java deleted file mode 100644 index d5ad2a394dd..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
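
A minimal sketch of a concrete service built on the abstract class above: one low-priority no-op task per interval. The class and task names are invented for illustration; real subclasses return actual cleanup work (block deletion, key purging, and so on) from getTasks(). Calling start() schedules the periodic run; shutdown() drains the executor.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.BackgroundTask;
import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;

/** A periodic service that schedules one no-op task every interval. */
public class NoOpCleanupService extends BackgroundService {

  public NoOpCleanupService() {
    // service name, interval, interval unit, thread pool size, per-task timeout
    super("NoOpCleanupService", 60_000, TimeUnit.MILLISECONDS, 2, 300_000);
  }

  @Override
  public BackgroundTaskQueue getTasks() {
    BackgroundTaskQueue queue = new BackgroundTaskQueue();
    queue.add(new BackgroundTask() {
      @Override
      public int getPriority() {
        return 0;
      }

      @Override
      public BackgroundTaskResult call() {
        // a real service would do its cleanup work here
        return EmptyTaskResult.newResult();
      }
    });
    return queue;
  }
}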

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.concurrent.Callable; - -/** - * A task thread to run by {@link BackgroundService}. - */ -public interface BackgroundTask extends Callable { - - int getPriority(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java deleted file mode 100644 index 005d14b8e3c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskQueue.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.PriorityQueue; - -/** - * A priority queue that stores a number of {@link BackgroundTask}. - */ -public class BackgroundTaskQueue { - - private final PriorityQueue tasks; - - public BackgroundTaskQueue() { - tasks = new PriorityQueue<>((task1, task2) - -> task1.getPriority() - task2.getPriority()); - } - - /** - * @return the head task in this queue. - */ - public synchronized BackgroundTask poll() { - return tasks.poll(); - } - - /** - * Add a {@link BackgroundTask} to the queue, - * the task will be sorted by its priority. - * - * @param task - */ - public synchronized void add(BackgroundTask task) { - tasks.add(task); - } - - /** - * @return true if the queue contains no task, false otherwise. - */ - public synchronized boolean isEmpty() { - return tasks.isEmpty(); - } - - /** - * @return the size of the queue. - */ - public synchronized int size() { - return tasks.size(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java deleted file mode 100644 index be8032b06a3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTaskResult.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils; - -/** - * Result of a {@link BackgroundTask}. - */ -public interface BackgroundTaskResult { - - /** - * Returns the size of entries included in this result. - */ - int getSize(); - - /** - * An empty task result implementation. - */ - class EmptyTaskResult implements BackgroundTaskResult { - - public static EmptyTaskResult newResult() { - return new EmptyTaskResult(); - } - - @Override - public int getSize() { - return 0; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java deleted file mode 100644 index 377c7f6a1a8..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BatchOperation.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.collect.Lists; - -import java.util.List; - -/** - * An utility class to store a batch of DB write operations. - */ -public class BatchOperation { - - /** - * Enum for write operations. - */ - public enum Operation { - DELETE, PUT - } - - private List operations = - Lists.newArrayList(); - - /** - * Add a PUT operation into the batch. - */ - public void put(byte[] key, byte[] value) { - operations.add(new SingleOperation(Operation.PUT, key, value)); - } - - /** - * Add a DELETE operation into the batch. - */ - public void delete(byte[] key) { - operations.add(new SingleOperation(Operation.DELETE, key, null)); - - } - - public List getOperations() { - return operations; - } - - /** - * A SingleOperation represents a PUT or DELETE operation - * and the data the operation needs to manipulates. - */ - public static class SingleOperation { - - private Operation opt; - private byte[] key; - private byte[] value; - - public SingleOperation(Operation opt, byte[] key, byte[] value) { - this.opt = opt; - if (key == null) { - throw new IllegalArgumentException("key cannot be null"); - } - this.key = key.clone(); - this.value = value == null ? null : value.clone(); - } - - public Operation getOpt() { - return opt; - } - - public byte[] getKey() { - return key.clone(); - } - - public byte[] getValue() { - return value == null ? 
null : value.clone(); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java deleted file mode 100644 index dc08c2bd63c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/EntryConsumer.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.io.IOException; - -/** - * A consumer for metadata store key-value entries. - * Used by {@link MetadataStore} class. - */ -@FunctionalInterface -public interface EntryConsumer { - - /** - * Consumes a key and value and produces a boolean result. - * @param key key - * @param value value - * @return a boolean value produced by the consumer - * @throws IOException - */ - boolean consume(byte[] key, byte[] value) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java deleted file mode 100644 index 6a372d12371..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.ClassUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class returns build information about Hadoop components. 
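
A short usage sketch (not from the patch) of the batch container above: queue a few writes and a delete, then hand the batch to a store's writeBatch() (see the LevelDBStore implementation further below) for an atomic commit.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.utils.BatchOperation;

public final class BatchOperationSketch {
  public static void main(String[] args) {
    BatchOperation batch = new BatchOperation();
    batch.put("key1".getBytes(StandardCharsets.UTF_8),
        "value1".getBytes(StandardCharsets.UTF_8));
    batch.put("key2".getBytes(StandardCharsets.UTF_8),
        "value2".getBytes(StandardCharsets.UTF_8));
    batch.delete("staleKey".getBytes(StandardCharsets.UTF_8));

    // The batch is only a container; a MetadataStore implementation applies
    // all queued operations atomically via writeBatch(batch).
    System.out.println(batch.getOperations().size() + " queued operations");
  }
}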
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public final class HddsVersionInfo { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsVersionInfo.class); - - public static final VersionInfo HDDS_VERSION_INFO = - new VersionInfo("hdds"); - - private HddsVersionInfo() {} - - public static void main(String[] args) { - System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion()); - System.out.println( - "Source code repository " + HDDS_VERSION_INFO.getUrl() + " -r " + - HDDS_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + HDDS_VERSION_INFO.getUser() + " on " - + HDDS_VERSION_INFO.getDate()); - System.out.println( - "Compiled with protoc " + HDDS_VERSION_INFO.getProtocVersion()); - System.out.println( - "From source with checksum " + HDDS_VERSION_INFO.getSrcChecksum()); - if (LOG.isDebugEnabled()) { - LOG.debug("This command was run using " + - ClassUtil.findContainingJar(HddsVersionInfo.class)); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java deleted file mode 100644 index 0598987f9b5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStore.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; -import org.fusesource.leveldbjni.JniDBFactory; -import org.iq80.leveldb.DB; -import org.iq80.leveldb.DBIterator; -import org.iq80.leveldb.Options; -import org.iq80.leveldb.ReadOptions; -import org.iq80.leveldb.Snapshot; -import org.iq80.leveldb.WriteBatch; -import org.iq80.leveldb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * LevelDB interface. - */ -public class LevelDBStore implements MetadataStore { - - private static final Logger LOG = - LoggerFactory.getLogger(LevelDBStore.class); - - private DB db; - private final File dbFile; - private final Options dbOptions; - private final WriteOptions writeOptions; - - public LevelDBStore(File dbPath, boolean createIfMissing) - throws IOException { - dbOptions = new Options(); - dbOptions.createIfMissing(createIfMissing); - this.dbFile = dbPath; - this.writeOptions = new WriteOptions().sync(true); - openDB(dbPath, dbOptions); - } - - /** - * Opens a DB file. 
- * - * @param dbPath - DB File path - * @throws IOException - */ - public LevelDBStore(File dbPath, Options options) - throws IOException { - dbOptions = options; - this.dbFile = dbPath; - this.writeOptions = new WriteOptions().sync(true); - openDB(dbPath, dbOptions); - } - - private void openDB(File dbPath, Options options) throws IOException { - if (dbPath.getParentFile().mkdirs()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Db path {} created.", dbPath.getParentFile()); - } - } - db = JniDBFactory.factory.open(dbPath, options); - if (LOG.isDebugEnabled()) { - LOG.debug("LevelDB successfully opened"); - LOG.debug("[Option] cacheSize = " + options.cacheSize()); - LOG.debug("[Option] createIfMissing = " + options.createIfMissing()); - LOG.debug("[Option] blockSize = " + options.blockSize()); - LOG.debug("[Option] compressionType= " + options.compressionType()); - LOG.debug("[Option] maxOpenFiles= " + options.maxOpenFiles()); - LOG.debug("[Option] writeBufferSize= "+ options.writeBufferSize()); - } - } - - /** - * Puts a Key into file. - * - * @param key - key - * @param value - value - */ - @Override - public void put(byte[] key, byte[] value) { - db.put(key, value, writeOptions); - } - - /** - * Get Key. - * - * @param key key - * @return value - */ - @Override - public byte[] get(byte[] key) { - return db.get(key); - } - - /** - * Delete Key. - * - * @param key - Key - */ - @Override - public void delete(byte[] key) { - db.delete(key); - } - - /** - * Closes the DB. - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (db != null){ - db.close(); - } - } - - /** - * Returns true if the DB is empty. - * - * @return boolean - * @throws IOException - */ - @Override - public boolean isEmpty() throws IOException { - try (DBIterator iter = db.iterator()) { - iter.seekToFirst(); - boolean hasNext = !iter.hasNext(); - return hasNext; - } - } - - /** - * Returns the actual levelDB object. - * @return DB handle. - */ - public DB getDB() { - return db; - } - - /** - * Returns an iterator on all the key-value pairs in the DB. - * @return an iterator on DB entries. 
- */ - public DBIterator getIterator() { - return db.iterator(); - } - - - @Override - public void destroy() throws IOException { - close(); - JniDBFactory.factory.destroy(dbFile, dbOptions); - } - - @Override - public ImmutablePair peekAround(int offset, - byte[] from) throws IOException, IllegalArgumentException { - try (DBIterator it = db.iterator()) { - if (from == null) { - it.seekToFirst(); - } else { - it.seek(from); - } - if (!it.hasNext()) { - return null; - } - switch (offset) { - case 0: - Entry current = it.next(); - return new ImmutablePair<>(current.getKey(), current.getValue()); - case 1: - if (it.next() != null && it.hasNext()) { - Entry next = it.peekNext(); - return new ImmutablePair<>(next.getKey(), next.getValue()); - } - break; - case -1: - if (it.hasPrev()) { - Entry prev = it.peekPrev(); - return new ImmutablePair<>(prev.getKey(), prev.getValue()); - } - break; - default: - throw new IllegalArgumentException( - "Position can only be -1, 0 " + "or 1, but found " + offset); - } - } - return null; - } - - @Override - public void iterate(byte[] from, EntryConsumer consumer) - throws IOException { - try (DBIterator iter = db.iterator()) { - if (from != null) { - iter.seek(from); - } else { - iter.seekToFirst(); - } - while (iter.hasNext()) { - Entry current = iter.next(); - if (!consumer.consume(current.getKey(), - current.getValue())) { - break; - } - } - } - } - - /** - * Compacts the DB by removing deleted keys etc. - * @throws IOException if there is an error. - */ - @Override - public void compactDB() throws IOException { - if(db != null) { - // From LevelDB docs : begin == null and end == null means the whole DB. - db.compactRange(null, null); - } - } - - @Override - public void flushDB(boolean sync) { - // TODO: Implement flush for level db - // do nothing - } - - @Override - public void writeBatch(BatchOperation operation) throws IOException { - List operations = - operation.getOperations(); - if (!operations.isEmpty()) { - try (WriteBatch writeBatch = db.createWriteBatch()) { - for (BatchOperation.SingleOperation opt : operations) { - switch (opt.getOpt()) { - case DELETE: - writeBatch.delete(opt.getKey()); - break; - case PUT: - writeBatch.put(opt.getKey(), opt.getValue()); - break; - default: - throw new IllegalArgumentException("Invalid operation " - + opt.getOpt()); - } - } - db.write(writeBatch); - } - } - } - - @Override - public List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, false, filters); - } - - @Override - public List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, true, filters); - } - - /** - * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilter} can be added to - * filter keys if necessary. To prevent race conditions while listing - * entries, this implementation takes a snapshot and lists the entries from - * the snapshot. This may, on the other hand, cause the range result slight - * different with actual data if data is updating concurrently. - *

- * If the startKey is specified and found in levelDB, this key and the keys - * after this key will be included in the result. If the startKey is null - * all entries will be included as long as other conditions are satisfied. - * If the given startKey doesn't exist, an empty list will be returned. - *

- * The count argument is to limit number of total entries to return, - * the value for count must be an integer greater than 0. - *

- * This method allows to specify one or more {@link MetadataKeyFilter} - * to filter keys by certain condition. Once given, only the entries - * whose key passes all the filters will be included in the result. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database or an empty list if the - * startKey is invalid. - * @throws IOException if there are I/O errors. - * @throws IllegalArgumentException if count is less than 0. - */ - private List> getRangeKVs(byte[] startKey, - int count, boolean sequential, MetadataKeyFilter... filters) - throws IOException { - List> result = new ArrayList<>(); - long start = System.currentTimeMillis(); - if (count < 0) { - throw new IllegalArgumentException( - "Invalid count given " + count + ", count must be greater than 0"); - } - Snapshot snapShot = null; - DBIterator dbIter = null; - try { - snapShot = db.getSnapshot(); - ReadOptions readOptions = new ReadOptions().snapshot(snapShot); - dbIter = db.iterator(readOptions); - if (startKey == null) { - dbIter.seekToFirst(); - } else { - if (db.get(startKey) == null) { - // Key not found, return empty list - return result; - } - dbIter.seek(startKey); - } - while (dbIter.hasNext() && result.size() < count) { - byte[] preKey = dbIter.hasPrev() ? dbIter.peekPrev().getKey() : null; - byte[] nextKey = dbIter.hasNext() ? dbIter.peekNext().getKey() : null; - Entry current = dbIter.next(); - - if (filters == null) { - result.add(current); - } else { - if (Arrays.asList(filters).stream().allMatch( - entry -> entry.filterKey(preKey, current.getKey(), nextKey))) { - result.add(current); - } else { - if (result.size() > 0 && sequential) { - // if the caller asks for a sequential range of results, - // and we met a dis-match, abort iteration from here. - // if result is empty, we continue to look for the first match. - break; - } - } - } - } - } finally { - if (snapShot != null) { - snapShot.close(); - } - if (dbIter != null) { - dbIter.close(); - } - if (LOG.isDebugEnabled()) { - if (filters != null) { - for (MetadataKeyFilters.MetadataKeyFilter filter : filters) { - int scanned = filter.getKeysScannedNum(); - int hinted = filter.getKeysHintedNum(); - if (scanned > 0 || hinted > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", - filter.getClass().getSimpleName(), - filter.getKeysScannedNum(), filter.getKeysHintedNum()); - } - } - } - } - long end = System.currentTimeMillis(); - long timeConsumed = end - start; - if (LOG.isDebugEnabled()) { - LOG.debug("Time consumed for getRangeKVs() is {}ms," - + " result length is {}.", timeConsumed, result.size()); - } - } - } - return result; - } - - @Override - public MetaStoreIterator iterator() { - return new LevelDBStoreIterator(db.iterator()); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java deleted file mode 100644 index f5b6769b70d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LevelDBStoreIterator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
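
A small usage sketch (not from the patch) of the LevelDB-backed store above: open the database, write a couple of keys, and run a bounded range scan. The on-disk path is illustrative, and the leveldbjni native library must be available at runtime.

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdds.utils.LevelDBStore;

public final class LevelDBStoreSketch {
  public static void main(String[] args) throws Exception {
    File dbPath = new File("/tmp/example-leveldb/db"); // illustrative location
    LevelDBStore store = new LevelDBStore(dbPath, true);
    try {
      store.put("key-001".getBytes(StandardCharsets.UTF_8),
          "value-001".getBytes(StandardCharsets.UTF_8));
      store.put("key-002".getBytes(StandardCharsets.UTF_8),
          "value-002".getBytes(StandardCharsets.UTF_8));

      // Range scan: at most 10 entries starting from the given key, no filters.
      List<Map.Entry<byte[], byte[]>> range =
          store.getRangeKVs("key-001".getBytes(StandardCharsets.UTF_8), 10);
      System.out.println("entries in range: " + range.size());
    } finally {
      store.close();
    }
  }
}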
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.iq80.leveldb.DBIterator; -import java.util.Map; -import java.util.NoSuchElementException; - -/** - * LevelDB store iterator. - */ -public class LevelDBStoreIterator - implements MetaStoreIterator { - - - private DBIterator levelDBIterator; - - public LevelDBStoreIterator(DBIterator iterator) { - this.levelDBIterator = iterator; - levelDBIterator.seekToFirst(); - } - - @Override - public boolean hasNext() { - return levelDBIterator.hasNext(); - } - - @Override - public MetadataStore.KeyValue next() { - if(levelDBIterator.hasNext()) { - Map.Entry entry = levelDBIterator.next(); - return MetadataStore.KeyValue.create(entry.getKey(), entry.getValue()); - } - throw new NoSuchElementException("LevelDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - levelDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - levelDBIterator.seekToLast(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java deleted file mode 100644 index 2a33de712ea..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetaStoreIterator.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.util.Iterator; - -/** - * Iterator for MetaDataStore DB. - * @param - */ -public interface MetaStoreIterator extends Iterator { - - /** - * seek to first entry. - */ - void seekToFirst(); - - /** - * seek to last entry. - */ - void seekToLast(); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java deleted file mode 100644 index a88ce475bab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.util.ArrayList; -import java.util.List; - -/** - * An utility class to filter levelDB keys. - */ -public final class MetadataKeyFilters { - - private static KeyPrefixFilter deletingKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX); - - private static KeyPrefixFilter deletedKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETED_KEY_PREFIX); - - private static KeyPrefixFilter normalKeyFilter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX, true) - .addFilter(OzoneConsts.DELETED_KEY_PREFIX, true) - .addFilter(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX, true) - .addFilter(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX, true); - - private MetadataKeyFilters() { - } - - public static KeyPrefixFilter getDeletingKeyFilter() { - return deletingKeyFilter; - } - - public static KeyPrefixFilter getDeletedKeyFilter() { - return deletedKeyFilter; - } - - public static KeyPrefixFilter getNormalKeyFilter() { - return normalKeyFilter; - } - /** - * Interface for levelDB key filters. - */ - public interface MetadataKeyFilter { - /** - * Filter levelDB key with a certain condition. - * - * @param preKey previous key. - * @param currentKey current key. - * @param nextKey next key. - * @return true if a certain condition satisfied, return false otherwise. - */ - boolean filterKey(byte[] preKey, byte[] currentKey, byte[] nextKey); - - default int getKeysScannedNum() { - return 0; - } - - default int getKeysHintedNum() { - return 0; - } - } - - /** - * Utility class to filter key by a string prefix. This filter - * assumes keys can be parsed to a string. - */ - public static class KeyPrefixFilter implements MetadataKeyFilter { - - private List positivePrefixList = new ArrayList<>(); - private List negativePrefixList = new ArrayList<>(); - private boolean atleastOnePositiveMatch; - private int keysScanned = 0; - private int keysHinted = 0; - - public KeyPrefixFilter() {} - - /** - * KeyPrefixFilter constructor. It is made of positive and negative prefix - * list. PositivePrefixList is the list of prefixes which are accepted - * whereas negativePrefixList contains the list of prefixes which are - * rejected. - * - * @param atleastOnePositiveMatch if positive it requires key to be accepted - * by atleast one positive filter. 
- */ - public KeyPrefixFilter(boolean atleastOnePositiveMatch) { - this.atleastOnePositiveMatch = atleastOnePositiveMatch; - } - - public KeyPrefixFilter addFilter(String keyPrefix) { - addFilter(keyPrefix, false); - return this; - } - - public KeyPrefixFilter addFilter(String keyPrefix, boolean negative) { - Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), - "KeyPrefix is null or empty: " + keyPrefix); - // keyPrefix which needs to be added should not be prefix of any opposing - // filter already present. If keyPrefix is a negative filter it should not - // be a prefix of any positive filter. Nor should any opposing filter be - // a prefix of keyPrefix. - // For example if b0 is accepted b can not be rejected and - // if b is accepted b0 can not be rejected. If these scenarios need to be - // handled we need to add priorities. - if (negative) { - Preconditions.checkArgument(positivePrefixList.stream().noneMatch( - prefix -> prefix.startsWith(keyPrefix) || keyPrefix - .startsWith(prefix)), - "KeyPrefix: " + keyPrefix + " already accepted."); - this.negativePrefixList.add(keyPrefix); - } else { - Preconditions.checkArgument(negativePrefixList.stream().noneMatch( - prefix -> prefix.startsWith(keyPrefix) || keyPrefix - .startsWith(prefix)), - "KeyPrefix: " + keyPrefix + " already rejected."); - this.positivePrefixList.add(keyPrefix); - } - return this; - } - - @Override - public boolean filterKey(byte[] preKey, byte[] currentKey, - byte[] nextKey) { - keysScanned++; - if (currentKey == null) { - return false; - } - boolean accept; - - // There are no filters present - if (positivePrefixList.isEmpty() && negativePrefixList.isEmpty()) { - return true; - } - - accept = !positivePrefixList.isEmpty() && positivePrefixList.stream() - .anyMatch(prefix -> { - byte[] prefixBytes = DFSUtil.string2Bytes(prefix); - return prefixMatch(prefixBytes, currentKey); - }); - if (accept) { - keysHinted++; - return true; - } else if (atleastOnePositiveMatch) { - return false; - } - - accept = !negativePrefixList.isEmpty() && negativePrefixList.stream() - .allMatch(prefix -> { - byte[] prefixBytes = DFSUtil.string2Bytes(prefix); - return !prefixMatch(prefixBytes, currentKey); - }); - if (accept) { - keysHinted++; - return true; - } - - return false; - } - - @Override - public int getKeysScannedNum() { - return keysScanned; - } - - @Override - public int getKeysHintedNum() { - return keysHinted; - } - - private static boolean prefixMatch(byte[] prefix, byte[] key) { - Preconditions.checkNotNull(prefix); - Preconditions.checkNotNull(key); - if (key.length < prefix.length) { - return false; - } - for (int i = 0; i < prefix.length; i++) { - if (key[i] != prefix[i]) { - return false; - } - } - return true; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java deleted file mode 100644 index f05e6d2d275..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStore.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
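For context on how these prefix filters were typically combined, here is a minimal sketch (not part of the deleted sources) that builds a KeyPrefixFilter accepting one prefix while rejecting another and then applies it to a raw key; the prefix strings and the UTF-8 key encoding are illustrative assumptions.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;

public final class KeyPrefixFilterExample {
  private KeyPrefixFilterExample() { }

  public static void main(String[] args) {
    // Accept keys starting with "#deleting#" and reject keys starting with
    // "#deleted#"; both prefix literals are made up for this sketch.
    KeyPrefixFilter filter = new KeyPrefixFilter()
        .addFilter("#deleting#")            // positive prefix
        .addFilter("#deleted#", true);      // negative prefix

    byte[] key = "#deleting#block-0001".getBytes(StandardCharsets.UTF_8);

    // Previous/next keys are only consulted by filters that need neighbours;
    // KeyPrefixFilter ignores them, so null is fine here.
    boolean accepted = filter.filterKey(null, key, null);

    System.out.println("accepted=" + accepted
        + ", scanned=" + filter.getKeysScannedNum()
        + ", hinted=" + filter.getKeysHintedNum());
  }
}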
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -/** - * Interface for key-value store that stores ozone metadata. - * Ozone metadata is stored as key value pairs, both key and value - * are arbitrary byte arrays. - */ -@InterfaceStability.Evolving -public interface MetadataStore extends Closeable{ - - /** - * Puts a key-value pair into the store. - * - * @param key metadata key - * @param value metadata value - */ - void put(byte[] key, byte[] value) throws IOException; - - /** - * @return true if the metadata store is empty. - * - * @throws IOException - */ - boolean isEmpty() throws IOException; - - /** - * Returns the value mapped to the given key in byte array. - * - * @param key metadata key - * @return value in byte array - * @throws IOException - */ - byte[] get(byte[] key) throws IOException; - - /** - * Deletes a key from the metadata store. - * - * @param key metadata key - * @throws IOException - */ - void delete(byte[] key) throws IOException; - - /** - * Returns a certain range of key value pairs as a list based on a - * startKey or count. Further a {@link MetadataKeyFilter} can be added to - * filter keys if necessary. To prevent race conditions while listing - * entries, this implementation takes a snapshot and lists the entries from - * the snapshot. This may, on the other hand, cause the range result slight - * different with actual data if data is updating concurrently. - *

- * If the startKey is specified and found in levelDB, this key and the keys - * after this key will be included in the result. If the startKey is null, - * all entries will be included as long as other conditions are satisfied. - * If the given startKey doesn't exist, an empty list will be returned. - *

- * The count argument limits the total number of entries to return; - * the value for count must not be negative. - *

- * This method allows to specify one or more {@link MetadataKeyFilter} - * to filter keys by certain condition. Once given, only the entries - * whose key passes all the filters will be included in the result. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database or an empty list if the - * startKey is invalid. - * @throws IOException if there are I/O errors. - * @throws IllegalArgumentException if count is less than 0. - */ - List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException; - - /** - * This method is very similar to {@link #getRangeKVs}, the only - * different is this method is supposed to return a sequential range - * of elements based on the filters. While iterating the elements, - * if it met any entry that cannot pass the filter, the iterator will stop - * from this point without looking for next match. If no filter is given, - * this method behaves just like {@link #getRangeKVs}. - * - * @param startKey a start key. - * @param count max number of entries to return. - * @param filters customized one or more {@link MetadataKeyFilter}. - * @return a list of entries found in the database. - * @throws IOException - * @throws IllegalArgumentException - */ - List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException; - - /** - * A batch of PUT, DELETE operations handled as a single atomic write. - * - * @throws IOException write fails - */ - void writeBatch(BatchOperation operation) throws IOException; - - /** - * Compact the entire database. - * @throws IOException - */ - void compactDB() throws IOException; - - /** - * Flush the outstanding I/O operations of the DB. - * @param sync if true will sync the outstanding I/Os to the disk. - */ - void flushDB(boolean sync) throws IOException; - - /** - * Destroy the content of the specified database, - * a destroyed database will not be able to load again. - * Be very careful with this method. - * - * @throws IOException if I/O error happens - */ - void destroy() throws IOException; - - /** - * Seek the database to a certain key, returns the key-value - * pairs around this key based on the given offset. Note, this method - * can only support offset -1 (left), 0 (current) and 1 (right), - * any other offset given will cause a {@link IllegalArgumentException}. - * - * @param offset offset to the key - * @param from from which key - * @return a key-value pair - * @throws IOException - */ - ImmutablePair peekAround(int offset, byte[] from) - throws IOException, IllegalArgumentException; - - /** - * Iterates entries in the database from a certain key. - * Applies the given {@link EntryConsumer} to the key and value of - * each entry, the function produces a boolean result which is used - * as the criteria to exit from iteration. - * - * @param from the start key - * @param consumer - * a {@link EntryConsumer} applied to each key and value. If the consumer - * returns true, continues the iteration to next entry; otherwise exits - * the iteration. - * @throws IOException - */ - void iterate(byte[] from, EntryConsumer consumer) - throws IOException; - - /** - * Returns the iterator for this metadata store. - * @return MetaStoreIterator - */ - MetaStoreIterator iterator(); - - /** - * Class used to represent the key and value pair of a db entry. 
- */ - class KeyValue { - - private final byte[] key; - private final byte[] value; - - /** - * KeyValue Constructor, used to represent a key and value of a db entry. - * @param key - * @param value - */ - private KeyValue(byte[] key, byte[] value) { - this.key = key; - this.value = value; - } - - /** - * Return key. - * @return byte[] - */ - public byte[] getKey() { - byte[] result = new byte[key.length]; - System.arraycopy(key, 0, result, 0, key.length); - return result; - } - - /** - * Return value. - * @return byte[] - */ - public byte[] getValue() { - byte[] result = new byte[value.length]; - System.arraycopy(value, 0, result, 0, value.length); - return result; - } - - /** - * Create a KeyValue pair. - * @param key - * @param value - * @return KeyValue object. - */ - public static KeyValue create(byte[] key, byte[] value) { - return new KeyValue(key, value); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java deleted file mode 100644 index 85bb6aa4bff..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import java.io.File; -import java.io.IOException; -import java.util.Optional; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; -import org.iq80.leveldb.Options; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Builder for metadata store. 
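As a rough illustration of how the MetadataStore interface above was consumed, the following sketch writes a few entries and reads back a bounded, filtered range. It assumes a store instance is supplied by the caller, that keys and values are UTF-8 strings, and that the "vol/" prefix is invented for the example.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.MetadataStore;

public final class MetadataStoreUsageSketch {
  private MetadataStoreUsageSketch() { }

  static byte[] bytes(String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }

  /** Writes three entries and lists at most two of them, starting from the first key. */
  public static void listSomeEntries(MetadataStore store) throws IOException {
    store.put(bytes("vol/a"), bytes("1"));
    store.put(bytes("vol/b"), bytes("2"));
    store.put(bytes("vol/c"), bytes("3"));

    // startKey == null means "start from the first entry"; count bounds the result size.
    List<Map.Entry<byte[], byte[]>> range =
        store.getRangeKVs(null, 2, new KeyPrefixFilter().addFilter("vol/"));

    for (Map.Entry<byte[], byte[]> entry : range) {
      System.out.println(new String(entry.getKey(), StandardCharsets.UTF_8)
          + " -> " + new String(entry.getValue(), StandardCharsets.UTF_8));
    }
  }
}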
- */ -public class MetadataStoreBuilder { - - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(MetadataStoreBuilder.class); - private File dbFile; - private long cacheSize; - private boolean createIfMissing = true; - private Optional optionalConf = Optional.empty(); - private String dbType; - - public static MetadataStoreBuilder newBuilder() { - return new MetadataStoreBuilder(); - } - - public MetadataStoreBuilder setDbFile(File dbPath) { - this.dbFile = dbPath; - return this; - } - - public MetadataStoreBuilder setCacheSize(long cache) { - this.cacheSize = cache; - return this; - } - - public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) { - this.createIfMissing = doCreate; - return this; - } - - public MetadataStoreBuilder setConf(Configuration configuration) { - this.optionalConf = Optional.of(configuration); - return this; - } - - /** - * Set the container DB Type. - * @param type - * @return MetadataStoreBuilder - */ - public MetadataStoreBuilder setDBType(String type) { - this.dbType = type; - return this; - } - - - public MetadataStore build() throws IOException { - if (dbFile == null) { - throw new IllegalArgumentException("Failed to build metadata store, " - + "dbFile is required but not found"); - } - - // Build db store based on configuration - final Configuration conf = optionalConf.orElseGet( - () -> new OzoneConfiguration()); - - if(dbType == null) { - LOG.debug("dbType is null, using "); - dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - LOG.debug("dbType is null, using dbType {} from ozone configuration", - dbType); - } else { - LOG.debug("Using dbType {} for metastore", dbType); - } - if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) { - Options options = new Options(); - options.createIfMissing(createIfMissing); - if (cacheSize > 0) { - options.cacheSize(cacheSize); - } - return new LevelDBStore(dbFile, options); - } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) { - org.rocksdb.Options opts = new org.rocksdb.Options(); - opts.setCreateIfMissing(createIfMissing); - - if (cacheSize > 0) { - BlockBasedTableConfig tableConfig = new BlockBasedTableConfig(); - tableConfig.setBlockCacheSize(cacheSize); - opts.setTableFormatConfig(tableConfig); - } - - String rocksDbStat = conf.getTrimmed( - OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - opts = opts.setStatistics(statistics); - - } - return new RocksDBStore(dbFile, opts); - } - - throw new IllegalArgumentException("Invalid argument for " - + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL - + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB - + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB - + ", but met " + dbType); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java deleted file mode 100644 index a3ee1fd51bd..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RetriableTask.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
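A minimal sketch of how a store was typically constructed through the builder above; the db directory, file name, cache size, and the choice of the RocksDB implementation constant are assumptions made for illustration.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.MetadataStore;
import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public final class MetadataStoreBuilderSketch {
  private MetadataStoreBuilderSketch() { }

  public static MetadataStore openContainerDb(File dbDir) throws IOException {
    return MetadataStoreBuilder.newBuilder()
        .setConf(new OzoneConfiguration())
        .setDbFile(new File(dbDir, "container.db"))   // hypothetical file name
        .setDBType(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB)
        .setCacheSize(64L * 1024 * 1024)              // 64 MB block cache
        .setCreateIfMissing(true)
        .build();
  }
}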
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.util.ThreadUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.Callable; - -/** - * {@code Callable} implementation that retries a delegate task according to - * the specified {@code RetryPolicy}. Sleeps between retries in the caller - * thread. - * - * @param the result type of method {@code call} - */ -public class RetriableTask implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(RetriableTask.class); - - private final String name; - private final Callable task; - private final RetryPolicy retryPolicy; - - public RetriableTask(RetryPolicy retryPolicy, String name, Callable task) { - this.retryPolicy = retryPolicy; - this.name = name; - this.task = task; - } - - @Override - public V call() throws Exception { - int attempts = 0; - Exception cause; - while (true) { - try { - return task.call(); - } catch (Exception e) { - cause = e; - RetryPolicy.RetryAction action = retryPolicy.shouldRetry(e, ++attempts, - 0, true); - if (action.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { - LOG.info("Execution of task {} failed, will be retried in {} ms", - name, action.delayMillis); - ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis); - } else { - break; - } - } - } - - String msg = String.format( - "Execution of task %s failed permanently after %d attempts", - name, attempts); - LOG.warn(msg, cause); - throw new IOException(msg, cause); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java deleted file mode 100644 index 7dd1bde1b77..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStore.java +++ /dev/null @@ -1,405 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
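To show how the retry wrapper above was meant to be used, here is a hedged sketch that wraps a callable with a fixed-sleep retry policy; the particular policy and the task body are assumptions, not taken from the deleted sources.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.utils.RetriableTask;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public final class RetriableTaskSketch {
  private RetriableTaskSketch() { }

  public static void main(String[] args) throws Exception {
    // Retry up to 5 times, sleeping 1 second between attempts.
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(5, 1, TimeUnit.SECONDS);

    RetriableTask<String> task = new RetriableTask<>(policy, "fetch-config", () -> {
      // Placeholder work: any exception thrown here triggers a retry.
      return "ok";
    });

    System.out.println(task.call());
  }
}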
- */ - -package org.apache.hadoop.hdds.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.ratis.thirdparty.com.google.common.annotations. - VisibleForTesting; -import org.rocksdb.DbPath; -import org.rocksdb.Options; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * RocksDB implementation of ozone metadata store. - */ -public class RocksDBStore implements MetadataStore { - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBStore.class); - - private RocksDB db = null; - private File dbLocation; - private WriteOptions writeOptions; - private Options dbOptions; - private ObjectName statMBeanName; - - public RocksDBStore(File dbFile, Options options) - throws IOException { - Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); - RocksDB.loadLibrary(); - dbOptions = options; - dbLocation = dbFile; - writeOptions = new WriteOptions(); - try { - - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath()); - if (dbOptions.statistics() != null) { - - Map jmxProperties = new HashMap(); - jmxProperties.put("dbName", dbFile.getName()); - statMBeanName = HddsUtils.registerWithJmxProperties( - "Ozone", "RocksDbStore", jmxProperties, - RocksDBStoreMBean.create(dbOptions.statistics(), - dbFile.getName())); - if (statMBeanName == null) { - LOG.warn("jmx registration failed during RocksDB init, db path :{}", - dbFile.getAbsolutePath()); - } - } - } catch (RocksDBException e) { - throw new IOException( - "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("RocksDB successfully opened."); - LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath()); - LOG.debug("[Option] createIfMissing = {}", options.createIfMissing()); - LOG.debug("[Option] compactionPriority= {}", options.compactionStyle()); - LOG.debug("[Option] compressionType= {}", options.compressionType()); - LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles()); - LOG.debug("[Option] writeBufferSize= {}", options.writeBufferSize()); - } - } - - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? 
"Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - @Override - public void put(byte[] key, byte[] value) throws IOException { - try { - db.put(writeOptions, key, value); - } catch (RocksDBException e) { - throw toIOException("Failed to put key-value to metadata store", e); - } - } - - @Override - public boolean isEmpty() throws IOException { - RocksIterator it = null; - try { - it = db.newIterator(); - it.seekToFirst(); - return !it.isValid(); - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public byte[] get(byte[] key) throws IOException { - try { - return db.get(key); - } catch (RocksDBException e) { - throw toIOException("Failed to get the value for the given key", e); - } - } - - @Override - public void delete(byte[] key) throws IOException { - try { - db.delete(key); - } catch (RocksDBException e) { - throw toIOException("Failed to delete the given key", e); - } - } - - @Override - public List> getRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, false, filters); - } - - @Override - public List> getSequentialRangeKVs(byte[] startKey, - int count, MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - return getRangeKVs(startKey, count, true, filters); - } - - private List> getRangeKVs(byte[] startKey, - int count, boolean sequential, - MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { - List> result = new ArrayList<>(); - long start = System.currentTimeMillis(); - if (count < 0) { - throw new IllegalArgumentException( - "Invalid count given " + count + ", count must be greater than 0"); - } - RocksIterator it = null; - try { - it = db.newIterator(); - if (startKey == null) { - it.seekToFirst(); - } else { - if(get(startKey) == null) { - // Key not found, return empty list - return result; - } - it.seek(startKey); - } - while(it.isValid() && result.size() < count) { - byte[] currentKey = it.key(); - byte[] currentValue = it.value(); - - it.prev(); - final byte[] prevKey = it.isValid() ? it.key() : null; - - it.seek(currentKey); - it.next(); - final byte[] nextKey = it.isValid() ? it.key() : null; - - if (filters == null) { - result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey, - currentValue)); - } else { - if (Arrays.asList(filters).stream() - .allMatch(entry -> entry.filterKey(prevKey, - currentKey, nextKey))) { - result.add(new AbstractMap.SimpleImmutableEntry<>(currentKey, - currentValue)); - } else { - if (result.size() > 0 && sequential) { - // if the caller asks for a sequential range of results, - // and we met a dis-match, abort iteration from here. - // if result is empty, we continue to look for the first match. 
- break; - } - } - } - } - } finally { - if (it != null) { - it.close(); - } - long end = System.currentTimeMillis(); - long timeConsumed = end - start; - if (LOG.isDebugEnabled()) { - if (filters != null) { - for (MetadataKeyFilters.MetadataKeyFilter filter : filters) { - int scanned = filter.getKeysScannedNum(); - int hinted = filter.getKeysHintedNum(); - if (scanned > 0 || hinted > 0) { - LOG.debug( - "getRangeKVs ({}) numOfKeysScanned={}, numOfKeysHinted={}", - filter.getClass().getSimpleName(), filter.getKeysScannedNum(), - filter.getKeysHintedNum()); - } - } - } - LOG.debug("Time consumed for getRangeKVs() is {}ms," - + " result length is {}.", timeConsumed, result.size()); - } - } - return result; - } - - @Override - public void writeBatch(BatchOperation operation) - throws IOException { - List operations = - operation.getOperations(); - if (!operations.isEmpty()) { - try (WriteBatch writeBatch = new WriteBatch()) { - for (BatchOperation.SingleOperation opt : operations) { - switch (opt.getOpt()) { - case DELETE: - writeBatch.delete(opt.getKey()); - break; - case PUT: - writeBatch.put(opt.getKey(), opt.getValue()); - break; - default: - throw new IllegalArgumentException("Invalid operation " - + opt.getOpt()); - } - } - db.write(writeOptions, writeBatch); - } catch (RocksDBException e) { - throw toIOException("Batch write operation failed", e); - } - } - } - - @Override - public void compactDB() throws IOException { - if (db != null) { - try { - db.compactRange(); - } catch (RocksDBException e) { - throw toIOException("Failed to compact db", e); - } - } - } - - @Override - public void flushDB(boolean sync) throws IOException { - if (db != null) { - try { - // for RocksDB it is sufficient to flush the WAL as entire db can - // be reconstructed using it. - db.flushWal(sync); - } catch (RocksDBException e) { - throw toIOException("Failed to flush db", e); - } - } - } - - private void deleteQuietly(File fileOrDir) { - if (fileOrDir != null && fileOrDir.exists()) { - try { - FileUtils.forceDelete(fileOrDir); - } catch (IOException e) { - LOG.warn("Failed to delete dir {}", fileOrDir.getAbsolutePath(), e); - } - } - } - - @Override - public void destroy() throws IOException { - // Make sure db is closed. - close(); - - // There is no destroydb java API available, - // equivalently we can delete all db directories. - deleteQuietly(dbLocation); - deleteQuietly(new File(dbOptions.dbLogDir())); - deleteQuietly(new File(dbOptions.walDir())); - List dbPaths = dbOptions.dbPaths(); - if (dbPaths != null) { - dbPaths.forEach(dbPath -> { - deleteQuietly(new File(dbPath.toString())); - }); - } - } - - @Override - public ImmutablePair peekAround(int offset, - byte[] from) throws IOException, IllegalArgumentException { - RocksIterator it = null; - try { - it = db.newIterator(); - if (from == null) { - it.seekToFirst(); - } else { - it.seek(from); - } - if (!it.isValid()) { - return null; - } - - switch (offset) { - case 0: - break; - case 1: - it.next(); - break; - case -1: - it.prev(); - break; - default: - throw new IllegalArgumentException( - "Position can only be -1, 0 " + "or 1, but found " + offset); - } - return it.isValid() ? 
new ImmutablePair<>(it.key(), it.value()) : null; - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public void iterate(byte[] from, EntryConsumer consumer) - throws IOException { - RocksIterator it = null; - try { - it = db.newIterator(); - if (from != null) { - it.seek(from); - } else { - it.seekToFirst(); - } - while (it.isValid()) { - if (!consumer.consume(it.key(), it.value())) { - break; - } - it.next(); - } - } finally { - if (it != null) { - it.close(); - } - } - } - - @Override - public void close() throws IOException { - if (statMBeanName != null) { - MBeans.unregister(statMBeanName); - statMBeanName = null; - } - if (db != null) { - db.close(); - } - - } - - @VisibleForTesting - protected ObjectName getStatMBeanName() { - return statMBeanName; - } - - @Override - public MetaStoreIterator iterator() { - return new RocksDBStoreIterator(db.newIterator()); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java deleted file mode 100644 index e39ec577458..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreIterator.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.hdds.utils; - -import org.rocksdb.RocksIterator; - -import java.util.NoSuchElementException; - -/** - * RocksDB store iterator. 
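A brief sketch of two helpers implemented by the RocksDB-backed store above: peekAround() to look at a neighbouring entry and iterate() to walk entries with an early-exit callback. It assumes EntryConsumer is the single-method callback its use in iterate() suggests, and the key literal is invented.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.hadoop.hdds.utils.MetadataStore;

public final class RocksDBStoreSketch {
  private RocksDBStoreSketch() { }

  public static void inspect(MetadataStore store) throws IOException {
    byte[] from = "vol/b".getBytes(StandardCharsets.UTF_8);

    // offset 1 returns the entry immediately after 'from' (only -1, 0 and 1 are valid).
    ImmutablePair<byte[], byte[]> next = store.peekAround(1, from);
    if (next != null) {
      System.out.println("next key: "
          + new String(next.getKey(), StandardCharsets.UTF_8));
    }

    // Walk entries starting at 'from'; returning false from the consumer stops iteration.
    store.iterate(from, (key, value) -> value.length > 0);
  }
}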
- */ -public class RocksDBStoreIterator - implements MetaStoreIterator { - - private RocksIterator rocksDBIterator; - - public RocksDBStoreIterator(RocksIterator iterator) { - this.rocksDBIterator = iterator; - rocksDBIterator.seekToFirst(); - } - - @Override - public boolean hasNext() { - return rocksDBIterator.isValid(); - } - - @Override - public MetadataStore.KeyValue next() { - if (rocksDBIterator.isValid()) { - MetadataStore.KeyValue value = - MetadataStore.KeyValue.create(rocksDBIterator.key(), rocksDBIterator - .value()); - rocksDBIterator.next(); - return value; - } - throw new NoSuchElementException("RocksDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - rocksDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rocksDBIterator.seekToLast(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java deleted file mode 100644 index 60d4db880c4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/RocksDBStoreMBean.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; -import org.rocksdb.HistogramData; -import org.rocksdb.HistogramType; -import org.rocksdb.Statistics; -import org.rocksdb.TickerType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.Attribute; -import javax.management.AttributeList; -import javax.management.AttributeNotFoundException; -import javax.management.DynamicMBean; -import javax.management.InvalidAttributeValueException; -import javax.management.MBeanAttributeInfo; -import javax.management.MBeanException; -import javax.management.MBeanInfo; -import javax.management.ReflectionException; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Adapter JMX bean to publish all the Rocksdb metrics. 
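The iterators returned by both store implementations behave like a plain java.util.Iterator with extra seek methods; a minimal sketch, assuming the store instance is supplied by the caller:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.utils.MetaStoreIterator;
import org.apache.hadoop.hdds.utils.MetadataStore;

public final class MetaStoreIteratorSketch {
  private MetaStoreIteratorSketch() { }

  public static void dump(MetadataStore store) {
    MetaStoreIterator<MetadataStore.KeyValue> iter = store.iterator();
    iter.seekToFirst();   // explicit here, although both implementations already start at the first key
    while (iter.hasNext()) {
      MetadataStore.KeyValue kv = iter.next();
      System.out.println(new String(kv.getKey(), StandardCharsets.UTF_8)
          + " -> " + kv.getValue().length + " bytes");
    }
  }
}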
- */ -public class RocksDBStoreMBean implements DynamicMBean, MetricsSource { - - private Statistics statistics; - - private Set histogramAttributes = new HashSet<>(); - - private String contextName; - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBStoreMBean.class); - - public final static String ROCKSDB_CONTEXT_PREFIX = "Rocksdb_"; - - public RocksDBStoreMBean(Statistics statistics, String dbName) { - this.contextName = ROCKSDB_CONTEXT_PREFIX + dbName; - this.statistics = statistics; - histogramAttributes.add("Average"); - histogramAttributes.add("Median"); - histogramAttributes.add("Percentile95"); - histogramAttributes.add("Percentile99"); - histogramAttributes.add("StandardDeviation"); - } - - public static RocksDBStoreMBean create(Statistics statistics, - String contextName) { - - RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean( - statistics, contextName); - MetricsSystem ms = DefaultMetricsSystem.instance(); - MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName); - if (metricsSource != null) { - return (RocksDBStoreMBean)metricsSource; - } else { - return ms.register(rocksDBStoreMBean.contextName, - "RocksDB Metrics", - rocksDBStoreMBean); - } - } - - @Override - public Object getAttribute(String attribute) - throws AttributeNotFoundException, MBeanException, ReflectionException { - for (String histogramAttribute : histogramAttributes) { - if (attribute.endsWith("_" + histogramAttribute.toUpperCase())) { - String keyName = attribute - .substring(0, attribute.length() - histogramAttribute.length() - 1); - try { - HistogramData histogram = - statistics.getHistogramData(HistogramType.valueOf(keyName)); - try { - Method method = - HistogramData.class.getMethod("get" + histogramAttribute); - return method.invoke(histogram); - } catch (Exception e) { - throw new ReflectionException(e, - "Can't read attribute " + attribute); - } - } catch (IllegalArgumentException exception) { - throw new AttributeNotFoundException( - "No such attribute in RocksDB stats: " + attribute); - } - } - } - try { - return statistics.getTickerCount(TickerType.valueOf(attribute)); - } catch (IllegalArgumentException ex) { - throw new AttributeNotFoundException( - "No such attribute in RocksDB stats: " + attribute); - } - } - - @Override - public void setAttribute(Attribute attribute) - throws AttributeNotFoundException, InvalidAttributeValueException, - MBeanException, ReflectionException { - - } - - @Override - public AttributeList getAttributes(String[] attributes) { - AttributeList result = new AttributeList(); - for (String attributeName : attributes) { - try { - Object value = getAttribute(attributeName); - result.add(value); - } catch (Exception e) { - //TODO - } - } - return result; - } - - @Override - public AttributeList setAttributes(AttributeList attributes) { - return null; - } - - @Override - public Object invoke(String actionName, Object[] params, String[] signature) - throws MBeanException, ReflectionException { - return null; - } - - @Override - public MBeanInfo getMBeanInfo() { - - List attributes = new ArrayList<>(); - for (TickerType tickerType : TickerType.values()) { - attributes.add(new MBeanAttributeInfo(tickerType.name(), "long", - "RocksDBStat: " + tickerType.name(), true, false, false)); - } - for (HistogramType histogramType : HistogramType.values()) { - for (String histogramAttribute : histogramAttributes) { - attributes.add(new MBeanAttributeInfo( - histogramType.name() + "_" + histogramAttribute.toUpperCase(), - "long", 
"RocksDBStat: " + histogramType.name(), true, false, - false)); - } - } - - return new MBeanInfo("", "RocksDBStat", - attributes.toArray(new MBeanAttributeInfo[0]), null, null, null); - - } - - @Override - public void getMetrics(MetricsCollector metricsCollector, boolean b) { - MetricsRecordBuilder rb = metricsCollector.addRecord(contextName); - getHistogramData(rb); - getTickerTypeData(rb); - } - - /** - * Collect all histogram metrics from RocksDB statistics. - * @param rb Metrics Record Builder. - */ - private void getHistogramData(MetricsRecordBuilder rb) { - for (HistogramType histogramType : HistogramType.values()) { - HistogramData histogram = - statistics.getHistogramData( - HistogramType.valueOf(histogramType.name())); - for (String histogramAttribute : histogramAttributes) { - try { - Method method = - HistogramData.class.getMethod("get" + histogramAttribute); - double metricValue = (double) method.invoke(histogram); - rb.addGauge(Interns.info(histogramType.name() + "_" + - histogramAttribute.toUpperCase(), "RocksDBStat"), - metricValue); - } catch (Exception e) { - LOG.error("Error reading histogram data {} ", e); - } - } - } - } - - /** - * Collect all Counter metrics from RocksDB statistics. - * @param rb Metrics Record Builder. - */ - private void getTickerTypeData(MetricsRecordBuilder rb) { - for (TickerType tickerType : TickerType.values()) { - rb.addCounter(Interns.info(tickerType.name(), "RocksDBStat"), - statistics.getTickerCount(tickerType)); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java deleted file mode 100644 index 9edc1044810..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/Scheduler.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.ratis.util.function.CheckedRunnable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * This class encapsulates ScheduledExecutorService. - */ -public class Scheduler { - - private static final Logger LOG = - LoggerFactory.getLogger(Scheduler.class); - - private ScheduledExecutorService scheduler; - - private volatile boolean isClosed; - - private String threadName; - - /** - * Creates a ScheduledExecutorService based on input arguments. 
- * @param threadName - thread name - * @param isDaemon - if true the threads in the scheduler are started as - * daemon - * @param numCoreThreads - number of core threads to maintain in the scheduler - */ - public Scheduler(String threadName, boolean isDaemon, int numCoreThreads) { - scheduler = Executors.newScheduledThreadPool(numCoreThreads, r -> { - Thread t = new Thread(r); - t.setName(threadName); - t.setDaemon(isDaemon); - return t; - }); - this.threadName = threadName; - isClosed = false; - } - - public void schedule(Runnable runnable, long delay, TimeUnit timeUnit) { - scheduler.schedule(runnable, delay, timeUnit); - } - - public void schedule(CheckedRunnable runnable, long delay, - TimeUnit timeUnit, Logger logger, String errMsg) { - scheduler.schedule(() -> { - try { - runnable.run(); - } catch (Throwable throwable) { - logger.error(errMsg, throwable); - } - }, delay, timeUnit); - } - - public void scheduleWithFixedDelay(Runnable runnable, long initialDelay, - long fixedDelay, TimeUnit timeUnit) { - scheduler - .scheduleWithFixedDelay(runnable, initialDelay, fixedDelay, timeUnit); - } - - public boolean isClosed() { - return isClosed; - } - - /** - * Closes the scheduler for further task submission. Any pending tasks not - * yet executed are also cancelled. For the executing tasks the scheduler - * waits 60 seconds for completion. - */ - public synchronized void close() { - isClosed = true; - if (scheduler != null) { - scheduler.shutdownNow(); - try { - scheduler.awaitTermination(60, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOG.info( - threadName + " interrupted while waiting for task completion {}", - e); - } - } - scheduler = null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java deleted file mode 100644 index 09145361257..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.hdds.HddsUtils; - -/** - * This class uses system current time milliseconds to generate unique id. - */ -public final class UniqueId { - /* - * When we represent time in milliseconds using 'long' data type, - * the LSB bits are used. Currently we are only using 44 bits (LSB), - * 20 bits (MSB) are not used. - * We will exhaust this 44 bits only when we are in year 2525, - * until then we can safely use this 20 bits (MSB) for offset to generate - * unique id within millisecond. 
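A short sketch of the Scheduler wrapper above: one daemon thread, a one-off task, and a fixed-delay task; the thread name and delays are arbitrary.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.utils.Scheduler;

public final class SchedulerSketch {
  private SchedulerSketch() { }

  public static void main(String[] args) throws InterruptedException {
    Scheduler scheduler = new Scheduler("ExampleScheduler", true, 1);

    // One-shot task after 100 ms.
    scheduler.schedule(() -> System.out.println("one-off"), 100, TimeUnit.MILLISECONDS);

    // Recurring task: first run immediately, then every 500 ms after the previous run finishes.
    scheduler.scheduleWithFixedDelay(
        () -> System.out.println("tick"), 0, 500, TimeUnit.MILLISECONDS);

    Thread.sleep(1200);
    scheduler.close();   // cancels pending tasks, waits up to 60 s for running ones
  }
}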
- * - * Year : Mon Dec 31 18:49:04 IST 2525 - * TimeInMillis: 17545641544247 - * Binary Representation: - * MSB (20 bits): 0000 0000 0000 0000 0000 - * LSB (44 bits): 1111 1111 0101 0010 1001 1011 1011 0100 1010 0011 0111 - * - * We have 20 bits to run counter, we should exclude the first bit (MSB) - * as we don't want to deal with negative values. - * To be on safer side we will use 'short' data type which is of length - * 16 bits and will give us 65,536 values for offset. - * - */ - - private static volatile short offset = 0; - - /** - * Private constructor so that no one can instantiate this class. - */ - private UniqueId() {} - - /** - * Calculate and returns next unique id based on System#currentTimeMillis. - * - * @return unique long value - */ - public static synchronized long next() { - long utcTime = HddsUtils.getUtcTime(); - if ((utcTime & 0xFFFF000000000000L) == 0) { - return utcTime << Short.SIZE | (offset++ & 0x0000FFFF); - } - throw new RuntimeException("Got invalid UTC time," + - " cannot generate unique Id. UTC Time: " + utcTime); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java deleted file mode 100644 index ca9f859ccb4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/VersionInfo.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.ThreadUtil; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Properties; - -/** - * This class returns build information about Hadoop components. 
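To make the layout described in the comment above concrete: next() packs the UTC time (at most 48 significant bits) into the upper bits of the long and a 16-bit per-millisecond counter into the lower bits, so both parts can be recovered with simple shifts. The decomposition below is illustrative and not part of the deleted class.

import org.apache.hadoop.hdds.utils.UniqueId;

public final class UniqueIdSketch {
  private UniqueIdSketch() { }

  public static void main(String[] args) {
    long id = UniqueId.next();

    // Upper 48 bits: UTC time in milliseconds; lower 16 bits: per-millisecond counter.
    long timeMillis = id >>> Short.SIZE;
    int counter = (int) (id & 0xFFFF);

    System.out.println("time=" + timeMillis + " counter=" + counter);
  }
}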
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class VersionInfo { - - private final Properties info = new Properties(); - - public VersionInfo(String component) { - String versionInfoFile = component + "-version-info.properties"; - InputStream is = null; - try { - is = ThreadUtil.getResourceAsStream( - getClass().getClassLoader(), - versionInfoFile); - info.load(is); - } catch (IOException ex) { - LoggerFactory.getLogger(getClass()).warn("Could not read '" + - versionInfoFile + "', " + ex.toString(), ex); - } finally { - IOUtils.closeStream(is); - } - } - - public String getRelease() { - return info.getProperty("release", "Unknown"); - } - - public String getVersion() { - return info.getProperty("version", "Unknown"); - } - - public String getRevision() { - return info.getProperty("revision", "Unknown"); - } - - public String getBranch() { - return info.getProperty("branch", "Unknown"); - } - - public String getDate() { - return info.getProperty("date", "Unknown"); - } - - public String getUser() { - return info.getProperty("user", "Unknown"); - } - - public String getUrl() { - return info.getProperty("url", "Unknown"); - } - - public String getSrcChecksum() { - return info.getProperty("srcChecksum", "Unknown"); - } - - public String getProtocVersion() { - return info.getProperty("protocVersion", "Unknown"); - } - - public String getBuildVersion() { - return getVersion() + - " from " + getRevision() + - " by " + getUser() + - " source checksum " + getSrcChecksum(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java deleted file mode 100644 index 8ca5d188ebc..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperation.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -/** - * Class represents a batch operation, collects multiple db operation. - */ -public interface BatchOperation extends AutoCloseable { - - void close(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java deleted file mode 100644 index 7c602911fe4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/ByteArrayKeyValue.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
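For completeness, here is how the version metadata class above was typically read; the component name "hdds" is an assumption, since the class simply loads "<component>-version-info.properties" from the classpath and falls back to "Unknown" for missing properties.

import org.apache.hadoop.hdds.utils.VersionInfo;

public final class VersionInfoSketch {
  private VersionInfoSketch() { }

  public static void main(String[] args) {
    // Loads "hdds-version-info.properties" from the classpath, if present.
    VersionInfo version = new VersionInfo("hdds");

    System.out.println(version.getBuildVersion());
    System.out.println("release=" + version.getRelease()
        + ", protoc=" + version.getProtocVersion());
  }
}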
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; - -/** - * Key value for raw Table implementations. - */ -public final class ByteArrayKeyValue implements KeyValue { - private byte[] key; - private byte[] value; - - private ByteArrayKeyValue(byte[] key, byte[] value) { - this.key = key; - this.value = value; - } - - /** - * Create a KeyValue pair. - * - * @param key - Key Bytes - * @param value - Value bytes - * @return KeyValue object. - */ - public static ByteArrayKeyValue create(byte[] key, byte[] value) { - return new ByteArrayKeyValue(key, value); - } - - /** - * Return key. - * - * @return byte[] - */ - public byte[] getKey() { - byte[] result = new byte[key.length]; - System.arraycopy(key, 0, result, 0, key.length); - return result; - } - - /** - * Return value. - * - * @return byte[] - */ - public byte[] getValue() { - byte[] result = new byte[value.length]; - System.arraycopy(value, 0, result, 0, value.length); - return result; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java deleted file mode 100644 index 36ece3ea774..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Codec.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -/** - * Codec interface to marshall/unmarshall data to/from a byte[] based - * key/value store. - * - * @param Unserialized type - */ -public interface Codec { - - /** - * Convert object to raw persisted format. - * @param object The original java object. Should not be null. - */ - byte[] toPersistedFormat(T object) throws IOException; - - /** - * Convert object from raw persisted format. - * - * @param rawData Byte array from the key/value store. Should not be null. 
- */ - T fromPersistedFormat(byte[] rawData) throws IOException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java deleted file mode 100644 index f92189aef5b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecRegistry.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import com.google.common.base.Preconditions; - -/** - * Collection of available codecs. - */ -public class CodecRegistry { - - private Map> valueCodecs; - - public CodecRegistry() { - valueCodecs = new HashMap<>(); - valueCodecs.put(String.class, new StringCodec()); - valueCodecs.put(Long.class, new LongCodec()); - } - - /** - * Convert raw value to strongly typed value/key with the help of a codec. - * - * @param rawData original byte array from the db. - * @param format Class of the return value - * @param Type of the return value. - * @return the object with the parsed field data - */ - public T asObject(byte[] rawData, Class format) - throws IOException { - if (rawData == null) { - return null; - } - Codec codec = getCodec(format); - return (T) codec.fromPersistedFormat(rawData); - } - - /** - * Convert strongly typed object to raw data to store it in the kv store. - * - * @param object typed object. - * @param Type of the typed object. - * @return byte array to store it ini the kv store. - */ - public byte[] asRawData(T object) throws IOException { - Preconditions.checkNotNull(object, - "Null value shouldn't be persisted in the database"); - Codec codec = getCodec(object); - return codec.toPersistedFormat(object); - } - - /** - * Get codec for the typed object including class and subclass. - * @param object typed object. - * @return Codec for the typed object. - * @throws IOException - */ - private Codec getCodec(T object) throws IOException { - Class format = (Class) object.getClass(); - return getCodec(format); - } - - - /** - * Get codec for the typed object including class and subclass. - * @param Type of the typed object. - * @return Codec for the typed object. 
- * @throws IOException - */ - private Codec getCodec(Class format) throws IOException { - Codec codec; - if (valueCodecs.containsKey(format)) { - codec = (Codec) valueCodecs.get(format); - } else if (valueCodecs.containsKey(format.getSuperclass())) { - codec = (Codec) valueCodecs.get(format.getSuperclass()); - } else { - throw new IllegalStateException( - "Codec is not registered for type: " + format); - } - return codec; - } - - /** - * Addds codec to the internal collection. - * - * @param type Type of the codec source/destination object. - * @param codec The codec itself. - * @param The type of the codec - */ - public void addCodec(Class type, Codec codec) { - valueCodecs.put(type, codec); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java deleted file mode 100644 index 6a45298cb6a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBCheckpoint.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * Generic DB Checkpoint interface. - */ -public interface DBCheckpoint { - - /** - * Get Snapshot location. - */ - Path getCheckpointLocation(); - - /** - * Get Snapshot creation timestamp. - */ - long getCheckpointTimestamp(); - - /** - * Get last sequence number of Snapshot. - */ - long getLatestSequenceNumber(); - - /** - * Time taken in milliseconds for the checkpoint to be created. - */ - long checkpointCreationTimeTaken(); - - /** - * Destroy the contents of the specified checkpoint to ensure - * proper cleanup of the footprint on disk. - * - * @throws IOException if I/O error happens - */ - void cleanupCheckpoint() throws IOException; - - /** - * Set the OM Ratis snapshot index corresponding to the OM DB checkpoint. - * The snapshot index is the latest snapshot index saved by ratis - * snapshots. It is not guaranteed to be the last ratis index applied to - * the OM DB state. - * @param omRatisSnapshotIndex the saved ratis snapshot index - */ - void setRatisSnapshotIndex(long omRatisSnapshotIndex); - - /** - * Get the OM Ratis snapshot index corresponding to the OM DB checkpoint. - * The ratis snapshot index indicates upto which index is definitely - * included in the DB checkpoint. It is not guaranteed to be the last ratis - * log index applied to the DB checkpoint. 
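To illustrate the Codec/CodecRegistry pair removed above: a plug-in codec only maps its type to and from byte[], and the registry dispatches on the runtime class. A hedged sketch, assuming the original generic signatures Codec<T> and addCodec(Class<T>, Codec<T>); the UuidCodec class is hypothetical:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.hdds.utils.db.Codec;

public class UuidCodec implements Codec<UUID> {
  @Override
  public byte[] toPersistedFormat(UUID object) throws IOException {
    return object.toString().getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public UUID fromPersistedFormat(byte[] rawData) throws IOException {
    return UUID.fromString(new String(rawData, StandardCharsets.UTF_8));
  }
}

// Possible round trip through the registry:
//   CodecRegistry registry = new CodecRegistry();
//   registry.addCodec(UUID.class, new UuidCodec());
//   byte[] raw = registry.asRawData(UUID.randomUUID());
//   UUID back = registry.asObject(raw, UUID.class);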
- */ - long getRatisSnapshotIndex(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java deleted file mode 100644 index 43754255eab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.base.Preconditions; -import org.eclipse.jetty.util.StringUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.DBOptions; -import org.rocksdb.Env; -import org.rocksdb.OptionsUtil; -import org.rocksdb.RocksDBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; - -/** - * A Class that controls the standard config options of RocksDB. - *

- * Important : Some of the functions in this file are magic functions designed - * for the use of OZONE developers only. Due to that this information is - * documented in this files only and is *not* intended for end user consumption. - * Please do not use this information to tune your production environments. - * Please remember the SpiderMan principal; with great power comes great - * responsibility. - */ -public final class DBConfigFromFile { - private static final Logger LOG = - LoggerFactory.getLogger(DBConfigFromFile.class); - - public static final String CONFIG_DIR = "HADOOP_CONF_DIR"; - - private DBConfigFromFile() { - } - - public static File getConfigLocation() throws IOException { - String path = System.getenv(CONFIG_DIR); - - // Make testing easy. - // If there is No Env. defined, let us try to read the JVM property - if (StringUtil.isBlank(path)) { - path = System.getProperty(CONFIG_DIR); - } - - if (StringUtil.isBlank(path)) { - LOG.debug("Unable to find the configuration directory. " - + "Please make sure that HADOOP_CONF_DIR is setup correctly."); - } - if(StringUtil.isBlank(path)){ - return null; - } - return new File(path); - - } - - /** - * This class establishes a magic pattern where we look for DBFile.ini as the - * options for RocksDB. - * - * @param dbFileName - The DBFile Name. For example, OzoneManager.db - * @return Name of the DB File options - */ - public static String getOptionsFileNameFromDB(String dbFileName) { - Preconditions.checkNotNull(dbFileName); - return dbFileName + ".ini"; - } - - /** - * One of the Magic functions designed for the use of Ozone Developers *ONLY*. - * This function takes the name of DB file and looks up the a .ini file that - * follows the ROCKSDB config format and uses that file for DBOptions and - * Column family Options. The Format for this file is specified by RockDB. - *

- * Here is a sample config from RocksDB sample Repo. - *

- * https://github.com/facebook/rocksdb/blob/master/examples - * /rocksdb_option_file_example.ini - *

- * We look for a specific pattern, say OzoneManager.db will have its configs - * specified in OzoneManager.db.ini. This option is used only by the - * performance testing group to allow tuning of all parameters freely. - *

 - * For the end users we offer a set of predefined options that are easy to use - * and the user does not need to become an expert in RocksDB config. - *

- * This code assumes the .ini file is placed in the same directory as normal - * config files. That is in $HADOOP_DIR/etc/hadoop. For example, if we want to - * control OzoneManager.db configs from a file, we need to create a file - * called OzoneManager.db.ini and place that file in $HADOOP_DIR/etc/hadoop. - * - * @param dbFileName - The DB File Name, for example, OzoneManager.db. - * @param cfDescs - ColumnFamily Handles. - * @return DBOptions, Options to be used for opening/creating the DB. - * @throws IOException - */ - public static DBOptions readFromFile(String dbFileName, - List cfDescs) throws IOException { - Preconditions.checkNotNull(dbFileName); - Preconditions.checkNotNull(cfDescs); - Preconditions.checkArgument(cfDescs.size() > 0); - - //TODO: Add Documentation on how to support RocksDB Mem Env. - Env env = Env.getDefault(); - DBOptions options = null; - File configLocation = getConfigLocation(); - if(configLocation != null && - StringUtil.isNotBlank(configLocation.toString())){ - Path optionsFile = Paths.get(configLocation.toString(), - getOptionsFileNameFromDB(dbFileName)); - - if (optionsFile.toFile().exists()) { - options = new DBOptions(); - try { - OptionsUtil.loadOptionsFromFile(optionsFile.toString(), - env, options, cfDescs, true); - - } catch (RocksDBException rdEx) { - RDBTable.toIOException("Unable to find/open Options file.", rdEx); - } - } - } - return options; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java deleted file mode 100644 index 57516fd89a4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBProfile.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.conf.StorageUnit; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.BloomFilter; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompactionStyle; -import org.rocksdb.DBOptions; - -import java.math.BigDecimal; - -/** - * User visible configs based RocksDB tuning page. Documentation for Options. - *

- * https://github.com/facebook/rocksdb/blob/master/include/rocksdb/options.h - *

- * Most tuning parameters are based on this URL. - *

- * https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning - */ -public enum DBProfile { - //TODO : Add more profiles like TEST etc. - SSD { - @Override - public String toString() { - return "DBProfile.SSD"; - } - - @Override - public ColumnFamilyOptions getColumnFamilyOptions() { - - // Set BlockCacheSize to 256 MB. This should not be an issue for HADOOP. - final long blockCacheSize = toLong(StorageUnit.MB.toBytes(256.00)); - - // Set the Default block size to 16KB - final long blockSize = toLong(StorageUnit.KB.toBytes(16)); - - // Write Buffer Size -- set to 128 MB - final long writeBufferSize = toLong(StorageUnit.MB.toBytes(128)); - - return new ColumnFamilyOptions() - .setLevelCompactionDynamicLevelBytes(true) - .setWriteBufferSize(writeBufferSize) - .setTableFormatConfig( - new BlockBasedTableConfig() - .setBlockCacheSize(blockCacheSize) - .setBlockSize(blockSize) - .setCacheIndexAndFilterBlocks(true) - .setPinL0FilterAndIndexBlocksInCache(true) - .setFilter(new BloomFilter())); - } - - @Override - public DBOptions getDBOptions() { - final int maxBackgroundCompactions = 4; - final int maxBackgroundFlushes = 2; - final long bytesPerSync = toLong(StorageUnit.MB.toBytes(1.00)); - final boolean createIfMissing = true; - final boolean createMissingColumnFamilies = true; - return new DBOptions() - .setIncreaseParallelism(Runtime.getRuntime().availableProcessors()) - .setMaxBackgroundCompactions(maxBackgroundCompactions) - .setMaxBackgroundFlushes(maxBackgroundFlushes) - .setBytesPerSync(bytesPerSync) - .setCreateIfMissing(createIfMissing) - .setCreateMissingColumnFamilies(createMissingColumnFamilies); - } - - - }, - DISK { - @Override - public String toString() { - return "DBProfile.DISK"; - } - - @Override - public DBOptions getDBOptions() { - final long readAheadSize = toLong(StorageUnit.MB.toBytes(4.00)); - return SSD.getDBOptions().setCompactionReadaheadSize(readAheadSize); - } - - @Override - public ColumnFamilyOptions getColumnFamilyOptions() { - ColumnFamilyOptions columnFamilyOptions = SSD.getColumnFamilyOptions(); - columnFamilyOptions.setCompactionStyle(CompactionStyle.LEVEL); - return columnFamilyOptions; - } - - - }; - - private static long toLong(double value) { - BigDecimal temp = new BigDecimal(value); - return temp.longValue(); - } - - public abstract DBOptions getDBOptions(); - - public abstract ColumnFamilyOptions getColumnFamilyOptions(); -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java deleted file mode 100644 index b3f58384203..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
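The DBProfile enum deleted above bundles a tuned DBOptions/ColumnFamilyOptions pair per storage type, with DISK derived from SSD. A minimal sketch of consuming such a profile to open RocksDB; the path "/tmp/profile-demo" and the extra table name "demoTable" are placeholders:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class DBProfileDemo {
  private DBProfileDemo() { }

  public static void main(String[] args) throws RocksDBException {
    DBProfile profile = DBProfile.SSD;            // DBProfile.DISK for spinning media
    DBOptions dbOptions = profile.getDBOptions(); // parallelism, sync and create flags

    List<ColumnFamilyDescriptor> families = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
            profile.getColumnFamilyOptions()),
        new ColumnFamilyDescriptor("demoTable".getBytes(StandardCharsets.UTF_8),
            profile.getColumnFamilyOptions()));

    List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (RocksDB db = RocksDB.open(dbOptions, "/tmp/profile-demo",
        families, handles)) {
      // use db / handles ...
    }
  }
}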
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; - -/** - * The DBStore interface provides the ability to create Tables, which store - * a specific type of Key-Value pair. Some DB interfaces like LevelDB will not - * be able to do this. In those case a Table creation will map to a default - * store. - * - */ -@InterfaceStability.Evolving -public interface DBStore extends AutoCloseable { - - /** - * Gets an existing TableStore. - * - * @param name - Name of the TableStore to get - * @return - TableStore. - * @throws IOException on Failure - */ - Table getTable(String name) throws IOException; - - - /** - * Gets an existing TableStore with implicit key/value conversion and - * with default cleanup policy for cache. Default cache clean up policy is - * manual. - * - * @param name - Name of the TableStore to get - * @param keyType - * @param valueType - * @return - TableStore. - * @throws IOException on Failure - */ - Table getTable(String name, - Class keyType, Class valueType) throws IOException; - - /** - * Gets an existing TableStore with implicit key/value conversion and - * with specified cleanup policy for cache. - * @throws IOException - */ - Table getTable(String name, - Class keyType, Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException; - - /** - * Lists the Known list of Tables in a DB. - * - * @return List of Tables, in case of Rocks DB and LevelDB we will return at - * least one entry called DEFAULT. - * @throws IOException on Failure - */ - ArrayList listTables() throws IOException; - - /** - * Flush the DB buffer onto persistent storage. - * @throws IOException - */ - void flush() throws IOException; - - /** - * Compact the entire database. - * - * @throws IOException on Failure - */ - void compactDB() throws IOException; - - /** - * Moves a key from the Source Table to the destination Table. - * - * @param key - Key to move. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY key, Table source, - Table dest) throws IOException; - - /** - * Moves a key from the Source Table to the destination Table and updates the - * destination to the new value. - * - * @param key - Key to move. - * @param value - new value to write to the destination table. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY key, VALUE value, Table source, - Table dest) - throws IOException; - - /** - * Moves a key from the Source Table to the destination Table and updates the - * destination with the new key name and value. - * This is similar to deleting an entry in one table and adding an entry in - * another table, here it is done atomically. - * - * @param sourceKey - Key to move. - * @param destKey - Destination key name. - * @param value - new value to write to the destination table. - * @param source - Source Table. - * @param dest - Destination Table. - * @throws IOException on Failure - */ - void move(KEY sourceKey, KEY destKey, VALUE value, - Table source, Table dest) - throws IOException; - - /** - * Returns an estimated count of keys in this DB. 
- * - * @return long, estimate of keys in the DB. - */ - long getEstimatedKeyCount() throws IOException; - - /** - * Initialize an atomic batch operation which can hold multiple PUT/DELETE - * operations and committed later in one step. - * - * @return BatchOperation holder which can be used to add or commit batch - * operations. - */ - BatchOperation initBatchOperation(); - - /** - * Commit the batch operations. - * - * @param operation which contains all the required batch operation. - * @throws IOException on Failure. - */ - void commitBatchOperation(BatchOperation operation) throws IOException; - - /** - * Get current snapshot of OM DB store as an artifact stored on - * the local filesystem. - * @return An object that encapsulates the checkpoint information along with - * location. - */ - DBCheckpoint getCheckpoint(boolean flush) throws IOException; - - /** - * Get DB Store location. - * @return DB file location. - */ - File getDbLocation(); - - /** - * Get List of Index to Table Names. - * (For decoding table from column family index) - * @return Map of Index -> TableName - */ - Map getTableNames(); - - /** - * Get Codec registry. - * @return codec registry. - */ - CodecRegistry getCodecRegistry(); - - /** - * Get data written to DB since a specific sequence number. - * @param sequenceNumber - * @return - * @throws SequenceNumberNotFoundException - */ - DBUpdatesWrapper getUpdatesSince(long sequenceNumber) - throws SequenceNumberNotFoundException; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java deleted file mode 100644 index 263864fede8..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
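Before the builder that follows, a hedged sketch of how the DBStore contract above is typically consumed: typed tables plus an atomic batch, or the single-key move() convenience. The generic Table<KEY, VALUE> signature is assumed, and the table names "pendingKeys"/"committedKeys" and the promote() method are illustrative; putWithBatch/deleteWithBatch come from the Table interface used elsewhere in this patch.

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class DBStoreUsageSketch {
  private DBStoreUsageSketch() { }

  static void promote(DBStore store, String key, Long value) throws IOException {
    Table<String, Long> pending =
        store.getTable("pendingKeys", String.class, Long.class);
    Table<String, Long> committed =
        store.getTable("committedKeys", String.class, Long.class);

    // Several puts/deletes applied atomically in one batch.
    try (BatchOperation batch = store.initBatchOperation()) {
      committed.putWithBatch(batch, key, value);
      pending.deleteWithBatch(batch, key);
      store.commitBatchOperation(batch);
    }

    // Single-key equivalent offered by the interface:
    // store.move(key, value, pending, committed);
  }
}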
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSUtil; -import org.eclipse.jetty.util.StringUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.InfoLogLevel; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_DB_PROFILE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; - -/** - * DBStore Builder. - */ -public final class DBStoreBuilder { - private static final Logger LOG = - LoggerFactory.getLogger(DBStoreBuilder.class); - public static final Logger ROCKS_DB_LOGGER = - LoggerFactory.getLogger(RocksDB.class); - private Set tables; - private DBProfile dbProfile; - private DBOptions rocksDBOption; - private String dbname; - private Path dbPath; - private List tableNames; - private Configuration configuration; - private CodecRegistry registry; - private String rocksDbStat; - private RocksDBConfiguration rocksDBConfiguration; - - private DBStoreBuilder(OzoneConfiguration configuration) { - tables = new HashSet<>(); - tableNames = new LinkedList<>(); - this.configuration = configuration; - this.registry = new CodecRegistry(); - this.rocksDbStat = configuration.getTrimmed( - OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); - this.rocksDBConfiguration = - configuration.getObject(RocksDBConfiguration.class); - } - - public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) { - return new DBStoreBuilder(configuration); - } - - public DBStoreBuilder setProfile(DBProfile profile) { - dbProfile = profile; - return this; - } - - public DBStoreBuilder setName(String name) { - dbname = name; - return this; - } - - public DBStoreBuilder addTable(String tableName) { - tableNames.add(tableName); - return this; - } - - public DBStoreBuilder addCodec(Class type, Codec codec) { - registry.addCodec(type, codec); - return this; - } - - public DBStoreBuilder addTable(String tableName, ColumnFamilyOptions option) - throws IOException { - TableConfig tableConfig = new TableConfig(tableName, option); - if (!tables.add(tableConfig)) { - String message = "Unable to add the table: " + tableName + - ". Please check if this table name is already in use."; - LOG.error(message); - throw new IOException(message); - } - LOG.info("using custom profile for table: {}", tableName); - return this; - } - - public DBStoreBuilder setDBOption(DBOptions option) { - rocksDBOption = option; - return this; - } - - public DBStoreBuilder setPath(Path path) { - Preconditions.checkNotNull(path); - dbPath = path; - return this; - } - - /** - * Builds a DBStore instance and returns that. 
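A minimal sketch of the fluent builder above, assuming the configured parent directory already exists (build() below checks for it); the db name, path and table names are placeholders:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.LongCodec;

public final class BuildStoreSketch {
  private BuildStoreSketch() { }

  static DBStore open(OzoneConfiguration conf) throws IOException {
    return DBStoreBuilder.newBuilder(conf)
        .setName("demo.db")                       // becomes <path>/demo.db on disk
        .setPath(Paths.get("/tmp/demo-metadata")) // parent directory must exist
        .addTable("pendingKeys")
        .addTable("committedKeys")
        .addCodec(Long.class, new LongCodec())    // extra codec besides the defaults
        .build();
  }
}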
- * - * @return DBStore - */ - public DBStore build() throws IOException { - if(StringUtil.isBlank(dbname) || (dbPath == null)) { - LOG.error("Required Parameter missing."); - throw new IOException("Required parameter is missing. Please make sure " - + "sure Path and DB name is provided."); - } - processDBProfile(); - processTables(); - DBOptions options = getDbProfile(); - File dbFile = getDBFile(); - if (!dbFile.getParentFile().exists()) { - throw new IOException("The DB destination directory should exist."); - } - return new RDBStore(dbFile, options, tables, registry); - } - - /** - * if the DBProfile is not set, we will default to using default from the - * config file. - */ - private void processDBProfile() { - if (dbProfile == null) { - dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE, - HDDS_DEFAULT_DB_PROFILE); - } - } - - private void processTables() throws IOException { - if (tableNames.size() > 0) { - for (String name : tableNames) { - addTable(name, dbProfile.getColumnFamilyOptions()); - LOG.info("Using default column profile:{} for Table:{}", - dbProfile.toString(), name); - } - } - addTable(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - dbProfile.getColumnFamilyOptions()); - LOG.info("Using default column profile:{} for Table:{}", - dbProfile.toString(), - DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY)); - } - - private DBOptions getDbProfile() { - if (rocksDBOption != null) { - return rocksDBOption; - } - DBOptions option = null; - if (StringUtil.isNotBlank(dbname)) { - List columnFamilyDescriptors = new LinkedList<>(); - - for (TableConfig tc : tables) { - columnFamilyDescriptors.add(tc.getDescriptor()); - } - - if (columnFamilyDescriptors.size() > 0) { - try { - option = DBConfigFromFile.readFromFile(dbname, - columnFamilyDescriptors); - if(option != null) { - LOG.info("Using Configs from {}.ini file", dbname); - } - } catch (IOException ex) { - LOG.info("Unable to read ROCKDB config", ex); - } - } - } - - if (option == null) { - LOG.info("Using default options. 
{}", dbProfile.toString()); - option = dbProfile.getDBOptions(); - } - - if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(option) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; - InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration - .getRocksdbLogLevel() + "_LEVEL"); - logger.setInfoLogLevel(level); - option.setLogger(logger); - } - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - option = option.setStatistics(statistics); - } - return option; - } - - private File getDBFile() throws IOException { - if (dbPath == null) { - LOG.error("DB path is required."); - throw new IOException("A Path to for DB file is needed."); - } - - if (StringUtil.isBlank(dbname)) { - LOG.error("DBName is a required."); - throw new IOException("A valid DB name is required."); - } - return Paths.get(dbPath.toString(), dbname).toFile(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java deleted file mode 100644 index aa48c5e83b0..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBUpdatesWrapper.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.util.ArrayList; -import java.util.List; - -/** - * Wrapper class to hold DB data read from the RocksDB log file. - */ -public class DBUpdatesWrapper { - - private List dataList = new ArrayList<>(); - private long currentSequenceNumber = -1; - - public void addWriteBatch(byte[] data, long sequenceNumber) { - dataList.add(data); - if (currentSequenceNumber < sequenceNumber) { - currentSequenceNumber = sequenceNumber; - } - } - - public List getData() { - return dataList; - } - - public void setCurrentSequenceNumber(long sequenceNumber) { - this.currentSequenceNumber = sequenceNumber; - } - - public long getCurrentSequenceNumber() { - return currentSequenceNumber; - } -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java deleted file mode 100644 index e95e0f1b757..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/IntegerCodec.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
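For context on the .ini override consulted by getDbProfile() above: DBConfigFromFile looks for "<dbname>.ini" under HADOOP_CONF_DIR and, when present, loads DBOptions and column family options from it. A hedged sketch of calling it directly; the db name "OzoneManager.db" and the fallback-profile handling are illustrative:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public final class IniOverrideSketch {
  private IniOverrideSketch() { }

  static DBOptions loadTuned(DBProfile fallback) throws IOException {
    List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
    cfDescs.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY,
        fallback.getColumnFamilyOptions()));

    // Returns null when $HADOOP_CONF_DIR/OzoneManager.db.ini is absent.
    DBOptions fromIni = DBConfigFromFile.readFromFile("OzoneManager.db", cfDescs);
    return fromIni != null ? fromIni : fallback.getDBOptions();
  }
}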
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -import com.google.common.primitives.Ints; - -/** - * Codec to convert Integer to/from byte array. - */ -public class IntegerCodec implements Codec { - @Override - public byte[] toPersistedFormat(Integer object) throws IOException { - return Ints.toByteArray(object); - } - - @Override - public Integer fromPersistedFormat(byte[] rawData) throws IOException { - return Ints.fromByteArray(rawData); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java deleted file mode 100644 index 6c95246ebea..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/LongCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import com.google.common.primitives.Longs; - - -/** - * Codec to convert Long to/from byte array. - */ -public class LongCodec implements Codec { - - @Override - public byte[] toPersistedFormat(Long object) { - if (object != null) { - return Longs.toByteArray(object); - } else { - return null; - } - } - - @Override - public Long fromPersistedFormat(byte[] rawData) { - if (rawData != null) { - return Longs.fromByteArray(rawData); - } else { - return null; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java deleted file mode 100644 index 42843b080d7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; - -/** - * Batch operation implementation for rocks db. - */ -public class RDBBatchOperation implements BatchOperation { - - private final WriteBatch writeBatch; - - public RDBBatchOperation() { - writeBatch = new WriteBatch(); - } - - public RDBBatchOperation(WriteBatch writeBatch) { - this.writeBatch = writeBatch; - } - - public void commit(RocksDB db, WriteOptions writeOptions) throws IOException { - try { - db.write(writeOptions, writeBatch); - } catch (RocksDBException e) { - throw new IOException("Unable to write the batch.", e); - } - } - - @Override - public void close() { - writeBatch.close(); - } - - public void delete(ColumnFamilyHandle handle, byte[] key) throws IOException { - try { - writeBatch.delete(handle, key); - } catch (RocksDBException e) { - throw new IOException("Can't record batch delete operation.", e); - } - } - - public void put(ColumnFamilyHandle handle, byte[] key, byte[] value) - throws IOException { - try { - writeBatch.put(handle, key, value); - } catch (RocksDBException e) { - throw new IOException("Can't record batch put operation.", e); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java deleted file mode 100644 index 42b9b77d2d8..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBCheckpointManager.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.time.Duration; -import java.time.Instant; - -import org.apache.commons.lang3.StringUtils; -import org.rocksdb.Checkpoint; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB Checkpoint Manager, used to create and cleanup checkpoints. 
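For reference, a rough standalone equivalent of what RDBBatchOperation above does with the raw RocksDB API; the database handle, column family and keys are placeholders:

import java.nio.charset.StandardCharsets;

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

final class RawBatchSketch {
  private RawBatchSketch() { }

  static void writeAtomically(RocksDB db, ColumnFamilyHandle cf)
      throws RocksDBException {
    try (WriteBatch batch = new WriteBatch();
         WriteOptions options = new WriteOptions()) {
      batch.put(cf, "k1".getBytes(StandardCharsets.UTF_8),
          "v1".getBytes(StandardCharsets.UTF_8));
      batch.delete(cf, "k2".getBytes(StandardCharsets.UTF_8));
      // Both mutations reach the DB atomically, or not at all.
      db.write(options, batch);
    }
  }
}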
- */ -public class RDBCheckpointManager { - - private final Checkpoint checkpoint; - private final RocksDB db; - public static final String RDB_CHECKPOINT_DIR_PREFIX = "rdb_checkpoint_"; - private static final Logger LOG = - LoggerFactory.getLogger(RDBCheckpointManager.class); - private String checkpointNamePrefix = ""; - - public RDBCheckpointManager(RocksDB rocksDB) { - this.db = rocksDB; - this.checkpoint = Checkpoint.create(rocksDB); - } - - /** - * Create a checkpoint manager with a prefix to be added to the - * snapshots created. - * - * @param rocksDB DB instance - * @param checkpointPrefix prefix string. - */ - public RDBCheckpointManager(RocksDB rocksDB, String checkpointPrefix) { - this.db = rocksDB; - this.checkpointNamePrefix = checkpointPrefix; - this.checkpoint = Checkpoint.create(rocksDB); - } - - /** - * Create RocksDB snapshot by saving a checkpoint to a directory. - * - * @param parentDir The directory where the checkpoint needs to be created. - * @return RocksDB specific Checkpoint information object. - */ - public RocksDBCheckpoint createCheckpoint(String parentDir) { - try { - long currentTime = System.currentTimeMillis(); - - String checkpointDir = StringUtils.EMPTY; - if (StringUtils.isNotEmpty(checkpointNamePrefix)) { - checkpointDir += checkpointNamePrefix; - } - checkpointDir += "_" + RDB_CHECKPOINT_DIR_PREFIX + currentTime; - - Path checkpointPath = Paths.get(parentDir, checkpointDir); - Instant start = Instant.now(); - checkpoint.createCheckpoint(checkpointPath.toString()); - Instant end = Instant.now(); - - long duration = Duration.between(start, end).toMillis(); - LOG.info("Created checkpoint at " + checkpointPath.toString() + " in " - + duration + " milliseconds"); - - return new RocksDBCheckpoint( - checkpointPath, - currentTime, - db.getLatestSequenceNumber(), //Best guesstimate here. Not accurate. - duration); - - } catch (RocksDBException e) { - LOG.error("Unable to create RocksDB Snapshot.", e); - } - return null; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java deleted file mode 100644 index 53bd424642a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ /dev/null @@ -1,381 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
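The manager above reduces to a single RocksDB call; a small sketch of that call with a placeholder destination directory (RocksDB checkpoints are hard links to the current SST files, so they are cheap and consistent at one sequence number):

import org.rocksdb.Checkpoint;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class CheckpointSketch {
  private CheckpointSketch() { }

  static void snapshot(RocksDB db, String checkpointDir) throws RocksDBException {
    try (Checkpoint checkpoint = Checkpoint.create(db)) {
      // checkpointDir must not exist yet; RocksDB creates it.
      checkpoint.createCheckpoint(checkpointDir);
    }
  }
}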
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_CHECKPOINTS_DIR_NAME; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Hashtable; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.utils.RocksDBStoreMBean; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.metrics2.util.MBeans; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; -import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.DBOptions; -import org.rocksdb.FlushOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.TransactionLogIterator; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB Store that supports creating Tables in DB. - */ -public class RDBStore implements DBStore { - private static final Logger LOG = - LoggerFactory.getLogger(RDBStore.class); - private RocksDB db; - private File dbLocation; - private final WriteOptions writeOptions; - private final DBOptions dbOptions; - private final CodecRegistry codecRegistry; - private final Hashtable handleTable; - private ObjectName statMBeanName; - private RDBCheckpointManager checkPointManager; - private String checkpointsParentDir; - private List columnFamilyHandles; - - @VisibleForTesting - public RDBStore(File dbFile, DBOptions options, - Set families) throws IOException { - this(dbFile, options, families, new CodecRegistry()); - } - - public RDBStore(File dbFile, DBOptions options, Set families, - CodecRegistry registry) - throws IOException { - Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); - Preconditions.checkNotNull(families); - Preconditions.checkArgument(families.size() > 0); - handleTable = new Hashtable<>(); - codecRegistry = registry; - final List columnFamilyDescriptors = - new ArrayList<>(); - columnFamilyHandles = new ArrayList<>(); - - for (TableConfig family : families) { - columnFamilyDescriptors.add(family.getDescriptor()); - } - - dbOptions = options; - dbLocation = dbFile; - // TODO: Read from the next Config. - writeOptions = new WriteOptions(); - - try { - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), - columnFamilyDescriptors, columnFamilyHandles); - - for (int x = 0; x < columnFamilyHandles.size(); x++) { - handleTable.put( - DFSUtil.bytes2String(columnFamilyHandles.get(x).getName()), - columnFamilyHandles.get(x)); - } - - if (dbOptions.statistics() != null) { - Map jmxProperties = new HashMap<>(); - jmxProperties.put("dbName", dbFile.getName()); - statMBeanName = HddsUtils.registerWithJmxProperties( - "Ozone", "RocksDbStore", jmxProperties, - RocksDBStoreMBean.create(dbOptions.statistics(), - dbFile.getName())); - if (statMBeanName == null) { - LOG.warn("jmx registration failed during RocksDB init, db path :{}", - dbFile.getAbsolutePath()); - } - } - - //create checkpoints directory if not exists. 
- checkpointsParentDir = Paths.get(dbLocation.getParent(), - OM_DB_CHECKPOINTS_DIR_NAME).toString(); - File checkpointsDir = new File(checkpointsParentDir); - if (!checkpointsDir.exists()) { - boolean success = checkpointsDir.mkdir(); - if (!success) { - LOG.warn("Unable to create RocksDB checkpoint directory"); - } - } - - //Initialize checkpoint manager - checkPointManager = new RDBCheckpointManager(db, "om"); - - } catch (RocksDBException e) { - throw toIOException( - "Failed init RocksDB, db path : " + dbFile.getAbsolutePath(), e); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("RocksDB successfully opened."); - LOG.debug("[Option] dbLocation= {}", dbLocation.getAbsolutePath()); - LOG.debug("[Option] createIfMissing = {}", options.createIfMissing()); - LOG.debug("[Option] maxOpenFiles= {}", options.maxOpenFiles()); - } - } - - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? "Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - @Override - public void compactDB() throws IOException { - if (db != null) { - try { - db.compactRange(); - } catch (RocksDBException e) { - throw toIOException("Failed to compact db", e); - } - } - } - - @Override - public void close() throws IOException { - - for (final ColumnFamilyHandle handle : handleTable.values()) { - handle.close(); - } - - if (statMBeanName != null) { - MBeans.unregister(statMBeanName); - statMBeanName = null; - } - - if (db != null) { - db.close(); - } - - if (dbOptions != null) { - dbOptions.close(); - } - - if (writeOptions != null) { - writeOptions.close(); - } - } - - @Override - public void move(KEY key, Table source, - Table dest) throws IOException { - try (BatchOperation batchOperation = initBatchOperation()) { - - VALUE value = source.get(key); - dest.putWithBatch(batchOperation, key, value); - source.deleteWithBatch(batchOperation, key); - commitBatchOperation(batchOperation); - } - } - - @Override - public void move(KEY key, VALUE value, Table source, - Table dest) throws IOException { - move(key, key, value, source, dest); - } - - @Override - public void move(KEY sourceKey, KEY destKey, VALUE value, - Table source, - Table dest) throws IOException { - try (BatchOperation batchOperation = initBatchOperation()) { - dest.putWithBatch(batchOperation, destKey, value); - source.deleteWithBatch(batchOperation, sourceKey); - commitBatchOperation(batchOperation); - } - } - - @Override - public long getEstimatedKeyCount() throws IOException { - try { - return db.getLongProperty("rocksdb.estimate-num-keys"); - } catch (RocksDBException e) { - throw toIOException("Unable to get the estimated count.", e); - } - } - - @Override - public BatchOperation initBatchOperation() { - return new RDBBatchOperation(); - } - - @Override - public void commitBatchOperation(BatchOperation operation) - throws IOException { - ((RDBBatchOperation) operation).commit(db, writeOptions); - } - - - @VisibleForTesting - protected ObjectName getStatMBeanName() { - return statMBeanName; - } - - @Override - public Table getTable(String name) throws IOException { - ColumnFamilyHandle handle = handleTable.get(name); - if (handle == null) { - throw new IOException("No such table in this DB. 
TableName : " + name); - } - return new RDBTable(this.db, handle, this.writeOptions); - } - - @Override - public Table getTable(String name, - Class keyType, Class valueType) throws IOException { - return new TypedTable(getTable(name), codecRegistry, keyType, - valueType); - } - - @Override - public Table getTable(String name, - Class keyType, Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { - return new TypedTable(getTable(name), codecRegistry, keyType, - valueType, cleanupPolicy); - } - - @Override - public ArrayList

<Table> listTables() throws IOException { - ArrayList<Table>
returnList = new ArrayList<>(); - for (ColumnFamilyHandle handle : handleTable.values()) { - returnList.add(new RDBTable(db, handle, writeOptions)); - } - return returnList; - } - - @Override - public void flush() throws IOException { - final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true); - try { - db.flush(flushOptions); - } catch (RocksDBException e) { - LOG.error("Unable to Flush RocksDB data", e); - throw toIOException("Unable to Flush RocksDB data", e); - } - } - - @Override - public DBCheckpoint getCheckpoint(boolean flush) { - final FlushOptions flushOptions = new FlushOptions().setWaitForFlush(flush); - try { - db.flush(flushOptions); - } catch (RocksDBException e) { - LOG.error("Unable to Flush RocksDB data before creating snapshot", e); - } - return checkPointManager.createCheckpoint(checkpointsParentDir); - } - - @Override - public File getDbLocation() { - return dbLocation; - } - - @Override - public Map getTableNames() { - Map tableNames = new HashMap<>(); - StringCodec stringCodec = new StringCodec(); - - for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandles) { - try { - tableNames.put(columnFamilyHandle.getID(), stringCodec - .fromPersistedFormat(columnFamilyHandle.getName())); - } catch (RocksDBException | IOException e) { - LOG.error("Unexpected exception while reading column family handle " + - "name", e); - } - } - return tableNames; - } - - @Override - public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - @Override - public DBUpdatesWrapper getUpdatesSince(long sequenceNumber) - throws SequenceNumberNotFoundException { - - DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper(); - try { - TransactionLogIterator transactionLogIterator = - db.getUpdatesSince(sequenceNumber); - - // Only the first record needs to be checked if its seq number < - // ( 1 + passed_in_sequence_number). For example, if seqNumber passed - // in is 100, then we can read from the WAL ONLY if the first sequence - // number is <= 101. If it is 102, then 101 may already be flushed to - // SST. If it 99, we can skip 99 and 100, and then read from 101. - - boolean checkValidStartingSeqNumber = true; - - while (transactionLogIterator.isValid()) { - TransactionLogIterator.BatchResult result = - transactionLogIterator.getBatch(); - long currSequenceNumber = result.sequenceNumber(); - if (checkValidStartingSeqNumber && - currSequenceNumber > 1 + sequenceNumber) { - throw new SequenceNumberNotFoundException("Unable to read data from" + - " RocksDB wal to get delta updates. It may have already been" + - "flushed to SSTs."); - } - // If the above condition was not satisfied, then it is OK to reset - // the flag. 
- checkValidStartingSeqNumber = false; - if (currSequenceNumber <= sequenceNumber) { - transactionLogIterator.next(); - continue; - } - dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(), - result.sequenceNumber()); - transactionLogIterator.next(); - } - } catch (RocksDBException e) { - LOG.error("Unable to get delta updates since sequenceNumber {} ", - sequenceNumber, e); - } - return dbUpdatesWrapper; - } - - @VisibleForTesting - public RocksDB getDb() { - return db; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java deleted file mode 100644 index 784738b0cec..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStoreIterator.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.NoSuchElementException; -import java.util.function.Consumer; - -import org.rocksdb.RocksIterator; - -/** - * RocksDB store iterator. 
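A hedged sketch of consuming the delta-update path above from a follower's perspective: ask the store for everything after the last applied sequence number and ship the raw write batches. The DeltaSyncSketch class, pull() method and byte counting are illustrative only.

import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper;
import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException;

final class DeltaSyncSketch {
  private DeltaSyncSketch() { }

  static long pull(DBStore store, long lastAppliedSequence)
      throws SequenceNumberNotFoundException {
    DBUpdatesWrapper updates = store.getUpdatesSince(lastAppliedSequence);
    long shipped = 0;
    for (byte[] batch : updates.getData()) {     // serialized RocksDB WriteBatch data
      shipped += batch.length;                   // transfer / replay on the follower
    }
    return updates.getCurrentSequenceNumber();   // resume point for the next pull
  }
}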
- */ -public class RDBStoreIterator - implements TableIterator { - - private RocksIterator rocksDBIterator; - - public RDBStoreIterator(RocksIterator iterator) { - this.rocksDBIterator = iterator; - rocksDBIterator.seekToFirst(); - } - - @Override - public void forEachRemaining( - Consumer action) { - while (hasNext()) { - action.accept(next()); - } - } - - @Override - public boolean hasNext() { - return rocksDBIterator.isValid(); - } - - @Override - public ByteArrayKeyValue next() { - if (rocksDBIterator.isValid()) { - ByteArrayKeyValue value = - ByteArrayKeyValue.create(rocksDBIterator.key(), rocksDBIterator - .value()); - rocksDBIterator.next(); - return value; - } - throw new NoSuchElementException("RocksDB Store has no more elements"); - } - - @Override - public void seekToFirst() { - rocksDBIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rocksDBIterator.seekToLast(); - } - - @Override - public ByteArrayKeyValue seek(byte[] key) { - rocksDBIterator.seek(key); - if (rocksDBIterator.isValid()) { - return ByteArrayKeyValue.create(rocksDBIterator.key(), - rocksDBIterator.value()); - } - return null; - } - - @Override - public byte[] key() { - if (rocksDBIterator.isValid()) { - return rocksDBIterator.key(); - } - return null; - } - - @Override - public ByteArrayKeyValue value() { - if (rocksDBIterator.isValid()) { - return ByteArrayKeyValue.create(rocksDBIterator.key(), - rocksDBIterator.value()); - } - return null; - } - - @Override - public void close() throws IOException { - rocksDBIterator.close(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java deleted file mode 100644 index 49ccc020922..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.DFSUtil; - -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * RocksDB implementation of ozone metadata store. This class should be only - * used as part of TypedTable as it's underlying implementation to access the - * metadata store content. All other user's using Table should use TypedTable. 
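
A usage sketch for the RDBStoreIterator removed above, assuming an already open RocksDB instance and column family handle, and assuming the sketch lives in the same org.apache.hadoop.hdds.utils.db package as the deleted classes. The method name is illustrative.

    import java.io.IOException;
    import org.rocksdb.ColumnFamilyHandle;
    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDB;

    // Scan one column family through the deleted wrapper; the constructor
    // already seeks to the first entry, so hasNext()/next() can be used
    // directly.
    void scan(RocksDB db, ColumnFamilyHandle handle) throws IOException {
      try (ReadOptions readOptions = new ReadOptions().setFillCache(false);
           RDBStoreIterator iterator =
               new RDBStoreIterator(db.newIterator(handle, readOptions))) {
        while (iterator.hasNext()) {
          ByteArrayKeyValue kv = iterator.next();
          // kv.getKey() / kv.getValue() hold the raw bytes of one entry.
        }
      }
    }
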
- */ -@InterfaceAudience.Private -class RDBTable implements Table { - - - private static final Logger LOG = - LoggerFactory.getLogger(RDBTable.class); - - private final RocksDB db; - private final ColumnFamilyHandle handle; - private final WriteOptions writeOptions; - - /** - * Constructs a TableStore. - * - * @param db - DBstore that we are using. - * @param handle - ColumnFamily Handle. - * @param writeOptions - RocksDB write Options. - */ - RDBTable(RocksDB db, ColumnFamilyHandle handle, - WriteOptions writeOptions) { - this.db = db; - this.handle = handle; - this.writeOptions = writeOptions; - } - - /** - * Converts RocksDB exception to IOE. - * @param msg - Message to add to exception. - * @param e - Original Exception. - * @return IOE. - */ - public static IOException toIOException(String msg, RocksDBException e) { - String statusCode = e.getStatus() == null ? "N/A" : - e.getStatus().getCodeString(); - String errMessage = e.getMessage() == null ? "Unknown error" : - e.getMessage(); - String output = msg + "; status : " + statusCode - + "; message : " + errMessage; - return new IOException(output, e); - } - - /** - * Returns the Column family Handle. - * - * @return ColumnFamilyHandle. - */ - public ColumnFamilyHandle getHandle() { - return handle; - } - - @Override - public void put(byte[] key, byte[] value) throws IOException { - try { - db.put(handle, writeOptions, key, value); - } catch (RocksDBException e) { - LOG.error("Failed to write to DB. Key: {}", new String(key, - StandardCharsets.UTF_8)); - throw toIOException("Failed to put key-value to metadata " - + "store", e); - } - } - - @Override - public void putWithBatch(BatchOperation batch, byte[] key, byte[] value) - throws IOException { - if (batch instanceof RDBBatchOperation) { - ((RDBBatchOperation) batch).put(getHandle(), key, value); - } else { - throw new IllegalArgumentException("batch should be RDBBatchOperation"); - } - } - - - @Override - public boolean isEmpty() throws IOException { - try (TableIterator keyIter = iterator()) { - keyIter.seekToFirst(); - return !keyIter.hasNext(); - } - } - - @Override - public boolean isExist(byte[] key) throws IOException { - try { - // RocksDB#keyMayExist - // If the key definitely does not exist in the database, then this - // method returns false, else true. - return db.keyMayExist(handle, key, new StringBuilder()) - && db.get(handle, key) != null; - } catch (RocksDBException e) { - throw toIOException( - "Error in accessing DB. 
", e); - } - } - - @Override - public byte[] get(byte[] key) throws IOException { - try { - return db.get(handle, key); - } catch (RocksDBException e) { - throw toIOException( - "Failed to get the value for the given key", e); - } - } - - @Override - public void delete(byte[] key) throws IOException { - try { - db.delete(handle, key); - } catch (RocksDBException e) { - throw toIOException("Failed to delete the given key", e); - } - } - - @Override - public void deleteWithBatch(BatchOperation batch, byte[] key) - throws IOException { - if (batch instanceof RDBBatchOperation) { - ((RDBBatchOperation) batch).delete(getHandle(), key); - } else { - throw new IllegalArgumentException("batch should be RDBBatchOperation"); - } - - } - - @Override - public TableIterator iterator() { - ReadOptions readOptions = new ReadOptions(); - readOptions.setFillCache(false); - return new RDBStoreIterator(db.newIterator(handle, readOptions)); - } - - @Override - public String getName() throws IOException { - try { - return DFSUtil.bytes2String(this.getHandle().getName()); - } catch (RocksDBException rdbEx) { - throw toIOException("Unable to get the table name.", rdbEx); - } - } - - @Override - public void close() throws Exception { - // Nothing do for a Column Family. - } - - @Override - public long getEstimatedKeyCount() throws IOException { - try { - return db.getLongProperty(handle, "rocksdb.estimate-num-keys"); - } catch (RocksDBException e) { - throw toIOException( - "Failed to get estimated key count of table " + getName(), e); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java deleted file mode 100644 index 149743816c2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBCheckpoint.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.file.Path; - -import org.apache.commons.io.FileUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to hold information and location of a RocksDB Checkpoint. 
- */ -public class RocksDBCheckpoint implements DBCheckpoint { - - private static final Logger LOG = - LoggerFactory.getLogger(RocksDBCheckpoint.class); - - private Path checkpointLocation; - private long checkpointTimestamp = System.currentTimeMillis(); - private long latestSequenceNumber = -1; - private long checkpointCreationTimeTaken = 0L; - private long ratisSnapshotIndex = 0L; - - public RocksDBCheckpoint(Path checkpointLocation) { - this.checkpointLocation = checkpointLocation; - } - - public RocksDBCheckpoint(Path checkpointLocation, - long snapshotTimestamp, - long latestSequenceNumber, - long checkpointCreationTimeTaken) { - this.checkpointLocation = checkpointLocation; - this.checkpointTimestamp = snapshotTimestamp; - this.latestSequenceNumber = latestSequenceNumber; - this.checkpointCreationTimeTaken = checkpointCreationTimeTaken; - } - - @Override - public Path getCheckpointLocation() { - return this.checkpointLocation; - } - - @Override - public long getCheckpointTimestamp() { - return this.checkpointTimestamp; - } - - @Override - public long getLatestSequenceNumber() { - return this.latestSequenceNumber; - } - - @Override - public long checkpointCreationTimeTaken() { - return checkpointCreationTimeTaken; - } - - @Override - public void cleanupCheckpoint() throws IOException { - LOG.info("Cleaning up RocksDB checkpoint at " + - checkpointLocation.toString()); - FileUtils.deleteDirectory(checkpointLocation.toFile()); - } - - @Override - public void setRatisSnapshotIndex(long omRatisSnapshotIndex) { - this.ratisSnapshotIndex = omRatisSnapshotIndex; - } - - @Override - public long getRatisSnapshotIndex() { - return ratisSnapshotIndex; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java deleted file mode 100644 index 1a8c846a3eb..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * Holds configuration items for OM RocksDB. 
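
A lifecycle sketch for the checkpoint object removed above. It assumes an open DBStore ('store') from the same package, with getCheckpoint(boolean) as implied by the RDBStore override earlier in this patch; the snapshot-transfer step is only indicated by a comment.

    import java.io.IOException;
    import java.nio.file.Path;

    static void snapshot(DBStore store) throws IOException {
      // Flush the WAL first (flush = true), then materialise a checkpoint.
      DBCheckpoint checkpoint = store.getCheckpoint(true);
      try {
        Path dir = checkpoint.getCheckpointLocation();
        long seq = checkpoint.getLatestSequenceNumber();
        // copy 'dir' to its destination and record 'seq' here
      } finally {
        checkpoint.cleanupCheckpoint();   // deletes the checkpoint directory
      }
    }
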
- */ -@ConfigGroup(prefix = "hadoop.hdds.db") -public class RocksDBConfiguration { - - private boolean rocksdbLogEnabled; - - @Config(key = "rocksdb.logging.enabled", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {ConfigTag.OM}, - description = "Enable/Disable RocksDB logging for OM.") - public void setRocksdbLoggingEnabled(boolean enabled) { - this.rocksdbLogEnabled = enabled; - } - - public boolean isRocksdbLoggingEnabled() { - return rocksdbLogEnabled; - } - - private String rocksdbLogLevel; - - @Config(key = "rocksdb.logging.level", - type = ConfigType.STRING, - defaultValue = "INFO", - tags = {ConfigTag.OM}, - description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)") - public void setRocksdbLogLevel(String level) { - this.rocksdbLogLevel = level; - } - - public String getRocksdbLogLevel() { - return rocksdbLogLevel; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java deleted file mode 100644 index e9b4fa391ec..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/SequenceNumberNotFoundException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; - -/** - * Thrown if RocksDB is unable to find requested data from WAL file. - */ -public class SequenceNumberNotFoundException extends IOException { - - public SequenceNumberNotFoundException() { - super(); - } - - public SequenceNumberNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java deleted file mode 100644 index f8237367c72..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
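
The @ConfigGroup prefix and the @Config keys above compose into full property names. Assuming the usual prefix-plus-key concatenation, OM RocksDB logging would be switched on like this (a sketch; the values are illustrative):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean("hadoop.hdds.db.rocksdb.logging.enabled", true);
    conf.set("hadoop.hdds.db.rocksdb.logging.level", "DEBUG");
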
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import org.apache.hadoop.hdfs.DFSUtil; - -/** - * Codec to convert String to/from byte array. - */ -public class StringCodec implements Codec { - - @Override - public byte[] toPersistedFormat(String object) throws IOException { - if (object != null) { - return DFSUtil.string2Bytes(object); - } else { - return null; - } - } - - @Override - public String fromPersistedFormat(byte[] rawData) throws IOException { - if (rawData != null) { - return DFSUtil.bytes2String(rawData); - } else { - return null; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java deleted file mode 100644 index 0502541e9c5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -/** - * Interface for key-value store that stores ozone metadata. Ozone metadata is - * stored as key value pairs, both key and value are arbitrary byte arrays. Each - * Table Stores a certain kind of keys and values. This allows a DB to have - * different kind of tables. - */ -@InterfaceStability.Evolving -public interface Table extends AutoCloseable { - - /** - * Puts a key-value pair into the store. - * - * @param key metadata key - * @param value metadata value - */ - void put(KEY key, VALUE value) throws IOException; - - /** - * Puts a key-value pair into the store as part of a bath operation. - * - * @param batch the batch operation - * @param key metadata key - * @param value metadata value - */ - void putWithBatch(BatchOperation batch, KEY key, VALUE value) - throws IOException; - - /** - * @return true if the metadata store is empty. - * @throws IOException on Failure - */ - boolean isEmpty() throws IOException; - - /** - * Check if a given key exists in Metadata store. - * (Optimization to save on data deserialization) - * A lock on the key / bucket needs to be acquired before invoking this API. - * @param key metadata key - * @return true if the metadata store contains a key. 
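
Another codec in the style of the StringCodec removed above; this LongCodec is purely illustrative and not part of the patch.

    import java.io.IOException;
    import java.nio.ByteBuffer;

    // Encodes a Long as 8 big-endian bytes, mirroring the null handling of
    // StringCodec above.
    public class LongCodec implements Codec<Long> {

      @Override
      public byte[] toPersistedFormat(Long object) throws IOException {
        return object == null
            ? null
            : ByteBuffer.allocate(Long.BYTES).putLong(object).array();
      }

      @Override
      public Long fromPersistedFormat(byte[] rawData) throws IOException {
        return rawData == null ? null : ByteBuffer.wrap(rawData).getLong();
      }
    }
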
- * @throws IOException on Failure - */ - boolean isExist(KEY key) throws IOException; - - /** - * Returns the value mapped to the given key in byte array or returns null - * if the key is not found. - * - * @param key metadata key - * @return value in byte array or null if the key is not found. - * @throws IOException on Failure - */ - VALUE get(KEY key) throws IOException; - - /** - * Deletes a key from the metadata store. - * - * @param key metadata key - * @throws IOException on Failure - */ - void delete(KEY key) throws IOException; - - /** - * Deletes a key from the metadata store as part of a batch operation. - * - * @param batch the batch operation - * @param key metadata key - * @throws IOException on Failure - */ - void deleteWithBatch(BatchOperation batch, KEY key) throws IOException; - - /** - * Returns the iterator for this metadata store. - * - * @return MetaStoreIterator - */ - TableIterator> iterator(); - - /** - * Returns the Name of this Table. - * @return - Table Name. - * @throws IOException on failure. - */ - String getName() throws IOException; - - /** - * Returns the key count of this Table. Note the result can be inaccurate. - * @return Estimated key count of this Table - * @throws IOException on failure - */ - long getEstimatedKeyCount() throws IOException; - - /** - * Add entry to the table cache. - * - * If the cacheKey already exists, it will override the entry. - * @param cacheKey - * @param cacheValue - */ - default void addCacheEntry(CacheKey cacheKey, - CacheValue cacheValue) { - throw new NotImplementedException("addCacheEntry is not implemented"); - } - - /** - * Get the cache value from table cache. - * @param cacheKey - */ - default CacheValue getCacheValue(CacheKey cacheKey) { - throw new NotImplementedException("getCacheValue is not implemented"); - } - - /** - * Removes all the entries from the table cache which are having epoch value - * less - * than or equal to specified epoch value. - * @param epoch - */ - default void cleanupCache(long epoch) { - throw new NotImplementedException("cleanupCache is not implemented"); - } - - /** - * Return cache iterator maintained for this table. - */ - default Iterator, CacheValue>> - cacheIterator() { - throw new NotImplementedException("cacheIterator is not implemented"); - } - - /** - * Class used to represent the key and value pair of a db entry. - */ - interface KeyValue { - - KEY getKey() throws IOException; - - VALUE getValue() throws IOException; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java deleted file mode 100644 index d8eb401659e..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
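
A round-trip sketch against the raw byte[] Table contract declared above and implemented by the RDBTable removed earlier in this patch; the key and value literals are invented for illustration.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    static byte[] roundTrip(Table<byte[], byte[]> table) throws IOException {
      byte[] key = "volume1".getBytes(StandardCharsets.UTF_8);
      byte[] value = "creationTime=0".getBytes(StandardCharsets.UTF_8);

      table.put(key, value);
      // isExist() first asks RocksDB keyMayExist(); only when the bloom filter
      // cannot rule the key out does it pay for a real get().
      byte[] stored = table.isExist(key) ? table.get(key) : null;
      table.delete(key);
      return stored;
    }
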
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdfs.DFSUtil; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; - -/** - * Class that maintains Table Configuration. - */ -public class TableConfig { - private final String name; - private final ColumnFamilyOptions columnFamilyOptions; - - - /** - * Constructs a Table Config. - * @param name - Name of the Table. - * @param columnFamilyOptions - Column Family options. - */ - public TableConfig(String name, ColumnFamilyOptions columnFamilyOptions) { - this.name = name; - this.columnFamilyOptions = columnFamilyOptions; - } - - /** - * Returns the Name for this Table. - * @return - Name String - */ - public String getName() { - return name; - } - - /** - * Returns a ColumnFamilyDescriptor for this table. - * @return ColumnFamilyDescriptor - */ - public ColumnFamilyDescriptor getDescriptor() { - return new ColumnFamilyDescriptor(DFSUtil.string2Bytes(name), - columnFamilyOptions); - } - - /** - * Returns Column family options for this Table. - * @return ColumnFamilyOptions used for the Table. - */ - public ColumnFamilyOptions getColumnFamilyOptions() { - return columnFamilyOptions; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - TableConfig that = (TableConfig) o; - return new EqualsBuilder() - .append(getName(), that.getName()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(getName()) - .toHashCode(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java deleted file mode 100644 index a684157a43b..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableIterator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Iterator; - -/** - * Iterator for MetaDataStore DB. - * - * @param - */ -public interface TableIterator extends Iterator, Closeable { - - /** - * seek to first entry. - */ - void seekToFirst(); - - /** - * seek to last entry. - */ - void seekToLast(); - - /** - * Seek to the specific key. - * - * @param key - Bytes that represent the key. - * @return VALUE. 
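
A construction sketch for the TableConfig removed above; the table name and the default column family options are illustrative.

    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.ColumnFamilyOptions;

    TableConfig config =
        new TableConfig("volumeTable", new ColumnFamilyOptions());
    // The descriptor carries the UTF-8 name bytes plus the column family
    // options, which is the form RocksDB.open() expects for each table.
    ColumnFamilyDescriptor descriptor = config.getDescriptor();
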
- */ - T seek(KEY key) throws IOException; - - /** - * Returns the key value at the current position. - * @return KEY - */ - KEY key() throws IOException; - - /** - * Returns the VALUE at the current position. - * @return VALUE - */ - T value(); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java deleted file mode 100644 index 597eff1f658..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheResult; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; -import org.apache.hadoop.hdds.utils.db.cache.TableCache; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl.CacheCleanupPolicy; - -import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.EXISTS; -import static org.apache.hadoop.hdds.utils.db.cache.CacheResult.CacheStatus.NOT_EXIST; -/** - * Strongly typed table implementation. - *

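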
- * Automatically converts values and keys using a raw byte[] based table - * implementation and registered converters. - * - * @param type of the keys in the store. - * @param type of the values in the store. - */ -public class TypedTable implements Table { - - private final Table rawTable; - - private final CodecRegistry codecRegistry; - - private final Class keyType; - - private final Class valueType; - - private final TableCache, CacheValue> cache; - - private final static long EPOCH_DEFAULT = -1L; - - /** - * Create an TypedTable from the raw table. - * Default cleanup policy used for the table is - * {@link CacheCleanupPolicy#MANUAL}. - * @param rawTable - * @param codecRegistry - * @param keyType - * @param valueType - */ - public TypedTable( - Table rawTable, - CodecRegistry codecRegistry, Class keyType, - Class valueType) throws IOException { - this(rawTable, codecRegistry, keyType, valueType, - CacheCleanupPolicy.MANUAL); - } - - /** - * Create an TypedTable from the raw table with specified cleanup policy - * for table cache. - * @param rawTable - * @param codecRegistry - * @param keyType - * @param valueType - * @param cleanupPolicy - */ - public TypedTable( - Table rawTable, - CodecRegistry codecRegistry, Class keyType, - Class valueType, - TableCacheImpl.CacheCleanupPolicy cleanupPolicy) throws IOException { - this.rawTable = rawTable; - this.codecRegistry = codecRegistry; - this.keyType = keyType; - this.valueType = valueType; - cache = new TableCacheImpl<>(cleanupPolicy); - - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - //fill cache - try(TableIterator> tableIterator = - iterator()) { - - while (tableIterator.hasNext()) { - KeyValue< KEY, VALUE > kv = tableIterator.next(); - - // We should build cache after OM restart when clean up policy is - // NEVER. Setting epoch value -1, so that when it is marked for - // delete, this will be considered for cleanup. - cache.loadInitial(new CacheKey<>(kv.getKey()), - new CacheValue<>(Optional.of(kv.getValue()), EPOCH_DEFAULT)); - } - } - } - } - - @Override - public void put(KEY key, VALUE value) throws IOException { - byte[] keyData = codecRegistry.asRawData(key); - byte[] valueData = codecRegistry.asRawData(value); - rawTable.put(keyData, valueData); - } - - @Override - public void putWithBatch(BatchOperation batch, KEY key, VALUE value) - throws IOException { - byte[] keyData = codecRegistry.asRawData(key); - byte[] valueData = codecRegistry.asRawData(value); - rawTable.putWithBatch(batch, keyData, valueData); - } - - @Override - public boolean isEmpty() throws IOException { - return rawTable.isEmpty(); - } - - @Override - public boolean isExist(KEY key) throws IOException { - - CacheResult> cacheResult = - cache.lookup(new CacheKey<>(key)); - - if (cacheResult.getCacheStatus() == EXISTS) { - return true; - } else if (cacheResult.getCacheStatus() == NOT_EXIST) { - return false; - } else { - return rawTable.isExist(codecRegistry.asRawData(key)); - } - } - - /** - * Returns the value mapped to the given key in byte array or returns null - * if the key is not found. - * - * Caller's of this method should use synchronization mechanism, when - * accessing. First it will check from cache, if it has entry return the - * value, otherwise get from the RocksDB table. - * - * @param key metadata key - * @return VALUE - * @throws IOException - */ - @Override - public VALUE get(KEY key) throws IOException { - // Here the metadata lock will guarantee that cache is not updated for same - // key during get key. 
- - CacheResult> cacheResult = - cache.lookup(new CacheKey<>(key)); - - if (cacheResult.getCacheStatus() == EXISTS) { - return cacheResult.getValue().getCacheValue(); - } else if (cacheResult.getCacheStatus() == NOT_EXIST) { - return null; - } else { - return getFromTable(key); - } - } - - private VALUE getFromTable(KEY key) throws IOException { - byte[] keyBytes = codecRegistry.asRawData(key); - byte[] valueBytes = rawTable.get(keyBytes); - return codecRegistry.asObject(valueBytes, valueType); - } - - @Override - public void delete(KEY key) throws IOException { - rawTable.delete(codecRegistry.asRawData(key)); - } - - @Override - public void deleteWithBatch(BatchOperation batch, KEY key) - throws IOException { - rawTable.deleteWithBatch(batch, codecRegistry.asRawData(key)); - - } - - @Override - public TableIterator iterator() { - TableIterator> iterator = - rawTable.iterator(); - return new TypedTableIterator(iterator, keyType, valueType); - } - - @Override - public String getName() throws IOException { - return rawTable.getName(); - } - - @Override - public long getEstimatedKeyCount() throws IOException { - return rawTable.getEstimatedKeyCount(); - } - - @Override - public void close() throws Exception { - rawTable.close(); - - } - - @Override - public void addCacheEntry(CacheKey cacheKey, - CacheValue cacheValue) { - // This will override the entry if there is already entry for this key. - cache.put(cacheKey, cacheValue); - } - - @Override - public CacheValue getCacheValue(CacheKey cacheKey) { - return cache.get(cacheKey); - } - - public Iterator, CacheValue>> cacheIterator() { - return cache.iterator(); - } - - @Override - public void cleanupCache(long epoch) { - cache.cleanup(epoch); - } - - @VisibleForTesting - TableCache, CacheValue> getCache() { - return cache; - } - - public Table getRawTable() { - return rawTable; - } - - public CodecRegistry getCodecRegistry() { - return codecRegistry; - } - - public Class getKeyType() { - return keyType; - } - - public Class getValueType() { - return valueType; - } - - /** - * Key value implementation for strongly typed tables. - */ - public class TypedKeyValue implements KeyValue { - - private KeyValue rawKeyValue; - - public TypedKeyValue(KeyValue rawKeyValue) { - this.rawKeyValue = rawKeyValue; - } - - public TypedKeyValue(KeyValue rawKeyValue, - Class keyType, Class valueType) { - this.rawKeyValue = rawKeyValue; - } - - @Override - public KEY getKey() throws IOException { - return codecRegistry.asObject(rawKeyValue.getKey(), keyType); - } - - @Override - public VALUE getValue() throws IOException { - return codecRegistry.asObject(rawKeyValue.getValue(), valueType); - } - } - - /** - * Table Iterator implementation for strongly typed tables. 
- */ - public class TypedTableIterator implements TableIterator { - - private TableIterator> - rawIterator; - private final Class keyClass; - private final Class valueClass; - - public TypedTableIterator( - TableIterator> rawIterator, - Class keyType, - Class valueType) { - this.rawIterator = rawIterator; - keyClass = keyType; - valueClass = valueType; - } - - @Override - public void seekToFirst() { - rawIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - rawIterator.seekToLast(); - } - - @Override - public TypedKeyValue seek(KEY key) throws IOException { - byte[] keyBytes = codecRegistry.asRawData(key); - KeyValue result = rawIterator.seek(keyBytes); - if (result == null) { - return null; - } - return new TypedKeyValue(result); - } - - @Override - public KEY key() throws IOException { - byte[] result = rawIterator.key(); - if (result == null) { - return null; - } - return codecRegistry.asObject(result, keyClass); - } - - @Override - public TypedKeyValue value() { - KeyValue keyValue = rawIterator.value(); - if(keyValue != null) { - return new TypedKeyValue(keyValue, keyClass, valueClass); - } - return null; - } - - @Override - public void close() throws IOException { - rawIterator.close(); - } - - @Override - public boolean hasNext() { - return rawIterator.hasNext(); - } - - @Override - public TypedKeyValue next() { - return new TypedKeyValue(rawIterator.next(), keyType, - valueType); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java deleted file mode 100644 index 7be2921b6a1..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * CacheKey for the RocksDB table. 
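
A usage sketch for the strongly typed table removed above. It assumes a raw byte[] table and a CodecRegistry that can already encode String, and that the code sits in the same package as the deleted classes; the keys, values and epoch are illustrative.

    import java.io.IOException;
    import com.google.common.base.Optional;

    static void example(Table<byte[], byte[]> rawTable, CodecRegistry registry)
        throws IOException {
      TypedTable<String, String> volumeTable =
          new TypedTable<>(rawTable, registry, String.class, String.class);

      volumeTable.put("/vol1", "owner=hadoop");
      String owner = volumeTable.get("/vol1");   // cache first, then RocksDB

      // Double-buffer style update: stage the change in the table cache with a
      // Ratis log index as the epoch, flush to RocksDB elsewhere, then evict.
      volumeTable.addCacheEntry(new CacheKey<>("/vol1"),
          new CacheValue<>(Optional.of("owner=ozone"), 42L));
      volumeTable.cleanupCache(42L);
    }
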
- * @param - */ -public class CacheKey implements Comparable { - - private final KEY key; - - public CacheKey(KEY key) { - Objects.requireNonNull(key, "Key Should not be null in CacheKey"); - this.key = key; - } - - public KEY getCacheKey() { - return key; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CacheKey cacheKey = (CacheKey) o; - return Objects.equals(key, cacheKey.key); - } - - @Override - public int hashCode() { - return Objects.hash(key); - } - - @Override - public int compareTo(Object o) { - if(Objects.equals(key, ((CacheKey)o).key)) { - return 0; - } else { - return key.toString().compareTo((((CacheKey) o).key).toString()); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java deleted file mode 100644 index 8c5a68ba072..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheResult.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * CacheResult which is returned as response for Key exist in cache or not. - * @param - */ -public class CacheResult { - - private CacheStatus cacheStatus; - private CACHEVALUE cachevalue; - - public CacheResult(CacheStatus cacheStatus, CACHEVALUE cachevalue) { - this.cacheStatus = cacheStatus; - this.cachevalue = cachevalue; - } - - public CacheStatus getCacheStatus() { - return cacheStatus; - } - - public CACHEVALUE getValue() { - return cachevalue; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CacheResult< ? > that = (CacheResult< ? >) o; - return cacheStatus == that.cacheStatus && - Objects.equals(cachevalue, that.cachevalue); - } - - @Override - public int hashCode() { - return Objects.hash(cacheStatus, cachevalue); - } - - /** - * Status which tells whether key exists in cache or not. - */ - public enum CacheStatus { - EXISTS, // When key exists in cache. - - NOT_EXIST, // We guarantee that it does not exist. This will be returned - // when the key does not exist in cache, when cache clean up policy is - // NEVER. - MAY_EXIST // This will be returned when the key does not exist in - // cache, when cache clean up policy is MANUAL. So caller need to check - // if it might exist in it's rocksdb table. - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java deleted file mode 100644 index de9fe0d95f3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheValue.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import com.google.common.base.Optional; - -/** - * CacheValue for the RocksDB Table. - * @param - */ -public class CacheValue { - - private Optional value; - // This value is used for evict entries from cache. - // This value is set with ratis transaction context log entry index. 
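
A read-path sketch showing how a caller reacts to the three statuses defined above, mirroring the typed get() earlier in this patch. The generic parameters, the 'cache' field and the getFromTable() helper are assumptions made for illustration.

    String readThroughCache(String key) throws IOException {
      CacheResult<CacheValue<String>> result =
          cache.lookup(new CacheKey<>(key));
      switch (result.getCacheStatus()) {
      case EXISTS:
        return result.getValue().getCacheValue();   // served from the cache
      case NOT_EXIST:
        return null;                                // full cache: reliable miss
      default:                                      // MAY_EXIST
        return getFromTable(key);                   // partial cache: ask RocksDB
      }
    }
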
- private long epoch; - - public CacheValue(Optional value, long epoch) { - this.value = value; - this.epoch = epoch; - } - - public VALUE getCacheValue() { - return value.orNull(); - } - - public long getEpoch() { - return epoch; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java deleted file mode 100644 index 7235202b9a4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Objects; - -/** - * Class used which describes epoch entry. This will be used during deletion - * entries from cache for partial table cache. - * @param - */ -public class EpochEntry implements Comparable { - - private long epoch; - private CACHEKEY cachekey; - - EpochEntry(long epoch, CACHEKEY cachekey) { - this.epoch = epoch; - this.cachekey = cachekey; - } - - public long getEpoch() { - return epoch; - } - - public CACHEKEY getCachekey() { - return cachekey; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - EpochEntry that = (EpochEntry) o; - return epoch == that.epoch && cachekey == that.cachekey; - } - - @Override - public int hashCode() { - return Objects.hash(epoch, cachekey); - } - - public int compareTo(Object o) { - if(this.epoch == ((EpochEntry)o).epoch) { - return 0; - } else if (this.epoch < ((EpochEntry)o).epoch) { - return -1; - } else { - return 1; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java deleted file mode 100644 index de5a07978f5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCache.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - -import java.util.Iterator; -import java.util.Map; - -/** - * Cache used for RocksDB tables. - * @param - * @param - */ - -@Private -@Evolving -public interface TableCache { - - /** - * Return the value for the key if it is present, otherwise return null. - * @param cacheKey - * @return CACHEVALUE - */ - CACHEVALUE get(CACHEKEY cacheKey); - - /** - * This method should be called for tables with cache cleanup policy - * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} after system restart to - * fill up the cache. - * @param cacheKey - * @param cacheValue - */ - void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue); - - /** - * Add an entry to the cache, if the key already exists it overrides. - * @param cacheKey - * @param value - */ - void put(CACHEKEY cacheKey, CACHEVALUE value); - - /** - * Removes all the entries from the cache which are having epoch value less - * than or equal to specified epoch value. - * - * If clean up policy is NEVER, this is a do nothing operation. - * If clean up policy is MANUAL, it is caller responsibility to cleanup the - * cache before calling cleanup. - * @param epoch - */ - void cleanup(long epoch); - - /** - * Return the size of the cache. - * @return size - */ - int size(); - - /** - * Return an iterator for the cache. - * @return iterator of the underlying cache for the table. - */ - Iterator> iterator(); - - /** - * Check key exist in cache or not. - * - * If it exists return CacheResult with value and status as - * {@link CacheResult.CacheStatus#EXISTS} - * - * If it does not exist: - * If cache clean up policy is - * {@link TableCacheImpl.CacheCleanupPolicy#NEVER} it means table cache is - * full cache. It return's {@link CacheResult} with null - * and status as {@link CacheResult.CacheStatus#NOT_EXIST}. - * - * If cache clean up policy is - * {@link TableCacheImpl.CacheCleanupPolicy#MANUAL} it means - * table cache is partial cache. It return's {@link CacheResult} with - * null and status as MAY_EXIST. - * - * @param cachekey - */ - CacheResult lookup(CACHEKEY cachekey); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java deleted file mode 100644 index 3e6999a49cf..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/TableCacheImpl.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Iterator; -import java.util.Map; -import java.util.NavigableSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.ThreadFactory; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Evolving; - -/** - * Cache implementation for the table. Depending on the cache clean up policy - * this cache will be full cache or partial cache. - * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#MANUAL}, - * this will be a partial cache. - * - * If cache cleanup policy is set as {@link CacheCleanupPolicy#NEVER}, - * this will be a full cache. - */ -@Private -@Evolving -public class TableCacheImpl implements TableCache { - - private final Map cache; - private final NavigableSet> epochEntries; - private ExecutorService executorService; - private CacheCleanupPolicy cleanupPolicy; - - - - public TableCacheImpl(CacheCleanupPolicy cleanupPolicy) { - - // As for full table cache only we need elements to be inserted in sorted - // manner, so that list will be easy. For other we can go with Hash map. - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - cache = new ConcurrentSkipListMap<>(); - } else { - cache = new ConcurrentHashMap<>(); - } - epochEntries = new ConcurrentSkipListSet<>(); - // Created a singleThreadExecutor, so one cleanup will be running at a - // time. - ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("PartialTableCache Cleanup Thread - %d").build(); - executorService = Executors.newSingleThreadExecutor(build); - this.cleanupPolicy = cleanupPolicy; - } - - @Override - public CACHEVALUE get(CACHEKEY cachekey) { - return cache.get(cachekey); - } - - @Override - public void loadInitial(CACHEKEY cacheKey, CACHEVALUE cacheValue) { - // No need to add entry to epochEntries. Adding to cache is required during - // normal put operation. 
- cache.put(cacheKey, cacheValue); - } - - @Override - public void put(CACHEKEY cacheKey, CACHEVALUE value) { - cache.put(cacheKey, value); - epochEntries.add(new EpochEntry<>(value.getEpoch(), cacheKey)); - } - - @Override - public void cleanup(long epoch) { - executorService.submit(() -> evictCache(epoch, cleanupPolicy)); - } - - @Override - public int size() { - return cache.size(); - } - - @Override - public Iterator> iterator() { - return cache.entrySet().iterator(); - } - - private void evictCache(long epoch, CacheCleanupPolicy cacheCleanupPolicy) { - EpochEntry currentEntry = null; - for (Iterator> iterator = epochEntries.iterator(); - iterator.hasNext();) { - currentEntry = iterator.next(); - CACHEKEY cachekey = currentEntry.getCachekey(); - CacheValue cacheValue = cache.computeIfPresent(cachekey, ((k, v) -> { - if (cleanupPolicy == CacheCleanupPolicy.MANUAL) { - if (v.getEpoch() <= epoch) { - iterator.remove(); - return null; - } - } else if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - // Remove only entries which are marked for delete. - if (v.getEpoch() <= epoch && v.getCacheValue() == null) { - iterator.remove(); - return null; - } - } - return v; - })); - // If currentEntry epoch is greater than epoch, we have deleted all - // entries less than specified epoch. So, we can break. - if (cacheValue != null && cacheValue.getEpoch() >= epoch) { - break; - } - } - } - - public CacheResult lookup(CACHEKEY cachekey) { - - CACHEVALUE cachevalue = cache.get(cachekey); - if (cachevalue == null) { - if (cleanupPolicy == CacheCleanupPolicy.NEVER) { - return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); - } else { - return new CacheResult<>(CacheResult.CacheStatus.MAY_EXIST, - null); - } - } else { - if (cachevalue.getCacheValue() != null) { - return new CacheResult<>(CacheResult.CacheStatus.EXISTS, cachevalue); - } else { - // When entity is marked for delete, cacheValue will be set to null. - // In that case we can return NOT_EXIST irrespective of cache cleanup - // policy. - return new CacheResult<>(CacheResult.CacheStatus.NOT_EXIST, null); - } - } - } - - /** - * Cleanup policies for table cache. - */ - public enum CacheCleanupPolicy { - NEVER, // Cache will not be cleaned up. This mean's the table maintains - // full cache. - MANUAL // Cache will be cleaned up, once after flushing to DB. It is - // caller's responsibility to flush to DB, before calling cleanup cache. - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java deleted file mode 100644 index eb9c5b9da8f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
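
A lifecycle sketch for the partial-cache implementation removed above; the keys and the epoch values (standing in for Ratis log indexes) are illustrative, and the generic parameters are an assumption.

    import com.google.common.base.Optional;

    TableCacheImpl<CacheKey<String>, CacheValue<String>> cache =
        new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL);

    cache.put(new CacheKey<>("/vol1/bucket1"),
        new CacheValue<>(Optional.of("bucketInfo"), 10L));
    cache.put(new CacheKey<>("/vol1/bucket2"),
        new CacheValue<>(Optional.<String>absent(), 11L));  // marked for delete

    // Once everything up to epoch 11 has been flushed to RocksDB, ask the
    // cleanup thread to evict those entries asynchronously:
    cache.cleanup(11L);
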
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils.db.cache; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java deleted file mode 100644 index 8b56bffa777..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Database interfaces for Ozone. - */ -package org.apache.hadoop.hdds.utils.db; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java deleted file mode 100644 index 4576dc82a8e..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java deleted file mode 100644 index 3f7d0b915d5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ /dev/null @@ -1,464 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; - -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.util.TimeDuration; - -import java.util.concurrent.TimeUnit; - -/** - * This class contains constants for configuration keys used in Ozone. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class OzoneConfigKeys { - public static final String OZONE_TAGS_SYSTEM_KEY = - "ozone.tags.system"; - public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; - - public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; - - /** - * - * When set to true, allocate a random free port for ozone container, - * so that a mini cluster is able to launch multiple containers on a node. - * - * When set to false (default), container port is fixed as specified by - * DFS_CONTAINER_IPC_PORT_DEFAULT. - */ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = - false; - - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; - /** - * Ratis Port where containers listen to. - */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; - - /** - * When set to true, allocate a random free port for ozone container, so that - * a mini cluster is able to launch multiple containers on a node. 
- */ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = - false; - public static final String OZONE_ENABLED = - "ozone.enabled"; - public static final boolean OZONE_ENABLED_DEFAULT = false; - public static final String OZONE_TRACE_ENABLED_KEY = - "ozone.trace.enabled"; - public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false; - - public static final String OZONE_METADATA_STORE_IMPL = - "ozone.metastore.impl"; - public static final String OZONE_METADATA_STORE_IMPL_LEVELDB = - "LevelDB"; - public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB = - "RocksDB"; - public static final String OZONE_METADATA_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = - "ozone.metastore.rocksdb.statistics"; - - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT = - "OFF"; - public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF = - "OFF"; - - public static final String OZONE_UNSAFEBYTEOPERATIONS_ENABLED = - "ozone.UnsafeByteOperations.enabled"; - public static final boolean OZONE_UNSAFEBYTEOPERATIONS_ENABLED_DEFAULT - = true; - - public static final String OZONE_CONTAINER_CACHE_SIZE = - "ozone.container.cache.size"; - public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024; - - public static final String OZONE_SCM_BLOCK_SIZE = - "ozone.scm.block.size"; - public static final String OZONE_SCM_BLOCK_SIZE_DEFAULT = "256MB"; - - /** - * Ozone administrator users delimited by comma. - * If not set, only the user who launches an ozone service will be the - * admin user. This property must be set if ozone services are started by - * different users. Otherwise the RPC layer will reject calls from - * other servers which are started by users not in the list. - * */ - public static final String OZONE_ADMINISTRATORS = - "ozone.administrators"; - /** - * Used only for testing purpose. Results in making every user an admin. - * */ - public static final String OZONE_ADMINISTRATORS_WILDCARD = "*"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE = - "ozone.client.stream.buffer.flush.size"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT = - "64MB"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE = - "ozone.client.stream.buffer.max.size"; - - public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT = - "128MB"; - - public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT = - "ozone.client.watch.request.timeout"; - - public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT = - "30s"; - - public static final String OZONE_CLIENT_MAX_RETRIES = - "ozone.client.max.retries"; - public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 100; - public static final String OZONE_CLIENT_RETRY_INTERVAL = - "ozone.client.retry.interval"; - public static final TimeDuration OZONE_CLIENT_RETRY_INTERVAL_DEFAULT = - TimeDuration.valueOf(0, TimeUnit.MILLISECONDS); - - // This defines the overall connection limit for the connection pool used in - // RestClient. - public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX = - "ozone.rest.client.http.connection.max"; - public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100; - - // This defines the connection limit per one HTTP route/host. 
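As an aside, these keys are plain Hadoop Configuration properties. A short illustrative sketch of how a client-side component might resolve a few of them is below; the OzoneClientSettings class and its method are hypothetical, not part of the removed sources, and only standard Configuration accessors are used.

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES_DEFAULT;

    /** Hypothetical consumer of the configuration keys listed above. */
    final class OzoneClientSettings {
      static void summarize(Configuration conf) throws IOException {
        // Mini-cluster tests flip this to true so several containers can bind
        // to free ports on a single host (see the key's Javadoc above).
        boolean randomIpcPort = conf.getBoolean(
            DFS_CONTAINER_IPC_RANDOM_PORT, DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT);

        // Bounded client retries.
        int maxRetries = conf.getInt(
            OZONE_CLIENT_MAX_RETRIES, OZONE_CLIENT_MAX_RETRIES_DEFAULT);

        // "ozone.administrators" is a comma-delimited user list; when it is unset,
        // only the user who launched the service is an admin (per the Javadoc above).
        Collection<String> admins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS);
        boolean isAdmin = admins.contains(OZONE_ADMINISTRATORS_WILDCARD)
            || admins.contains(UserGroupInformation.getCurrentUser().getShortUserName());

        System.out.printf("randomIpcPort=%s, maxRetries=%d, isAdmin=%s%n",
            randomIpcPort, maxRetries, isAdmin);
      }
    }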
- public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX = - "ozone.rest.client.http.connection.per-route.max"; - - public static final int - OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20; - - public static final String OZONE_CLIENT_SOCKET_TIMEOUT = - "ozone.client.socket.timeout"; - public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000; - public static final String OZONE_CLIENT_CONNECTION_TIMEOUT = - "ozone.client.connection.timeout"; - public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000; - - public static final String OZONE_REPLICATION = "ozone.replication"; - public static final int OZONE_REPLICATION_DEFAULT = - ReplicationFactor.THREE.getValue(); - - public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type"; - public static final String OZONE_REPLICATION_TYPE_DEFAULT = - ReplicationType.RATIS.toString(); - - /** - * Configuration property to configure the cache size of client list calls. - */ - public static final String OZONE_CLIENT_LIST_CACHE_SIZE = - "ozone.client.list.cache"; - public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000; - - /** - * Configuration properties for Ozone Block Deleting Service. - */ - public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL = - "ozone.block.deleting.service.interval"; - public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT - = "60s"; - - /** - * The interval of open key clean service. - */ - public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS = - "ozone.open.key.cleanup.service.interval.seconds"; - public static final int - OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT - = 24 * 3600; // a total of 24 hour - - /** - * An open key gets cleaned up when it is being in open state for too long. 
- */ - public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS = - "ozone.open.key.expire.threshold"; - public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT = - 24 * 3600; - - public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT = - "ozone.block.deleting.service.timeout"; - public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT - = "300s"; // 300s for default - - public static final String OZONE_KEY_PREALLOCATION_BLOCKS_MAX = - "ozone.key.preallocation.max.blocks"; - public static final int OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT - = 64; - - public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER = - "ozone.block.deleting.limit.per.task"; - public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT - = 1000; - - public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL - = "ozone.block.deleting.container.limit.per.interval"; - public static final int - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; - public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; - public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; - - // config settings to enable stateMachineData write timeout - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; - public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL = - ScmConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT = - ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT; - - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY; - public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT; - public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY; - public static final TimeDuration - DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT = - ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; - public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final int - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; - public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; - public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS; - public static final int - DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = - 
ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT; - public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT; - public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; - - public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY; - public static final TimeDuration - DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT; - - public static final String HDDS_DATANODE_PLUGINS_KEY = - "hdds.datanode.plugins"; - - public static final String - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD = - "hdds.datanode.storage.utilization.warning.threshold"; - public static final double - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75; - public static final String - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD = - "hdds.datanode.storage.utilization.critical.threshold"; - public static final double - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95; - - public static final String OZONE_SECURITY_ENABLED_KEY = - "ozone.security.enabled"; - public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false; - - public static final String OZONE_CONTAINER_COPY_WORKDIR = - "hdds.datanode.replication.work.dir"; - - /** - * Config properties to set client side checksum properties. - */ - public static final String OZONE_CLIENT_CHECKSUM_TYPE = - "ozone.client.checksum.type"; - public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "CRC32"; - public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM = - "ozone.client.bytes.per.checksum"; - public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB"; - public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES = - 1024 * 1024; - public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024; - public static final String OZONE_CLIENT_VERIFY_CHECKSUM = - "ozone.client.verify.checksum"; - public static final boolean OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT = true; - public static final String OZONE_ACL_AUTHORIZER_CLASS = - "ozone.acl.authorizer.class"; - public static final String OZONE_ACL_AUTHORIZER_CLASS_DEFAULT = - "org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer"; - public static final String OZONE_ACL_AUTHORIZER_CLASS_NATIVE = - "org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer"; - public static final String OZONE_ACL_ENABLED = - "ozone.acl.enabled"; - public static final boolean OZONE_ACL_ENABLED_DEFAULT = - false; - public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY = - "ozone.s3.token.max.lifetime"; - public static final String OZONE_S3_TOKEN_MAX_LIFETIME_KEY_DEFAULT = "3m"; - //For technical reasons this is unused and hardcoded to the - // OzoneFileSystem.initialize. 
- public static final String OZONE_FS_ISOLATED_CLASSLOADER = - "ozone.fs.isolated-classloader"; - - // Ozone Client Retry and Failover configurations - public static final String OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY = - "ozone.client.retry.max.attempts"; - public static final int OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = - 10; - public static final String OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = - "ozone.client.failover.max.attempts"; - public static final int OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = - 15; - public static final String OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY = - "ozone.client.failover.sleep.base.millis"; - public static final int OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT = - 500; - public static final String OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY = - "ozone.client.failover.sleep.max.millis"; - public static final int OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT = - 15000; - - public static final String OZONE_FREON_HTTP_ENABLED_KEY = - "ozone.freon.http.enabled"; - public static final String OZONE_FREON_HTTP_BIND_HOST_KEY = - "ozone.freon.http-bind-host"; - public static final String OZONE_FREON_HTTPS_BIND_HOST_KEY = - "ozone.freon.https-bind-host"; - public static final String OZONE_FREON_HTTP_ADDRESS_KEY = - "ozone.freon.http-address"; - public static final String OZONE_FREON_HTTPS_ADDRESS_KEY = - "ozone.freon.https-address"; - - public static final String OZONE_FREON_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_FREON_HTTP_BIND_PORT_DEFAULT = 9884; - public static final int OZONE_FREON_HTTPS_BIND_PORT_DEFAULT = 9885; - public static final String - OZONE_FREON_HTTP_KERBEROS_PRINCIPAL_KEY = - "ozone.freon.http.kerberos.principal"; - public static final String - OZONE_FREON_HTTP_KERBEROS_KEYTAB_FILE_KEY = - "ozone.freon.http.kerberos.keytab"; - - public static final String OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY = - "ozone.network.topology.aware.read"; - public static final boolean OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT = false; - - public static final String OZONE_MANAGER_FAIR_LOCK = "ozone.om.lock.fair"; - public static final boolean OZONE_MANAGER_FAIR_LOCK_DEFAULT = false; - - /** - * There is no need to instantiate this class. - */ - private OzoneConfigKeys() { - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java deleted file mode 100644 index 9817d877eb5..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.Context; -import org.apache.ratis.thirdparty.io.grpc.Metadata; - -import static org.apache.ratis.thirdparty.io.grpc.Metadata.ASCII_STRING_MARSHALLER; - -/** - * Set of constants used in Ozone implementation. - */ -@InterfaceAudience.Private -public final class OzoneConsts { - - - public static final String STORAGE_DIR = "scm"; - public static final String SCM_ID = "scmUuid"; - - public static final String OZONE_SIMPLE_ROOT_USER = "root"; - public static final String OZONE_SIMPLE_HDFS_USER = "hdfs"; - - public static final String STORAGE_ID = "storageID"; - public static final String DATANODE_UUID = "datanodeUuid"; - public static final String CLUSTER_ID = "clusterID"; - public static final String LAYOUTVERSION = "layOutVersion"; - public static final String CTIME = "ctime"; - /* - * BucketName length is used for both buckets and volume lengths - */ - public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3; - public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63; - - public static final String OZONE_ACL_USER_TYPE = "user"; - public static final String OZONE_ACL_GROUP_TYPE = "group"; - public static final String OZONE_ACL_WORLD_TYPE = "world"; - public static final String OZONE_ACL_ANONYMOUS_TYPE = "anonymous"; - public static final String OZONE_ACL_IP_TYPE = "ip"; - - public static final String OZONE_ACL_READ = "r"; - public static final String OZONE_ACL_WRITE = "w"; - public static final String OZONE_ACL_DELETE = "d"; - public static final String OZONE_ACL_LIST = "l"; - public static final String OZONE_ACL_ALL = "a"; - public static final String OZONE_ACL_NONE = "n"; - public static final String OZONE_ACL_CREATE = "c"; - public static final String OZONE_ACL_READ_ACL = "x"; - public static final String OZONE_ACL_WRITE_ACL = "y"; - - - public static final String OZONE_DATE_FORMAT = - "EEE, dd MMM yyyy HH:mm:ss zzz"; - public static final String OZONE_TIME_ZONE = "GMT"; - - public static final String OZONE_COMPONENT = "component"; - public static final String OZONE_FUNCTION = "function"; - public static final String OZONE_RESOURCE = "resource"; - public static final String OZONE_USER = "user"; - public static final String OZONE_REQUEST = "request"; - - // OM Http server endpoints - public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = - "/serviceList"; - public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT = - "/dbCheckpoint"; - - // Ozone File System scheme - public static final String OZONE_URI_SCHEME = "o3fs"; - - public static final String OZONE_RPC_SCHEME = "o3"; - public static final String OZONE_HTTP_SCHEME = "http"; - public static final String OZONE_URI_DELIMITER = "/"; - - public static final String CONTAINER_EXTENSION = ".container"; - public static final String CONTAINER_META = ".meta"; - - // Refer to {@link ContainerReader} for container storage layout on disk. 
- public static final String CONTAINER_PREFIX = "containers"; - public static final String CONTAINER_META_PATH = "metadata"; - public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp"; - public static final String CONTAINER_CHUNK_NAME_DELIMITER = "."; - public static final String CONTAINER_ROOT_PREFIX = "repository"; - - public static final String FILE_HASH = "SHA-256"; - public static final String MD5_HASH = "MD5"; - public final static String CHUNK_OVERWRITE = "OverWriteRequested"; - - public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB - public static final long KB = 1024L; - public static final long MB = KB * 1024L; - public static final long GB = MB * 1024L; - public static final long TB = GB * 1024L; - - /** - * level DB names used by SCM and data nodes. - */ - public static final String CONTAINER_DB_SUFFIX = "container.db"; - public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; - public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX; - public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX; - public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; - public static final String DELETED_BLOCK_DB = "deletedBlock.db"; - public static final String OM_DB_NAME = "om.db"; - public static final String OM_DB_BACKUP_PREFIX = "om.db.backup."; - public static final String OM_DB_CHECKPOINTS_DIR_NAME = "om.db.checkpoints"; - public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db"; - public static final String SCM_DB_NAME = "scm.db"; - - public static final String STORAGE_DIR_CHUNKS = "chunks"; - public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH = - "flushBeforeCheckpoint"; - - /** - * Supports Bucket Versioning. - */ - public enum Versioning { - NOT_DEFINED, ENABLED, DISABLED; - - public static Versioning getVersioning(boolean versioning) { - return versioning ? ENABLED : DISABLED; - } - } - - public static final String DELETING_KEY_PREFIX = "#deleting#"; - public static final String DELETED_KEY_PREFIX = "#deleted#"; - public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#"; - public static final String BLOCK_COMMIT_SEQUENCE_ID_PREFIX = "#BCSID"; - - /** - * OM LevelDB prefixes. - * - * OM DB stores metadata as KV pairs with certain prefixes, - * prefix is used to improve the performance to get related - * metadata. - * - * OM DB Schema: - * ---------------------------------------------------------- - * | KEY | VALUE | - * ---------------------------------------------------------- - * | $userName | VolumeList | - * ---------------------------------------------------------- - * | /#volumeName | VolumeInfo | - * ---------------------------------------------------------- - * | /#volumeName/#bucketName | BucketInfo | - * ---------------------------------------------------------- - * | /volumeName/bucketName/keyName | KeyInfo | - * ---------------------------------------------------------- - * | #deleting#/volumeName/bucketName/keyName | KeyInfo | - * ---------------------------------------------------------- - */ - - public static final String OM_KEY_PREFIX = "/"; - public static final String OM_USER_PREFIX = "$"; - public static final String OM_S3_PREFIX ="S3:"; - public static final String OM_S3_VOLUME_PREFIX = "s3"; - public static final String OM_S3_SECRET = "S3Secret:"; - public static final String OM_PREFIX = "Prefix:"; - - /** - * Max chunk size limit. - */ - public static final int OZONE_SCM_CHUNK_MAX_SIZE = 32 * 1024 * 1024; - - - /** - * Max OM Quota size of 1024 PB. 
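The OM DB schema sketched in the Javadoc above maps directly onto these prefix constants. A small illustrative helper makes the documented key layout concrete; OmDbKeys and its method names are hypothetical, not part of the deleted sources.

    import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
    import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
    import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;

    /** Hypothetical helper mirroring the OM DB schema documented above. */
    final class OmDbKeys {
      // "$hadoop"                      -> VolumeList for user "hadoop"
      static String userKey(String user) {
        return OM_USER_PREFIX + user;
      }

      // "/vol1"                        -> VolumeInfo
      static String volumeKey(String volume) {
        return OM_KEY_PREFIX + volume;
      }

      // "/vol1/bucket1"                -> BucketInfo
      static String bucketKey(String volume, String bucket) {
        return volumeKey(volume) + OM_KEY_PREFIX + bucket;
      }

      // "/vol1/bucket1/key1"           -> KeyInfo
      static String ozoneKey(String volume, String bucket, String key) {
        return bucketKey(volume, bucket) + OM_KEY_PREFIX + key;
      }

      // "#deleting#/vol1/bucket1/key1" -> KeyInfo queued for deletion
      static String deletingKey(String volume, String bucket, String key) {
        return DELETING_KEY_PREFIX + ozoneKey(volume, bucket, key);
      }
    }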
- */ - public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB; - - /** - * Max number of keys returned per list buckets operation. - */ - public static final int MAX_LISTBUCKETS_SIZE = 1024; - - /** - * Max number of keys returned per list keys operation. - */ - public static final int MAX_LISTKEYS_SIZE = 1024; - - /** - * Max number of volumes returned per list volumes operation. - */ - public static final int MAX_LISTVOLUMES_SIZE = 1024; - - public static final int INVALID_PORT = -1; - - - /** - * Default SCM Datanode ID file name. - */ - public static final String OZONE_SCM_DATANODE_ID_FILE_DEFAULT = "datanode.id"; - - // The ServiceListJSONServlet context attribute where OzoneManager - // instance gets stored. - public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om"; - - private OzoneConsts() { - // Never Constructed - } - - // YAML fields for .container files - public static final String CONTAINER_ID = "containerID"; - public static final String CONTAINER_TYPE = "containerType"; - public static final String STATE = "state"; - public static final String METADATA = "metadata"; - public static final String MAX_SIZE = "maxSize"; - public static final String METADATA_PATH = "metadataPath"; - public static final String CHUNKS_PATH = "chunksPath"; - public static final String CONTAINER_DB_TYPE = "containerDBType"; - public static final String CHECKSUM = "checksum"; - public static final String ORIGIN_PIPELINE_ID = "originPipelineId"; - public static final String ORIGIN_NODE_ID = "originNodeId"; - - // Supported store types. - public static final String OZONE = "ozone"; - public static final String S3 = "s3"; - - // For OM Audit usage - public static final String VOLUME = "volume"; - public static final String BUCKET = "bucket"; - public static final String KEY = "key"; - public static final String QUOTA = "quota"; - public static final String QUOTA_IN_BYTES = "quotaInBytes"; - public static final String OBJECT_ID = "objectID"; - public static final String UPDATE_ID = "updateID"; - public static final String CLIENT_ID = "clientID"; - public static final String OWNER = "owner"; - public static final String ADMIN = "admin"; - public static final String USERNAME = "username"; - public static final String PREV_KEY = "prevKey"; - public static final String START_KEY = "startKey"; - public static final String MAX_KEYS = "maxKeys"; - public static final String PREFIX = "prefix"; - public static final String KEY_PREFIX = "keyPrefix"; - public static final String ACL = "acl"; - public static final String ACLS = "acls"; - public static final String USER_ACL = "userAcl"; - public static final String ADD_ACLS = "addAcls"; - public static final String REMOVE_ACLS = "removeAcls"; - public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets"; - public static final String TO_KEY_NAME = "toKeyName"; - public static final String STORAGE_TYPE = "storageType"; - public static final String RESOURCE_TYPE = "resourceType"; - public static final String IS_VERSION_ENABLED = "isVersionEnabled"; - public static final String CREATION_TIME = "creationTime"; - public static final String DATA_SIZE = "dataSize"; - public static final String REPLICATION_TYPE = "replicationType"; - public static final String REPLICATION_FACTOR = "replicationFactor"; - public static final String KEY_LOCATION_INFO = "keyLocationInfo"; - public static final String MULTIPART_LIST = "multipartList"; - public static final String UPLOAD_ID = "uploadID"; - public static final String PART_NUMBER_MARKER = "partNumberMarker"; - 
public static final String MAX_PARTS = "maxParts"; - public static final String S3_BUCKET = "s3Bucket"; - public static final String S3_GETSECRET_USER = "S3GetSecretUser"; - - - - // For OM metrics saving to a file - public static final String OM_METRICS_FILE = "omMetrics"; - public static final String OM_METRICS_TEMP_FILE = OM_METRICS_FILE + ".tmp"; - - // For Multipart upload - public static final int OM_MULTIPART_MIN_SIZE = 5 * 1024 * 1024; - - // GRPC block token metadata header and context key - public static final String OZONE_BLOCK_TOKEN = "blocktoken"; - public static final Context.Key UGI_CTX_KEY = - Context.key("UGI"); - - public static final Metadata.Key OBT_METADATA_KEY = - Metadata.Key.of(OZONE_BLOCK_TOKEN, ASCII_STRING_MARSHALLER); - public static final Metadata.Key USER_METADATA_KEY = - Metadata.Key.of(OZONE_USER, ASCII_STRING_MARSHALLER); - - public static final String RPC_PORT = "RPC"; - - // Default OMServiceID for OM Ratis servers to use as RaftGroupId - public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault"; - - // Dummy OMNodeID for OM Clients to use for a non-HA OM setup - public static final String OM_NODE_ID_DUMMY = "omNodeIdDummy"; - - // OM Ratis snapshot file to store the last applied index - public static final String OM_RATIS_SNAPSHOT_INDEX = "ratisSnapshotIndex"; - - // OM Http request parameter to be used while downloading DB checkpoint - // from OM leader to follower - public static final String OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT = - "snapshotBeforeCheckpoint"; - - public static final String JAVA_TMP_DIR = "java.io.tmpdir"; - public static final String LOCALHOST = "localhost"; - - - public static final int S3_BUCKET_MIN_LENGTH = 3; - public static final int S3_BUCKET_MAX_LENGTH = 64; - - //GDPR - public static final String GDPR_FLAG = "gdprEnabled"; - public static final String GDPR_ALGORITHM_NAME = "AES"; - public static final int GDPR_DEFAULT_RANDOM_SECRET_LENGTH = 16; - public static final String GDPR_CHARSET = "UTF-8"; - public static final String GDPR_LENGTH = "length"; - public static final String GDPR_SECRET = "secret"; - public static final String GDPR_ALGORITHM = "algorithm"; - - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java deleted file mode 100644 index c1fb8938061..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -import org.apache.commons.validator.routines.InetAddressValidator; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Enumeration; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Ozone security Util class. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class OzoneSecurityUtil { - - private final static Logger LOG = - LoggerFactory.getLogger(OzoneSecurityUtil.class); - // List of ip's not recommended to be added to CSR. - private final static Set INVALID_IPS = new HashSet<>(Arrays.asList( - "0.0.0.0", "127.0.0.1")); - - private OzoneSecurityUtil() { - } - - public static boolean isSecurityEnabled(Configuration conf) { - return conf.getBoolean(OZONE_SECURITY_ENABLED_KEY, - OZONE_SECURITY_ENABLED_DEFAULT); - } - - /** - * Returns Keys status. - * - * @return True if the key files exist. - */ - public static boolean checkIfFileExist(Path path, String fileName) { - if (Files.exists(path) && Files.exists(Paths.get(path.toString(), - fileName))) { - return true; - } - return false; - } - - /** - * Iterates through network interfaces and return all valid ip's not - * listed in CertificateSignRequest#INVALID_IPS. - * - * @return List - * @throws IOException if no network interface are found or if an error - * occurs. - */ - public static List getValidInetsForCurrentHost() - throws IOException { - List hostIps = new ArrayList<>(); - InetAddressValidator ipValidator = InetAddressValidator.getInstance(); - - Enumeration enumNI = - NetworkInterface.getNetworkInterfaces(); - if (enumNI != null) { - while (enumNI.hasMoreElements()) { - NetworkInterface ifc = enumNI.nextElement(); - if (ifc.isUp()) { - Enumeration enumAdds = ifc.getInetAddresses(); - while (enumAdds.hasMoreElements()) { - InetAddress addr = enumAdds.nextElement(); - - if (ipValidator.isValid(addr.getHostAddress()) - && !INVALID_IPS.contains(addr.getHostAddress())) { - LOG.info("Adding ip:{},host:{}", addr.getHostAddress(), - addr.getHostName()); - hostIps.add(addr); - } else { - LOG.info("ip:{},host:{} not returned.", addr.getHostAddress(), - addr.getHostName()); - } - } - } - } - return hostIps; - } else { - throw new IOException("Unable to get network interfaces."); - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java deleted file mode 100644 index 8c1d6f0c67d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Interface to define AuditAction. - */ -public interface AuditAction { - /** - * Implementation must override. - * @return String - */ - String getAction(); -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java deleted file mode 100644 index 098ab6b2f7f..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define AuditEventStatus values. - */ -public enum AuditEventStatus { - SUCCESS("SUCCESS"), - FAILURE("FAILURE"); - - private String status; - - AuditEventStatus(String status){ - this.status = status; - } - - public String getStatus() { - return status; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java deleted file mode 100644 index ee6f45dadb4..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.spi.ExtendedLogger; - - -/** - * Class to define Audit Logger for Ozone. - */ -public class AuditLogger { - - private ExtendedLogger logger; - private static final String FQCN = AuditLogger.class.getName(); - private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker(); - private static final Marker READ_MARKER = AuditMarker.READ.getMarker(); - - /** - * Parametrized Constructor to initialize logger. - * @param type Audit Logger Type - */ - public AuditLogger(AuditLoggerType type){ - initializeLogger(type); - } - - /** - * Initializes the logger with specific type. - * @param loggerType specified one of the values from enum AuditLoggerType. - */ - private void initializeLogger(AuditLoggerType loggerType){ - this.logger = LogManager.getContext(false).getLogger(loggerType.getType()); - } - - @VisibleForTesting - public ExtendedLogger getLogger() { - return logger; - } - - public void logWriteSuccess(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null); - } - - public void logWriteFailure(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg, - msg.getThrowable()); - } - - public void logReadSuccess(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null); - } - - public void logReadFailure(AuditMessage msg) { - this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg, - msg.getThrowable()); - } - - public void logWrite(AuditMessage auditMessage) { - if (auditMessage.getThrowable() == null) { - this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, auditMessage, - auditMessage.getThrowable()); - } else { - this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, auditMessage, - auditMessage.getThrowable()); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java deleted file mode 100644 index 18241c7712a..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enumeration for defining types of Audit Loggers in Ozone. - */ -public enum AuditLoggerType { - DNLOGGER("DNAudit"), - OMLOGGER("OMAudit"), - SCMLOGGER("SCMAudit"); - - private String type; - - public String getType() { - return type; - } - - AuditLoggerType(String type){ - this.type = type; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java deleted file mode 100644 index 505b9580715..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.MarkerManager; - -/** - * Defines audit marker types. - */ -public enum AuditMarker { - WRITE(MarkerManager.getMarker("WRITE")), - READ(MarkerManager.getMarker("READ")); - - private Marker marker; - - AuditMarker(Marker marker){ - this.marker = marker; - } - - public Marker getMarker(){ - return marker; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java deleted file mode 100644 index 1569ffe3ba7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.logging.log4j.message.Message; - -import java.util.Map; - -/** - * Defines audit message structure. - */ -public class AuditMessage implements Message { - - private String message; - private Throwable throwable; - - private static final String MSG_PATTERN = - "user=%s | ip=%s | op=%s %s | ret=%s"; - - public AuditMessage(){ - - } - - @Override - public String getFormattedMessage() { - return message; - } - - @Override - public String getFormat() { - return null; - } - - @Override - public Object[] getParameters() { - return new Object[0]; - } - - @Override - public Throwable getThrowable() { - return throwable; - } - - /** - * Use when there are custom string to be added to default msg. - * @param customMessage custom string - */ - private void appendMessage(String customMessage) { - this.message += customMessage; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - public void setThrowable(Throwable throwable) { - this.throwable = throwable; - } - - /** - * Builder class for AuditMessage. - */ - public static class Builder { - private Throwable throwable; - private String user; - private String ip; - private String op; - private Map params; - private String ret; - - public Builder(){ - - } - - public Builder setUser(String usr){ - this.user = usr; - return this; - } - - public Builder atIp(String ipAddr){ - this.ip = ipAddr; - return this; - } - - public Builder forOperation(String operation){ - this.op = operation; - return this; - } - - public Builder withParams(Map args){ - this.params = args; - return this; - } - - public Builder withResult(String result){ - this.ret = result; - return this; - } - - public Builder withException(Throwable ex){ - this.throwable = ex; - return this; - } - - public AuditMessage build(){ - AuditMessage auditMessage = new AuditMessage(); - auditMessage.message = String.format(MSG_PATTERN, - this.user, this.ip, this.op, this.params, this.ret); - auditMessage.throwable = this.throwable; - return auditMessage; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java deleted file mode 100644 index 9d7dbee35b0..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import java.util.Map; - -/** - * Interface to make an entity auditable. - */ -public interface Auditable { - /** - * Must override in implementation. - * @return {@literal Map} with values to be logged in audit. - */ - Map toAuditMap(); -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java deleted file mode 100644 index 51c029868bf..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -import java.util.Map; - -/** - * Interface to mark an actor as Auditor. - */ -public interface Auditor { - - AuditMessage buildAuditMessageForSuccess( - AuditAction op, Map auditMap); - - AuditMessage buildAuditMessageForFailure( - AuditAction op, Map auditMap, Throwable throwable); - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java deleted file mode 100644 index 1c87f2bdeba..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for Datanode. - */ -public enum DNAction implements AuditAction { - - CREATE_CONTAINER, - READ_CONTAINER, - UPDATE_CONTAINER, - DELETE_CONTAINER, - LIST_CONTAINER, - PUT_BLOCK, - GET_BLOCK, - DELETE_BLOCK, - LIST_BLOCK, - READ_CHUNK, - DELETE_CHUNK, - WRITE_CHUNK, - LIST_CHUNK, - COMPACT_CHUNK, - PUT_SMALL_FILE, - GET_SMALL_FILE, - CLOSE_CONTAINER, - GET_COMMITTED_BLOCK_LENGTH; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java deleted file mode 100644 index d03ad157220..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for SCM. - */ -public enum SCMAction implements AuditAction { - - GET_VERSION, - REGISTER, - SEND_HEARTBEAT, - GET_SCM_INFO, - ALLOCATE_BLOCK, - DELETE_KEY_BLOCK, - ALLOCATE_CONTAINER, - GET_CONTAINER, - GET_CONTAINER_WITH_PIPELINE, - LIST_CONTAINER, - LIST_PIPELINE, - CLOSE_PIPELINE, - ACTIVATE_PIPELINE, - DEACTIVATE_PIPELINE, - DELETE_CONTAINER, - IN_SAFE_MODE, - FORCE_EXIT_SAFE_MODE, - SORT_DATANODE, - START_REPLICATION_MANAGER, - STOP_REPLICATION_MANAGER, - GET_REPLICATION_MANAGER_STATUS; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index c8284fd8ff3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; -/** - ****************************************************************************** - * Important - * 1. Any changes to classes in this package can render the logging - * framework broken. - * 2. The logger framework has been designed keeping in mind future - * plans to build a log parser. - * 3. Please exercise great caution when attempting changes in this package. - ****************************************************************************** - * - * - * This package lays the foundation for Audit logging in Ozone. - * AuditLogging in Ozone has been built using log4j2 which brings in new - * features that facilitate turning on/off selective audit events by using - * MarkerFilter, checking for change in logging configuration periodically - * and reloading the changes, use of disruptor framework for improved - * Asynchronous logging. - * - * The log4j2 configurations can be specified in XML, YAML, JSON and - * Properties file. For Ozone, we are using the Properties file due to sheer - * simplicity, readability and ease of modification. - * - * log4j2 configuration file can be passed to startup command with option - * -Dlog4j.configurationFile unlike -Dlog4j.configuration in log4j 1.x - * - ****************************************************************************** - * Understanding the Audit Logging framework in Ozone. - ****************************************************************************** - * **** Auditable *** - * This is an interface to mark an entity as auditable. - * This interface must be implemented by entities requiring audit logging. - * For example - OMVolumeArgs, OMBucketArgs. - * The implementing class must override toAuditMap() to return an - * instance of Map where both Key and Value are String. - * - * Key: must contain printable US ASCII characters - * May not contain a space, =, ], or " - * If the key is multi word then use camel case. - * - * Value: if it is a collection/array, then it must be converted to a comma - * delimited string - * - * *** AuditAction *** - * This is an interface to define the various type of actions to be audited. - * To ensure separation of concern, for each sub-component you must create an - * Enum to implement AuditAction. - * Structure of Enum can be referred from the test class DummyAction. - * - * For starters, we expect following 3 implementations of AuditAction: - * OMAction - to define action types for Ozone Manager - * SCMAction - to define action types for Storage Container manager - * DNAction - to define action types for Datanode - * - * *** AuditEventStatus *** - * Enum to define Audit event status like success and failure. - * This is used in AuditLogger.logXXX() methods. - * - * * *** AuditLogger *** - * This is where the audit logging magic unfolds. - * The class has 2 Markers defined - READ and WRITE. - * These markers are used to tag when logging events. - * - * *** AuditLoggerType *** - * Enum to define the various AuditLoggers in Ozone - * - * *** AuditMarker *** - * Enum to define various Audit Markers used in AuditLogging. 
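As a rough illustration of how the audit classes described in this package fit together, the sketch below shows an Ozone Manager component logging a WRITE event with the AuditLogger usage flow listed in this description. The enclosing class, OMAction.CREATE_VOLUME and the audit-map key are assumptions for illustration only, not code from this patch.

import java.util.LinkedHashMap;
import java.util.Map;

/** Illustrative sketch only; the Auditor build methods are left abstract. */
public abstract class VolumeManagerSketch implements Auditor {

  // Logger type per the usage notes in this package description.
  private static final AuditLogger AUDIT =
      new AuditLogger(AuditLoggerType.OMLogger);

  public void createVolume(String volume) {
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put("volume", volume);   // keys: printable US-ASCII, camel case
    try {
      // ... perform the actual volume creation ...
      // OMAction.CREATE_VOLUME is assumed here purely for illustration.
      AUDIT.logWriteSuccess(
          buildAuditMessageForSuccess(OMAction.CREATE_VOLUME, auditMap));
    } catch (RuntimeException ex) {
      AUDIT.logWriteFailure(
          buildAuditMessageForFailure(OMAction.CREATE_VOLUME, auditMap, ex));
      throw ex;
    }
  }
}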
- * - * *** AuditMessage *** - * Entity to define an audit message to be logged - * It will generate a message formatted as: - * user=xxx ip=xxx op=XXXX_XXXX {key=val, key1=val1..} ret=XXXXXX - * - * *** Auditor *** - * Interface to mark an actor class as Auditor - * Must be implemented by class where we want to log audit events - * Implementing class must override and implement methods - * buildAuditMessageForSuccess and buildAuditMessageForFailure. - * - * **************************************************************************** - * Usage - * **************************************************************************** - * Using the AuditLogger to log events: - * 1. Get a logger by specifying the appropriate logger type - * Example: ExtendedLogger AUDIT = new AuditLogger(AuditLoggerType.OMLogger) - * - * 2. Construct an instance of AuditMessage - * - * 3. Log Read/Write and Success/Failure event as needed. - * Example - * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params)) - * - * 4. Log Level implicitly defaults to INFO for xxxxSuccess() and ERROR for - * xxxxFailure() - * AUDIT.logWriteSuccess(buildAuditMessageForSuccess(params)) - * AUDIT.logWriteFailure(buildAuditMessageForSuccess(params)) - * - * See sample invocations in src/test in the following class: - * org.apache.hadoop.ozone.audit.TestOzoneAuditLogger - * - * **************************************************************************** - * Defining new Logger types - * **************************************************************************** - * New Logger type can be added with following steps: - * 1. Update AuditLoggerType to add the new type - * 2. Create new Enum by implementing AuditAction if needed - * 3. Ensure the required entity implements Auditable - * - * **************************************************************************** - * Defining new Marker types - * **************************************************************************** - * New Markers can be configured as follows: - * 1. Define new markers in AuditMarker - * 2. Get the Marker in AuditLogger for use in the log methods, example: - * private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker(); - * 3. Define log methods in AuditLogger to use the new Marker type - * 4. Call these new methods from the required classes to audit with these - * new markers - * 5. The marker based filtering can be configured in log4j2 configurations - * Refer log4j2.properties in src/test/resources for a sample. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java deleted file mode 100644 index 1925c22aa23..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .KeyBlocks; - -import java.util.ArrayList; -import java.util.List; - -/** - * A group of blocks relations relevant, e.g belong to a certain object key. - */ -public final class BlockGroup { - - private String groupID; - private List blockIDs; - private BlockGroup(String groupID, List blockIDs) { - this.groupID = groupID; - this.blockIDs = blockIDs; - } - - public List getBlockIDList() { - return blockIDs; - } - - public String getGroupID() { - return groupID; - } - - public KeyBlocks getProto() { - KeyBlocks.Builder kbb = KeyBlocks.newBuilder(); - for (BlockID block : blockIDs) { - kbb.addBlocks(block.getProtobuf()); - } - return kbb.setKey(groupID).build(); - } - - /** - * Parses a KeyBlocks proto to a group of blocks. - * @param proto KeyBlocks proto. - * @return a group of blocks. - */ - public static BlockGroup getFromProto(KeyBlocks proto) { - List blockIDs = new ArrayList<>(); - for (HddsProtos.BlockID block : proto.getBlocksList()) { - blockIDs.add(new BlockID(block.getContainerBlockID().getContainerID(), - block.getContainerBlockID().getLocalID())); - } - return BlockGroup.newBuilder().setKeyName(proto.getKey()) - .addAllBlockIDs(blockIDs).build(); - } - - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public String toString() { - return "BlockGroup[" + - "groupID='" + groupID + '\'' + - ", blockIDs=" + blockIDs + - ']'; - } - - /** - * BlockGroup instance builder. - */ - public static class Builder { - - private String groupID; - private List blockIDs; - - public Builder setKeyName(String blockGroupID) { - this.groupID = blockGroupID; - return this; - } - - public Builder addAllBlockIDs(List keyBlocks) { - this.blockIDs = keyBlocks; - return this; - } - - public BlockGroup build() { - return new BlockGroup(groupID, blockIDs); - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java deleted file mode 100644 index 0e70515a492..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
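A short usage sketch for the BlockGroup builder above: it groups the blocks belonging to one object key and converts them to the KeyBlocks protobuf used by the SCM block-deletion protocol. The key name, block IDs and wrapper class below are made up for illustration.

import java.util.Arrays;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
import org.apache.hadoop.ozone.common.BlockGroup;

/** Illustrative only: groups the blocks of one object key for deletion. */
public final class BlockGroupSketch {
  public static KeyBlocks blocksOfKey() {
    BlockGroup group = BlockGroup.newBuilder()
        .setKeyName("/vol1/bucket1/key1")          // made-up object key
        .addAllBlockIDs(Arrays.asList(
            new BlockID(1L, 101L),                 // (containerID, localID)
            new BlockID(1L, 102L)))
        .build();
    return group.getProto();                       // KeyBlocks proto for SCM
  }
}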
- */ -package org.apache.hadoop.ozone.common; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.primitives.Longs; - -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.hadoop.io.MD5Hash; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.PureJavaCrc32C; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to compute and verify checksums for chunks. - * - * This class is not thread safe. - */ -public class Checksum { - - public static final Logger LOG = LoggerFactory.getLogger(Checksum.class); - - private final ChecksumType checksumType; - private final int bytesPerChecksum; - - private PureJavaCrc32 crc32Checksum; - private PureJavaCrc32C crc32cChecksum; - private MessageDigest sha; - - /** - * Constructs a Checksum object. - * @param type type of Checksum - * @param bytesPerChecksum number of bytes of data per checksum - */ - public Checksum(ChecksumType type, int bytesPerChecksum) { - this.checksumType = type; - this.bytesPerChecksum = bytesPerChecksum; - } - - /** - * Constructs a Checksum object with default ChecksumType and default - * BytesPerChecksum. - */ - @VisibleForTesting - public Checksum() { - this.checksumType = ChecksumType.valueOf( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT); - this.bytesPerChecksum = OzoneConfigKeys - .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB - } - - /** - * Computes checksum for give data. - * @param byteBuffer input data in the form of ByteString. - * @return ChecksumData computed for input data. - */ - public ChecksumData computeChecksum(ByteBuffer byteBuffer) - throws OzoneChecksumException { - return computeChecksum(byteBuffer.array(), byteBuffer.position(), - byteBuffer.limit()); - } - - /** - * Computes checksum for give data. - * @param data input data in the form of byte array. - * @return ChecksumData computed for input data. - */ - public ChecksumData computeChecksum(byte[] data) - throws OzoneChecksumException { - return computeChecksum(data, 0, data.length); - } - - /** - * Computes checksum for give data. - * @param data input data in the form of byte array. - * @return ChecksumData computed for input data. - */ - public ChecksumData computeChecksum(byte[] data, int offset, int len) - throws OzoneChecksumException { - ChecksumData checksumData = new ChecksumData(this.checksumType, this - .bytesPerChecksum); - if (checksumType == ChecksumType.NONE) { - // Since type is set to NONE, we do not need to compute the checksums - return checksumData; - } - - switch (checksumType) { - case CRC32: - crc32Checksum = new PureJavaCrc32(); - break; - case CRC32C: - crc32cChecksum = new PureJavaCrc32C(); - break; - case SHA256: - try { - sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - } catch (NoSuchAlgorithmException e) { - throw new OzoneChecksumException(OzoneConsts.FILE_HASH, e); - } - break; - case MD5: - break; - default: - throw new OzoneChecksumException(checksumType); - } - - // Compute number of checksums needs for given data length based on bytes - // per checksum. 
- int dataSize = len - offset; - int numChecksums = (dataSize + bytesPerChecksum - 1) / bytesPerChecksum; - - // Checksum is computed for each bytesPerChecksum number of bytes of data - // starting at offset 0. The last checksum might be computed for the - // remaining data with length less than bytesPerChecksum. - List checksumList = new ArrayList<>(numChecksums); - for (int index = 0; index < numChecksums; index++) { - checksumList.add(computeChecksumAtIndex(data, index, offset, len)); - } - checksumData.setChecksums(checksumList); - - return checksumData; - } - - /** - * Computes checksum based on checksumType for a data block at given index - * and a max length of bytesPerChecksum. - * @param data input data - * @param index index to compute the offset from where data must be read - * @param start start pos of the array where the computation has to start - * @length length of array till which checksum needs to be computed - * @return computed checksum ByteString - * @throws OzoneChecksumException thrown when ChecksumType is not recognized - */ - private ByteString computeChecksumAtIndex(byte[] data, int index, int start, - int length) - throws OzoneChecksumException { - int offset = start + index * bytesPerChecksum; - int dataLength = length - start; - int len = bytesPerChecksum; - if ((offset + len) > dataLength) { - len = dataLength - offset; - } - byte[] checksumBytes = null; - switch (checksumType) { - case CRC32: - checksumBytes = computeCRC32Checksum(data, offset, len); - break; - case CRC32C: - checksumBytes = computeCRC32CChecksum(data, offset, len); - break; - case SHA256: - checksumBytes = computeSHA256Checksum(data, offset, len); - break; - case MD5: - checksumBytes = computeMD5Checksum(data, offset, len); - break; - default: - throw new OzoneChecksumException(checksumType); - } - - return ByteString.copyFrom(checksumBytes); - } - - /** - * Computes CRC32 checksum. - */ - private byte[] computeCRC32Checksum(byte[] data, int offset, int len) { - crc32Checksum.reset(); - crc32Checksum.update(data, offset, len); - return Longs.toByteArray(crc32Checksum.getValue()); - } - - /** - * Computes CRC32C checksum. - */ - private byte[] computeCRC32CChecksum(byte[] data, int offset, int len) { - crc32cChecksum.reset(); - crc32cChecksum.update(data, offset, len); - return Longs.toByteArray(crc32cChecksum.getValue()); - } - - /** - * Computes SHA-256 checksum. - */ - private byte[] computeSHA256Checksum(byte[] data, int offset, int len) { - sha.reset(); - sha.update(data, offset, len); - return sha.digest(); - } - - /** - * Computes MD5 checksum. - */ - private byte[] computeMD5Checksum(byte[] data, int offset, int len) { - MD5Hash md5out = MD5Hash.digest(data, offset, len); - return md5out.getDigest(); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData, starting from index - * startIndex. - * @param byteString input data - * @param checksumData checksumData to match with - * @param startIndex index of first checksum in checksumData to match with - * data's computed checksum. - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(ByteString byteString, - ChecksumData checksumData, int startIndex) throws OzoneChecksumException { - return verifyChecksum(byteString.toByteArray(), checksumData, startIndex); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData. 
- * @param data input data - * @param checksumData checksumData to match with - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(byte[] data, ChecksumData checksumData) - throws OzoneChecksumException { - return verifyChecksum(data, checksumData, 0); - } - - /** - * Computes the ChecksumData for the input data and verifies that it - * matches with that of the input checksumData. - * @param data input data - * @param checksumData checksumData to match with - * @param startIndex index of first checksum in checksumData to match with - * data's computed checksum. - * @throws OzoneChecksumException is thrown if checksums do not match - */ - public static boolean verifyChecksum(byte[] data, ChecksumData checksumData, - int startIndex) throws OzoneChecksumException { - ChecksumType checksumType = checksumData.getChecksumType(); - if (checksumType == ChecksumType.NONE) { - // Checksum is set to NONE. No further verification is required. - return true; - } - - int bytesPerChecksum = checksumData.getBytesPerChecksum(); - Checksum checksum = new Checksum(checksumType, bytesPerChecksum); - ChecksumData computedChecksumData = - checksum.computeChecksum(data, 0, data.length); - - return checksumData.verifyChecksumDataMatches(computedChecksumData, - startIndex); - } - - /** - * Returns a ChecksumData with type NONE for testing. - */ - @VisibleForTesting - public static ContainerProtos.ChecksumData getNoChecksumDataProto() { - return new ChecksumData(ChecksumType.NONE, 0).getProtoBufMessage(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java deleted file mode 100644 index 7ce643db471..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Some portions of this file Copyright (c) 2004-2006 Intel Corportation - * and licensed under the BSD license. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.ratis.util.Preconditions; - -import java.nio.ByteBuffer; -import java.util.zip.Checksum; - -/** - * A sub-interface of {@link Checksum} - * with a method to update checksum from a {@link ByteBuffer}. - */ -public interface ChecksumByteBuffer extends Checksum { - /** - * Updates the current checksum with the specified bytes in the buffer. - * Upon return, the buffer's position will be equal to its limit. 
- * - * @param buffer the bytes to update the checksum with - */ - void update(ByteBuffer buffer); - - @Override - default void update(byte[] b, int off, int len) { - update(ByteBuffer.wrap(b, off, len).asReadOnlyBuffer()); - } - - /** - * An abstract class implementing {@link ChecksumByteBuffer} - * with a 32-bit checksum and a lookup table. - */ - @SuppressWarnings("innerassignment") - abstract class CrcIntTable implements ChecksumByteBuffer { - /** Current CRC value with bit-flipped. */ - private int crc; - - CrcIntTable() { - reset(); - Preconditions.assertTrue(getTable().length == 8 * (1 << 8)); - } - - abstract int[] getTable(); - - @Override - public final long getValue() { - return (~crc) & 0xffffffffL; - } - - @Override - public final void reset() { - crc = 0xffffffff; - } - - @Override - public final void update(int b) { - crc = (crc >>> 8) ^ getTable()[(((crc ^ b) << 24) >>> 24)]; - } - - @Override - public final void update(ByteBuffer b) { - crc = update(crc, b, getTable()); - } - - private static int update(int crc, ByteBuffer b, int[] table) { - for(; b.remaining() > 7;) { - final int c0 = (b.get() ^ crc) & 0xff; - final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff; - final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff; - final int c3 = (b.get() ^ (crc >>> 8)) & 0xff; - crc = (table[0x700 + c0] ^ table[0x600 + c1]) - ^ (table[0x500 + c2] ^ table[0x400 + c3]); - - final int c4 = b.get() & 0xff; - final int c5 = b.get() & 0xff; - final int c6 = b.get() & 0xff; - final int c7 = b.get() & 0xff; - - crc ^= (table[0x300 + c4] ^ table[0x200 + c5]) - ^ (table[0x100 + c6] ^ table[c7]); - } - - // loop unroll - duff's device style - switch (b.remaining()) { - case 7: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 6: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 5: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 4: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 3: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 2: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - case 1: - crc = (crc >>> 8) ^ table[((crc ^ b.get()) & 0xff)]; - default: // noop - } - - return crc; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java deleted file mode 100644 index 4a927fbae6c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumData.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
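A rough usage sketch for the Checksum/ChecksumData pair above, covering the write-path computation and the read-path verification. The chunk payload, checksum type and bytesPerChecksum value are arbitrary, and the class below is illustrative only.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.OzoneChecksumException;

/** Illustrative only: write-path checksum computation and read-path check. */
public final class ChecksumSketch {
  public static void main(String[] args) throws OzoneChecksumException {
    byte[] chunk = "chunk payload".getBytes(StandardCharsets.UTF_8);

    // One checksum entry is produced per bytesPerChecksum bytes of data.
    Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
    ChecksumData stored = checksum.computeChecksum(chunk);

    // On read, the stored ChecksumData is compared against a fresh
    // computation; a mismatch raises OzoneChecksumException.
    boolean matches = Checksum.verifyChecksum(chunk, stored);
    System.out.println("checksum verified: " + matches);
  }
}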
- */ -package org.apache.hadoop.ozone.common; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import java.util.List; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; - -/** - * Java class that represents Checksum ProtoBuf class. This helper class allows - * us to convert to and from protobuf to normal java. - */ -public class ChecksumData { - - private ChecksumType type; - // Checksum will be computed for every bytesPerChecksum number of bytes and - // stored sequentially in checksumList - private int bytesPerChecksum; - private List checksums; - - public ChecksumData(ChecksumType checksumType, int bytesPerChecksum) { - this(checksumType, bytesPerChecksum, Lists.newArrayList()); - } - - public ChecksumData(ChecksumType checksumType, int bytesPerChecksum, - List checksums) { - this.type = checksumType; - this.bytesPerChecksum = bytesPerChecksum; - this.checksums = checksums; - } - - /** - * Getter method for checksumType. - */ - public ChecksumType getChecksumType() { - return this.type; - } - - /** - * Getter method for bytesPerChecksum. - */ - public int getBytesPerChecksum() { - return this.bytesPerChecksum; - } - - /** - * Getter method for checksums. - */ - @VisibleForTesting - public List getChecksums() { - return this.checksums; - } - - /** - * Setter method for checksums. - * @param checksumList list of checksums - */ - public void setChecksums(List checksumList) { - this.checksums.clear(); - this.checksums.addAll(checksumList); - } - - /** - * Construct the Checksum ProtoBuf message. - * @return Checksum ProtoBuf message - */ - public ContainerProtos.ChecksumData getProtoBufMessage() { - ContainerProtos.ChecksumData.Builder checksumProtoBuilder = - ContainerProtos.ChecksumData.newBuilder() - .setType(this.type) - .setBytesPerChecksum(this.bytesPerChecksum); - - checksumProtoBuilder.addAllChecksums(checksums); - - return checksumProtoBuilder.build(); - } - - /** - * Constructs Checksum class object from the Checksum ProtoBuf message. - * @param checksumDataProto Checksum ProtoBuf message - * @return ChecksumData object representing the proto - */ - public static ChecksumData getFromProtoBuf( - ContainerProtos.ChecksumData checksumDataProto) { - Preconditions.checkNotNull(checksumDataProto); - - ChecksumData checksumData = new ChecksumData( - checksumDataProto.getType(), checksumDataProto.getBytesPerChecksum()); - - if (checksumDataProto.getChecksumsCount() != 0) { - checksumData.setChecksums(checksumDataProto.getChecksumsList()); - } - - return checksumData; - } - - /** - * Verify that this ChecksumData from startIndex to endIndex matches with the - * provided ChecksumData. - * The checksum at startIndex of this ChecksumData will be matched with the - * checksum at index 0 of the provided ChecksumData, and checksum at - * (startIndex + 1) of this ChecksumData with checksum at index 1 of - * provided ChecksumData and so on. 
- * @param that the ChecksumData to match with - * @param startIndex index of the first checksum from this ChecksumData - * which will be used to compare checksums - * @return true if checksums match - * @throws OzoneChecksumException - */ - public boolean verifyChecksumDataMatches(ChecksumData that, int startIndex) - throws OzoneChecksumException { - - // pre checks - if (this.checksums.size() == 0) { - throw new OzoneChecksumException("Original checksumData has no " + - "checksums"); - } - - if (that.checksums.size() == 0) { - throw new OzoneChecksumException("Computed checksumData has no " + - "checksums"); - } - - int numChecksums = that.checksums.size(); - - try { - // Verify that checksum matches at each index - for (int index = 0; index < numChecksums; index++) { - if (!matchChecksumAtIndex(this.checksums.get(startIndex + index), - that.checksums.get(index))) { - // checksum mismatch. throw exception. - throw new OzoneChecksumException(index); - } - } - } catch (ArrayIndexOutOfBoundsException e) { - throw new OzoneChecksumException("Computed checksum has " - + numChecksums + " number of checksums. Original checksum has " + - (this.checksums.size() - startIndex) + " number of checksums " + - "starting from index " + startIndex); - } - return true; - } - - private static boolean matchChecksumAtIndex( - ByteString expectedChecksumAtIndex, ByteString computedChecksumAtIndex) { - return expectedChecksumAtIndex.equals(computedChecksumAtIndex); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof ChecksumData)) { - return false; - } - - ChecksumData that = (ChecksumData) obj; - - if (!this.type.equals(that.getChecksumType())) { - return false; - } - if (this.bytesPerChecksum != that.getBytesPerChecksum()) { - return false; - } - if (this.checksums.size() != that.checksums.size()) { - return false; - } - - // Match checksum at each index - for (int index = 0; index < this.checksums.size(); index++) { - if (!matchChecksumAtIndex(this.checksums.get(index), - that.checksums.get(index))) { - return false; - } - } - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder hc = new HashCodeBuilder(); - hc.append(type); - hc.append(bytesPerChecksum); - hc.append(checksums.toArray()); - return hc.toHashCode(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java deleted file mode 100644 index 892b6951534..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .DeleteScmBlockResult; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos - .DeleteScmBlockResult.Result; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Result to delete a group of blocks. - */ -public class DeleteBlockGroupResult { - private String objectKey; - private List blockResultList; - public DeleteBlockGroupResult(String objectKey, - List blockResultList) { - this.objectKey = objectKey; - this.blockResultList = blockResultList; - } - - public String getObjectKey() { - return objectKey; - } - - public List getBlockResultList() { - return blockResultList; - } - - public List getBlockResultProtoList() { - List resultProtoList = - new ArrayList<>(blockResultList.size()); - for (DeleteBlockResult result : blockResultList) { - DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder() - .setBlockID(result.getBlockID().getProtobuf()) - .setResult(result.getResult()).build(); - resultProtoList.add(proto); - } - return resultProtoList; - } - - public static List convertBlockResultProto( - List results) { - List protoResults = new ArrayList<>(results.size()); - for (DeleteScmBlockResult result : results) { - protoResults.add(new DeleteBlockResult(BlockID.getFromProtobuf( - result.getBlockID()), result.getResult())); - } - return protoResults; - } - - /** - * Only if all blocks are successfully deleted, this group is considered - * to be successfully executed. - * - * @return true if all blocks are successfully deleted, false otherwise. - */ - public boolean isSuccess() { - for (DeleteBlockResult result : blockResultList) { - if (result.getResult() != Result.success) { - return false; - } - } - return true; - } - - /** - * @return A list of deletion failed block IDs. - */ - public List getFailedBlocks() { - List failedBlocks = blockResultList.stream() - .filter(result -> result.getResult() != Result.success) - .map(DeleteBlockResult::getBlockID).collect(Collectors.toList()); - return failedBlocks; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java deleted file mode 100644 index 518b5194781..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
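A short sketch of how a caller is expected to consume the DeleteBlockGroupResult above. The surrounding class is illustrative only, and the result would normally come back from an SCM block-deletion call.

import java.util.List;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;

/** Illustrative only: per-key handling of a block deletion result. */
public final class DeleteResultSketch {
  public static void handle(DeleteBlockGroupResult result) {
    if (result.isSuccess()) {
      // Every block of this key was deleted; the key itself can be purged.
      return;
    }
    // Otherwise, retry (or log) only the blocks that failed.
    List<BlockID> failedBlocks = result.getFailedBlocks();
    System.err.println("Blocks not deleted for key " + result.getObjectKey()
        + ": " + failedBlocks);
  }
}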
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.io.File; -import java.io.IOException; - -/** - * The exception is thrown when file system state is inconsistent - * and is not recoverable. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class InconsistentStorageStateException extends IOException { - private static final long serialVersionUID = 1L; - - public InconsistentStorageStateException(String descr) { - super(descr); - } - - public InconsistentStorageStateException(File dir, String descr) { - super("Directory " + getFilePath(dir) + " is in an inconsistent state: " - + descr); - } - - private static String getFilePath(File dir) { - try { - return dir.getCanonicalPath(); - } catch (IOException e) { - } - return dir.getPath(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java deleted file mode 100644 index 20e40af09f3..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/OzoneChecksumException.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -/** Thrown for checksum errors. */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class OzoneChecksumException extends IOException { - - /** - * OzoneChecksumException to throw when checksum verfication fails. - * @param index checksum list index at which checksum match failed - */ - public OzoneChecksumException(int index) { - super(String.format("Checksum mismatch at index %d", index)); - } - - /** - * OzoneChecksumException to throw when unrecognized checksumType is given. 
- * @param unrecognizedChecksumType - */ - public OzoneChecksumException( - ContainerProtos.ChecksumType unrecognizedChecksumType) { - super(String.format("Unrecognized ChecksumType: %s", - unrecognizedChecksumType)); - } - - /** - * OzoneChecksumException to wrap around NoSuchAlgorithmException. - * @param algorithm name of algorithm - * @param ex original exception thrown - */ - public OzoneChecksumException( - String algorithm, NoSuchAlgorithmException ex) { - super(String.format("NoSuchAlgorithmException thrown while computing " + - "SHA-256 checksum using algorithm %s", algorithm), ex); - } - - /** - * OzoneChecksumException to throw with custom message. - */ - public OzoneChecksumException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java deleted file mode 100644 index 0d1f6307501..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -/** - * Similar to {@link org.apache.hadoop.util.PureJavaCrc32} - * except that this class implement {@link ChecksumByteBuffer}. - */ -final class PureJavaCrc32ByteBuffer extends ChecksumByteBuffer.CrcIntTable { - @Override - int[] getTable() { - return T; - } - - /** - * CRC-32 lookup table generated by the polynomial 0xEDB88320. - * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
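The 2048-entry table that follows is the slicing-by-8 expansion of the reflected CRC-32 polynomial 0xEDB88320: eight sub-tables of 256 entries, matching the size assertion in ChecksumByteBuffer.CrcIntTable. As a sketch of where such constants come from, one way to regenerate a table of the same shape is shown below; this generator is illustrative and not part of this patch.

/** Illustrative only: regenerates an 8 x 256 slicing-by-8 CRC-32 table. */
public final class Crc32TableGenerator {
  public static int[] generate() {
    final int poly = 0xEDB88320;          // reflected CRC-32 polynomial
    int[] table = new int[8 * 256];

    // Sub-table 0: plain bit-by-bit CRC of each possible byte value.
    for (int i = 0; i < 256; i++) {
      int crc = i;
      for (int bit = 0; bit < 8; bit++) {
        crc = (crc >>> 1) ^ ((crc & 1) != 0 ? poly : 0);
      }
      table[i] = crc;
    }

    // Sub-tables 1..7: each entry extends the previous sub-table by one byte,
    // which is what lets the update() loop consume 8 input bytes at a time.
    for (int i = 0; i < 256; i++) {
      for (int slice = 1; slice < 8; slice++) {
        int prev = table[(slice - 1) * 256 + i];
        table[slice * 256 + i] = (prev >>> 8) ^ table[prev & 0xFF];
      }
    }
    return table;
  }
}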
- */ - private static final int[] T = { - /* T8_0 */ - 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, - 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, - 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, - 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, - 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, - 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, - 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, - 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, - 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, - 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, - 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, - 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, - 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, - 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, - 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, - 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, - 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, - 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, - 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, - 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, - 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, - 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, - 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, - 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, - 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, - 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, - 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, - 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, - 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, - 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, - 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, - 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, - 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, - 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, - 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, - 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, - 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, - 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, - 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, - 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, - 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, - 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, - 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, - 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, - 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, - 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, - 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, - 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, - 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, - 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, - 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, - 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, - 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, - 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, - 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, - 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, - 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, - 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, - 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, - 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, - 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, - 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, - 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, - 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D, - /* T8_1 */ - 0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, - 0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7, - 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, - 0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF, - 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, - 0x2EAED755, 0x37B5E614, 0x1C98B5D7, 
0x05838496, - 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A, - 0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, - 0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761, - 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, - 0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69, - 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, - 0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530, - 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034, - 0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, - 0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C, - 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, - 0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2, - 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, - 0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA, - 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97, - 0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, - 0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F, - 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, - 0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864, - 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, - 0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C, - 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768, - 0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, - 0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31, - 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, - 0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539, - 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, - 0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, - 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180, - 0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, - 0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9, - 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, - 0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1, - 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, - 0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, - 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E, - 0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, - 0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026, - 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, - 0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F, - 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, - 0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277, - 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D, - 0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, - 0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85, - 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, - 0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC, - 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, - 0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, - 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0, - 0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, - 0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B, - 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, - 0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23, - 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, - 0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A, - 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876, - 0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72, - /* T8_2 */ - 0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, - 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685, - 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, - 0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D, - 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, - 0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5, - 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91, - 0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, - 0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9, - 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, - 0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901, - 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, - 0x246BE590, 0x25A98FA7, 
0x27EF31FE, 0x262D5BC9, - 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315, - 0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, - 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD, - 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, - 0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45, - 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, - 0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, - 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9, - 0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, - 0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151, - 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, - 0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579, - 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, - 0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1, - 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D, - 0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, - 0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5, - 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, - 0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D, - 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, - 0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05, - 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461, - 0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, - 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9, - 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, - 0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711, - 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, - 0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339, - 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5, - 0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, - 0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D, - 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, - 0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895, - 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, - 0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D, - 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819, - 0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, - 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1, - 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, - 0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69, - 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, - 0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1, - 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D, - 0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, - 0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625, - 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, - 0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D, - 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, - 0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555, - 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31, - 0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED, - /* T8_3 */ - 0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, - 0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9, - 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, - 0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056, - 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, - 0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26, - 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E, - 0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, - 0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0, - 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, - 0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F, - 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, - 0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F, - 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018, - 0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, - 0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7, - 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, - 0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084, - 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, - 0xD1C2E785, 
0x697E80E0, 0x7BCB2F0E, 0xC377486B, - 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C, - 0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, - 0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3, - 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, - 0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED, - 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, - 0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002, - 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755, - 0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, - 0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825, - 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, - 0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA, - 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, - 0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82, - 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A, - 0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, - 0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A, - 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, - 0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5, - 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, - 0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, - 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC, - 0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, - 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953, - 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, - 0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623, - 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, - 0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC, - 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8, - 0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, - 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907, - 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, - 0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677, - 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, - 0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98, - 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF, - 0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, - 0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981, - 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, - 0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E, - 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, - 0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E, - 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6, - 0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1, - /* T8_4 */ - 0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, - 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10, - 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, - 0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1, - 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, - 0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92, - 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693, - 0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, - 0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4, - 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, - 0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15, - 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, - 0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256, - 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496, - 0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, - 0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57, - 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, - 0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459, - 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, - 0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, - 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B, - 0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, - 0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA, - 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, - 0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D, - 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, - 
0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C, - 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C, - 0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, - 0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF, - 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, - 0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E, - 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, - 0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82, - 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183, - 0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, - 0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0, - 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, - 0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601, - 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, - 0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546, - 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386, - 0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, - 0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847, - 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, - 0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404, - 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, - 0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5, - 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B, - 0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, - 0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA, - 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, - 0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589, - 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, - 0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48, - 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888, - 0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, - 0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF, - 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, - 0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E, - 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, - 0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, - 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C, - 0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C, - /* T8_5 */ - 0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, - 0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8, - 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, - 0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5, - 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, - 0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223, - 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258, - 0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, - 0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798, - 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, - 0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5, - 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, - 0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503, - 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715, - 0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, - 0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578, - 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, - 0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4, - 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, - 0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, - 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59, - 0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, - 0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834, - 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, - 0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4, - 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, - 0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, - 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F, - 0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, - 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79, - 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, - 0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14, - 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 
0x98246676, - 0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460, - 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B, - 0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, - 0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED, - 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, - 0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680, - 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, - 0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340, - 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156, - 0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, - 0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B, - 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, - 0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD, - 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, - 0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0, - 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A, - 0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, - 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77, - 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, - 0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81, - 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, - 0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC, - 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA, - 0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, - 0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A, - 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, - 0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957, - 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, - 0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1, - 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA, - 0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC, - /* T8_6 */ - 0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, - 0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E, - 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, - 0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9, - 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, - 0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240, - 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834, - 0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, - 0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301, - 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, - 0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66, - 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, - 0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, - 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC, - 0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, - 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB, - 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, - 0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7, - 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, - 0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590, - 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A, - 0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, - 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D, - 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, - 0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678, - 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, - 0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F, - 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C, - 0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, - 0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5, - 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, - 0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2, - 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, - 0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, - 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08, - 0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, - 0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1, - 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, - 0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6, - 0x28681C48, 0x8E1F17FC, 
0xBFF70D61, 0x198006D5, - 0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3, - 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0, - 0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, - 0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387, - 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, - 0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E, - 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, - 0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49, - 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516, - 0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, - 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71, - 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, - 0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8, - 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, - 0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, - 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC, - 0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, - 0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899, - 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, - 0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE, - 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, - 0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457, - 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23, - 0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30, - /* T8_7 */ - 0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, - 0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919, - 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, - 0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC, - 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, - 0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832, - 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D, - 0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, - 0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5, - 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, - 0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00, - 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, - 0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E, - 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64, - 0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, - 0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1, - 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, - 0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4, - 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, - 0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041, - 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425, - 0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, - 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90, - 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, - 0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758, - 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, - 0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED, - 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217, - 0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, - 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889, - 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, - 0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C, - 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, - 0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, - 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C, - 0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, - 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312, - 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, - 0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7, - 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, - 0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F, - 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95, - 0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, - 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520, - 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, - 0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE, - 0x0513CD12, 
0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, - 0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B, - 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4, - 0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, - 0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61, - 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, - 0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF, - 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, - 0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A, - 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0, - 0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, - 0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78, - 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, - 0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD, - 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, - 0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, - 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C, - 0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6 - }; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java deleted file mode 100644 index 1c443575f81..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java +++ /dev/null @@ -1,559 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Some portions of this file Copyright (c) 2004-2006 Intel Corportation - * and licensed under the BSD license. - */ -package org.apache.hadoop.ozone.common; - -/** - * Similar to {@link org.apache.hadoop.util.PureJavaCrc32C} - * except that this class implement {@link ChecksumByteBuffer}. - */ -final class PureJavaCrc32CByteBuffer extends ChecksumByteBuffer.CrcIntTable { - @Override - int[] getTable() { - return T; - } - - /** - * CRC-32C lookup table generated by the polynomial 0x82F63B78. - * See also org.apache.hadoop.util.TestPureJavaCrc32.Table. 
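The deleted PureJavaCrc32CByteBuffer class above computes CRC-32C (the Castagnoli polynomial 0x82F63B78) entirely in Java by table lookup; the tables T8_0 through T8_7 that follow feed a sliced-by-eight update loop that folds in eight input bytes per iteration. As a rough sketch of the same table-driven idea in its simplest single-table form (illustrative only, not the deleted implementation; the class name Crc32cSketch is invented, and java.util.zip.CRC32C from JDK 9+ is used purely as a cross-check):

import java.util.zip.CRC32C;

public final class Crc32cSketch {
  // Reflected CRC-32C (Castagnoli) polynomial, the same 0x82F63B78 constant
  // that appears in the lookup tables below.
  private static final int POLY = 0x82F63B78;
  private static final int[] TABLE = new int[256];

  static {
    // Byte-at-a-time lookup table; the deleted class precomputes eight such
    // tables so that eight bytes can be processed per loop iteration.
    for (int n = 0; n < 256; n++) {
      int c = n;
      for (int k = 0; k < 8; k++) {
        c = (c & 1) != 0 ? (c >>> 1) ^ POLY : c >>> 1;
      }
      TABLE[n] = c;
    }
  }

  static int crc32c(byte[] data) {
    int crc = 0xFFFFFFFF;                                 // initial value
    for (byte b : data) {
      crc = (crc >>> 8) ^ TABLE[(crc ^ b) & 0xFF];
    }
    return ~crc;                                          // final XOR
  }

  public static void main(String[] args) {
    byte[] data = "123456789".getBytes();
    CRC32C jdk = new CRC32C();
    jdk.update(data);
    System.out.printf("table-driven: %08x%n", crc32c(data));   // e3069283
    System.out.printf("jdk CRC32C  : %08x%n", jdk.getValue()); // e3069283
  }
}

The sliced-by-eight variant spends eight 256-entry tables of constant data to cut the number of loop iterations, which is why the deleted file is dominated by these arrays.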
- */ - private static final int[] T = { - /* T8_0 */ - 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, - 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, - 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, - 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, - 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, - 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, - 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, - 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, - 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, - 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, - 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, - 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, - 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, - 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, - 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, - 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, - 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, - 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, - 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, - 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, - 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, - 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, - 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, - 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, - 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, - 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, - 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, - 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, - 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, - 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, - 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, - 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, - 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, - 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, - 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, - 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, - 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, - 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, - 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, - 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, - 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, - 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, - 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, - 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, - 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, - 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, - 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, - 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, - 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, - 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, - 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, - 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, - 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, - 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, - 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, - 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, - 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, - 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, - 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, - 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, - 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, - 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, - 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, - 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, - /* T8_1 */ - 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, - 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, - 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, - 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, - 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, - 0x714F905D, 0x62ED082A, 0x560AA0B3, 
0x45A838C4, - 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, - 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, - 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, - 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, - 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, - 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, - 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, - 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, - 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, - 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, - 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, - 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, - 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, - 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, - 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, - 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, - 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, - 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, - 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, - 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, - 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, - 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, - 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, - 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, - 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, - 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, - 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, - 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, - 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, - 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, - 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, - 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, - 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, - 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, - 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, - 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, - 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, - 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, - 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, - 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, - 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, - 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, - 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, - 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, - 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, - 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, - 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, - 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, - 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, - 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, - 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, - 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, - 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, - 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, - 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, - 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, - 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, - 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, - /* T8_2 */ - 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, - 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, - 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, - 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, - 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, - 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, - 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, - 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, - 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, - 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, - 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, - 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, - 0x91E6869E, 0x34A714E0, 
0xDE89D493, 0x7BC846ED, - 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, - 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, - 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, - 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, - 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, - 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, - 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, - 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, - 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, - 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, - 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, - 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, - 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, - 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, - 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, - 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, - 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, - 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, - 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, - 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, - 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, - 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, - 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, - 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, - 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, - 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, - 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, - 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, - 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, - 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, - 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, - 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, - 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, - 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, - 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, - 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, - 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, - 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, - 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, - 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, - 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, - 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, - 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, - 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, - 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, - 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, - 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, - 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, - 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, - 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, - 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, - /* T8_3 */ - 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, - 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, - 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, - 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, - 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, - 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, - 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, - 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, - 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, - 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, - 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, - 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, - 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, - 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, - 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, - 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, - 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, - 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, - 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, - 0x26C00DF2, 
0xFB85A74A, 0x99A72E73, 0x44E284CB, - 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, - 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, - 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, - 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, - 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, - 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, - 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, - 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, - 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, - 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, - 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, - 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, - 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, - 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, - 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, - 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, - 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, - 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, - 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, - 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, - 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, - 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, - 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, - 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, - 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, - 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, - 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, - 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, - 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, - 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, - 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, - 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, - 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, - 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, - 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, - 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, - 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, - 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, - 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, - 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, - 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, - 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, - 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, - 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, - /* T8_4 */ - 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, - 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, - 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, - 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, - 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, - 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, - 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, - 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, - 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, - 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, - 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, - 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, - 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, - 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, - 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, - 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, - 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, - 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, - 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, - 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, - 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, - 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, - 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, - 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, - 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, - 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, - 
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, - 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, - 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, - 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, - 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, - 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, - 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, - 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, - 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, - 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, - 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, - 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, - 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, - 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, - 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, - 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, - 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, - 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, - 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, - 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, - 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, - 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, - 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, - 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, - 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, - 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, - 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, - 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, - 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, - 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, - 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, - 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, - 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, - 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, - 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, - 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, - 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, - 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, - /* T8_5 */ - 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, - 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, - 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, - 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, - 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, - 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, - 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, - 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, - 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, - 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, - 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, - 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, - 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, - 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, - 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, - 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, - 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, - 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, - 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, - 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, - 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, - 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, - 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, - 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, - 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, - 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, - 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, - 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, - 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, - 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, - 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, - 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, - 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 
0x28B47C0C, - 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, - 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, - 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, - 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, - 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, - 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, - 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, - 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, - 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, - 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, - 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, - 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, - 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, - 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, - 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, - 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, - 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, - 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, - 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, - 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, - 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, - 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, - 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, - 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, - 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, - 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, - 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, - 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, - 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, - 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, - 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, - /* T8_6 */ - 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, - 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, - 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, - 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, - 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, - 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, - 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, - 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, - 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, - 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, - 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, - 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, - 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, - 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, - 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, - 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, - 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, - 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, - 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, - 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, - 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, - 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, - 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, - 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, - 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, - 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, - 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, - 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, - 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, - 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, - 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, - 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, - 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, - 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, - 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, - 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, - 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, - 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, - 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, - 0x82F8CCD0, 0xEAFBE018, 
0x52FE9540, 0x3AFDB988, - 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, - 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, - 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, - 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, - 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, - 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, - 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, - 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, - 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, - 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, - 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, - 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, - 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, - 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, - 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, - 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, - 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, - 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, - 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, - 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, - 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, - 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, - 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, - 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, - /* T8_7 */ - 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, - 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, - 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, - 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, - 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, - 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, - 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, - 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, - 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, - 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, - 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, - 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, - 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, - 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, - 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, - 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, - 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, - 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, - 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, - 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, - 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, - 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, - 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, - 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, - 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, - 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, - 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, - 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, - 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, - 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, - 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, - 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, - 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, - 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, - 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, - 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, - 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, - 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, - 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, - 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, - 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, - 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, - 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, - 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, - 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, - 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, - 0xFF4CFE93, 
0xB67083B4, 0x6D3404DD, 0x240879FA, - 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, - 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, - 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, - 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, - 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, - 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, - 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, - 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, - 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, - 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, - 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, - 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, - 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, - 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, - 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, - 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, - 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 - }; -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java deleted file mode 100644 index 7992dad78db..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Properties; - -/** - * Storage information file. This Class defines the methods to check - * the consistency of the storage dir and the version file. - *

- * Local storage information is stored in a separate file VERSION. - * It contains type of the node, - * the storage layout version, the SCM id, and - * the OM/SCM state creation time. - * - */ -@InterfaceAudience.Private -public abstract class Storage { - private static final Logger LOG = LoggerFactory.getLogger(Storage.class); - - public static final String STORAGE_DIR_CURRENT = "current"; - protected static final String STORAGE_FILE_VERSION = "VERSION"; - public static final String CONTAINER_DIR = "containerDir"; - - private final NodeType nodeType; - private final File root; - private final File storageDir; - - private StorageState state; - private StorageInfo storageInfo; - - - /** - * Determines the state of the Version file. - */ - public enum StorageState { - NON_EXISTENT, NOT_INITIALIZED, INITIALIZED - } - - public Storage(NodeType type, File root, String sdName) - throws IOException { - this.nodeType = type; - this.root = root; - this.storageDir = new File(root, sdName); - this.state = getStorageState(); - if (state == StorageState.INITIALIZED) { - this.storageInfo = new StorageInfo(type, getVersionFile()); - } else { - this.storageInfo = new StorageInfo( - nodeType, StorageInfo.newClusterID(), Time.now()); - setNodeProperties(); - } - } - - /** - * Gets the path of the Storage dir. - * @return Storage dir path - */ - public String getStorageDir() { - return storageDir.getAbsoluteFile().toString(); - } - - /** - * Gets the state of the version file. - * @return the state of the Version file - */ - public StorageState getState() { - return state; - } - - public NodeType getNodeType() { - return storageInfo.getNodeType(); - } - - public String getClusterID() { - return storageInfo.getClusterID(); - } - - public long getCreationTime() { - return storageInfo.getCreationTime(); - } - - public void setClusterId(String clusterId) throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException( - "Storage directory " + storageDir + " already initialized."); - } else { - storageInfo.setClusterId(clusterId); - } - } - - /** - * Retrieves the storageInfo instance to read/write the common - * version file properties. - * @return the instance of the storageInfo class - */ - protected StorageInfo getStorageInfo() { - return storageInfo; - } - - abstract protected Properties getNodeProperties(); - - /** - * Sets the Node properties specific to OM/SCM. - */ - private void setNodeProperties() { - Properties nodeProperties = getNodeProperties(); - if (nodeProperties != null) { - for (String key : nodeProperties.stringPropertyNames()) { - storageInfo.setProperty(key, nodeProperties.getProperty(key)); - } - } - } - - /** - * Directory {@code current} contains latest files defining - * the file system meta-data. - * - * @return the directory path - */ - public File getCurrentDir() { - return new File(storageDir, STORAGE_DIR_CURRENT); - } - - /** - * File {@code VERSION} contains the following fields: - *

 - *   1. node type
 - *   2. OM/SCM state creation time
 - *   3. other fields specific for this node type
- * The version file is always written last during storage directory updates. - * The existence of the version file indicates that all other files have - * been successfully written in the storage directory, the storage is valid - * and does not need to be recovered. - * - * @return the version file path - */ - private File getVersionFile() { - return new File(getCurrentDir(), STORAGE_FILE_VERSION); - } - - - /** - * Check to see if current/ directory is empty. This method is used - * before determining to format the directory. - * @throws IOException if unable to list files under the directory. - */ - private void checkEmptyCurrent() throws IOException { - File currentDir = getCurrentDir(); - if (!currentDir.exists()) { - // if current/ does not exist, it's safe to format it. - return; - } - try (DirectoryStream dirStream = Files - .newDirectoryStream(currentDir.toPath())) { - if (dirStream.iterator().hasNext()) { - throw new InconsistentStorageStateException(getCurrentDir(), - "Can't initialize the storage directory because the current " - + "it is not empty."); - } - } - } - - /** - * Check consistency of the storage directory. - * - * @return state {@link StorageState} of the storage directory - * @throws IOException - */ - private StorageState getStorageState() throws IOException { - assert root != null : "root is null"; - String rootPath = root.getCanonicalPath(); - try { // check that storage exists - if (!root.exists()) { - // storage directory does not exist - LOG.warn("Storage directory " + rootPath + " does not exist"); - return StorageState.NON_EXISTENT; - } - // or is inaccessible - if (!root.isDirectory()) { - LOG.warn(rootPath + "is not a directory"); - return StorageState.NON_EXISTENT; - } - if (!FileUtil.canWrite(root)) { - LOG.warn("Cannot access storage directory " + rootPath); - return StorageState.NON_EXISTENT; - } - } catch (SecurityException ex) { - LOG.warn("Cannot access storage directory " + rootPath, ex); - return StorageState.NON_EXISTENT; - } - - // check whether current directory is valid - File versionFile = getVersionFile(); - boolean hasCurrent = versionFile.exists(); - - if (hasCurrent) { - return StorageState.INITIALIZED; - } else { - checkEmptyCurrent(); - return StorageState.NOT_INITIALIZED; - } - } - - /** - * Creates the Version file if not present, - * otherwise returns with IOException. - * @throws IOException - */ - public void initialize() throws IOException { - if (state == StorageState.INITIALIZED) { - throw new IOException("Storage directory already initialized."); - } - if (!getCurrentDir().mkdirs()) { - throw new IOException("Cannot create directory " + getCurrentDir()); - } - storageInfo.writeTo(getVersionFile()); - } - - /** - * Persists current StorageInfo to file system.. - * @throws IOException - */ - public void persistCurrentState() throws IOException { - if (!getCurrentDir().exists()) { - throw new IOException("Metadata dir doesn't exist, dir: " + - getCurrentDir()); - } - storageInfo.writeTo(getVersionFile()); - } - -} - diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java deleted file mode 100644 index ad26f77e3ac..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
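Storage above is abstract; the OM and SCM sides each supply a concrete subclass that contributes node-specific properties through getNodeProperties(). A minimal hypothetical subclass might look like the sketch below (DemoStorage, the "demo" directory name, the demoProperty key and the /tmp path are all invented for illustration):

import java.io.File;
import java.io.IOException;
import java.util.Properties;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.common.Storage;

public class DemoStorage extends Storage {

  public DemoStorage(File root) throws IOException {
    // "demo" becomes the storage sub-directory created under root.
    super(NodeType.SCM, root, "demo");
  }

  @Override
  protected Properties getNodeProperties() {
    // Node-specific key/value pairs merged into the VERSION file on first init.
    Properties props = new Properties();
    props.setProperty("demoProperty", "demoValue");
    return props;
  }

  public static void main(String[] args) throws IOException {
    DemoStorage storage = new DemoStorage(new File("/tmp/demo-storage"));
    if (storage.getState() != Storage.StorageState.INITIALIZED) {
      storage.initialize();   // creates current/ and writes the VERSION file
    }
    System.out.println("clusterID = " + storage.getClusterID());
  }
}

On a second run getState() returns INITIALIZED and the existing VERSION file is read back instead of being rewritten.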
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Properties; -import java.util.UUID; - -/** - * Common class for storage information. This class defines the common - * properties and functions to set them , write them into the version file - * and read them from the version file. - * - */ -@InterfaceAudience.Private -public class StorageInfo { - - private Properties properties = new Properties(); - - /** - * Property to hold node type. - */ - private static final String NODE_TYPE = "nodeType"; - /** - * Property to hold ID of the cluster. - */ - private static final String CLUSTER_ID = "clusterID"; - /** - * Property to hold creation time of the storage. - */ - private static final String CREATION_TIME = "cTime"; - - /** - * Constructs StorageInfo instance. - * @param type - * Type of the node using the storage - * @param cid - * Cluster ID - * @param cT - * Cluster creation Time - - * @throws IOException - on Error. 
- */ - public StorageInfo(NodeType type, String cid, long cT) - throws IOException { - Preconditions.checkNotNull(type); - Preconditions.checkNotNull(cid); - properties.setProperty(NODE_TYPE, type.name()); - properties.setProperty(CLUSTER_ID, cid); - properties.setProperty(CREATION_TIME, String.valueOf(cT)); - } - - public StorageInfo(NodeType type, File propertiesFile) - throws IOException { - this.properties = readFrom(propertiesFile); - verifyNodeType(type); - verifyClusterId(); - verifyCreationTime(); - } - - public NodeType getNodeType() { - return NodeType.valueOf(properties.getProperty(NODE_TYPE)); - } - - public String getClusterID() { - return properties.getProperty(CLUSTER_ID); - } - - public Long getCreationTime() { - String creationTime = properties.getProperty(CREATION_TIME); - if(creationTime != null) { - return Long.parseLong(creationTime); - } - return null; - } - - public String getProperty(String key) { - return properties.getProperty(key); - } - - public void setProperty(String key, String value) { - properties.setProperty(key, value); - } - - public void setClusterId(String clusterId) { - properties.setProperty(CLUSTER_ID, clusterId); - } - - private void verifyNodeType(NodeType type) - throws InconsistentStorageStateException { - NodeType nodeType = getNodeType(); - Preconditions.checkNotNull(nodeType); - if(type != nodeType) { - throw new InconsistentStorageStateException("Expected NodeType: " + type + - ", but found: " + nodeType); - } - } - - private void verifyClusterId() - throws InconsistentStorageStateException { - String clusterId = getClusterID(); - Preconditions.checkNotNull(clusterId); - if(clusterId.isEmpty()) { - throw new InconsistentStorageStateException("Cluster ID not found"); - } - } - - private void verifyCreationTime() { - Long creationTime = getCreationTime(); - Preconditions.checkNotNull(creationTime); - } - - - public void writeTo(File to) - throws IOException { - try (RandomAccessFile file = new RandomAccessFile(to, "rws"); - FileOutputStream out = new FileOutputStream(file.getFD())) { - file.seek(0); - /* - * If server is interrupted before this line, - * the version file will remain unchanged. - */ - properties.store(out, null); - /* - * Now the new fields are flushed to the head of the file, but file - * length can still be larger then required and therefore the file can - * contain whole or corrupted fields from its old contents in the end. - * If server is interrupted here and restarted later these extra fields - * either should not effect server behavior or should be handled - * by the server correctly. - */ - file.setLength(out.getChannel().position()); - } - } - - private Properties readFrom(File from) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(from, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - file.seek(0); - props.load(in); - return props; - } - } - - /** - * Generate new clusterID. - * - * clusterID is a persistent attribute of the cluster. - * It is generated when the cluster is created and remains the same - * during the life cycle of the cluster. When a new SCM node is initialized, - * if this is a new cluster, a new clusterID is generated and stored. 
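The writeTo/readFrom pair above round-trips the VERSION file through java.util.Properties and then truncates the file to the freshly written length, so stale trailing bytes from a longer previous version do not linger after a rewrite. A small usage sketch of that round trip (the path and the extra layoutVersion key are illustrative only, not taken from the deleted code):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.common.StorageInfo;
import org.apache.hadoop.util.Time;

public class StorageInfoRoundTrip {
  public static void main(String[] args) throws IOException {
    File versionFile = new File("/tmp/demo-version/VERSION");
    versionFile.getParentFile().mkdirs();

    // Write: node type, a freshly generated cluster ID and the creation time
    // are stored as properties in the VERSION file.
    StorageInfo written =
        new StorageInfo(NodeType.OM, StorageInfo.newClusterID(), Time.now());
    written.setProperty("layoutVersion", "1");   // illustrative extra property
    written.writeTo(versionFile);

    // Read back: this constructor re-validates node type, cluster ID and
    // creation time from the same file.
    StorageInfo read = new StorageInfo(NodeType.OM, versionFile);
    System.out.println(read.getClusterID() + " created at " + read.getCreationTime());
  }
}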
- * @return new clusterID - */ - public static String newClusterID() { - return "CID-" + UUID.randomUUID().toString(); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java deleted file mode 100644 index 6517e5897ea..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java deleted file mode 100644 index 9aeff248381..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -/** - * Class wraps invalid state transition exception. 
- */ -public class InvalidStateTransitionException extends Exception { - private Enum currentState; - private Enum event; - - public InvalidStateTransitionException(Enum currentState, Enum event) { - super("Invalid event: " + event + " at " + currentState + " state."); - this.currentState = currentState; - this.event = event; - } - - public Enum getCurrentState() { - return currentState; - } - - public Enum getEvent() { - return event; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java deleted file mode 100644 index bf8cbd596ed..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.common.statemachine; - -import com.google.common.base.Supplier; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; - -import java.util.HashMap; -import java.util.Map; -import java.util.Set; - -/** - * Template class that wraps simple event driven state machine. - * @param states allowed - * @param events allowed - */ -public class StateMachine, EVENT extends Enum> { - private STATE initialState; - private Set finalStates; - - private final LoadingCache> transitions = - CacheBuilder.newBuilder().build( - CacheLoader.from((Supplier>) () -> new HashMap())); - - public StateMachine(STATE initState, Set finalStates) { - this.initialState = initState; - this.finalStates = finalStates; - } - - public STATE getInitialState() { - return initialState; - } - - public Set getFinalStates() { - return finalStates; - } - - public STATE getNextState(STATE from, EVENT e) - throws InvalidStateTransitionException { - STATE target = transitions.getUnchecked(e).get(from); - if (target == null) { - throw new InvalidStateTransitionException(from, e); - } - return target; - } - - public void addTransition(STATE from, STATE to, EVENT e) { - transitions.getUnchecked(e).put(from, to); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java deleted file mode 100644 index 045409e3ed2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
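The StateMachine class above is a small generic transition table: allowed transitions are registered with addTransition(), and getNextState() throws InvalidStateTransitionException for any state/event pair that was never registered. A hypothetical usage sketch (the Door states and events are invented for the example and are not taken from Ozone):

import java.util.EnumSet;

import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

public class DoorStateMachineDemo {
  enum State { CLOSED, OPEN, LOCKED }
  enum Event { OPEN_DOOR, CLOSE_DOOR, LOCK, UNLOCK }

  public static void main(String[] args) throws InvalidStateTransitionException {
    StateMachine<State, Event> machine =
        new StateMachine<>(State.CLOSED, EnumSet.of(State.LOCKED));

    // Register the allowed transitions; anything else is rejected.
    machine.addTransition(State.CLOSED, State.OPEN, Event.OPEN_DOOR);
    machine.addTransition(State.OPEN, State.CLOSED, Event.CLOSE_DOOR);
    machine.addTransition(State.CLOSED, State.LOCKED, Event.LOCK);
    machine.addTransition(State.LOCKED, State.CLOSED, Event.UNLOCK);

    State next = machine.getNextState(machine.getInitialState(), Event.OPEN_DOOR);
    System.out.println("CLOSED + OPEN_DOOR -> " + next);   // OPEN

    try {
      machine.getNextState(State.OPEN, Event.LOCK);        // never registered
    } catch (InvalidStateTransitionException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}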
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common.statemachine; -/** - state machine template class for ozone. - **/ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java deleted file mode 100644 index e0cac8beded..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; -import com.google.common.base.Preconditions; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.ArrayList; - -/** - * Helper class to convert Protobuf to Java classes. - */ -public class BlockData { - private final BlockID blockID; - private final Map metadata; - - /** - * Represent a list of chunks. - * In order to reduce memory usage, chunkList is declared as an - * {@link Object}. - * When #elements == 0, chunkList is null. - * When #elements == 1, chunkList refers to the only element. - * When #elements > 1, chunkList refers to the list. - * - * Please note : when we are working with blocks, we don't care what they - * point to. So we We don't read chunkinfo nor validate them. It is - * responsibility of higher layer like ozone. We just read and write data - * from network. - */ - private Object chunkList; - - /** - * total size of the key. - */ - private long size; - - /** - * Constructs a BlockData Object. 
- * - * @param blockID - */ - public BlockData(BlockID blockID) { - this.blockID = blockID; - this.metadata = new TreeMap<>(); - this.size = 0; - } - - public long getBlockCommitSequenceId() { - return blockID.getBlockCommitSequenceId(); - } - - public void setBlockCommitSequenceId(long blockCommitSequenceId) { - this.blockID.setBlockCommitSequenceId(blockCommitSequenceId); - } - - /** - * Returns a blockData object from the protobuf data. - * - * @param data - Protobuf data. - * @return - BlockData - * @throws IOException - */ - public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws - IOException { - BlockData blockData = new BlockData( - BlockID.getFromProtobuf(data.getBlockID())); - for (int x = 0; x < data.getMetadataCount(); x++) { - blockData.addMetadata(data.getMetadata(x).getKey(), - data.getMetadata(x).getValue()); - } - blockData.setChunks(data.getChunksList()); - if (data.hasSize()) { - Preconditions.checkArgument(data.getSize() == blockData.getSize()); - } - return blockData; - } - - /** - * Returns a Protobuf message from BlockData. - * @return Proto Buf Message. - */ - public ContainerProtos.BlockData getProtoBufMessage() { - ContainerProtos.BlockData.Builder builder = - ContainerProtos.BlockData.newBuilder(); - builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf()); - for (Map.Entry entry : metadata.entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - builder.addAllChunks(getChunks()); - builder.setSize(size); - return builder.build(); - } - - /** - * Adds metadata. - * - * @param key - Key - * @param value - Value - * @throws IOException - */ - public synchronized void addMetadata(String key, String value) throws - IOException { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - - public synchronized Map getMetadata() { - return Collections.unmodifiableMap(this.metadata); - } - - /** - * Returns value of a key. - */ - public synchronized String getValue(String key) { - return metadata.get(key); - } - - /** - * Deletes a metadata entry from the map. - * - * @param key - Key - */ - public synchronized void deleteKey(String key) { - metadata.remove(key); - } - - @SuppressWarnings("unchecked") - private List castChunkList() { - return (List)chunkList; - } - - /** - * Returns chunks list. - * - * @return list of chunkinfo. - */ - public List getChunks() { - return chunkList == null? Collections.emptyList() - : chunkList instanceof ContainerProtos.ChunkInfo? - Collections.singletonList((ContainerProtos.ChunkInfo)chunkList) - : Collections.unmodifiableList(castChunkList()); - } - - /** - * Adds chinkInfo to the list. - */ - public void addChunk(ContainerProtos.ChunkInfo chunkInfo) { - if (chunkList == null) { - chunkList = chunkInfo; - } else { - final List list; - if (chunkList instanceof ContainerProtos.ChunkInfo) { - list = new ArrayList<>(2); - list.add((ContainerProtos.ChunkInfo)chunkList); - chunkList = list; - } else { - list = castChunkList(); - } - list.add(chunkInfo); - } - size += chunkInfo.getLen(); - } - - /** - * removes the chunk. 
- */ - public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) { - final boolean removed; - if (chunkList instanceof List) { - final List list = castChunkList(); - removed = list.remove(chunkInfo); - if (list.size() == 1) { - chunkList = list.get(0); - } - } else if (chunkInfo.equals(chunkList)) { - chunkList = null; - removed = true; - } else { - removed = false; - } - - if (removed) { - size -= chunkInfo.getLen(); - } - return removed; - } - - /** - * Returns container ID. - * - * @return long. - */ - public long getContainerID() { - return blockID.getContainerID(); - } - - /** - * Returns LocalID. - * @return long. - */ - public long getLocalID() { - return blockID.getLocalID(); - } - - /** - * Return Block ID. - * @return BlockID. - */ - public BlockID getBlockID() { - return blockID; - } - - /** - * Sets Chunk list. - * - * @param chunks - List of chunks. - */ - public void setChunks(List chunks) { - if (chunks == null) { - chunkList = null; - size = 0L; - } else { - final int n = chunks.size(); - chunkList = n == 0? null: n == 1? chunks.get(0): chunks; - size = chunks.parallelStream().mapToLong( - ContainerProtos.ChunkInfo::getLen).sum(); - } - } - - /** - * Get the total size of chunks allocated for the key. - * @return total size of the key. - */ - public long getSize() { - return size; - } - - @Override - public String toString() { - return new ToStringBuilder(this, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("blockId", blockID.toString()) - .append("size", this.size) - .toString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java deleted file mode 100644 index 1c73a316e5d..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; - -/** - * Java class that represents ChunkInfo ProtoBuf class. This helper class allows - * us to convert to and from protobuf to normal java. - */ -public class ChunkInfo { - private final String chunkName; - private final long offset; - private final long len; - private ChecksumData checksumData; - private final Map metadata; - - - /** - * Constructs a ChunkInfo. - * - * @param chunkName - File Name where chunk lives. 
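As a quick orientation for the BlockData helper above, a minimal sketch of building one and round-tripping it through its protobuf form; the container/local IDs, chunk name, and metadata key are made-up example values:

    import java.io.IOException;
    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.common.Checksum;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;

    public class BlockDataSketch {
      public static void main(String[] args) throws IOException {
        // Hypothetical container ID 1, local ID 100.
        BlockData blockData = new BlockData(new BlockID(1L, 100L));
        blockData.addMetadata("owner", "ozone"); // adding the same key twice throws IOException

        ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
            .setChunkName("block_100_chunk_0")
            .setOffset(0)
            .setLen(4096)
            .setChecksumData(Checksum.getNoChecksumDataProto())
            .build();
        blockData.addChunk(chunk); // getSize() now reports 4096

        // Round-trip through the protobuf representation.
        ContainerProtos.BlockData proto = blockData.getProtoBufMessage();
        BlockData copy = BlockData.getFromProtoBuf(proto);
      }
    }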
- * @param offset - offset where Chunk Starts. - * @param len - Length of the Chunk. - */ - public ChunkInfo(String chunkName, long offset, long len) { - this.chunkName = chunkName; - this.offset = offset; - this.len = len; - this.metadata = new TreeMap<>(); - } - - /** - * Adds metadata. - * - * @param key - Key Name. - * @param value - Value. - * @throws IOException - */ - public void addMetadata(String key, String value) throws IOException { - synchronized (this.metadata) { - if (this.metadata.containsKey(key)) { - throw new IOException("This key already exists. Key " + key); - } - metadata.put(key, value); - } - } - - /** - * Gets a Chunkinfo class from the protobuf definitions. - * - * @param info - Protobuf class - * @return ChunkInfo - * @throws IOException - */ - public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info) - throws IOException { - Preconditions.checkNotNull(info); - - ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(), - info.getLen()); - - for (int x = 0; x < info.getMetadataCount(); x++) { - chunkInfo.addMetadata(info.getMetadata(x).getKey(), - info.getMetadata(x).getValue()); - } - - chunkInfo.setChecksumData( - ChecksumData.getFromProtoBuf(info.getChecksumData())); - - return chunkInfo; - } - - /** - * Returns a ProtoBuf Message from ChunkInfo. - * - * @return Protocol Buffer Message - */ - public ContainerProtos.ChunkInfo getProtoBufMessage() { - ContainerProtos.ChunkInfo.Builder builder = ContainerProtos - .ChunkInfo.newBuilder(); - - builder.setChunkName(this.getChunkName()); - builder.setOffset(this.getOffset()); - builder.setLen(this.getLen()); - if (checksumData == null) { - // ChecksumData cannot be null while computing the protobufMessage. - // Set it to NONE type (equivalent to non checksum). - builder.setChecksumData(Checksum.getNoChecksumDataProto()); - } else { - builder.setChecksumData(this.checksumData.getProtoBufMessage()); - } - - for (Map.Entry entry : metadata.entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - - return builder.build(); - } - - /** - * Returns the chunkName. - * - * @return - String - */ - public String getChunkName() { - return chunkName; - } - - /** - * Gets the start offset of the given chunk in physical file. - * - * @return - long - */ - public long getOffset() { - return offset; - } - - /** - * Returns the length of the Chunk. - * - * @return long - */ - public long getLen() { - return len; - } - - /** - * Returns the checksumData of this chunk. - */ - public ChecksumData getChecksumData() { - return checksumData; - } - - /** - * Sets the checksums of this chunk. - */ - public void setChecksumData(ChecksumData cData) { - this.checksumData = cData; - } - - /** - * Returns Metadata associated with this Chunk. - * - * @return - Map of Key,values. 
- */ - public Map getMetadata() { - return metadata; - } - - @Override - public String toString() { - return "ChunkInfo{" + - "chunkName='" + chunkName + - ", offset=" + offset + - ", len=" + len + - '}'; - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java deleted file mode 100644 index 11d9028f190..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.ozone.audit.DNAction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; - -/** - * Utilities for converting protobuf classes to Java classes. 
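Similarly, a small sketch for the ChunkInfo wrapper above, converting between the Java helper and its protobuf message; the chunk name, offset, and length are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

    public class ChunkInfoSketch {
      public static void main(String[] args) throws IOException {
        ChunkInfo info = new ChunkInfo("block_100_chunk_0", 0L, 4096L);
        info.addMetadata("compression", "none"); // duplicate keys throw IOException

        // With no checksum set, getProtoBufMessage() falls back to the NONE checksum type.
        ContainerProtos.ChunkInfo proto = info.getProtoBufMessage();
        ChunkInfo copy = ChunkInfo.getFromProtoBuf(proto);
      }
    }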
- */ -public final class ContainerCommandRequestPBHelper { - - static final Logger LOG = - LoggerFactory.getLogger(ContainerCommandRequestPBHelper.class); - - private ContainerCommandRequestPBHelper() { - } - - public static Map getAuditParams( - ContainerCommandRequestProto msg) { - Map auditParams = new TreeMap<>(); - Type cmdType = msg.getCmdType(); - String containerID = String.valueOf(msg.getContainerID()); - switch(cmdType) { - case CreateContainer: - auditParams.put("containerID", containerID); - auditParams.put("containerType", - msg.getCreateContainer().getContainerType().toString()); - return auditParams; - - case ReadContainer: - auditParams.put("containerID", containerID); - return auditParams; - - case UpdateContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceUpdate", - String.valueOf(msg.getUpdateContainer().getForceUpdate())); - return auditParams; - - case DeleteContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceDelete", - String.valueOf(msg.getDeleteContainer().getForceDelete())); - return auditParams; - - case ListContainer: - auditParams.put("startContainerID", containerID); - auditParams.put("count", - String.valueOf(msg.getListContainer().getCount())); - return auditParams; - - case PutBlock: - try{ - auditParams.put("blockData", - BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) - .toString()); - } catch (IOException ex){ - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } - return null; - } - return auditParams; - - case GetBlock: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); - return auditParams; - - case DeleteBlock: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()) - .toString()); - return auditParams; - - case ListBlock: - auditParams.put("startLocalID", - String.valueOf(msg.getListBlock().getStartLocalID())); - auditParams.put("count", String.valueOf(msg.getListBlock().getCount())); - return auditParams; - - case ReadChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); - return auditParams; - - case DeleteChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()) - .toString()); - return auditParams; - - case WriteChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) - .toString()); - return auditParams; - - case ListChunk: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); - auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName()); - auditParams.put("count", String.valueOf(msg.getListChunk().getCount())); - return auditParams; - - case CompactChunk: return null; //CompactChunk operation - - case PutSmallFile: - try{ - auditParams.put("blockData", - BlockData.getFromProtoBuf(msg.getPutSmallFile() - .getBlock().getBlockData()).toString()); - } catch (IOException ex){ - if (LOG.isTraceEnabled()) { - LOG.trace("Encountered error parsing BlockData from protobuf: " - + ex.getMessage()); - } - } - return auditParams; - - case GetSmallFile: - auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()) - .toString()); - return auditParams; - - case CloseContainer: - auditParams.put("containerID", containerID); - return auditParams; - - case GetCommittedBlockLength: - 
auditParams.put("blockData", - BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID()) - .toString()); - return auditParams; - - default : - LOG.debug("Invalid command type - " + cmdType); - return null; - } - - } - - public static DNAction getAuditAction(Type cmdType) { - switch (cmdType) { - case CreateContainer : return DNAction.CREATE_CONTAINER; - case ReadContainer : return DNAction.READ_CONTAINER; - case UpdateContainer : return DNAction.UPDATE_CONTAINER; - case DeleteContainer : return DNAction.DELETE_CONTAINER; - case ListContainer : return DNAction.LIST_CONTAINER; - case PutBlock : return DNAction.PUT_BLOCK; - case GetBlock : return DNAction.GET_BLOCK; - case DeleteBlock : return DNAction.DELETE_BLOCK; - case ListBlock : return DNAction.LIST_BLOCK; - case ReadChunk : return DNAction.READ_CHUNK; - case DeleteChunk : return DNAction.DELETE_CHUNK; - case WriteChunk : return DNAction.WRITE_CHUNK; - case ListChunk : return DNAction.LIST_CHUNK; - case CompactChunk : return DNAction.COMPACT_CHUNK; - case PutSmallFile : return DNAction.PUT_SMALL_FILE; - case GetSmallFile : return DNAction.GET_SMALL_FILE; - case CloseContainer : return DNAction.CLOSE_CONTAINER; - case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH; - default : - LOG.debug("Invalid command type - " + cmdType); - return null; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java deleted file mode 100644 index fa5df113d87..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -/** - * Helper classes for the container protocol communication. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java deleted file mode 100644 index dfa93156dab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
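A tiny sketch of how the ContainerCommandRequestPBHelper audit helpers above are typically consumed; the command type is just an example value:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.audit.DNAction;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerCommandRequestPBHelper;

    public class AuditSketch {
      public static void main(String[] args) {
        // Map a datanode command type onto its audit action.
        DNAction action = ContainerCommandRequestPBHelper
            .getAuditAction(ContainerProtos.Type.CreateContainer);
        System.out.println(action); // CREATE_CONTAINER
      }
    }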

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.apache.hadoop.util.Time; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class represents the lease created on a resource. Callback can be - * registered on the lease which will be executed in case of timeout. - * - * @param Resource type for which the lease can be associated - */ -public class Lease { - - /** - * The resource for which this lease is created. - */ - private final T resource; - - private final long creationTime; - - /** - * Lease lifetime in milliseconds. - */ - private volatile long leaseTimeout; - - private boolean expired; - - /** - * Functions to be called in case of timeout. - */ - private List> callbacks; - - - /** - * Creates a lease on the specified resource with given timeout. - * - * @param resource - * Resource for which the lease has to be created - * @param timeout - * Lease lifetime in milliseconds - */ - public Lease(T resource, long timeout) { - this.resource = resource; - this.leaseTimeout = timeout; - this.callbacks = Collections.synchronizedList(new ArrayList<>()); - this.creationTime = Time.monotonicNow(); - this.expired = false; - } - - /** - * Returns true if the lease has expired, else false. - * - * @return true if expired, else false - */ - public boolean hasExpired() { - return expired; - } - - /** - * Registers a callback which will be executed in case of timeout. Callbacks - * are executed in a separate Thread. - * - * @param callback - * The Callable which has to be executed - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void registerCallBack(Callable callback) - throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - callbacks.add(callback); - } - - /** - * Returns the time elapsed since the creation of lease. - * - * @return elapsed time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getElapsedTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return Time.monotonicNow() - creationTime; - } - - /** - * Returns the time available before timeout. - * - * @return remaining time in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getRemainingTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout - getElapsedTime(); - } - - /** - * Returns total lease lifetime. - * - * @return total lifetime of lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public long getLeaseLifeTime() throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - return leaseTimeout; - } - - /** - * Renews the lease timeout period. 
- * - * @param timeout - * Time to be added to the lease in milliseconds - * @throws LeaseExpiredException - * If the lease has already timed out - */ - public void renew(long timeout) throws LeaseExpiredException { - if(hasExpired()) { - throw new LeaseExpiredException("Resource: " + resource); - } - leaseTimeout += timeout; - } - - @Override - public int hashCode() { - return resource.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if(obj instanceof Lease) { - return resource.equals(((Lease) obj).resource); - } - return false; - } - - @Override - public String toString() { - return "Lease<" + resource.toString() + ">"; - } - - /** - * Returns the callbacks to be executed for the lease in case of timeout. - * - * @return callbacks to be executed - */ - List> getCallbacks() { - return callbacks; - } - - /** - * Expires/Invalidates the lease. - */ - void invalidate() { - callbacks = null; - expired = true; - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java deleted file mode 100644 index a39ea22df10..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
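For reference, a minimal sketch of working with a Lease directly, registering a timeout callback and renewing it; the resource name and timeouts are illustrative (leases are normally handed out by LeaseManager rather than constructed by callers):

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseExpiredException;

    public class LeaseSketch {
      public static void main(String[] args) throws LeaseExpiredException {
        // A 10 second lease on a hypothetical resource name.
        Lease<String> lease = new Lease<>("container-1", 10_000L);
        lease.registerCallBack(() -> {
          // Runs on a callback executor thread if the lease times out.
          System.out.println("lease on container-1 expired");
          return null;
        });

        long remainingMs = lease.getRemainingTime(); // at most 10_000
        lease.renew(5_000L);                         // total lifetime is now 15 seconds
      }
    }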

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that there is already a lease acquired on the - * same resource. - */ -public class LeaseAlreadyExistException extends LeaseException { - - /** - * Constructs an {@code LeaseAlreadyExistException} with {@code null} - * as its error detail message. - */ - public LeaseAlreadyExistException() { - super(); - } - - /** - * Constructs an {@code LeaseAlreadyExistException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseAlreadyExistException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java deleted file mode 100644 index e2ca455ef0c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.Callable; - -/** - * This class is responsible for executing the callbacks of a lease in case of - * timeout. - */ -public class LeaseCallbackExecutor implements Runnable { - - private static final Logger LOG = LoggerFactory.getLogger(Lease.class); - - private final T resource; - private final List> callbacks; - - /** - * Constructs LeaseCallbackExecutor instance with list of callbacks. - * - * @param resource - * The resource for which the callbacks are executed - * @param callbacks - * Callbacks to be executed by this executor - */ - public LeaseCallbackExecutor(T resource, List> callbacks) { - this.resource = resource; - this.callbacks = callbacks; - } - - @Override - public void run() { - if (LOG.isDebugEnabled()) { - LOG.debug("Executing callbacks for lease on {}", resource); - } - for(Callable callback : callbacks) { - try { - callback.call(); - } catch (Exception e) { - LOG.warn("Exception while executing callback for lease on {}", - resource, e); - } - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java deleted file mode 100644 index 418f4127df7..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents all lease related exceptions. - */ -public class LeaseException extends Exception { - - /** - * Constructs an {@code LeaseException} with {@code null} - * as its error detail message. - */ - public LeaseException() { - super(); - } - - /** - * Constructs an {@code LeaseException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseException(String message) { - super(message); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java deleted file mode 100644 index 440a023beff..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed has expired. - */ -public class LeaseExpiredException extends LeaseException { - - /** - * Constructs an {@code LeaseExpiredException} with {@code null} - * as its error detail message. - */ - public LeaseExpiredException() { - super(); - } - - /** - * Constructs an {@code LeaseExpiredException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseExpiredException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java deleted file mode 100644 index 02befaef980..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ /dev/null @@ -1,251 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -/** - * LeaseManager is someone who can provide you leases based on your - * requirement. If you want to return the lease back before it expires, - * you can give it back to Lease Manager. He is the one responsible for - * the lifecycle of leases. The resource for which lease is created - * should have proper {@code equals} method implementation, resource - * equality is checked while the lease is created. - * - * @param Type of leases that this lease manager can create - */ -public class LeaseManager { - - private static final Logger LOG = - LoggerFactory.getLogger(LeaseManager.class); - - private final String name; - private final long defaultTimeout; - private Map> activeLeases; - private LeaseMonitor leaseMonitor; - private Thread leaseMonitorThread; - private boolean isRunning; - - /** - * Creates an instance of lease manager. - * - * @param name - * Name for the LeaseManager instance. - * @param defaultTimeout - * Default timeout in milliseconds to be used for lease creation. - */ - public LeaseManager(String name, long defaultTimeout) { - this.name = name; - this.defaultTimeout = defaultTimeout; - } - - /** - * Starts the lease manager service. - */ - public void start() { - LOG.debug("Starting {} LeaseManager service", name); - activeLeases = new ConcurrentHashMap<>(); - leaseMonitor = new LeaseMonitor(); - leaseMonitorThread = new Thread(leaseMonitor); - leaseMonitorThread.setName(name + "-LeaseManager#LeaseMonitor"); - leaseMonitorThread.setDaemon(true); - leaseMonitorThread.setUncaughtExceptionHandler((thread, throwable) -> { - // Let us just restart this thread after logging an error. - // if this thread is not running we cannot handle Lease expiry. - LOG.error("LeaseMonitor thread encountered an error. Thread: {}", - thread.toString(), throwable); - leaseMonitorThread.start(); - }); - LOG.debug("Starting {}-LeaseManager#LeaseMonitor Thread", name); - leaseMonitorThread.start(); - isRunning = true; - } - - /** - * Returns a lease for the specified resource with default timeout. - * - * @param resource - * Resource for which lease has to be created - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease acquire(T resource) - throws LeaseAlreadyExistException { - return acquire(resource, defaultTimeout); - } - - /** - * Returns a lease for the specified resource with the timeout provided. 
- * - * @param resource - * Resource for which lease has to be created - * @param timeout - * The timeout in milliseconds which has to be set on the lease - * @throws LeaseAlreadyExistException - * If there is already a lease on the resource - */ - public synchronized Lease acquire(T resource, long timeout) - throws LeaseAlreadyExistException { - checkStatus(); - if (LOG.isDebugEnabled()) { - LOG.debug("Acquiring lease on {} for {} milliseconds", resource, timeout); - } - if(activeLeases.containsKey(resource)) { - throw new LeaseAlreadyExistException("Resource: " + resource); - } - Lease lease = new Lease<>(resource, timeout); - activeLeases.put(resource, lease); - leaseMonitorThread.interrupt(); - return lease; - } - - /** - * Returns a lease associated with the specified resource. - * - * @param resource - * Resource for which the lease has to be returned - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public Lease get(T resource) throws LeaseNotFoundException { - checkStatus(); - Lease lease = activeLeases.get(resource); - if(lease != null) { - return lease; - } - throw new LeaseNotFoundException("Resource: " + resource); - } - - /** - * Releases the lease associated with the specified resource. - * - * @param resource - * The for which the lease has to be released - * @throws LeaseNotFoundException - * If there is no active lease on the resource - */ - public synchronized void release(T resource) - throws LeaseNotFoundException { - checkStatus(); - if (LOG.isDebugEnabled()) { - LOG.debug("Releasing lease on {}", resource); - } - Lease lease = activeLeases.remove(resource); - if(lease == null) { - throw new LeaseNotFoundException("Resource: " + resource); - } - lease.invalidate(); - } - - /** - * Shuts down the LeaseManager and releases the resources. All the active - * {@link Lease} will be released (callbacks on leases will not be - * executed). - */ - public void shutdown() { - checkStatus(); - LOG.debug("Shutting down LeaseManager service"); - leaseMonitor.disable(); - leaseMonitorThread.interrupt(); - for(T resource : activeLeases.keySet()) { - try { - release(resource); - } catch(LeaseNotFoundException ex) { - //Ignore the exception, someone might have released the lease - } - } - isRunning = false; - } - - /** - * Throws {@link LeaseManagerNotRunningException} if the service is not - * running. - */ - private void checkStatus() { - if(!isRunning) { - throw new LeaseManagerNotRunningException("LeaseManager not running."); - } - } - - /** - * Monitors the leases and expires them based on the timeout, also - * responsible for executing the callbacks of expired leases. - */ - private final class LeaseMonitor implements Runnable { - - private boolean monitor = true; - private ExecutorService executorService; - - private LeaseMonitor() { - this.monitor = true; - this.executorService = Executors.newCachedThreadPool(); - } - - @Override - public void run() { - while (monitor) { - LOG.debug("{}-LeaseMonitor: checking for lease expiry", name); - long sleepTime = Long.MAX_VALUE; - - for (T resource : activeLeases.keySet()) { - try { - Lease lease = get(resource); - long remainingTime = lease.getRemainingTime(); - if (remainingTime <= 0) { - //Lease has timed out - List> leaseCallbacks = lease.getCallbacks(); - release(resource); - executorService.execute( - new LeaseCallbackExecutor(resource, leaseCallbacks)); - } else { - sleepTime = remainingTime > sleepTime ? 
- sleepTime : remainingTime; - } - } catch (LeaseNotFoundException | LeaseExpiredException ex) { - //Ignore the exception, someone might have released the lease - } - } - - try { - if(!Thread.interrupted()) { - Thread.sleep(sleepTime); - } - } catch (InterruptedException ignored) { - // This means a new lease is added to activeLeases. - } - } - } - - /** - * Disables lease monitor, next interrupt call on the thread - * will stop lease monitor. - */ - public void disable() { - monitor = false; - } - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java deleted file mode 100644 index ced31de4394..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
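A minimal lifecycle sketch for the LeaseManager above; the manager name, resource key, and timeout are illustrative assumptions:

    import org.apache.hadoop.ozone.lease.Lease;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    public class LeaseManagerSketch {
      public static void main(String[] args) throws Exception {
        // Name and default timeout (milliseconds) are example values.
        LeaseManager<String> manager = new LeaseManager<>("ExampleService", 10_000L);
        manager.start();

        Lease<String> lease = manager.acquire("container-1"); // uses the default timeout
        lease.registerCallBack(() -> {
          // Invoked by the monitor thread if the lease expires before release.
          System.out.println("lease on container-1 timed out");
          return null;
        });

        manager.release("container-1"); // return the lease before it expires
        manager.shutdown();
      }
    }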

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that there LeaseManager service is not running. - */ -public class LeaseManagerNotRunningException extends RuntimeException { - - /** - * Constructs an {@code LeaseManagerNotRunningException} with {@code null} - * as its error detail message. - */ - public LeaseManagerNotRunningException() { - super(); - } - - /** - * Constructs an {@code LeaseManagerNotRunningException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseManagerNotRunningException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java deleted file mode 100644 index c292d332323..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; - -/** - * This exception represents that the lease that is being accessed does not - * exist. - */ -public class LeaseNotFoundException extends LeaseException { - - /** - * Constructs an {@code LeaseNotFoundException} with {@code null} - * as its error detail message. - */ - public LeaseNotFoundException() { - super(); - } - - /** - * Constructs an {@code LeaseNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public LeaseNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java deleted file mode 100644 index 48ee2e1c6ab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * A generic lease management API which can be used if a service - * needs any kind of lease management. - */ - -package org.apache.hadoop.ozone.lease; -/* - This package contains lease management related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java deleted file mode 100644 index 95dfd6c393c..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/ActiveLock.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * Lock implementation which also maintains counter. - */ -public final class ActiveLock { - - private ReadWriteLock lock; - private AtomicInteger count; - - /** - * Use ActiveLock#newInstance to create instance. - * - * @param fairness - if true the lock uses a fair ordering policy, else - * non-fair ordering. - */ - private ActiveLock(boolean fairness) { - this.lock = new ReentrantReadWriteLock(fairness); - this.count = new AtomicInteger(0); - } - - /** - * Creates a new instance of ActiveLock. - * - * @return new ActiveLock - */ - public static ActiveLock newInstance(boolean fairness) { - return new ActiveLock(fairness); - } - - /** - * Acquires read lock. - * - *

Acquires the read lock if the write lock is not held by - * another thread and returns immediately. - * - *

If the write lock is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - */ - void readLock() { - lock.readLock().lock(); - } - - /** - * Attempts to release the read lock. - * - *

If the number of readers is now zero then the lock - * is made available for write lock attempts. - */ - void readUnlock() { - lock.readLock().unlock(); - } - - /** - * Acquires write lock. - * - *

Acquires the write lock if neither the read nor write lock - * are held by another thread - * and returns immediately, setting the write lock hold count to - * one. - * - *

If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *

If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - */ - void writeLock() { - lock.writeLock().lock(); - } - - /** - * Attempts to release the write lock. - * - *

If the current thread is the holder of this lock then - * the hold count is decremented. If the hold count is now - * zero then the lock is released. - */ - void writeUnlock() { - lock.writeLock().unlock(); - } - - /** - * Increment the active count of the lock. - */ - void incrementActiveCount() { - count.incrementAndGet(); - } - - /** - * Decrement the active count of the lock. - */ - void decrementActiveCount() { - count.decrementAndGet(); - } - - /** - * Returns the active count on the lock. - * - * @return Number of active leases on the lock. - */ - int getActiveLockCount() { - return count.get(); - } - - /** - * Resets the active count on the lock. - */ - void resetCounter() { - count.set(0); - } - - @Override - public String toString() { - return lock.toString(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java deleted file mode 100644 index 3c2b5d4a394..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java +++ /dev/null @@ -1,241 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.impl.GenericObjectPool; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Consumer; - -/** - * Manages the locks on a given resource. A new lock is created for each - * and every unique resource. Uniqueness of resource depends on the - * {@code equals} implementation of it. - */ -public class LockManager { - - private static final Logger LOG = LoggerFactory.getLogger(LockManager.class); - - private final Map activeLocks = new ConcurrentHashMap<>(); - private final GenericObjectPool lockPool; - - /** - * Creates new LockManager instance with the given Configuration.and uses - * non-fair mode for locks. - * - * @param conf Configuration object - */ - public LockManager(final Configuration conf) { - this(conf, false); - } - - - /** - * Creates new LockManager instance with the given Configuration. - * - * @param conf Configuration object - * @param fair - true to use fair lock ordering, else non-fair lock ordering. - */ - public LockManager(final Configuration conf, boolean fair) { - final int maxPoolSize = conf.getInt( - HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY, - HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY_DEFAULT); - lockPool = - new GenericObjectPool<>(new PooledLockFactory(fair)); - lockPool.setMaxTotal(maxPoolSize); - } - - /** - * Acquires the lock on given resource. - * - *

If the lock is not available then the current thread becomes - * disabled for thread scheduling purposes and lies dormant until the - * lock has been acquired. - * - * @param resource on which the lock has to be acquired - * @deprecated Use {@link LockManager#writeLock} instead - */ - public void lock(final R resource) { - writeLock(resource); - } - - /** - * Releases the lock on given resource. - * - * @param resource for which the lock has to be released - * @deprecated Use {@link LockManager#writeUnlock} instead - */ - public void unlock(final R resource) { - writeUnlock(resource); - } - - /** - * Acquires the read lock on given resource. - * - *

Acquires the read lock on resource if the write lock is not held by - * another thread and returns immediately. - * - *

If the write lock on resource is held by another thread then - * the current thread becomes disabled for thread scheduling - * purposes and lies dormant until the read lock has been acquired. - * - * @param resource on which the read lock has to be acquired - */ - public void readLock(final R resource) { - acquire(resource, ActiveLock::readLock); - } - - /** - * Releases the read lock on given resource. - * - * @param resource for which the read lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void readUnlock(final R resource) throws IllegalMonitorStateException { - release(resource, ActiveLock::readUnlock); - } - - /** - * Acquires the write lock on given resource. - * - *

Acquires the write lock on resource if neither the read nor write lock - * are held by another thread and returns immediately. - * - *

If the current thread already holds the write lock then the - * hold count is incremented by one and the method returns - * immediately. - * - *

If the lock is held by another thread then the current - * thread becomes disabled for thread scheduling purposes and - * lies dormant until the write lock has been acquired. - * - * @param resource on which the lock has to be acquired - */ - public void writeLock(final R resource) { - acquire(resource, ActiveLock::writeLock); - } - - /** - * Releases the write lock on given resource. - * - * @param resource for which the lock has to be released - * @throws IllegalMonitorStateException if the current thread does not - * hold this lock - */ - public void writeUnlock(final R resource) - throws IllegalMonitorStateException { - release(resource, ActiveLock::writeUnlock); - } - - /** - * Acquires the lock on given resource using the provided lock function. - * - * @param resource on which the lock has to be acquired - * @param lockFn function to acquire the lock - */ - private void acquire(final R resource, final Consumer lockFn) { - lockFn.accept(getLockForLocking(resource)); - } - - /** - * Releases the lock on given resource using the provided release function. - * - * @param resource for which the lock has to be released - * @param releaseFn function to release the lock - */ - private void release(final R resource, final Consumer releaseFn) { - final ActiveLock lock = getLockForReleasing(resource); - releaseFn.accept(lock); - decrementActiveLockCount(resource); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * on which the lock can be acquired. - * - * @param resource on which the lock has to be acquired - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForLocking(final R resource) { - /* - * While getting a lock object for locking we should - * atomically increment the active count of the lock. - * - * This is to avoid cases where the selected lock could - * be removed from the activeLocks map and returned to - * the object pool. - */ - return activeLocks.compute(resource, (k, v) -> { - final ActiveLock lock; - try { - if (v == null) { - lock = lockPool.borrowObject(); - } else { - lock = v; - } - lock.incrementActiveCount(); - } catch (Exception ex) { - LOG.error("Unable to obtain lock.", ex); - throw new RuntimeException(ex); - } - return lock; - }); - } - - /** - * Returns {@link ActiveLock} instance for the given resource, - * for which the lock has to be released. - * - * @param resource for which the lock has to be released - * @return {@link ActiveLock} instance - */ - private ActiveLock getLockForReleasing(final R resource) { - if (activeLocks.containsKey(resource)) { - return activeLocks.get(resource); - } - // Someone is releasing a lock which was never acquired. - LOG.error("Trying to release the lock on {}, which was never acquired.", - resource); - throw new IllegalMonitorStateException("Releasing lock on resource " - + resource + " without acquiring lock"); - } - - /** - * Decrements the active lock count and returns the {@link ActiveLock} - * object to pool if the active count is 0. 
- * - * @param resource resource to which the ActiveLock is associated - */ - private void decrementActiveLockCount(final R resource) { - activeLocks.computeIfPresent(resource, (k, v) -> { - v.decrementActiveCount(); - if (v.getActiveLockCount() != 0) { - return v; - } - lockPool.returnObject(v); - return null; - }); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java deleted file mode 100644 index 1e3ba05a3a2..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/PooledLockFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
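A minimal usage sketch for the LockManager above, taking the write lock and then the read lock on a per-resource key; the resource string is a hypothetical example:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.lock.LockManager;

    public class LockManagerSketch {
      public static void main(String[] args) {
        // Non-fair ordering by default; pool size comes from HDDS_LOCK_MAX_CONCURRENCY.
        LockManager<String> lockManager = new LockManager<>(new OzoneConfiguration());

        lockManager.writeLock("volume1/bucket1"); // exclusive lock on this resource key
        try {
          // mutate state guarded by the resource
        } finally {
          lockManager.writeUnlock("volume1/bucket1");
        }

        lockManager.readLock("volume1/bucket1"); // shared lock for readers
        try {
          // read state guarded by the resource
        } finally {
          lockManager.readUnlock("volume1/bucket1");
        }
      }
    }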

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.commons.pool2.BasePooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; - -/** - * Pool factory to create {@code ActiveLock} instances. - */ -public class PooledLockFactory extends BasePooledObjectFactory { - - private boolean fairness; - - PooledLockFactory(boolean fair) { - this.fairness = fair; - } - @Override - public ActiveLock create() throws Exception { - return ActiveLock.newInstance(fairness); - } - - @Override - public PooledObject wrap(ActiveLock activeLock) { - return new DefaultPooledObject<>(activeLock); - } - - @Override - public void activateObject(PooledObject pooledObject) { - pooledObject.getObject().resetCounter(); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java deleted file mode 100644 index 5c677ced745..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; -/* - This package contains the lock related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index db399db25ab..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
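
The LockManager removed above, together with PooledLockFactory, implements per-resource, reference-counted locking: the lock object for a resource is looked up and its active count incremented atomically inside ConcurrentHashMap.compute(), and the entry is only returned to the pool once the count drops back to zero. A minimal JDK-only sketch of the same idea, with illustrative names (ResourceLocks, Entry) and no object pool, so it is not the removed implementation:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Illustrative per-resource, reference-counted lock map (simplified sketch). */
public final class ResourceLocks<R> {

  private static final class Entry {
    final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); // fair, like the pooled locks
    int holders; // threads that have looked this entry up and not yet released it
  }

  private final ConcurrentHashMap<R, Entry> locks = new ConcurrentHashMap<>();

  public void writeLock(R resource) {
    // compute() runs atomically per key, so the holder count is bumped before a
    // concurrent release could evict the entry from the map.
    Entry e = locks.compute(resource, (k, v) -> {
      Entry entry = (v == null) ? new Entry() : v;
      entry.holders++;
      return entry;
    });
    e.lock.writeLock().lock();
  }

  public void writeUnlock(R resource) {
    Entry e = locks.get(resource);
    if (e == null) {
      throw new IllegalMonitorStateException(
          "Releasing lock on " + resource + " without acquiring it");
    }
    e.lock.writeLock().unlock();
    // Drop the map entry once no thread holds or waits on it; read locking
    // would follow exactly the same acquire/release pattern.
    locks.computeIfPresent(resource, (k, v) -> (--v.holders == 0) ? null : v);
  }
}
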
- */ -package org.apache.hadoop.ozone; - -/** - This package contains class that support ozone implementation on the datanode - side. - - Main parts of ozone on datanode are: - - 1. REST Interface - This code lives under the web directory and listens to the - WebHDFS port. - - 2. Datanode container classes: This support persistence of ozone objects on - datanode. These classes live under container directory. - - 3. Client and Shell: We also support a ozone REST client lib, they are under - web/client and web/ozShell. - - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java deleted file mode 100644 index 96725f269a1..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ProtocolMessageMetrics.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - -import com.google.protobuf.ProtocolMessageEnum; - -/** - * Metrics to count all the subtypes of a specific message. 
- */ -public class ProtocolMessageMetrics implements MetricsSource { - - private String name; - - private String description; - - private Map counters = - new ConcurrentHashMap<>(); - - public static ProtocolMessageMetrics create(String name, - String description, ProtocolMessageEnum[] types) { - ProtocolMessageMetrics protocolMessageMetrics = - new ProtocolMessageMetrics(name, description, - types); - return protocolMessageMetrics; - } - - public ProtocolMessageMetrics(String name, String description, - ProtocolMessageEnum[] values) { - this.name = name; - this.description = description; - for (ProtocolMessageEnum value : values) { - counters.put(value, new AtomicLong(0)); - } - } - - public void increment(ProtocolMessageEnum key) { - counters.get(key).incrementAndGet(); - } - - public void register() { - DefaultMetricsSystem.instance() - .register(name, description, this); - } - - public void unregister() { - DefaultMetricsSystem.instance().unregisterSource(name); - } - - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder builder = collector.addRecord(name); - counters.forEach((key, value) -> { - builder.addCounter(new MetricName(key.toString(), ""), value.longValue()); - }); - builder.endRecord(); - } - - /** - * Simple metrics info implementation. - */ - public static class MetricName implements MetricsInfo { - private String name; - private String description; - - public MetricName(String name, String description) { - this.name = name; - this.description = description; - } - - @Override - public String name() { - return name; - } - - @Override - public String description() { - return description; - } - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 860386d9fdc..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -/** - * This package contains classes for the Protocol Buffers binding of Ozone - * protocols. - */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java deleted file mode 100644 index 4177b96a354..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
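
ProtocolMessageMetrics above pre-creates one AtomicLong per protobuf message type in its constructor, so increment() on the hot path is a plain map lookup with no insertion race, and getMetrics() simply walks the map. A simplified, JDK-only sketch of that counting pattern with an illustrative name (MessageTypeCounters); unlike the removed class it does not publish through the Hadoop metrics system:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

/** Illustrative per-message-type counter (simplified sketch, not the removed class). */
public final class MessageTypeCounters<E extends Enum<E>> {

  private final Map<E, AtomicLong> counters = new ConcurrentHashMap<>();

  public MessageTypeCounters(Class<E> enumType) {
    // Pre-create one counter per message type so increment() is a plain
    // lookup plus add, with no insertion on the hot path.
    for (E value : enumType.getEnumConstants()) {
      counters.put(value, new AtomicLong());
    }
  }

  public void increment(E type) {
    counters.get(type).incrementAndGet();
  }

  public long get(E type) {
    return counters.get(type).get();
  }
}
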
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.utils; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.fasterxml.jackson.databind.type.CollectionType; - -import java.io.IOException; -import java.util.List; - -/** - * JSON Utility functions used in ozone. - */ -public final class JsonUtils { - - // Reuse ObjectMapper instance for improving performance. - // ObjectMapper is thread safe as long as we always configure instance - // before use. - private static final ObjectMapper MAPPER = new ObjectMapper(); - private static final ObjectReader READER = MAPPER.readerFor(Object.class); - private static final ObjectWriter WRITTER = - MAPPER.writerWithDefaultPrettyPrinter(); - - private JsonUtils() { - // Never constructed - } - - public static String toJsonStringWithDefaultPrettyPrinter(Object obj) - throws IOException { - return WRITTER.writeValueAsString(obj); - } - - public static String toJsonString(Object obj) throws IOException { - return MAPPER.writeValueAsString(obj); - } - - /** - * Deserialize a list of elements from a given string, - * each element in the list is in the given type. - * - * @param str json string. - * @param elementType element type. - * @return List of elements of type elementType - * @throws IOException - */ - public static List toJsonList(String str, Class elementType) - throws IOException { - CollectionType type = MAPPER.getTypeFactory() - .constructCollectionType(List.class, elementType); - return MAPPER.readValue(str, type); - } -} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java deleted file mode 100644 index e5812c00d94..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
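
JsonUtils above keeps a single pre-configured Jackson ObjectMapper (thread-safe once configured) and deserializes typed lists through a CollectionType instead of a raw List. A small self-contained usage sketch of that pattern; the class name and sample data are illustrative:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;
import java.util.Arrays;
import java.util.List;

public final class JsonListDemo {
  // One shared, pre-configured mapper, reused across calls for performance.
  private static final ObjectMapper MAPPER = new ObjectMapper();

  public static void main(String[] args) throws Exception {
    String json = MAPPER.writerWithDefaultPrettyPrinter()
        .writeValueAsString(Arrays.asList("vol1", "vol2"));

    // Deserialize back into a typed List<String> via a CollectionType,
    // the same approach the removed JsonUtils.toJsonList used.
    CollectionType type =
        MAPPER.getTypeFactory().constructCollectionType(List.class, String.class);
    List<String> names = MAPPER.readValue(json, type);
    System.out.println(names);
  }
}
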
- */ - -package org.apache.hadoop.ozone.web.utils; diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto deleted file mode 100644 index 1bfe4d1247c..00000000000 --- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto +++ /dev/null @@ -1,469 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and Unstable. - * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html - * for what changes are allowed for a *Unstable* .proto interface. - */ - -// This file contains protocol buffers that are used to transfer data -// to and from the datanode. -syntax = "proto2"; -option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto"; -option java_outer_classname = "ContainerProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdds.datanode; - -/** - * Commands that are used to manipulate the state of containers on a datanode. - * - * These commands allow us to work against the datanode - from - * StorageContainer Manager as well as clients. - * - * 1. CreateContainer - This call is usually made by Storage Container - * manager, when we need to create a new container on a given datanode. - * - * 2. ReadContainer - Allows end user to stat a container. For example - * this allows us to return the metadata of a container. - * - * 3. UpdateContainer - Updates a container metadata. - - * 4. DeleteContainer - This call is made to delete a container. - * - * 5. ListContainer - Returns the list of containers on this - * datanode. This will be used by tests and tools. - * - * 6. PutBlock - Given a valid container, creates a block. - * - * 7. GetBlock - Allows user to read the metadata of a block. - * - * 8. DeleteBlock - Deletes a given block. - * - * 9. ListBlock - Returns a list of blocks that are present inside - * a given container. - * - * 10. ReadChunk - Allows us to read a chunk. - * - * 11. DeleteChunk - Delete an unused chunk. - * - * 12. WriteChunk - Allows us to write a chunk - * - * 13. ListChunk - Given a Container/Block returns the list of Chunks. - * - * 14. CompactChunk - Re-writes a chunk based on Offsets. - * - * 15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk. - * - * 16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk. - * - * 17. CloseContainer - Closes an open container and makes it immutable. - * - * 18. CopyContainer - Copies a container from a remote machine. 
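
The numbered command list above maps onto a single request/response envelope: ContainerCommandRequestProto (defined further down in this file) carries a cmdType plus exactly one matching sub-request per packet. Assuming the Java classes that protoc generates from this file (java_package org.apache.hadoop.hdds.protocol.datanode.proto, outer class ContainerProtos), a CreateContainer request would be assembled roughly as below; the helper class and its arguments are illustrative:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

public final class CreateContainerRequestExample {
  // Builds the envelope for command 1 (CreateContainer); only the sub-request
  // matching cmdType is populated, as the protocol expects.
  public static ContainerProtos.ContainerCommandRequestProto build(
      long containerId, String datanodeUuid) {
    return ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setContainerID(containerId)
        .setDatanodeUuid(datanodeUuid)
        .setCreateContainer(
            ContainerProtos.CreateContainerRequestProto.newBuilder().build())
        .build();
  }
}
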
- */ - -enum Type { - CreateContainer = 1; - ReadContainer = 2; - UpdateContainer = 3; - DeleteContainer = 4; - ListContainer = 5; - - PutBlock = 6; - GetBlock = 7; - DeleteBlock = 8; - ListBlock = 9; - - ReadChunk = 10; - DeleteChunk = 11; - WriteChunk = 12; - ListChunk = 13; - CompactChunk = 14; - - /** Combines Block and Chunk Operation into Single RPC. */ - PutSmallFile = 15; - GetSmallFile = 16; - CloseContainer = 17; - GetCommittedBlockLength = 18; -} - - -enum Result { - SUCCESS = 1; - UNSUPPORTED_REQUEST = 2; - MALFORMED_REQUEST = 3; - CONTAINER_INTERNAL_ERROR = 4; - INVALID_CONFIG = 5; - INVALID_FILE_HASH_FOUND = 6; - CONTAINER_EXISTS = 7; - NO_SUCH_ALGORITHM = 8; - CONTAINER_NOT_FOUND = 9; - IO_EXCEPTION = 10; - UNABLE_TO_READ_METADATA_DB = 11; - NO_SUCH_BLOCK = 12; - OVERWRITE_FLAG_REQUIRED = 13; - UNABLE_TO_FIND_DATA_DIR = 14; - INVALID_WRITE_SIZE = 15; - CHECKSUM_MISMATCH = 16; - UNABLE_TO_FIND_CHUNK = 17; - PROTOC_DECODING_ERROR = 18; - INVALID_ARGUMENT = 19; - PUT_SMALL_FILE_ERROR = 20; - GET_SMALL_FILE_ERROR = 21; - CLOSED_CONTAINER_IO = 22; - ERROR_IN_COMPACT_DB = 24; - UNCLOSED_CONTAINER_IO = 25; - DELETE_ON_OPEN_CONTAINER = 26; - CLOSED_CONTAINER_RETRY = 27; - INVALID_CONTAINER_STATE = 28; - DISK_OUT_OF_SPACE = 29; - CONTAINER_ALREADY_EXISTS = 30; - CONTAINER_METADATA_ERROR = 31; - CONTAINER_FILES_CREATE_ERROR = 32; - CONTAINER_CHECKSUM_ERROR = 33; - UNKNOWN_CONTAINER_TYPE = 34; - BLOCK_NOT_COMMITTED = 35; - CONTAINER_UNHEALTHY = 36; - UNKNOWN_BCSID = 37; - BCSID_MISMATCH = 38; - CONTAINER_NOT_OPEN = 39; - CONTAINER_MISSING = 40; - BLOCK_TOKEN_VERIFICATION_FAILED = 41; - ERROR_IN_DB_SYNC = 42; -} - -/** - * Block ID that uniquely identify a block in Datanode. - */ -message DatanodeBlockID { - required int64 containerID = 1; - required int64 localID = 2; - optional uint64 blockCommitSequenceId = 3 [default = 0]; -} - -message KeyValue { - required string key = 1; - optional string value = 2; -} - -message ContainerCommandRequestProto { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - required int64 containerID = 3; - required string datanodeUuid = 4; - optional string pipelineID = 5; - - // One of the following command is available when the corresponding - // cmdType is set. At the protocol level we allow only - // one command in each packet. - // TODO : Upgrade to Protobuf 2.6 or later. 
- optional CreateContainerRequestProto createContainer = 6; - optional ReadContainerRequestProto readContainer = 7; - optional UpdateContainerRequestProto updateContainer = 8; - optional DeleteContainerRequestProto deleteContainer = 9; - optional ListContainerRequestProto listContainer = 10; - optional CloseContainerRequestProto closeContainer = 11; - - optional PutBlockRequestProto putBlock = 12; - optional GetBlockRequestProto getBlock = 13; - optional DeleteBlockRequestProto deleteBlock = 14; - optional ListBlockRequestProto listBlock = 15; - - optional ReadChunkRequestProto readChunk = 16; - optional WriteChunkRequestProto writeChunk = 17; - optional DeleteChunkRequestProto deleteChunk = 18; - optional ListChunkRequestProto listChunk = 19; - - optional PutSmallFileRequestProto putSmallFile = 20; - optional GetSmallFileRequestProto getSmallFile = 21; - optional GetCommittedBlockLengthRequestProto getCommittedBlockLength = 22; - optional string encodedToken = 23; -} - -message ContainerCommandResponseProto { - required Type cmdType = 1; - optional string traceID = 2; - - required Result result = 3; - optional string message = 4; - - optional CreateContainerResponseProto createContainer = 5; - optional ReadContainerResponseProto readContainer = 6; - optional UpdateContainerResponseProto updateContainer = 7; - optional DeleteContainerResponseProto deleteContainer = 8; - optional ListContainerResponseProto listContainer = 9; - optional CloseContainerResponseProto closeContainer = 10; - - optional PutBlockResponseProto putBlock = 11; - optional GetBlockResponseProto getBlock = 12; - optional DeleteBlockResponseProto deleteBlock = 13; - optional ListBlockResponseProto listBlock = 14; - - optional WriteChunkResponseProto writeChunk = 15; - optional ReadChunkResponseProto readChunk = 16; - optional DeleteChunkResponseProto deleteChunk = 17; - optional ListChunkResponseProto listChunk = 18; - - optional PutSmallFileResponseProto putSmallFile = 19; - optional GetSmallFileResponseProto getSmallFile = 20; - - optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21; -} - -message ContainerDataProto { - enum State { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED =3; - CLOSED = 4; - UNHEALTHY = 5; - INVALID = 6; - } - required int64 containerID = 1; - repeated KeyValue metadata = 2; - optional string containerPath = 4; - optional int64 bytesUsed = 6; - optional int64 size = 7; - optional int64 blockCount = 8; - optional State state = 9 [default = OPEN]; - optional ContainerType containerType = 10 [default = KeyValueContainer]; -} - -message Container2BCSIDMapProto { - // repeated Container2BCSIDMapEntryProto container2BCSID = 1; - map container2BCSID = 1; -} - -enum ContainerType { - KeyValueContainer = 1; -} - - -// Container Messages. 
-message CreateContainerRequestProto { - repeated KeyValue metadata = 2; - optional ContainerType containerType = 3 [default = KeyValueContainer]; -} - -message CreateContainerResponseProto { -} - -message ReadContainerRequestProto { -} - -message ReadContainerResponseProto { - optional ContainerDataProto containerData = 1; -} - -message UpdateContainerRequestProto { - repeated KeyValue metadata = 2; - optional bool forceUpdate = 3 [default = false]; -} - -message UpdateContainerResponseProto { -} - -message DeleteContainerRequestProto { - optional bool forceDelete = 2 [default = false]; -} - -message DeleteContainerResponseProto { -} - -message ListContainerRequestProto { - optional uint32 count = 2; // Max Results to return -} - -message ListContainerResponseProto { - repeated ContainerDataProto containerData = 1; -} - -message CloseContainerRequestProto { -} - -message CloseContainerResponseProto { - optional string hash = 1; - optional int64 containerID = 2; -} - -message BlockData { - required DatanodeBlockID blockID = 1; - optional int64 flags = 2; // for future use. - repeated KeyValue metadata = 3; - repeated ChunkInfo chunks = 4; - optional int64 size = 5; -} - -// Block Messages. -message PutBlockRequestProto { - required BlockData blockData = 1; -} - -message PutBlockResponseProto { - required GetCommittedBlockLengthResponseProto committedBlockLength = 1; -} - -message GetBlockRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetBlockResponseProto { - required BlockData blockData = 1; -} - - -message DeleteBlockRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetCommittedBlockLengthRequestProto { - required DatanodeBlockID blockID = 1; -} - -message GetCommittedBlockLengthResponseProto { - required DatanodeBlockID blockID = 1; - required int64 blockLength = 2; -} - -message DeleteBlockResponseProto { -} - -message ListBlockRequestProto { - optional int64 startLocalID = 2; - required uint32 count = 3; - -} - -message ListBlockResponseProto { - repeated BlockData blockData = 1; -} - -// Chunk Operations - -message ChunkInfo { - required string chunkName = 1; - required uint64 offset = 2; - required uint64 len = 3; - repeated KeyValue metadata = 4; - required ChecksumData checksumData =5; -} - -message ChecksumData { - required ChecksumType type = 1; - required uint32 bytesPerChecksum = 2; - repeated bytes checksums = 3; -} - -enum ChecksumType { - NONE = 1; - CRC32 = 2; - CRC32C = 3; - SHA256 = 4; - MD5 = 5; -} - -message WriteChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; - optional bytes data = 3; -} - -message WriteChunkResponseProto { -} - -message ReadChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; -} - -message ReadChunkResponseProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; - required bytes data = 3; -} - -message DeleteChunkRequestProto { - required DatanodeBlockID blockID = 1; - required ChunkInfo chunkData = 2; -} - -message DeleteChunkResponseProto { -} - -message ListChunkRequestProto { - required DatanodeBlockID blockID = 1; - required string prevChunkName = 2; - required uint32 count = 3; -} - -message ListChunkResponseProto { - repeated ChunkInfo chunkData = 1; -} - -/** For small file access combines write chunk and putBlock into a single -RPC */ - -message PutSmallFileRequestProto { - required PutBlockRequestProto block = 1; - required ChunkInfo chunkInfo = 2; - required bytes data = 3; -} - - 
-message PutSmallFileResponseProto { - required GetCommittedBlockLengthResponseProto committedBlockLength = 1; -} - -message GetSmallFileRequestProto { - required GetBlockRequestProto block = 1; -} - -message GetSmallFileResponseProto { - required ReadChunkResponseProto data = 1; -} - -message CopyContainerRequestProto { - required int64 containerID = 1; - required uint64 readOffset = 2; - optional uint64 len = 3; -} - -message CopyContainerResponseProto { - required int64 containerID = 1; - required uint64 readOffset = 2; - required uint64 len = 3; - required bool eof = 4; - required bytes data = 5; - optional int64 checksum = 6; -} - -service XceiverClientProtocolService { - // A client-to-datanode RPC to send container commands - rpc send(stream ContainerCommandRequestProto) returns - (stream ContainerCommandResponseProto) {}; - -} - -service IntraDatanodeProtocolService { - // An intradatanode service to copy the raw container data between nodes - rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto); -} diff --git a/hadoop-hdds/common/src/main/proto/FSProtos.proto b/hadoop-hdds/common/src/main/proto/FSProtos.proto deleted file mode 100644 index c3b768ab67e..00000000000 --- a/hadoop-hdds/common/src/main/proto/FSProtos.proto +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.fs"; -option java_outer_classname = "FSProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.fs; - -message FsPermissionProto { - required uint32 perm = 1; // UNIX-style mode bits -} - -/* - * FileStatus encoding. Field IDs match those from HdfsFileStatusProto, but - * cross-serialization is not an explicitly supported use case. Unlike HDFS, - * most fields are optional and do not define defaults. 
- */ -message FileStatusProto { - enum FileType { - FT_DIR = 1; - FT_FILE = 2; - FT_SYMLINK = 3; - } - enum Flags { - HAS_ACL = 0x01; // has ACLs - HAS_CRYPT = 0x02; // encrypted - HAS_EC = 0x04; // erasure coded - SNAPSHOT_ENABLED = 0x08; // snapshot enabled - } - required FileType fileType = 1; - required string path = 2; - optional uint64 length = 3; - optional FsPermissionProto permission = 4; - optional string owner = 5; - optional string group = 6; - optional uint64 modification_time = 7; - optional uint64 access_time = 8; - optional string symlink = 9; - optional uint32 block_replication = 10; - optional uint64 block_size = 11; - // locations = 12 - // alias = 13 - // childrenNum = 14 - optional bytes encryption_data = 15; - // storagePolicy = 16 - optional bytes ec_data = 17; - optional uint32 flags = 18 [default = 0]; -} - -/** - * Placeholder type for consistent basic FileSystem operations. - */ -message LocalFileSystemPathHandleProto { - optional uint64 mtime = 1; - optional string path = 2; -} diff --git a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto b/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto deleted file mode 100644 index 72e0e9f66f7..00000000000 --- a/hadoop-hdds/common/src/main/proto/SCMSecurityProtocol.proto +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; - -option java_outer_classname = "SCMSecurityProtocolProtos"; - -option java_generic_services = true; - -option java_generate_equals_and_hash = true; - -package hadoop.hdds.security; - -import "hdds.proto"; - -/** -All commands is send as request and all response come back via -Response class. If adding new functions please follow this protocol, since -our tracing and visibility tools depend on this pattern. -*/ -message SCMSecurityRequest { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional SCMGetDataNodeCertRequestProto getDataNodeCertRequest = 3; - optional SCMGetOMCertRequestProto getOMCertRequest = 4; - optional SCMGetCertificateRequestProto getCertificateRequest = 5; - optional SCMGetCACertificateRequestProto getCACertificateRequest = 6; - -} - -message SCMSecurityResponse { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional SCMGetCertResponseProto getCertResponseProto = 6; - -} - -enum Type { - GetDataNodeCertificate = 1; - GetOMCertificate = 2; - GetCertificate = 3; - GetCACertificate = 4; -} - -enum Status { - OK = 1; -} -/** -* This message is send by data node to prove its identity and get an SCM -* signed certificate. -*/ -message SCMGetDataNodeCertRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - required string CSR = 2; -} - -/** -* This message is send by OzoneManager to prove its identity and get an SCM -* signed certificate. -*/ -message SCMGetOMCertRequestProto { - required OzoneManagerDetailsProto omDetails = 1; - required string CSR = 2; -} - -/** -* Proto request to get a certificate with given serial id. -*/ -message SCMGetCertificateRequestProto { - required string certSerialId = 1; -} - -/** -* Proto request to get CA certificate. -*/ -message SCMGetCACertificateRequestProto { -} - -/** - * Returns a certificate signed by SCM. - */ -message SCMGetCertResponseProto { - enum ResponseCode { - success = 1; - authenticationFailed = 2; - invalidCSR = 3; - } - required ResponseCode responseCode = 1; - required string x509Certificate = 2; // Base64 encoded X509 certificate. - optional string x509CACertificate = 3; // Base64 encoded CA X509 certificate. -} - - -service SCMSecurityProtocolService { - rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse); -} diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto deleted file mode 100644 index fc7a5988ce6..00000000000 --- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "ScmBlockLocationProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds.block; - -import "hdds.proto"; - - -// SCM Block protocol - -enum Type { - AllocateScmBlock = 11; - DeleteScmKeyBlocks = 12; - GetScmInfo = 13; - SortDatanodes = 14; -} - -message SCMBlockLocationRequest { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional UserInfo userInfo = 3; - - optional AllocateScmBlockRequestProto allocateScmBlockRequest = 11; - optional DeleteScmKeyBlocksRequestProto deleteScmKeyBlocksRequest = 12; - optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; - optional SortDatanodesRequestProto sortDatanodesRequest = 14; -} - -message SCMBlockLocationResponse { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - optional bool success = 3 [default=true]; - - optional string message = 4; - - required Status status = 5; - - optional string leaderOMNodeId = 6; - - optional AllocateScmBlockResponseProto allocateScmBlockResponse = 11; - optional DeleteScmKeyBlocksResponseProto deleteScmKeyBlocksResponse = 12; - optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; - optional SortDatanodesResponseProto sortDatanodesResponse = 14; -} - -/** - User information which will be extracted during RPC context and used - during validating Acl. -*/ -message UserInfo { - optional string userName = 1; - optional string remoteAddress = 3; -} - -enum Status { - OK = 1; - FAILED_TO_LOAD_NODEPOOL = 2; - FAILED_TO_FIND_NODE_IN_POOL = 3; - FAILED_TO_FIND_HEALTHY_NODES = 4; - FAILED_TO_FIND_NODES_WITH_SPACE = 5; - FAILED_TO_FIND_SUITABLE_NODE = 6; - INVALID_CAPACITY = 7; - INVALID_BLOCK_SIZE = 8; - SAFE_MODE_EXCEPTION = 9; - FAILED_TO_LOAD_OPEN_CONTAINER = 10; - FAILED_TO_ALLOCATE_CONTAINER = 11; - FAILED_TO_CHANGE_CONTAINER_STATE = 12; - FAILED_TO_CHANGE_PIPELINE_STATE = 13; - CONTAINER_EXISTS = 14; - FAILED_TO_FIND_CONTAINER = 15; - FAILED_TO_FIND_CONTAINER_WITH_SPACE = 16; - BLOCK_EXISTS = 17; - FAILED_TO_FIND_BLOCK = 18; - IO_EXCEPTION = 19; - UNEXPECTED_CONTAINER_STATE = 20; - SCM_NOT_INITIALIZED = 21; - DUPLICATE_DATANODE = 22; - NO_SUCH_DATANODE = 23; - NO_REPLICA_FOUND = 24; - FAILED_TO_FIND_ACTIVE_PIPELINE = 25; - FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY = 26; - FAILED_TO_ALLOCATE_ENOUGH_BLOCKS = 27; - INTERNAL_ERROR = 29; -} - -/** -* Request send to SCM asking allocate block of specified size. -*/ -message AllocateScmBlockRequestProto { - required uint64 size = 1; - required uint32 numBlocks = 2; - required ReplicationType type = 3; - required hadoop.hdds.ReplicationFactor factor = 4; - required string owner = 5; - optional ExcludeListProto excludeList = 7; -} - -/** - * A delete key request sent by OM to SCM, it contains - * multiple number of keys (and their blocks). - */ -message DeleteScmKeyBlocksRequestProto { - repeated KeyBlocks keyBlocks = 1; -} - -/** - * A object key and all its associated blocks. - * We need to encapsulate object key name plus the blocks in this potocol - * because SCM needs to response OM with the keys it has deleted. - * If the response only contains blocks, it will be very expensive for - * OM to figure out what keys have been deleted. - */ -message KeyBlocks { - required string key = 1; - repeated BlockID blocks = 2; -} - -/** - * A delete key response from SCM to OM, it contains multiple child-results. - * Each child-result represents a key deletion result, only if all blocks of - * a key are successfully deleted, this key result is considered as succeed. - */ -message DeleteScmKeyBlocksResponseProto { - repeated DeleteKeyBlocksResultProto results = 1; -} - -/** - * A key deletion result. It contains all the block deletion results. 
- */ -message DeleteKeyBlocksResultProto { - required string objectKey = 1; - repeated DeleteScmBlockResult blockResults = 2; -} - -message DeleteScmBlockResult { - enum Result { - success = 1; - safeMode = 2; - errorNotFound = 3; - unknownFailure = 4; - } - required Result result = 1; - required BlockID blockID = 2; -} - -message AllocateBlockResponse { - optional ContainerBlockID containerBlockID = 1; - optional hadoop.hdds.Pipeline pipeline = 2; -} - -/** - * Reply from SCM indicating that the container. - */ -message AllocateScmBlockResponseProto { - repeated AllocateBlockResponse blocks = 3; -} - -/** - * Datanode sort request sent by OM to SCM, it contains - * multiple number of datanodes. - */ -message SortDatanodesRequestProto{ - required string client = 1; - repeated string nodeNetworkName = 2; -} - -message SortDatanodesResponseProto{ - repeated DatanodeDetailsProto node = 1; -} - -/** - * Protocol used from OzoneManager to StorageContainerManager. - * See request and response messages for details of the RPC calls. - */ -service ScmBlockLocationProtocolService { - - rpc send(SCMBlockLocationRequest) - returns (SCMBlockLocationResponse); -} diff --git a/hadoop-hdds/common/src/main/proto/Security.proto b/hadoop-hdds/common/src/main/proto/Security.proto deleted file mode 100644 index a3ce7392d0b..00000000000 --- a/hadoop-hdds/common/src/main/proto/Security.proto +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. 
- */ - -option java_package = "org.apache.hadoop.security.proto"; -option java_outer_classname = "SecurityProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Security token identifier - */ -message TokenProto { - required bytes identifier = 1; - required bytes password = 2; - required string kind = 3; - required string service = 4; -} - -message CredentialsKVProto { - required string alias = 1; - optional hadoop.common.TokenProto token = 2; - optional bytes secret = 3; -} - -message CredentialsProto { - repeated hadoop.common.CredentialsKVProto tokens = 1; - repeated hadoop.common.CredentialsKVProto secrets = 2; -} - -message GetDelegationTokenRequestProto { - required string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenResponseProto { - required uint64 newExpiryTime = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message CancelDelegationTokenResponseProto { // void response -} diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto deleted file mode 100644 index 8ea72b6cd17..00000000000 --- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "StorageContainerLocationProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds.container; - -import "hdds.proto"; - -/** - All functions are dispatched as Request/Response under Ozone. - if you add newe functions, please add them using same pattern. -*/ -message ScmContainerLocationRequest { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. 
- optional string traceID = 2; - - optional ContainerRequestProto containerRequest = 6; - optional GetContainerRequestProto getContainerRequest = 7; - optional GetContainerWithPipelineRequestProto getContainerWithPipelineRequest = 8; - optional SCMListContainerRequestProto scmListContainerRequest = 9; - optional SCMDeleteContainerRequestProto scmDeleteContainerRequest = 10; - optional NodeQueryRequestProto nodeQueryRequest = 11; - optional ObjectStageChangeRequestProto objectStageChangeRequest = 12; - optional PipelineRequestProto pipelineRequest = 13; - optional ListPipelineRequestProto listPipelineRequest = 14; - optional ActivatePipelineRequestProto activatePipelineRequest = 15; - optional DeactivatePipelineRequestProto deactivatePipelineRequest = 16; - optional ClosePipelineRequestProto closePipelineRequest = 17; - optional GetScmInfoRequestProto getScmInfoRequest = 18; - optional InSafeModeRequestProto inSafeModeRequest = 19; - optional ForceExitSafeModeRequestProto forceExitSafeModeRequest = 20; - optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21; - optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22; - optional ReplicationManagerStatusRequestProto seplicationManagerStatusRequest = 23; - -} - -message ScmContainerLocationResponse { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional ContainerResponseProto containerResponse = 6; - optional GetContainerResponseProto getContainerResponse = 7; - optional GetContainerWithPipelineResponseProto getContainerWithPipelineResponse = 8; - optional SCMListContainerResponseProto scmListContainerResponse = 9; - optional SCMDeleteContainerResponseProto scmDeleteContainerResponse = 10; - optional NodeQueryResponseProto nodeQueryResponse = 11; - optional ObjectStageChangeResponseProto objectStageChangeResponse = 12; - optional PipelineResponseProto pipelineResponse = 13; - optional ListPipelineResponseProto listPipelineResponse = 14; - optional ActivatePipelineResponseProto activatePipelineResponse = 15; - optional DeactivatePipelineResponseProto deactivatePipelineResponse = 16; - optional ClosePipelineResponseProto closePipelineResponse = 17; - optional GetScmInfoResponseProto getScmInfoResponse = 18; - optional InSafeModeResponseProto inSafeModeResponse = 19; - optional ForceExitSafeModeResponseProto forceExitSafeModeResponse = 20; - optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21; - optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22; - optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23; - enum Status { - OK = 1; - CONTAINER_ALREADY_EXISTS = 2; - CONTAINER_IS_MISSING = 3; - } -} - -enum Type { - - AllocateContainer = 1; - GetContainer = 2; - GetContainerWithPipeline = 3; - ListContainer = 4; - DeleteContainer = 5; - QueryNode = 6; - NotifyObjectStageChange = 7; - AllocatePipeline = 8; - ListPipelines = 9; - ActivatePipeline = 10; - DeactivatePipeline = 11; - ClosePipeline = 12; - GetScmInfo = 13; - InSafeMode = 14; - ForceExitSafeMode = 15; - StartReplicationManager = 16; - StopReplicationManager = 17; - GetReplicationManagerStatus = 18; -} - -/** -* Request send to SCM asking where the container should be created. -*/ -message ContainerRequestProto { - // Ozone only support replication of either 1 or 3. 
- required ReplicationFactor replicationFactor = 2; - required ReplicationType replicationType = 3; - required string owner = 4; - optional string traceID = 5; -} - -/** - * Reply from SCM indicating that the container. - */ -message ContainerResponseProto { - enum Error { - success = 1; - errorContainerAlreadyExists = 2; - errorContainerMissing = 3; - } - required Error errorCode = 1; - required ContainerWithPipeline containerWithPipeline = 2; - optional string errorMessage = 3; -} - -message GetContainerRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message GetContainerResponseProto { - required ContainerInfoProto containerInfo = 1; -} - -message GetContainerWithPipelineRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message GetContainerWithPipelineResponseProto { - required ContainerWithPipeline containerWithPipeline = 1; -} - -message SCMListContainerRequestProto { - required uint32 count = 1; - optional uint64 startContainerID = 2; - optional string traceID = 3; -} - -message SCMListContainerResponseProto { - repeated ContainerInfoProto containers = 1; -} - -message SCMDeleteContainerRequestProto { - required int64 containerID = 1; - optional string traceID = 2; - -} - -message SCMDeleteContainerResponseProto { - // Empty response -} - -message ObjectStageChangeRequestProto { - enum Type { - container = 1; - pipeline = 2; - } - // delete/copy operation may be added later - enum Op { - create = 1; - close = 2; - } - enum Stage { - begin = 1; - complete = 2; - } - required int64 id = 1; - required Type type = 2; - required Op op= 3; - required Stage stage = 4; - optional string traceID = 5; -} - -message ObjectStageChangeResponseProto { - // Empty response -} - -/* - NodeQueryRequest sends a request to SCM asking to send a list of nodes that - match the NodeState that we are requesting. -*/ -message NodeQueryRequestProto { - required NodeState state = 1; - required QueryScope scope = 2; - optional string poolName = 3; // if scope is pool, then pool name is needed. - optional string traceID = 4; -} - -message NodeQueryResponseProto { - repeated Node datanodes = 1; -} - -/** - Request to create a replication pipeline. - */ -message PipelineRequestProto { - required ReplicationType replicationType = 1; - required ReplicationFactor replicationFactor = 2; - - // if datanodes are specified then pipelines are created using those - // datanodes. 
- optional NodePool nodePool = 3; - optional string pipelineID = 4; - optional string traceID = 5; -} - -message PipelineResponseProto { - enum Error { - success = 1; - errorPipelineAlreadyExists = 2; - } - required Error errorCode = 1; - optional Pipeline pipeline = 2; - optional string errorMessage = 3; -} - -message ListPipelineRequestProto { - optional string traceID = 1; -} - -message ListPipelineResponseProto { - repeated Pipeline pipelines = 1; -} - -message ActivatePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; -} - -message ActivatePipelineResponseProto { -} - -message DeactivatePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; -} - -message DeactivatePipelineResponseProto { -} - -message ClosePipelineRequestProto { - required PipelineID pipelineID = 1; - optional string traceID = 2; - -} - -message ClosePipelineResponseProto { -} - -message InSafeModeRequestProto { - optional string traceID = 1; -} - -message InSafeModeResponseProto { - required bool inSafeMode = 1; -} - -message ForceExitSafeModeRequestProto { - optional string traceID = 1; -} - -message ForceExitSafeModeResponseProto { - required bool exitedSafeMode = 1; -} - -message StartReplicationManagerRequestProto { - optional string traceID = 1; -} - -message StartReplicationManagerResponseProto { -} - -message StopReplicationManagerRequestProto { - optional string traceID = 1; -} - -message StopReplicationManagerResponseProto { -} - -message ReplicationManagerStatusRequestProto { - optional string traceID = 1; -} - -message ReplicationManagerStatusResponseProto { - required bool isRunning = 1; -} - -/** - * Protocol used from an HDFS node to StorageContainerManager. See the request - * and response messages for details of the RPC calls. - */ -service StorageContainerLocationProtocolService { - rpc submitRequest (ScmContainerLocationRequest) returns (ScmContainerLocationResponse); - -} diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto deleted file mode 100644 index d2bb355ff8a..00000000000 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; -option java_outer_classname = "HddsProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdds; - -message DatanodeDetailsProto { - required string uuid = 1; // UUID assigned to the Datanode. 
- required string ipAddress = 2; // IP address - required string hostName = 3; // hostname - repeated Port ports = 4; - optional string certSerialId = 5; // Certificate serial id. - // network name, can be Ip address or host name, depends - optional string networkName = 6; - optional string networkLocation = 7; // Network topology location -} - -/** - Proto message encapsulating information required to uniquely identify a - OzoneManager. -*/ -message OzoneManagerDetailsProto { - required string uuid = 1; // UUID assigned to the OzoneManager. - required string ipAddress = 2; // IP address of OM. - required string hostName = 3; // Hostname of OM. - repeated Port ports = 4; -} - -message Port { - required string name = 1; - required uint32 value = 2; -} - -message PipelineID { - required string id = 1; -} - -enum PipelineState { - PIPELINE_ALLOCATED = 1; - PIPELINE_OPEN = 2; - PIPELINE_DORMANT = 3; - PIPELINE_CLOSED = 4; -} - -message Pipeline { - required string leaderID = 1; - repeated DatanodeDetailsProto members = 2; - // TODO: remove the state and leaderID from this class - optional PipelineState state = 3 [default = PIPELINE_ALLOCATED]; - optional ReplicationType type = 4 [default = STAND_ALONE]; - optional ReplicationFactor factor = 5 [default = ONE]; - required PipelineID id = 6; - repeated uint32 memberOrders = 7; -} - -message KeyValue { - required string key = 1; - optional string value = 2; -} - -/** - * Type of the node. - */ -enum NodeType { - OM = 1; // Ozone Manager - SCM = 2; // Storage Container Manager - DATANODE = 3; // DataNode -} - -// Should we rename NodeState to DatanodeState? -/** - * Enum that represents the Node State. This is used in calls to getNodeList - * and getNodeCount. - */ -enum NodeState { - HEALTHY = 1; - STALE = 2; - DEAD = 3; - DECOMMISSIONING = 4; - DECOMMISSIONED = 5; -} - -enum QueryScope { - CLUSTER = 1; - POOL = 2; -} - -message Node { - required DatanodeDetailsProto nodeID = 1; - repeated NodeState nodeStates = 2; -} - -message NodePool { - repeated Node nodes = 1; -} - -/** - * LifeCycleState for SCM object creation state machine: - * ->Allocated: allocated on SCM but clean has not started creating it yet. - * ->Creating: allocated and assigned to client to create but not ack-ed yet. - * ->Open: allocated on SCM and created on datanodes and ack-ed by a client. - * ->Close: container closed due to space all used or error? - * ->Timeout -> container failed to create on datanodes or ack-ed by client. - * ->Deleting(TBD) -> container will be deleted after timeout - * 1. ALLOCATE-ed containers on SCM can't serve key/block related operation - * until ACK-ed explicitly which changes the state to OPEN. - * 2. Only OPEN/CLOSED containers can serve key/block related operation. - * 3. ALLOCATE-ed containers that are not ACK-ed timely will be TIMEOUT and - * CLEANUP asynchronously. - */ - -enum LifeCycleState { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED = 3; - CLOSED = 4; - DELETING = 5; - DELETED = 6; // object is deleted. -} - -enum LifeCycleEvent { - FINALIZE = 1; - QUASI_CLOSE = 2; - CLOSE = 3; // !!Event after this has not been used yet. 
- FORCE_CLOSE = 4; - DELETE = 5; - CLEANUP = 6; -} - -message ContainerInfoProto { - required int64 containerID = 1; - required LifeCycleState state = 2; - optional PipelineID pipelineID = 3; - required uint64 usedBytes = 4; - required uint64 numberOfKeys = 5; - optional int64 stateEnterTime = 6; - required string owner = 7; - optional int64 deleteTransactionId = 8; - optional int64 sequenceId = 9; - required ReplicationFactor replicationFactor = 10; - required ReplicationType replicationType = 11; -} - -message ContainerWithPipeline { - required ContainerInfoProto containerInfo = 1; - required Pipeline pipeline = 2; -} - -message GetScmInfoRequestProto { - optional string traceID = 1; -} - -message GetScmInfoResponseProto { - required string clusterId = 1; - required string scmId = 2; -} - - -enum ReplicationType { - RATIS = 1; - STAND_ALONE = 2; - CHAINED = 3; -} - -enum ReplicationFactor { - ONE = 1; - THREE = 3; -} - -enum ScmOps { - allocateBlock = 1; - keyBlocksInfoList = 2; - getScmInfo = 3; - deleteBlock = 4; - createReplicationPipeline = 5; - allocateContainer = 6; - getContainer = 7; - getContainerWithPipeline = 8; - listContainer = 9; - deleteContainer = 10; - queryNode = 11; -} - -message ExcludeListProto { - repeated string datanodes = 1; - repeated int64 containerIds = 2; - repeated PipelineID pipelineIds = 3; -} - -/** - * Block ID that uniquely identify a block by SCM. - */ -message ContainerBlockID { - required int64 containerID = 1; - required int64 localID = 2; -} - - -/** - * Information for the Hdds block token. - * When adding further fields, make sure they are optional as they would - * otherwise not be backwards compatible. - */ -message BlockTokenSecretProto { - /** - * File access permissions mode. - */ - enum AccessModeProto { - READ = 1; - WRITE = 2; - COPY = 3; - DELETE = 4; - } - required string ownerId = 1; - required string blockId = 2; - required uint64 expiryDate = 3; - required string omCertSerialId = 4; - repeated AccessModeProto modes = 5; - required uint64 maxLength = 6; -} - -message BlockID { - required ContainerBlockID containerBlockID = 1; - optional uint64 blockCommitSequenceId = 2 [default = 0]; -} diff --git a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index f29efdab384..00000000000 --- a/hadoop-hdds/common/src/main/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
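
hdds.proto above defines the shared addressing types: a block is identified by a ContainerBlockID (a containerID plus a localID within that container), wrapped in a BlockID whose blockCommitSequenceId defaults to 0. Assuming the HddsProtos classes protoc generates from this file, constructing one looks roughly like this (the wrapper class is illustrative, only the generated builders are taken from the .proto):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class BlockIdExample {
  public static HddsProtos.BlockID blockId(long containerId, long localId) {
    // A block is addressed by (containerID, localID); the commit sequence id
    // is left at its declared default of 0.
    HddsProtos.ContainerBlockID cb = HddsProtos.ContainerBlockID.newBuilder()
        .setContainerID(containerId)
        .setLocalID(localId)
        .build();
    return HddsProtos.BlockID.newBuilder().setContainerBlockID(cb).build();
  }
}
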
- -org.apache.hadoop.hdds.conf.ConfigFileGenerator diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties deleted file mode 100644 index 2cbd817ebbf..00000000000 --- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties +++ /dev/null @@ -1,26 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -version=${declared.hdds.version} -revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} -url=${version-info.scm.uri} -srcChecksum=${version-info.source.md5} -protocVersion=${protobuf.version} diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.xml b/hadoop-hdds/common/src/main/resources/network-topology-default.xml deleted file mode 100644 index f86597cdeea..00000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-default.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - 1 - - - - 1 - Root - - - - - rack - - 1 - - InnerNode - - /default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml b/hadoop-hdds/common/src/main/resources/network-topology-default.yaml deleted file mode 100644 index 561869fb43b..00000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-default.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. 
-# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# Sub layer -# The sub layer property defines as a list which can reflect a node tree, though -# in schema template it always has only one child. -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml b/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml deleted file mode 100644 index b43ebd5d153..00000000000 --- a/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml +++ /dev/null @@ -1,74 +0,0 @@ - - - - - - 1 - - - - 1 - Root - - - - - rack - - 1 - - InnerNode - - /default-rack - - - ng - 1 - InnerNode - /default-nodegroup - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - - false - - diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml deleted file mode 100644 index b0a59fa209c..00000000000 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ /dev/null @@ -1,2504 +0,0 @@ - - - - - - - - - - - - - - - - ozone.container.cache.size - 1024 - PERFORMANCE, CONTAINER, STORAGE - The open container is cached on the data node side. We maintain - an LRU - cache for caching the recently used containers. This setting controls the - size of that cache. - - - - dfs.container.ipc - 9859 - OZONE, CONTAINER, MANAGEMENT - The ipc port number of container. - - - dfs.container.ipc.random.port - false - OZONE, DEBUG, CONTAINER - Allocates a random free port for ozone container. This is used - only while - running unit tests. - - - - dfs.container.chunk.write.sync - false - OZONE, CONTAINER, MANAGEMENT - Determines whether the chunk writes in the container happen as - sync I/0 or buffered I/O operation. - - - - dfs.container.ratis.statemachinedata.sync.timeout - 10s - OZONE, DEBUG, CONTAINER, RATIS - Timeout for StateMachine data writes by Ratis. - - - - dfs.container.ratis.statemachinedata.sync.retries - -1 - OZONE, DEBUG, CONTAINER, RATIS - Number of times the WriteStateMachineData op will be tried - before failing, if this value is -1, then this retries indefinitely. - - - - dfs.container.ratis.log.queue.num-elements - 1024 - OZONE, DEBUG, CONTAINER, RATIS - Limit for the number of operations in Ratis Log Worker. - - - - dfs.container.ratis.log.queue.byte-limit - 4GB - OZONE, DEBUG, CONTAINER, RATIS - Byte limit for Ratis Log Worker queue. - - - - dfs.container.ratis.log.appender.queue.num-elements - 1 - OZONE, DEBUG, CONTAINER, RATIS - Limit for number of append entries in ratis leader's - log appender queue. - - - - dfs.container.ratis.log.appender.queue.byte-limit - 32MB - OZONE, DEBUG, CONTAINER, RATIS - Byte limit for ratis leader's log appender queue. - - - - dfs.container.ratis.log.purge.gap - 1000000 - OZONE, DEBUG, CONTAINER, RATIS - Purge gap between the last purged commit index - and the current index, when the leader decides to purge its log. - - - - dfs.container.ratis.datanode.storage.dir - - OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS - This directory is used for storing Ratis metadata like logs. If - this is - not set then default metadata dirs is used. 
A warning will be logged if - this not set. Ideally, this should be mapped to a fast disk like an SSD. - - - - hdds.datanode.dir - - OZONE, CONTAINER, STORAGE, MANAGEMENT - Determines where on the local filesystem HDDS data will be - stored. Defaults to dfs.datanode.data.dir if not specified. - The directories should be tagged with corresponding storage types - ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default - storage type will be DISK if the directory does not have a storage type - tagged explicitly. - - - - hdds.datanode.volume.choosing.policy - - OZONE, CONTAINER, STORAGE, MANAGEMENT - - The class name of the policy for choosing volumes in the list of - directories. Defaults to - org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy. - This volume choosing policy selects volumes in a round-robin order. - - - - dfs.container.ratis.enabled - false - OZONE, MANAGEMENT, PIPELINE, RATIS - Ozone supports different kinds of replication pipelines. Ratis - is one of - the replication pipeline supported by ozone. - - - - dfs.container.ratis.ipc - 9858 - OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT - The ipc port number of container. - - - dfs.container.ratis.ipc.random.port - false - OZONE,DEBUG - Allocates a random free port for ozone ratis port for the - container. This - is used only while running unit tests. - - - - dfs.container.ratis.rpc.type - GRPC - OZONE, RATIS, MANAGEMENT - Ratis supports different kinds of transports like netty, GRPC, - Hadoop RPC - etc. This picks one of those for this cluster. - - - - dfs.ratis.snapshot.threshold - 10000 - OZONE, RATIS - Number of transactions after which a ratis snapshot should be - taken. - - - - dfs.container.ratis.statemachine.max.pending.apply-transactions - 10000 - OZONE, RATIS - Maximum number of pending apply transactions in a data - pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. - - - - dfs.container.ratis.num.write.chunk.threads - 60 - OZONE, RATIS, PERFORMANCE - Maximum number of threads in the thread pool that Ratis - will use for writing chunks (60 by default). - - - - dfs.container.ratis.leader.num.pending.requests - 4096 - OZONE, RATIS, PERFORMANCE - Maximum number of pending requests after which the leader - starts rejecting requests from client. - - - - dfs.container.ratis.replication.level - MAJORITY - OZONE, RATIS - Replication level to be used by datanode for submitting a - container command to ratis. Available replication levels are ALL and - MAJORTIY, MAJORITY is used as the default replication level. - - - - dfs.container.ratis.num.container.op.executors - 10 - OZONE, RATIS, PERFORMANCE - Number of executors that will be used by Ratis to execute - container ops.(10 by default). - - - - dfs.container.ratis.segment.size - 1MB - OZONE, RATIS, PERFORMANCE - The size of the raft segment used by Apache Ratis on datanodes. - (1 MB by default) - - - - dfs.container.ratis.segment.preallocated.size - 16KB - OZONE, RATIS, PERFORMANCE - The size of the buffer which is preallocated for raft segment - used by Apache Ratis on datanodes.(16 KB by default) - - - - dfs.container.ratis.statemachine.cache.expiry.interval - 10s - OZONE, RATIS, PERFORMANCE - The interval till which the stateMachine data in ratis - will be cached inside the ContainerStateMachine. 
- - - - dfs.ratis.client.request.timeout.duration - 3s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis client request.It should be - set greater than leader election timeout in Ratis. - - - - dfs.ratis.client.request.max.retries - 180 - OZONE, RATIS, MANAGEMENT - Number of retries for ratis client request. - - - dfs.ratis.client.request.retry.interval - 1000ms - OZONE, RATIS, MANAGEMENT - Interval between successive retries for a ratis client request. - - - - dfs.ratis.server.retry-cache.timeout.duration - 600000ms - OZONE, RATIS, MANAGEMENT - Retry Cache entry timeout for ratis server. - - - dfs.ratis.server.request.timeout.duration - 3s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis server request. - - - dfs.ratis.leader.election.minimum.timeout.duration - 5s - OZONE, RATIS, MANAGEMENT - The minimum timeout duration for ratis leader election. - Default is 5s. - - - - dfs.ratis.server.failure.duration - 120s - OZONE, RATIS, MANAGEMENT - The timeout duration for ratis server failure detection, - once the threshold has reached, the ratis state machine will be informed - about the failure in the ratis ring - - - - hdds.node.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send node report. Each - datanode periodically send node report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - hdds.container.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send container report. Each - datanode periodically send container report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - hdds.command.status.report.interval - 60000ms - OZONE, CONTAINER, MANAGEMENT - Time interval of the datanode to send status of command - execution. Each datanode periodically the execution status of commands - received from SCM to SCM. Unit could be defined with postfix - (ns,ms,s,m,h,d) - - - hdds.pipeline.report.interval - 60000ms - OZONE, PIPELINE, MANAGEMENT - Time interval of the datanode to send pipeline report. Each - datanode periodically send pipeline report to SCM. Unit could be - defined with postfix (ns,ms,s,m,h,d) - - - - - hdds.prometheus.endpoint.enabled - true - OZONE, MANAGEMENT - Enable prometheus compatible metric page on the HTTP - servers. - - - - - hdds.profiler.endpoint.enabled - false - OZONE, MANAGEMENT - Enable /prof java profiler servlet page on HTTP server. - - - - - - ozone.administrators - - OZONE, SECURITY - Ozone administrator users delimited by the comma. - If not set, only the user who launches an ozone service will be the admin - user. This property must be set if ozone services are started by different - users. Otherwise, the RPC layer will reject calls from other servers which - are started by users not in the list. - - - - ozone.block.deleting.container.limit.per.interval - 10 - OZONE, PERFORMANCE, SCM - A maximum number of containers to be scanned by block deleting - service per - time interval. The block deleting service spawns a thread to handle block - deletions in a container. This property is used to throttle the number of - threads spawned for block deletions. - - - - ozone.block.deleting.limit.per.task - 1000 - OZONE, PERFORMANCE, SCM - A maximum number of blocks to be deleted by block deleting - service per - time interval. This property is used to throttle the actual number of - block deletions on a data node per container. 
- - - - ozone.block.deleting.service.interval - 1m - OZONE, PERFORMANCE, SCM - Time interval of the block deleting service. - The block deleting service runs on each datanode periodically and - deletes blocks queued for deletion. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - - ozone.block.deleting.service.timeout - 300000ms - OZONE, PERFORMANCE, SCM - A timeout value of block deletion service. If this is set - greater than 0, - the service will stop waiting for the block deleting completion after this - time. If timeout happens to a large proportion of block deletion, this - needs to be increased with ozone.block.deleting.limit.per.task. This - setting supports multiple time unit suffixes as described in - dfs.heartbeat.interval. If no suffix is specified, then milliseconds is - assumed. - - - - ozone.UnsafeByteOperations.enabled - true - OZONE, PERFORMANCE, CLIENT - It specifies whether to use unsafe or safe buffer to byteString - copy. - - - - ozone.client.connection.timeout - 5000ms - OZONE, PERFORMANCE, CLIENT - Connection timeout for Ozone client in milliseconds. - - - - ozone.client.stream.buffer.flush.size - 64MB - OZONE, CLIENT - Size which determines at what buffer position , a partial - flush will be initiated during write. It should be ideally a multiple - of chunkSize. - - - - ozone.client.stream.buffer.max.size - 128MB - OZONE, CLIENT - Size which determines at what buffer position, - write call be blocked till acknowledgement of the first partial flush - happens by all servers. - - - - ozone.client.watch.request.timeout - 30s - OZONE, CLIENT - Timeout for the watch API in Ratis client to acknowledge - a particular request getting replayed to all servers. - - - - ozone.client.max.retries - 100 - OZONE, CLIENT - Maximum number of retries by Ozone Client on encountering - exception while writing a key. - - - - ozone.client.retry.interval - 0ms - OZONE, CLIENT - Indicates the time duration a client will wait before - retrying a write key request on encountering an exception. By default - there is no wait. - - - - ozone.client.socket.timeout - 5000ms - OZONE, CLIENT - Socket timeout for Ozone client. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - ozone.enabled - false - OZONE, REQUIRED - - Status of the Ozone Object Storage service is enabled. - Set to true to enable Ozone. - Set to false to disable Ozone. - Unless this value is set to true, Ozone services will not be started in - the cluster. - - Please note: By default ozone is disabled on a hadoop cluster. - - - - ozone.key.deleting.limit.per.task - 1000 - OM, PERFORMANCE - - A maximum number of keys to be scanned by key deleting service - per time interval in OM. Those keys are sent to delete metadata and - generate transactions in SCM for next async deletion between SCM - and DataNode. - - - - ozone.om.service.ids - - OM, HA - - Comma-separated list of OM service Ids. - - If not set, the default value of "om-service-value" is assigned as the - OM service ID. - - - - ozone.om.nodes.EXAMPLEOMSERVICEID - - OM, HA - - Comma-separated list of OM node Ids for a given OM service ID (eg. - EXAMPLEOMSERVICEID). The OM service ID should be the value (one of the - values if there are multiple) set for the parameter ozone.om.service.ids. - - Unique identifiers for each OM Node, delimited by commas. This will be - used by OzoneManagers in HA setup to determine all the OzoneManagers - belonging to the same OMservice in the cluster. 
For example, if you - used “omService1” as the OM service ID previously, and you wanted to - use “om1”, “om2” and "om3" as the individual IDs of the OzoneManagers, - you would configure a property ozone.om.nodes.omService1, and its value - "om1,om2,om3". - - - - ozone.om.node.id - - OM, HA - - The ID of this OM node. If the OM node ID is not configured it - is determined automatically by matching the local node's address - with the configured address. - - If node ID is not deterministic from the configuration, then it is set - to the OmId from the OM version file. - - - - ozone.om.address - 0.0.0.0:9862 - OM, REQUIRED - - The address of the Ozone OM service. This allows clients to discover - the address of the OM. - - - - ozone.om.handler.count.key - 20 - OM, PERFORMANCE - - The number of RPC handler threads for OM service endpoints. - - - - ozone.om.http-address - 0.0.0.0:9874 - OM, MANAGEMENT - - The address and the base port where the OM web UI will listen on. - - If the port is 0, then the server will start on a free port. However, it - is best to specify a well-known port, so it is easy to connect and see - the OM management UI. - - - - ozone.om.http-bind-host - 0.0.0.0 - OM, MANAGEMENT - - The actual address the OM web server will bind to. If this optional - the address is set, it overrides only the hostname portion of - ozone.om.http-address. - - - - ozone.om.http.enabled - true - OM, MANAGEMENT - - Property to enable or disable OM web user interface. - - - - ozone.om.https-address - 0.0.0.0:9875 - OM, MANAGEMENT, SECURITY - - The address and the base port where the OM web UI will listen - on using HTTPS. - If the port is 0 then the server will start on a free port. - - - - ozone.om.https-bind-host - 0.0.0.0 - OM, MANAGEMENT, SECURITY - - The actual address the OM web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.om.https-address. - - - - ozone.om.keytab.file - - OM, SECURITY - - The keytab file for Kerberos authentication in OM. - - - - ozone.om.db.cache.size.mb - 128 - OM, PERFORMANCE - - The size of OM DB cache in MB that used for caching files. - This value is set to an abnormally low value in the default configuration. - That is to make unit testing easy. Generally, this value should be set to - something like 16GB or more, if you intend to use Ozone at scale. - - A large value for this key allows a proportionally larger amount of OM - metadata to be cached in memory. This makes OM operations faster. - - - - ozone.om.user.max.volume - 1024 - OM, MANAGEMENT - - The maximum number of volumes a user can have on a cluster.Increasing or - decreasing this number has no real impact on ozone cluster. This is - defined only for operational purposes. Only an administrator can create a - volume, once a volume is created there are no restrictions on the number - of buckets or keys inside each bucket a user can create. - - - - ozone.om.db.dirs - - OZONE, OM, STORAGE, PERFORMANCE - - Directory where the OzoneManager stores its metadata. This should - be specified as a single directory. If the directory does not - exist then the OM will attempt to create it. - - If undefined, then the OM will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.metadata.dirs - - OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED - - This setting is the fallback location for SCM, OM, Recon and DataNodes - to store their metadata. 
This setting may be used only in test/PoC - clusters to simplify configuration. - - For production clusters or any time you care about performance, it is - recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. - - - - ozone.metastore.impl - RocksDB - OZONE, OM, SCM, CONTAINER, STORAGE - - Ozone metadata store implementation. Ozone metadata are well - distributed to multiple services such as ozoneManager, scm. They are stored in - some local key-value databases. This property determines which database - library to use. Supported value is either LevelDB or RocksDB. - - - - - ozone.metastore.rocksdb.statistics - OFF - OZONE, OM, SCM, STORAGE, PERFORMANCE - - The statistics level of the rocksdb store. If you use any value from - org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb - statistics will be exposed over JMX bean with the choosed setting. Set - it to OFF to not initialize rocksdb statistics at all. Please note that - collection of statistics could have 5-10% performance penalty. - Check the rocksdb documentation for more details. - - - - ozone.scm.db.dirs - - OZONE, SCM, STORAGE, PERFORMANCE - - Directory where the StorageContainerManager stores its metadata. - This should be specified as a single directory. If the directory - does not exist then the SCM will attempt to create it. - - If undefined, then the SCM will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.scm.block.client.address - - OZONE, SCM - The address of the Ozone SCM block client service. If not - defined value of ozone.scm.client.address is used. - - - - ozone.scm.block.client.bind.host - 0.0.0.0 - OZONE, SCM - - The hostname or IP address used by the SCM block client - endpoint to bind. - - - - ozone.scm.block.client.port - 9863 - OZONE, SCM - - The port number of the Ozone SCM block client service. - - - - ozone.scm.block.deletion.max.retry - 4096 - OZONE, SCM - - SCM wraps up many blocks in a deletion transaction and sends that to data - node for physical deletion periodically. This property determines how many - times SCM is going to retry sending a deletion operation to the data node. - - - - ozone.scm.block.size - 256MB - OZONE, SCM - - The default size of a scm block. This is maps to the default - Ozone block size. - - - - ozone.scm.chunk.size - 16MB - OZONE, SCM, CONTAINER, PERFORMANCE - - The chunk size for reading/writing chunk operations in bytes. - - The chunk size defaults to 8MB. If the value configured is more than the - maximum size (16MB), it will be reset to the maximum size. This maps to - the network packet sizes and file write operations in the client to - datanode protocol. - - - - ozone.scm.client.address - - OZONE, SCM, REQUIRED - - The address of the Ozone SCM client service. This is a required setting. - - It is a string in the host:port format. The port number is optional - and defaults to 9860. - - - - ozone.scm.client.bind.host - 0.0.0.0 - OZONE, SCM, MANAGEMENT - The hostname or IP address used by the SCM client endpoint to - bind. - This setting is used by the SCM only and never used by clients. - - The setting can be useful in multi-homed setups to restrict the - availability of the SCM client service to a specific interface. - - The default is appropriate for most clusters. - - - - ozone.scm.client.port - 9860 - OZONE, SCM, MANAGEMENT - The port number of the Ozone SCM client service. 
- - - ozone.scm.keyvalue.container.deletion-choosing.policy - - org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy - - OZONE, MANAGEMENT - - The policy used for choosing desired keyvalue containers for block deletion. - Datanode selects some containers to process block deletion - in a certain interval defined by ozone.block.deleting.service.interval. - The number of containers to process in each interval is defined - by ozone.block.deleting.container.limit.per.interval. This property is - used to configure the policy applied while selecting containers. - There are two policies supporting now: - RandomContainerDeletionChoosingPolicy and - TopNOrderedContainerDeletionChoosingPolicy. - org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy - implements a simply random policy that to return a random list of - containers. - org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy - implements a policy that choosing top count number of containers in a - pending-deletion-blocks's num - based descending order. - - - - ozone.scm.container.placement.impl - - org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom - - OZONE, MANAGEMENT - - The full name of class which implements org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy. - The class decides which datanode will be used to host the container replica. If not set, - org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom will be used as default value. - - - - ozone.scm.pipeline.owner.container.count - 3 - OZONE, SCM, PIPELINE - Number of containers per owner in a pipeline. - - - - ozone.scm.container.size - 5GB - OZONE, PERFORMANCE, MANAGEMENT - - Default container size used by Ozone. - There are two considerations while picking this number. The speed at which - a container can be replicated, determined by the network speed and the - metadata that each container generates. So selecting a large number - creates less SCM metadata, but recovery time will be more. 5GB is a number - that maps to quick replication times in gigabit networks, but still - balances the amount of metadata. - - - - ozone.scm.datanode.address - - OZONE, MANAGEMENT - - The address of the Ozone SCM service used for internal - communication between the DataNodes and the SCM. - - It is a string in the host:port format. The port number is optional - and defaults to 9861. - - This setting is optional. If unspecified then the hostname portion - is picked from the ozone.scm.client.address setting and the - default service port of 9861 is chosen. - - - - ozone.scm.datanode.bind.host - - OZONE, MANAGEMENT - - The hostname or IP address used by the SCM service endpoint to - bind. - - - - ozone.scm.datanode.id.dir - - OZONE, MANAGEMENT - The path that datanodes will use to store the datanode ID. - If this value is not set, then datanode ID is created under the - metadata directory. - - - - ozone.scm.datanode.port - 9861 - OZONE, MANAGEMENT - - The port number of the Ozone SCM service. - - - - ozone.scm.db.cache.size.mb - 128 - OZONE, PERFORMANCE - SCM keeps track of the Containers in the cluster. This DB holds - the container metadata. This value is set to a small value to make the - unit - testing runs smooth. In production, we recommend a value of 16GB or - higher. This allows SCM to avoid disk I/O's while looking up the container - location. 
- - - - ozone.scm.dead.node.interval - 10m - OZONE, MANAGEMENT - - The interval between heartbeats before a node is tagged as dead. - - - - ozone.scm.handler.count.key - 10 - OZONE, MANAGEMENT, PERFORMANCE - - The number of RPC handler threads for each SCM service - endpoint. - - The default is appropriate for small clusters (tens of nodes). - - Set a value that is appropriate for the cluster size. Generally, HDFS - recommends RPC handler count is set to 20 * log2(Cluster Size) with an - upper limit of 200. However, SCM will not have the same amount of - traffic as Namenode, so a value much smaller than that will work well too. - - - - hdds.heartbeat.interval - 30s - OZONE, MANAGEMENT - - The heartbeat interval from a data node to SCM. Yes, - it is not three but 30, since most data nodes will heart beating via Ratis - heartbeats. If a client is not able to talk to a data node, it will notify - OM/SCM eventually. So a 30 second HB seems to work. This assumes that - replication strategy used is Ratis if not, this value should be set to - something smaller like 3 seconds. - ozone.scm.pipeline.close.timeout should also be adjusted accordingly, - if the default value for this config is not used. - - - - ozone.scm.heartbeat.log.warn.interval.count - 10 - OZONE, MANAGEMENT - - Defines how frequently we will log the missing of a heartbeat to SCM. - For example in the default case, we will write a warning message for each - ten consecutive heartbeats that we miss to SCM. This helps in reducing - clutter in a data node log, but trade off is that logs will have less of - this statement. - - - - ozone.scm.heartbeat.rpc-timeout - 1s - OZONE, MANAGEMENT - - Timeout value for the RPC from Datanode to SCM. - - - - ozone.scm.heartbeat.thread.interval - 3s - OZONE, MANAGEMENT - - When a heartbeat from the data node arrives on SCM, It is queued for - processing with the time stamp of when the heartbeat arrived. There is a - heartbeat processing thread inside SCM that runs at a specified interval. - This value controls how frequently this thread is run. - - There are some assumptions build into SCM such as this value should allow - the heartbeat processing thread to run at least three times more - frequently than heartbeats and at least five times more than stale node - detection time. If you specify a wrong value, SCM will gracefully refuse - to run. For more info look at the node manager tests in SCM. - - In short, you don't need to change this. - - - - ozone.scm.http-address - 0.0.0.0:9876 - OZONE, MANAGEMENT - - The address and the base port where the SCM web ui will listen on. - - If the port is 0 then the server will start on a free port. - - - - ozone.scm.http-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the SCM web server will bind to. If this - optional address is set, it overrides only the hostname portion of - ozone.scm.http-address. - - - - ozone.scm.http.enabled - true - OZONE, MANAGEMENT - - Property to enable or disable SCM web ui. - - - - ozone.scm.https-address - 0.0.0.0:9877 - OZONE, MANAGEMENT - - The address and the base port where the SCM web UI will listen - on using HTTPS. - - If the port is 0 then the server will start on a free port. - - - - ozone.scm.https-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the SCM web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.scm.https-address. 
- - - - ozone.scm.names - - OZONE, REQUIRED - - The value of this property is a set of DNS | DNS:PORT | IP - Address | IP:PORT. Written as a comma separated string. e.g. scm1, - scm2:8020, 7.7.7.7:7777. - This property allows datanodes to discover where SCM is, so that - datanodes can send heartbeat to SCM. - - - - ozone.scm.stale.node.interval - 5m - OZONE, MANAGEMENT - - The interval for stale node flagging. Please - see ozone.scm.heartbeat.thread.interval before changing this value. - - - - ozone.trace.enabled - false - OZONE, DEBUG - - Setting this flag to true dumps the HTTP request/ response in - the logs. Very useful when debugging REST protocol. - - - - - ozone.scm.container.creation.lease.timeout - 60s - OZONE, SCM - - Container creation timeout in milliseconds to be used by SCM. When - BEGIN_CREATE event happens the container is moved from ALLOCATED to - CREATING state, SCM will now wait for the configured amount of time - to get COMPLETE_CREATE event if it doesn't receive it will move the - container to DELETING. - - - - - ozone.key.preallocation.max.blocks - 64 - OZONE, OM, PERFORMANCE - - While allocating blocks from OM, this configuration limits the maximum - number of blocks being allocated. This configuration ensures that the - allocated block response do not exceed rpc payload limit. If client needs - more space for the write, separate block allocation requests will be made. - - - - - ozone.client.list.cache - 1000 - OZONE, PERFORMANCE - - Configuration property to configure the cache size of client list calls. - - - - - ozone.replication - 3 - OZONE, CLIENT - - Default replication value. The actual number of replications can be - specified when writing the key. The default is used if replication - is not specified. Supported values: 1 and 3. - - - - - ozone.replication.type - RATIS - OZONE, CLIENT - - Default replication type to be used while writing key into ozone. The - value can be specified when writing the key, default is used when - nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED. - - - - hdds.container.close.threshold - 0.9f - OZONE, DATANODE - - This determines the threshold to be used for closing a container. - When the container used percentage reaches this threshold, - the container will be closed. Value should be a positive, non-zero - percentage in float notation (X.Yf), with 1.0f meaning 100%. - - - - ozone.rest.client.http.connection.max - 100 - OZONE, CLIENT - - This defines the overall connection limit for the connection pool used in - RestClient. - - - - ozone.rest.client.http.connection.per-route.max - 20 - OZONE, CLIENT - - This defines the connection limit per one HTTP route/host. Total max - connection is limited by ozone.rest.client.http.connection.max property. - - - - - ozone.open.key.cleanup.service.interval.seconds - 86400 - OZONE, OM, PERFORMANCE - - A background job periodically checks open key entries and delete the expired ones. This entry controls the - interval of this cleanup check. - - - - - ozone.open.key.expire.threshold - 86400 - OZONE, OM, PERFORMANCE - - Controls how long an open key operation is considered active. Specifically, if a key - has been open longer than the value of this config entry, that open key is considered as - expired (e.g. due to client crash). Default to 24 hours. 
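The hdds.heartbeat.interval, ozone.scm.heartbeat.thread.interval, ozone.scm.stale.node.interval and ozone.scm.dead.node.interval entries above describe a sizing rule: the SCM heartbeat-processing thread should run at least three times more frequently than datanode heartbeats and at least five times more frequently than stale-node detection, and stale detection must happen before dead detection. A rough sketch of that sanity check, assuming the OzoneConfiguration class removed elsewhere in this patch; the class and method names below are made up for illustration and are not the validation code SCM actually ran.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class HeartbeatIntervalSanityCheck {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Fallback values mirror the ozone-default.xml defaults listed above.
    long heartbeatMs = conf.getTimeDuration(
        "hdds.heartbeat.interval", 30_000, TimeUnit.MILLISECONDS);
    long processingMs = conf.getTimeDuration(
        "ozone.scm.heartbeat.thread.interval", 3_000, TimeUnit.MILLISECONDS);
    long staleMs = conf.getTimeDuration(
        "ozone.scm.stale.node.interval", 5 * 60_000, TimeUnit.MILLISECONDS);
    long deadMs = conf.getTimeDuration(
        "ozone.scm.dead.node.interval", 10 * 60_000, TimeUnit.MILLISECONDS);

    // Processing thread should run >= 3x as often as heartbeats arrive and
    // >= 5x as often as stale-node detection fires.
    if (processingMs * 3 > heartbeatMs || processingMs * 5 > staleMs) {
      System.err.println("ozone.scm.heartbeat.thread.interval is too coarse"
          + " for the configured heartbeat/stale-node intervals");
    }
    if (staleMs >= deadMs) {
      System.err.println("stale-node interval must be shorter than the"
          + " dead-node interval");
    }
  }
}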
- - - - - hadoop.tags.custom - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, - CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,RECON - - - - ozone.tags.system - OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM, - CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY,TOKEN,TLS,RECON - - - - - hdds.rest.rest-csrf.enabled - false - - If true, then enables Object Store REST server protection against - cross-site request forgery (CSRF). - - - - - hdds.rest.http-address - 0.0.0.0:9880 - The http address of Object Store REST server inside the - datanode. - - - - - hdds.rest.netty.high.watermark - 65535 - - High watermark configuration to Netty for Object Store REST server. - - - - - hdds.rest.netty.low.watermark - 32768 - - Low watermark configuration to Netty for Object Store REST server. - - - - - hdds.datanode.plugins - - - Comma-separated list of HDDS datanode plug-ins to be activated when - HDDS service starts as part of datanode. - - - - hdds.datanode.storage.utilization.warning.threshold - 0.75 - OZONE, SCM, MANAGEMENT - - If a datanode overall storage utilization exceeds more than this - value, a warning will be logged while processing the nodeReport in SCM. - - - - - hdds.datanode.storage.utilization.critical.threshold - 0.95 - OZONE, SCM, MANAGEMENT - - If a datanode overall storage utilization exceeds more than this - value, the datanode will be marked out of space. - - - - - hdds.command.status.report.interval - 30s - OZONE, DATANODE, MANAGEMENT - Time interval of the datanode to send status of commands - executed since last report. Unit could be defined with - postfix (ns,ms,s,m,h,d) - - - ozone.scm.pipeline.destroy.timeout - 66s - OZONE, SCM, PIPELINE - - Once a pipeline is closed, SCM should wait for the above configured time - before destroying a pipeline. - - - - ozone.scm.pipeline.creation.interval - 120s - OZONE, SCM, PIPELINE - - SCM schedules a fixed interval job using the configured interval to - create pipelines. - - - - - hdds.scm.safemode.threshold.pct - 0.99 - HDDS,SCM,OPERATION - % of containers which should have at least one - reported replica before SCM comes out of safe mode. - - - - - hdds.scm.wait.time.after.safemode.exit - 5m - HDDS,SCM,OPERATION - After exiting safemode, wait for configured interval of - time to start replication monitor and cleanup activities of unhealthy - pipelines. - - - - - hdds.scm.safemode.enabled - true - HDDS,SCM,OPERATION - Boolean value to enable or disable SCM safe mode. - - - - - hdds.scm.safemode.min.datanode - 1 - HDDS,SCM,OPERATION - Minimum DataNodes which should be registered to get SCM out of - safe mode. - - - - - hdds.scm.safemode.pipeline-availability.check - false - HDDS,SCM,OPERATION - - Boolean value to enable pipeline availability check during SCM safe mode. - - - - - hdds.scm.safemode.healthy.pipelie.pct - 0.10 - HDDS,SCM,OPERATION - - Percentage of healthy pipelines, where all 3 datanodes are reported in the - pipeline. - - - - - hdds.scm.safemode.atleast.one.node.reported.pipeline.pct - 0.90 - HDDS,SCM,OPERATION - - Percentage of pipelines, where at least one datanode is reported in the - pipeline. - - - - - hdds.container.scrub.enabled - false - DATANODE - - Boolean value to enable data and metadata scrubbing in the containers - running on each datanode. - - - - - hdds.container.action.max.limit - 20 - DATANODE - - Maximum number of Container Actions sent by the datanode to SCM in a - single heartbeat. 
- - - - - hdds.pipeline.action.max.limit - 20 - DATANODE - - Maximum number of Pipeline Actions sent by the datanode to SCM in a - single heartbeat. - - - - hdds.scm.watcher.timeout - 10m - OZONE, SCM, MANAGEMENT - - Timeout for the watchers of the HDDS SCM CommandWatchers. After this - duration the Copy/Delete container commands will be sent again to the - datanode unless the datanode confirms the completion. - - - - - hdds.db.profile - DISK - OZONE, OM, PERFORMANCE - This property allows user to pick a configuration - that tunes the RocksDB settings for the hardware it is running - on. Right now, we have SSD and DISK as profile options. - - - - hdds.datanode.replication.work.dir - DATANODE - Temporary which is used during the container replication - betweeen datanodes. Should have enough space to store multiple container - (in compressed format), but doesn't require fast io access such as SSD. - - - - - hdds.lock.max.concurrency - 100 - HDDS - Locks in HDDS/Ozone uses object pool to maintain active locks - in the system, this property defines the max limit for the locks that - will be maintained in the pool. - - - - - ozone.s3g.authentication.kerberos.principal - - OZONE, S3GATEWAY - The server principal used by Ozone S3Gateway server. This is - typically set to - HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix - HTTP/ by convention. - - - - ozone.s3g.domain.name - - OZONE, S3GATEWAY - List of Ozone S3Gateway domain names. If multiple - domain names to be provided, they should be a "," seperated. - This parameter is only required when virtual host style pattern is - followed. - - - - ozone.s3g.http-address - 0.0.0.0:9878 - OZONE, S3GATEWAY - The address and the base port where the Ozone S3Gateway - Server will - listen on. - - - - ozone.s3g.http-bind-host - 0.0.0.0 - OZONE, S3GATEWAY - The actual address the HTTP server will bind to. If this optional address - is set, it overrides only the hostname portion of ozone.s3g.http-address. - This is useful for making the Ozone S3Gateway HTTP server listen on all - interfaces by setting it to 0.0.0.0. - - - - ozone.s3g.http.enabled - true - OZONE, S3GATEWAY - The boolean which enables the Ozone S3Gateway server - . - - - - ozone.s3g.https-address - - OZONE, S3GATEWAY - Ozone S3Gateway serverHTTPS server address and port - . - - - - ozone.s3g.https-bind-host - - OZONE, S3GATEWAY - The actual address the HTTPS server will bind to. If this optional address - is set, it overrides only the hostname portion of ozone.s3g.https-address. - This is useful for making the Ozone S3Gateway HTTPS server listen on all - interfaces by setting it to 0.0.0.0. - - - - ozone.s3g.keytab.file - - OZONE, S3GATEWAY - The keytab file used by the S3Gateway server to login as its - service principal. - - - - ozone.om.save.metrics.interval - 5m - OZONE, OM - Time interval used to store the omMetrics in to a - file. Background thread periodically stores the OM metrics in to a - file. Unit could be defined with postfix (ns,ms,s,m,h,d) - - - - ozone.security.enabled - false - OZONE, SECURITY - True if security is enabled for ozone. When this property is - true, hadoop.security.authentication should be Kerberos. - - - - - ozone.client.checksum.type - CRC32 - OZONE, CLIENT, MANAGEMENT - The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] determines - which algorithm would be used to compute checksum for chunk data. - Default checksum type is SHA256. 
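The ozone.client.checksum.type entry above, together with ozone.client.bytes.per.checksum defined immediately below it, means the client computes one checksum per bytes-per-checksum window of chunk data and stores the checksums sequentially. A small JDK-only illustration of that windowing for the CRC32 type; the class and method names are invented for the example and this is not the deleted Ozone client checksum code, which may differ in detail.

import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

public class ChecksumWindowSketch {
  // One CRC32 per bytesPerChecksum window, appended in order.
  static List<Long> crc32PerWindow(byte[] data, int bytesPerChecksum) {
    List<Long> checksums = new ArrayList<>();
    for (int off = 0; off < data.length; off += bytesPerChecksum) {
      int len = Math.min(bytesPerChecksum, data.length - off);
      CRC32 crc = new CRC32();
      crc.update(data, off, len);
      checksums.add(crc.getValue());
    }
    return checksums;
  }

  public static void main(String[] args) {
    byte[] chunk = new byte[3 * 1024 * 1024];           // a 3 MB chunk
    // The documented default window size is 1MB; the minimum is 256KB.
    System.out.println(crc32PerWindow(chunk, 1024 * 1024).size()); // prints 3
  }
}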
- - - - - ozone.client.bytes.per.checksum - 1MB - OZONE, CLIENT, MANAGEMENT - Checksum will be computed for every bytes per checksum number - of bytes and stored sequentially. The minimum value for this config is - 256KB. - - - - - ozone.client.verify.checksum - true - OZONE, CLIENT, MANAGEMENT - - Ozone client to verify checksum of the checksum blocksize data. - - - - - ozone.om.lock.fair - false - If this is true, the Ozone Manager lock will be used in Fair - mode, which will schedule threads in the order received/queued. If this is - false, uses non-fair ordering. See - java.util.concurrent.locks.ReentrantReadWriteLock - for more information on fair/non-fair locks. - - - - - ozone.om.ratis.enable - false - OZONE, OM, RATIS, MANAGEMENT - Property to enable or disable Ratis server on OM. - Please note - this is a temporary property to disable OM Ratis server. - - - - - ozone.om.ratis.port - 9872 - OZONE, OM, RATIS - - The port number of the OzoneManager's Ratis server. - - - - - ozone.om.ratis.rpc.type - GRPC - OZONE, OM, RATIS, MANAGEMENT - Ratis supports different kinds of transports like netty, GRPC, - Hadoop RPC etc. This picks one of those for this cluster. - - - - - ozone.om.ratis.storage.dir - - OZONE, OM, STORAGE, MANAGEMENT, RATIS - This directory is used for storing OM's Ratis metadata like - logs. If this is not set then default metadata dirs is used. A warning - will be logged if this not set. Ideally, this should be mapped to a - fast disk like an SSD. - If undefined, OM ratis storage dir will fallback to ozone.metadata.dirs. - This fallback approach is not recommended for production environments. - - - - - ozone.om.ratis.segment.size - 16KB - OZONE, OM, RATIS, PERFORMANCE - The size of the raft segment used by Apache Ratis on OM. - (16 KB by default) - - - - - ozone.om.ratis.segment.preallocated.size - 16KB - OZONE, OM, RATIS, PERFORMANCE - The size of the buffer which is preallocated for raft segment - used by Apache Ratis on OM.(16 KB by default) - - - - - ozone.om.ratis.log.appender.queue.num-elements - 1024 - OZONE, DEBUG, OM, RATIS - Number of operation pending with Raft's Log Worker. - - - - ozone.om.ratis.log.appender.queue.byte-limit - 32MB - OZONE, DEBUG, OM, RATIS - Byte limit for Raft's Log Worker queue. - - - - ozone.om.ratis.log.purge.gap - 1000000 - OZONE, OM, RATIS - The minimum gap between log indices for Raft server to purge - its log segments after taking snapshot. - - - - - ozone.om.ratis.snapshot.auto.trigger.threshold - 400000 - OZONE, DEBUG, OM, RATIS - The log index threshold after ratis will auto trigger - snapshot on the OM state machine. - - - - - ozone.om.ratis.server.request.timeout - 3s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for OM's ratis server request . - - - - ozone.om.ratis.server.retry.cache.timeout - 600000ms - OZONE, OM, RATIS, MANAGEMENT - Retry Cache entry timeout for OM's ratis server. - - - - ozone.om.ratis.minimum.timeout - 1s - OZONE, OM, RATIS, MANAGEMENT - The minimum timeout duration for OM's Ratis server rpc. - - - - - ozone.om.ratis.client.request.timeout.duration - 3s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for OM Ratis client request. - - - - ozone.om.ratis.client.request.max.retries - 180 - OZONE, OM, RATIS, MANAGEMENT - Number of retries for OM client request. - - - ozone.om.ratis.client.request.retry.interval - 100ms - OZONE, OM, RATIS, MANAGEMENT - Interval between successive retries for a OM client request. 
- - - - - ozone.om.leader.election.minimum.timeout.duration - 1s - OZONE, OM, RATIS, MANAGEMENT - The minimum timeout duration for OM ratis leader election. - Default is 1s. - - - - - ozone.om.ratis.server.failure.timeout.duration - 120s - OZONE, OM, RATIS, MANAGEMENT - The timeout duration for ratis server failure detection, - once the threshold has reached, the ratis state machine will be informed - about the failure in the ratis ring. - - - - - ozone.om.ratis.server.role.check.interval - 15s - OZONE, OM, RATIS, MANAGEMENT - The interval between OM leader performing a role - check on its ratis server. Ratis server informs OM if it - loses the leader role. The scheduled check is an secondary - check to ensure that the leader role is updated periodically - . - - - - ozone.om.ratis.snapshot.dir - - OZONE, OM, STORAGE, MANAGEMENT, RATIS - This directory is used for storing OM's snapshot - related files like the ratisSnapshotIndex and DB checkpoint from leader - OM. - If undefined, OM snapshot dir will fallback to ozone.om.ratis.storage.dir. - This fallback approach is not recommended for production environments. - - - - ozone.om.snapshot.provider.socket.timeout - 5000s - OZONE, OM, HA, MANAGEMENT - - Socket timeout for HTTP call made by OM Snapshot Provider to request - OM snapshot from OM Leader. - - - - ozone.om.snapshot.provider.connection.timeout - 5000s - OZONE, OM, HA, MANAGEMENT - - Connection timeout for HTTP call made by OM Snapshot Provider to request - OM snapshot from OM Leader. - - - - ozone.om.snapshot.provider.request.timeout - 5000ms - OZONE, OM, HA, MANAGEMENT - - Connection request timeout for HTTP call made by OM Snapshot Provider to - request OM snapshot from OM Leader. - - - - - ozone.acl.authorizer.class - org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer - OZONE, SECURITY, ACL - Acl authorizer for Ozone. - - - - ozone.acl.enabled - false - OZONE, SECURITY, ACL - Key to enable/disable ozone acls. - - - hdds.scm.kerberos.keytab.file - - OZONE, SECURITY - The keytab file used by each SCM daemon to login as its - service principal. The principal name is configured with - hdds.scm.kerberos.principal. - - - - hdds.scm.kerberos.principal - - OZONE, SECURITY - The SCM service principal. Ex scm/_HOST@REALM.COM - - - - ozone.om.kerberos.keytab.file - - OZONE, SECURITY - The keytab file used by OzoneManager daemon to login as its - service principal. The principal name is configured with - ozone.om.kerberos.principal. - - - - ozone.om.kerberos.principal - - OZONE, SECURITY - The OzoneManager service principal. Ex om/_HOST@REALM.COM - - - - hdds.scm.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - - - hdds.scm.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - - - - ozone.om.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - - OzoneManager http server kerberos principal. - - - - ozone.om.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - - OzoneManager http server kerberos keytab. - - - - hdds.key.len - 2048 - SCM, HDDS, X509, SECURITY - - SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits. - - - - hdds.key.dir.name - keys - SCM, HDDS, X509, SECURITY - - Directory to store public/private key for SCM CA. This is relative to ozone/hdds meteadata dir. - - - - hdds.block.token.expiry.time - 1d - OZONE, HDDS, SECURITY, TOKEN - - Default value for expiry time of block token. This - setting supports multiple time unit suffixes as described in - dfs.heartbeat.interval. 
If no suffix is specified, then milliseconds is - assumed. - - - - - hdds.block.token.enabled - false - OZONE, HDDS, SECURITY, TOKEN - True if block tokens are enabled, else false. - - - hdds.x509.file.name - certificate.crt - OZONE, HDDS, SECURITY - Certificate file name. - - - hdds.grpc.tls.provider - OPENSSL - OZONE, HDDS, SECURITY, TLS - HDDS GRPC server TLS provider. - - - hdds.grpc.tls.enabled - false - OZONE, HDDS, SECURITY, TLS - If HDDS GRPC server TLS is enabled. - - - hdds.x509.default.duration - P365D - OZONE, HDDS, SECURITY - Default duration for which x509 certificates issued by SCM are - valid. The formats accepted are based on the ISO-8601 duration format - PnDTnHnMn.nS - - - hdds.x509.dir.name - certs - OZONE, HDDS, SECURITY - X509 certificate directory name. - - - hdds.x509.max.duration - P1865D - OZONE, HDDS, SECURITY - Max time for which certificate issued by SCM CA are valid. - . The formats accepted are based on the ISO-8601 duration format - PnDTnHnMn.nS - - - hdds.x509.signature.algorithm - SHA256withRSA - OZONE, HDDS, SECURITY - X509 signature certificate. - - - ozone.scm.security.handler.count.key - 2 - OZONE, HDDS, SECURITY - Threads configured for SCMSecurityProtocolServer. - - - ozone.scm.security.service.address - - OZONE, HDDS, SECURITY - Address of SCMSecurityProtocolServer. - - - ozone.scm.security.service.bind.host - 0.0.0.0 - OZONE, HDDS, SECURITY - SCM security server host. - - - ozone.scm.security.service.port - 9961 - OZONE, HDDS, SECURITY - SCM security server port. - - - - hdds.metadata.dir - - X509, SECURITY - - Absolute path to HDDS metadata dir. - - - - hdds.priv.key.file.name - private.pem - X509, SECURITY - - Name of file which stores private key generated for SCM CA. - - - - hdds.public.key.file.name - public.pem - X509, SECURITY - - Name of file which stores public key generated for SCM CA. - - - - ozone.manager.delegation.remover.scan.interval - 3600000 - - Time interval after which ozone secret manger scans for expired - delegation token. - - - - ozone.manager.delegation.token.renew-interval - 1d - - Default time interval after which ozone delegation token will - require renewal before any further use. - - - - ozone.manager.delegation.token.max-lifetime - 7d - - Default max time interval after which ozone delegation token will - not be renewed. - - - - - ozone.fs.isolated-classloader - - OZONE, OZONEFS - - Enable it for older hadoops to separate the classloading of all the - Ozone classes. With 'true' value, ozonefs can be used with older - hadoop versions as the hadoop3/ozone related classes are loaded by - an isolated classloader. - - Default depends from the used jar. true for ozone-filesystem-lib-legacy - jar and false for the ozone-filesystem-lib-current.jar - - - - ozone.manager.db.checkpoint.transfer.bandwidthPerSec - 0 - OZONE - - Maximum bandwidth used for Ozone Manager DB checkpoint download through - the servlet. - - - - - ozone.freon.http-address - 0.0.0.0:9884 - OZONE, MANAGEMENT - - The address and the base port where the FREON web ui will listen on. - If the port is 0 then the server will start on a free port. - - - - ozone.freon.http-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the Freon web server will bind to. If this - optional address is set, it overrides only the hostname portion of - ozone.freon.http-address. - - - - ozone.freon.http.enabled - true - OZONE, MANAGEMENT - - Property to enable or disable FREON web ui. 
- - - - ozone.freon.https-address - 0.0.0.0:9885 - OZONE, MANAGEMENT - - The address and the base port where the Freon web server will listen - on using HTTPS. - If the port is 0 then the server will start on a free port. - - - - ozone.freon.https-bind-host - 0.0.0.0 - OZONE, MANAGEMENT - - The actual address the Freon web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.freon.http-address. - - - - ozone.freon.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - SECURITY - - Security principal used by freon. - - - - ozone.freon.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - SECURITY - - Keytab used by Freon. - - - - hdds.security.client.datanode.container.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client datanode container protocol. - - - - hdds.security.client.scm.block.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm block protocol. - - - - hdds.security.client.scm.certificate.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm certificate protocol. - - - - hdds.security.client.scm.container.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client scm container protocol. - - - - ozone.om.security.client.protocol.acl - * - SECURITY - - Comma separated list of users and groups allowed to access - client ozone manager protocol. - - - - - hdds.datanode.http.kerberos.principal - HTTP/_HOST@EXAMPLE.COM - HDDS, SECURITY, MANAGEMENT - - The kerberos principal for the datanode http server. - - - - hdds.datanode.http.kerberos.keytab - /etc/security/keytabs/HTTP.keytab - HDDS, SECURITY, MANAGEMENT - - The kerberos keytab file for datanode http server - - - - hdds.datanode.http-address - 0.0.0.0:9882 - HDDS, MANAGEMENT - - The address and the base port where the Datanode web ui will listen on. - If the port is 0 then the server will start on a free port. - - - - hdds.datanode.http-bind-host - 0.0.0.0 - HDDS, MANAGEMENT - - The actual address the Datanode web server will bind to. If this - optional address is set, it overrides only the hostname portion of - hdds.datanode.http-address. - - - - hdds.datanode.http.enabled - true - HDDS, MANAGEMENT - - Property to enable or disable Datanode web ui. - - - - hdds.datanode.https-address - 0.0.0.0:9883 - HDDS, MANAGEMENT, SECURITY - - The address and the base port where the Datanode web UI will listen - on using HTTPS. - - If the port is 0 then the server will start on a free port. - - - - hdds.datanode.https-bind-host - 0.0.0.0 - HDDS, MANAGEMENT, SECURITY - - The actual address the Datanode web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - hdds.datanode.http-address. - - - - ozone.client.retry.max.attempts - 10 - - Max retry attempts for Ozone RpcClient talking to OzoneManagers. - - - - ozone.client.failover.max.attempts - 15 - - Expert only. The number of client failover attempts that should be - made before the failover is considered failed. - - - - ozone.client.failover.sleep.base.millis - 500 - - Expert only. The time to wait, in milliseconds, between failover - attempts increases exponentially as a function of the number of - attempts made so far, with a random factor of +/- 50%. This option - specifies the base value used in the failover calculation. The - first failover will retry immediately. 
The 2nd failover attempt - will delay at least ozone.client.failover.sleep.base.millis - milliseconds. And so on. - - - - ozone.client.failover.sleep.max.millis - 15000 - - Expert only. The time to wait, in milliseconds, between failover - attempts increases exponentially as a function of the number of - attempts made so far, with a random factor of +/- 50%. This option - specifies the maximum value to wait between failovers. - Specifically, the time between two failover attempts will not - exceed +/- 50% of ozone.client.failover.sleep.max.millis - milliseconds. - - - - ozone.recon.http.enabled - true - RECON, MANAGEMENT - - Property to enable or disable Recon web user interface. - - - - ozone.recon.http-address - 0.0.0.0:9888 - RECON, MANAGEMENT - - The address and the base port where the Recon web UI will listen on. - - If the port is 0, then the server will start on a free port. However, it - is best to specify a well-known port, so it is easy to connect and see - the Recon management UI. - - - - ozone.recon.http-bind-host - 0.0.0.0 - RECON, MANAGEMENT - - The actual address the Recon server will bind to. If this optional - the address is set, it overrides only the hostname portion of - ozone.recon.http-address. - - - - ozone.recon.https-bind-host - 0.0.0.0 - RECON, MANAGEMENT, SECURITY - - The actual address the Recon web server will bind to using HTTPS. - If this optional address is set, it overrides only the hostname portion of - ozone.recon.https-address. - - - - ozone.recon.https-address - 0.0.0.0:9889 - RECON, MANAGEMENT, SECURITY - - The address and the base port where the Recon web UI will listen - on using HTTPS. If the port is 0 then the server will start on a free - port. - - - - ozone.recon.keytab.file - - RECON, SECURITY - - The keytab file for Kerberos authentication in Recon. - - - - ozone.recon.authentication.kerberos.principal - - RECON - The server principal used by Ozone Recon server. This is - typically set to HTTP/_HOST@REALM.TLD The SPNEGO server principal - begins with the prefix HTTP/ by convention. - - - - ozone.recon.container.db.cache.size.mb - 128 - RECON, PERFORMANCE - - The size of Recon DB cache in MB that used for caching files. - This value is set to an abnormally low value in the default configuration. - That is to make unit testing easy. Generally, this value should be set to - something like 16GB or more, if you intend to use Recon at scale. - - A large value for this key allows a proportionally larger amount of Recon - container DB to be cached in memory. This makes Recon Container-Key - operations faster. - - - - ozone.recon.db.dir - - OZONE, RECON, STORAGE, PERFORMANCE - - Directory where the Recon Server stores its metadata. This should - be specified as a single directory. If the directory does not - exist then the Recon will attempt to create it. - - If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - ozone.scm.network.topology.schema.file - network-topology-default.xml - OZONE, MANAGEMENT - - The schema file defines the ozone network topology. We currently support - xml(default) and yaml format. Refer to the samples in the topology - awareness document for xml and yaml topology definition samples. - - - - ozone.network.topology.aware.read - false - OZONE, PERFORMANCE - - Whether to enable topology aware read to improve the read performance. 
- - - - ozone.recon.container.db.impl - RocksDB - OZONE, RECON, STORAGE - - Ozone Recon container DB store implementation.Supported value is either - LevelDB or RocksDB. - - - - ozone.recon.om.db.dir - - OZONE, RECON, STORAGE - - Directory where the Recon Server stores its OM snapshot DB. This should - be specified as a single directory. If the directory does not - exist then the Recon will attempt to create it. - - If undefined, then the Recon will log a warning and fallback to - ozone.metadata.dirs. This fallback approach is not recommended for - production environments. - - - - recon.om.connection.request.timeout - 5000 - OZONE, RECON, OM - - Connection request timeout in milliseconds for HTTP call made by Recon to - request OM DB snapshot. - - - - recon.om.connection.timeout - 5s - OZONE, RECON, OM - - Connection timeout for HTTP call in milliseconds made by Recon to request - OM snapshot. - - - - recon.om.socket.timeout - 5s - OZONE, RECON, OM - - Socket timeout in milliseconds for HTTP call made by Recon to request - OM snapshot. - - - - recon.om.snapshot.task.initial.delay - 1m - OZONE, RECON, OM - - Initial delay in MINUTES by Recon to request OM DB Snapshot. - - - - recon.om.snapshot.task.interval.delay - 10m - OZONE, RECON, OM - - Interval in MINUTES by Recon to request OM DB Snapshot. - - - - recon.om.snapshot.task.flush.param - false - OZONE, RECON, OM - - Request to flush the OM DB before taking checkpoint snapshot. - - - - hdds.tracing.enabled - true - OZONE, HDDS - - If enabled, tracing information is sent to tracing server. - - - - ozone.recon.sql.db.driver - org.sqlite.JDBC - OZONE, RECON - - Database driver class name available on the - Ozone Recon classpath. - - - - ozone.recon.sql.db.jdbc.url - jdbc:sqlite:/${ozone.recon.db.dir}/ozone_recon_sqlite.db - OZONE, RECON - - Ozone Recon SQL database jdbc url. - - - - ozone.recon.sql.db.username - - OZONE, RECON - - Ozone Recon SQL database username. - - - - ozone.recon.sql.db.password - - OZONE, RECON - - Ozone Recon database password. - - - - ozone.recon.sql.db.auto.commit - false - OZONE, RECON - - Sets the Ozone Recon database connection property of auto-commit to - true/false. - - - - ozone.recon.sql.db.conn.timeout - 30000 - OZONE, RECON - - Sets time in milliseconds before call to getConnection is timed out. - - - - ozone.recon.sql.db.conn.max.active - 1 - OZONE, RECON - - The max active connections to the SQL database. The default SQLite - database only allows single active connection, set this to a - reasonable value like 10, for external production database. - - - - ozone.recon.sql.db.conn.max.age - 1800 - OZONE, RECON - - Sets maximum time a connection can be active in seconds. - - - - ozone.recon.sql.db.conn.idle.max.age - 3600 - OZONE, RECON - - Sets maximum time to live for idle connection in seconds. - - - - ozone.recon.sql.db.conn.idle.test.period - 60 - OZONE, RECON - - This sets the time (in seconds), for a connection to remain idle before - sending a test query to the DB. This is useful to prevent a DB from - timing out connections on its end. - - - - ozone.recon.sql.db.conn.idle.test - SELECT 1 - OZONE, RECON - - The query to send to the DB to maintain keep-alives and test for dead - connections. - - - - ozone.recon.task.thread.count - 1 - OZONE, RECON - - The number of Recon Tasks that are waiting on updates from OM. 
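The Recon SQL settings above default to the embedded SQLite database with a single active connection; the description of ozone.recon.sql.db.conn.max.active suggests a larger pool for an external database. A hedged sketch of overriding a few of these keys through OzoneConfiguration (the same class exercised by the configuration tests further down); the directory path and values are examples, not recommendations, and in practice these keys would normally be set in ozone-site.xml.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Illustrative only: programmatic overrides for a few Recon settings.
    public final class ReconConfigExample {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Dedicated metadata directory instead of the ozone.metadata.dirs fallback.
        conf.set("ozone.recon.db.dir", "/var/lib/ozone/recon");

        // Larger pool for an external SQL database (the default of 1 suits SQLite only).
        conf.setInt("ozone.recon.sql.db.conn.max.active", 10);

        // How often Recon requests an OM DB snapshot.
        conf.set("recon.om.snapshot.task.interval.delay", "10m");

        System.out.println(conf.get("ozone.recon.sql.db.conn.max.active"));
      }
    }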
-  </description>
-  </property>
-</configuration>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
deleted file mode 100644
index 75636106498..00000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds;
-
-import java.util.Optional;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Testing HddsUtils.
- */
-public class TestHddsUtils {
-
-  @Test
-  public void testGetHostName() {
-    Assert.assertEquals(Optional.of("localhost"),
-        HddsUtils.getHostName("localhost:1234"));
-
-    Assert.assertEquals(Optional.of("localhost"),
-        HddsUtils.getHostName("localhost"));
-
-    Assert.assertEquals(Optional.empty(),
-        HddsUtils.getHostName(":1234"));
-  }
-
-}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
deleted file mode 100644
index f18fd5e50b6..00000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.util.concurrent.TimeUnit; - -/** - * Example configuration to test the configuration injection. - */ -@ConfigGroup(prefix = "ozone.scm.client") -public class SimpleConfiguration { - - private String clientAddress; - - private String bindHost; - - private boolean enabled; - - private int port = 1234; - - private long waitTime = 1; - - @Config(key = "address", defaultValue = "localhost", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) - public void setClientAddress(String clientAddress) { - this.clientAddress = clientAddress; - } - - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) - public void setBindHost(String bindHost) { - this.bindHost = bindHost; - } - - @Config(key = "enabled", defaultValue = "true", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - @Config(key = "port", defaultValue = "9878", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setPort(int port) { - this.port = port; - } - - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "10m", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setWaitTime(long waitTime) { - this.waitTime = waitTime; - } - - public String getClientAddress() { - return clientAddress; - } - - public String getBindHost() { - return bindHost; - } - - public boolean isEnabled() { - return enabled; - } - - public int getPort() { - return port; - } - - public long getWaitTime() { - return waitTime; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java deleted file mode 100644 index 0a8047837aa..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.junit.Rule; -import org.junit.Before; -import org.junit.Test; -import org.junit.Assert; -import org.junit.rules.TemporaryFolder; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -/** - * Test class for OzoneConfiguration. - */ -public class TestOzoneConfiguration { - - private Configuration conf; - - @Rule - public TemporaryFolder tempConfigs = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - } - - private void startConfig(BufferedWriter out) throws IOException { - out.write("\n"); - out.write("\n"); - } - - private void endConfig(BufferedWriter out) throws IOException { - out.write("\n"); - out.flush(); - out.close(); - } - - @Test - public void testGetAllPropertiesByTags() throws Exception { - File coreDefault = tempConfigs.newFile("core-default-test.xml"); - File coreSite = tempConfigs.newFile("core-site-test.xml"); - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreDefault))) { - startConfig(out); - appendProperty(out, "hadoop.tags.system", "YARN,HDFS,NAMENODE"); - appendProperty(out, "hadoop.tags.custom", "MYCUSTOMTAG"); - appendPropertyByTag(out, "dfs.cblock.trace.io", "false", "YARN"); - appendPropertyByTag(out, "dfs.replication", "1", "HDFS"); - appendPropertyByTag(out, "dfs.namenode.logging.level", "INFO", - "NAMENODE"); - appendPropertyByTag(out, "dfs.random.key", "XYZ", "MYCUSTOMTAG"); - endConfig(out); - - Path fileResource = new Path(coreDefault.getAbsolutePath()); - conf.addResource(fileResource); - Assert.assertEquals(conf.getAllPropertiesByTag("MYCUSTOMTAG") - .getProperty("dfs.random.key"), "XYZ"); - } - - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreSite))) { - startConfig(out); - appendProperty(out, "dfs.random.key", "ABC"); - appendProperty(out, "dfs.replication", "3"); - appendProperty(out, "dfs.cblock.trace.io", "true"); - endConfig(out); - - Path fileResource = new Path(coreSite.getAbsolutePath()); - conf.addResource(fileResource); - } - - // Test if values are getting overridden even without tags being present - Assert.assertEquals("3", conf.getAllPropertiesByTag("HDFS") - .getProperty("dfs.replication")); - Assert.assertEquals("ABC", conf.getAllPropertiesByTag("MYCUSTOMTAG") - .getProperty("dfs.random.key")); - Assert.assertEquals("true", conf.getAllPropertiesByTag("YARN") - .getProperty("dfs.cblock.trace.io")); - } - - @Test - public void getConfigurationObject() { - OzoneConfiguration ozoneConfig = new OzoneConfiguration(); - ozoneConfig.set("ozone.scm.client.address", "address"); - ozoneConfig.set("ozone.scm.client.bind.host", "host"); - ozoneConfig.setBoolean("ozone.scm.client.enabled", true); - ozoneConfig.setInt("ozone.scm.client.port", 5555); - ozoneConfig.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES); - - SimpleConfiguration configuration = - ozoneConfig.getObject(SimpleConfiguration.class); - - Assert.assertEquals("host", configuration.getBindHost()); - Assert.assertEquals("address", 
configuration.getClientAddress()); - Assert.assertEquals(true, configuration.isEnabled()); - Assert.assertEquals(5555, configuration.getPort()); - Assert.assertEquals(600, configuration.getWaitTime()); - } - - @Test - public void getConfigurationObjectWithDefault() { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - - SimpleConfiguration configuration = - ozoneConfiguration.getObject(SimpleConfiguration.class); - - Assert.assertEquals(true, configuration.isEnabled()); - Assert.assertEquals(9878, configuration.getPort()); - } - - - private void appendProperty(BufferedWriter out, String name, String val) - throws IOException { - this.appendProperty(out, name, val, false); - } - - private void appendProperty(BufferedWriter out, String name, String val, - boolean isFinal) throws IOException { - out.write(""); - out.write(""); - out.write(name); - out.write(""); - out.write(""); - out.write(val); - out.write(""); - if (isFinal) { - out.write("true"); - } - out.write("\n"); - } - - private void appendPropertyByTag(BufferedWriter out, String name, String val, - String tags) throws IOException { - this.appendPropertyByTag(out, name, val, false, tags); - } - - private void appendPropertyByTag(BufferedWriter out, String name, String val, - boolean isFinal, - String tag) throws IOException { - out.write(""); - out.write(""); - out.write(name); - out.write(""); - out.write(""); - out.write(val); - out.write(""); - if (isFinal) { - out.write("true"); - } - out.write(""); - out.write(tag); - out.write(""); - out.write("\n"); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e72c902045b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the OzoneConfiguration related tests. - */ -package org.apache.hadoop.hdds.conf; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java deleted file mode 100644 index bbe6ab7cca7..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
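A note on the getConfigurationObject assertion above: the @ConfigGroup prefix "ozone.scm.client" and the @Config key "wait" compose into the property name "ozone.scm.client.wait", and because the setter is declared with ConfigType.TIME and TimeUnit.SECONDS, the configured 10 minutes come back from getWaitTime() as 600. A small sketch, assuming the SimpleConfiguration test class shown earlier is on the classpath; the wrapper class name is invented.

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.conf.SimpleConfiguration;

    // Illustrative only: prefix + key -> "ozone.scm.client.wait"; TIME values are
    // delivered to the annotated setter in its declared unit (SECONDS here).
    public final class ConfigInjectionExample {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setTimeDuration("ozone.scm.client.wait", 10, TimeUnit.MINUTES);

        SimpleConfiguration injected = conf.getObject(SimpleConfiguration.class);
        System.out.println(injected.getWaitTime()); // prints 600
      }
    }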
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.ratis; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Test; - -import java.util.Random; -import java.util.UUID; -import java.util.function.BiFunction; - -/** Testing {@link ContainerCommandRequestMessage}. 
*/ -public class TestContainerCommandRequestMessage { - static final Random RANDOM = new Random(); - - static ByteString newData(int length, Random random) { - final ByteString.Output out = ByteString.newOutput(); - for(int i = 0; i < length; i++) { - out.write(random.nextInt()); - } - return out.toByteString(); - } - - static ChecksumData checksum(ByteString data) { - try { - return new Checksum().computeChecksum(data.toByteArray()); - } catch (OzoneChecksumException e) { - throw new IllegalStateException(e); - } - } - - static ContainerCommandRequestProto newPutSmallFile( - BlockID blockID, ByteString data) { - final BlockData.Builder blockData - = BlockData.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - final PutBlockRequestProto.Builder putBlockRequest - = PutBlockRequestProto.newBuilder() - .setBlockData(blockData); - final KeyValue keyValue = KeyValue.newBuilder() - .setKey("OverWriteRequested") - .setValue("true") - .build(); - final ChunkInfo chunk = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk") - .setOffset(0) - .setLen(data.size()) - .addMetadata(keyValue) - .setChecksumData(checksum(data).getProtoBufMessage()) - .build(); - final PutSmallFileRequestProto putSmallFileRequest - = PutSmallFileRequestProto.newBuilder() - .setChunkInfo(chunk) - .setBlock(putBlockRequest) - .setData(data) - .build(); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.PutSmallFile) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(UUID.randomUUID().toString()) - .setPutSmallFile(putSmallFileRequest) - .build(); - } - - static ContainerCommandRequestProto newWriteChunk( - BlockID blockID, ByteString data) { - final ChunkInfo chunk = ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk_" + 1) - .setOffset(0) - .setLen(data.size()) - .setChecksumData(checksum(data).getProtoBufMessage()) - .build(); - - final WriteChunkRequestProto.Builder writeChunkRequest - = WriteChunkRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setDatanodeUuid(UUID.randomUUID().toString()) - .setWriteChunk(writeChunkRequest) - .build(); - } - - @Test - public void testPutSmallFile() throws Exception { - runTest(TestContainerCommandRequestMessage::newPutSmallFile); - } - - @Test - public void testWriteChunk() throws Exception { - runTest(TestContainerCommandRequestMessage::newWriteChunk); - } - - static void runTest( - BiFunction method) - throws Exception { - for(int i = 0; i < 2; i++) { - runTest(i, method); - } - for(int i = 2; i < 1 << 10;) { - runTest(i + 1 + RANDOM.nextInt(i - 1), method); - i <<= 1; - runTest(i, method); - } - } - - static void runTest(int length, - BiFunction method) - throws Exception { - System.out.println("length=" + length); - final BlockID blockID = new BlockID(RANDOM.nextLong(), RANDOM.nextLong()); - final ByteString data = newData(length, RANDOM); - - final ContainerCommandRequestProto original = method.apply(blockID, data); - final ContainerCommandRequestMessage message - = ContainerCommandRequestMessage.toMessage(original, null); - final ContainerCommandRequestProto computed - = ContainerCommandRequestMessage.toProto(message.getContent(), null); - Assert.assertEquals(original, computed); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java deleted file mode 100644 index b5b4684dda0..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/exceptions/TestSCMExceptionResultCodes.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.exceptions; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.protocol.proto. - ScmBlockLocationProtocolProtos.Status; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test Result code mappping between SCMException and the protobuf definitions. - */ -public class TestSCMExceptionResultCodes { - - @Test - public void codeMapping() { - // ResultCode = SCMException definition - // Status = protobuf definition - Assert.assertEquals(ResultCodes.values().length, Status.values().length); - for (int i = 0; i < ResultCodes.values().length; i++) { - ResultCodes codeValue = ResultCodes.values()[i]; - Status protoBufValue = Status.values()[i]; - Assert.assertTrue(String - .format("Protobuf/Enum constant name mismatch %s %s", codeValue, - protoBufValue), sameName(codeValue.name(), protoBufValue.name())); - ResultCodes converted = ResultCodes.values()[protoBufValue.ordinal()]; - Assert.assertEquals(codeValue, converted); - } - } - - private boolean sameName(String codeValue, String protoBufValue) { - return codeValue.equals(protoBufValue); - } - -} - diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java deleted file mode 100644 index b31e4a8e996..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ /dev/null @@ -1,953 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
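TestSCMExceptionResultCodes above checks that SCMException.ResultCodes and the protobuf Status enum stay aligned by ordinal and by name. A short sketch of the conversion that this alignment allows; it mirrors what the test verifies rather than quoting a translator class from the codebase, and the class name is invented.

    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status;
    import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;

    // Illustrative only: because the two enums are declared in the same order
    // (which the test above asserts), a value can be translated by ordinal.
    public final class ResultCodeMapping {

      static Status toProto(ResultCodes code) {
        return Status.values()[code.ordinal()];
      }

      static ResultCodes fromProto(Status status) {
        return ResultCodes.values()[status.ordinal()];
      }

      public static void main(String[] args) {
        for (ResultCodes code : ResultCodes.values()) {
          System.out.println(code + " <-> " + toProto(code));
        }
      }
    }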
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.stream.Collectors; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.junit.runner.RunWith; - -/** Test the network topology functions. */ -@RunWith(Parameterized.class) -public class TestNetworkTopologyImpl { - private static final Logger LOG = LoggerFactory.getLogger( - TestNetworkTopologyImpl.class); - private NetworkTopology cluster; - private Node[] dataNodes; - private Random random = new Random(); - - public TestNetworkTopologyImpl(NodeSchema[] schemas, Node[] nodeArray) { - NodeSchemaManager.getInstance().init(schemas, true); - cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - dataNodes = nodeArray; - for (int i = 0; i < dataNodes.length; i++) { - cluster.add(dataNodes[i]); - } - } - - @Rule - public Timeout testTimeout = new Timeout(3000000); - - @Parameters - public static Collection setupDatanodes() { - Object[][] topologies = new Object[][]{ - {new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/"), - createDatanode("2.2.2.2", "/"), - createDatanode("3.3.3.3", "/"), - createDatanode("4.4.4.4", "/"), - createDatanode("5.5.5.5", "/"), - createDatanode("6.6.6.6", "/"), - createDatanode("7.7.7.7", "/"), - createDatanode("8.8.8.8", "/"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/r1"), - createDatanode("2.2.2.2", "/r1"), - createDatanode("3.3.3.3", "/r2"), - createDatanode("4.4.4.4", "/r2"), - createDatanode("5.5.5.5", "/r2"), - createDatanode("6.6.6.6", "/r3"), - createDatanode("7.7.7.7", "/r3"), - createDatanode("8.8.8.8", "/r3"), - }}, - {new NodeSchema[] - {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/d1/r1"), - createDatanode("2.2.2.2", "/d1/r1"), - createDatanode("3.3.3.3", "/d1/r2"), - createDatanode("4.4.4.4", "/d1/r2"), - createDatanode("5.5.5.5", "/d1/r2"), - createDatanode("6.6.6.6", "/d2/r3"), - createDatanode("7.7.7.7", "/d2/r3"), - createDatanode("8.8.8.8", "/d2/r3"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, - NODEGROUP_SCHEMA, LEAF_SCHEMA}, - 
new Node[]{ - createDatanode("1.1.1.1", "/d1/r1/ng1"), - createDatanode("2.2.2.2", "/d1/r1/ng1"), - createDatanode("3.3.3.3", "/d1/r2/ng2"), - createDatanode("4.4.4.4", "/d1/r2/ng2"), - createDatanode("5.5.5.5", "/d1/r2/ng3"), - createDatanode("6.6.6.6", "/d2/r3/ng3"), - createDatanode("7.7.7.7", "/d2/r3/ng3"), - createDatanode("8.8.8.8", "/d2/r3/ng3"), - createDatanode("9.9.9.9", "/d3/r1/ng1"), - createDatanode("10.10.10.10", "/d3/r1/ng1"), - createDatanode("11.11.11.11", "/d3/r1/ng1"), - createDatanode("12.12.12.12", "/d3/r2/ng2"), - createDatanode("13.13.13.13", "/d3/r2/ng2"), - createDatanode("14.14.14.14", "/d4/r1/ng1"), - createDatanode("15.15.15.15", "/d4/r1/ng1"), - createDatanode("16.16.16.16", "/d4/r1/ng1"), - createDatanode("17.17.17.17", "/d4/r1/ng2"), - createDatanode("18.18.18.18", "/d4/r1/ng2"), - createDatanode("19.19.19.19", "/d4/r1/ng3"), - createDatanode("20.20.20.20", "/d4/r1/ng3"), - }}, - {new NodeSchema[] {ROOT_SCHEMA, REGION_SCHEMA, DATACENTER_SCHEMA, - RACK_SCHEMA, NODEGROUP_SCHEMA, LEAF_SCHEMA}, - new Node[]{ - createDatanode("1.1.1.1", "/d1/rg1/r1/ng1"), - createDatanode("2.2.2.2", "/d1/rg1/r1/ng1"), - createDatanode("3.3.3.3", "/d1/rg1/r1/ng2"), - createDatanode("4.4.4.4", "/d1/rg1/r1/ng1"), - createDatanode("5.5.5.5", "/d1/rg1/r1/ng1"), - createDatanode("6.6.6.6", "/d1/rg1/r1/ng2"), - createDatanode("7.7.7.7", "/d1/rg1/r1/ng2"), - createDatanode("8.8.8.8", "/d1/rg1/r1/ng2"), - createDatanode("9.9.9.9", "/d1/rg1/r1/ng2"), - createDatanode("10.10.10.10", "/d1/rg1/r1/ng2"), - createDatanode("11.11.11.11", "/d1/rg1/r2/ng1"), - createDatanode("12.12.12.12", "/d1/rg1/r2/ng1"), - createDatanode("13.13.13.13", "/d1/rg1/r2/ng1"), - createDatanode("14.14.14.14", "/d1/rg1/r2/ng1"), - createDatanode("15.15.15.15", "/d1/rg1/r2/ng1"), - createDatanode("16.16.16.16", "/d1/rg1/r2/ng2"), - createDatanode("17.17.17.17", "/d1/rg1/r2/ng2"), - createDatanode("18.18.18.18", "/d1/rg1/r2/ng2"), - createDatanode("19.19.19.19", "/d1/rg1/r2/ng2"), - createDatanode("20.20.20.20", "/d1/rg1/r2/ng2"), - createDatanode("21.21.21.21", "/d2/rg1/r2/ng1"), - createDatanode("22.22.22.22", "/d2/rg1/r2/ng1"), - createDatanode("23.23.23.23", "/d2/rg2/r2/ng1"), - createDatanode("24.24.24.24", "/d2/rg2/r2/ng1"), - createDatanode("25.25.25.25", "/d2/rg2/r2/ng1"), - }} - }; - return Arrays.asList(topologies); - } - - @Test - public void testContains() { - Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4"); - for (int i=0; i < dataNodes.length; i++) { - assertTrue(cluster.contains(dataNodes[i])); - } - assertFalse(cluster.contains(nodeNotInMap)); - } - - @Test - public void testNumOfChildren() { - assertEquals(dataNodes.length, cluster.getNumOfLeafNode(null)); - assertEquals(0, cluster.getNumOfLeafNode("/switch1/node1")); - } - - @Test - public void testGetNode() { - assertEquals(cluster.getNode(""), cluster.getNode(null)); - assertEquals(cluster.getNode(""), cluster.getNode("/")); - assertEquals(null, cluster.getNode("/switch1/node1")); - assertEquals(null, cluster.getNode("/switch1")); - } - - @Test - public void testCreateInvalidTopology() { - List schemas = new ArrayList(); - schemas.add(ROOT_SCHEMA); - schemas.add(RACK_SCHEMA); - schemas.add(LEAF_SCHEMA); - NodeSchemaManager.getInstance().init(schemas.toArray(new NodeSchema[0]), - true); - NetworkTopology newCluster = new NetworkTopologyImpl( - NodeSchemaManager.getInstance()); - Node[] invalidDataNodes = new Node[] { - createDatanode("1.1.1.1", "/r1"), - createDatanode("2.2.2.2", "/r2"), - createDatanode("3.3.3.3", "/d1/r2") - }; - 
newCluster.add(invalidDataNodes[0]); - newCluster.add(invalidDataNodes[1]); - try { - newCluster.add(invalidDataNodes[2]); - fail("expected InvalidTopologyException"); - } catch (NetworkTopology.InvalidTopologyException e) { - assertTrue(e.getMessage().contains("Failed to add")); - assertTrue(e.getMessage().contains("Its path depth is not " + - newCluster.getMaxLevel())); - } - } - - @Test - public void testInitWithConfigFile() { - ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); - Configuration conf = new Configuration(); - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - NetworkTopology newCluster = new NetworkTopologyImpl(conf); - LOG.info("network topology max level = " + newCluster.getMaxLevel()); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testAncestor() { - assumeTrue(cluster.getMaxLevel() > 2); - int maxLevel = cluster.getMaxLevel(); - assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1])); - while(maxLevel > 1) { - assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1], - maxLevel - 1)); - maxLevel--; - } - assertFalse(cluster.isSameParent(dataNodes[1], dataNodes[2])); - assertFalse(cluster.isSameParent(null, dataNodes[2])); - assertFalse(cluster.isSameParent(dataNodes[1], null)); - assertFalse(cluster.isSameParent(null, null)); - - assertFalse(cluster.isSameAncestor(dataNodes[1], dataNodes[2], 0)); - assertFalse(cluster.isSameAncestor(dataNodes[1], null, 1)); - assertFalse(cluster.isSameAncestor(null, dataNodes[2], 1)); - assertFalse(cluster.isSameAncestor(null, null, 1)); - - maxLevel = cluster.getMaxLevel(); - assertTrue(cluster.isSameAncestor( - dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))], - dataNodes[random.nextInt(cluster.getNumOfLeafNode(null))], - maxLevel - 1)); - } - - @Test - public void testAddRemove() { - for(int i = 0; i < dataNodes.length; i++) { - cluster.remove(dataNodes[i]); - } - for(int i = 0; i < dataNodes.length; i++) { - assertFalse(cluster.contains(dataNodes[i])); - } - // no leaf nodes - assertEquals(0, cluster.getNumOfLeafNode(null)); - // no inner nodes - assertEquals(0, cluster.getNumOfNodes(2)); - for(int i = 0; i < dataNodes.length; i++) { - cluster.add(dataNodes[i]); - } - // Inner nodes are created automatically - assertTrue(cluster.getNumOfNodes(2) > 0); - - try { - cluster.add(cluster.chooseRandom(null).getParent()); - fail("Inner node can not be added manually"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith( - "Not allowed to add an inner node")); - } - - try { - cluster.remove(cluster.chooseRandom(null).getParent()); - fail("Inner node can not be removed manually"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith( - "Not allowed to remove an inner node")); - } - } - - @Test - public void testGetNodesWithLevel() { - int maxLevel = cluster.getMaxLevel(); - try { - assertEquals(1, cluster.getNumOfNodes(0)); - fail("level 0 is not supported"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(0)); - fail("level 0 is not supported"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(maxLevel + 1)); - fail("level out of scope"); - } catch (IllegalArgumentException e) { - 
assertTrue(e.getMessage().startsWith("Invalid level")); - } - - try { - assertEquals(1, cluster.getNumOfNodes(maxLevel + 1)); - fail("level out of scope"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().startsWith("Invalid level")); - } - // root node - assertEquals(1, cluster.getNumOfNodes(1)); - assertEquals(1, cluster.getNumOfNodes(1)); - // leaf nodes - assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel)); - assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel)); - } - - @Test - public void testChooseRandomSimple() { - String path = - dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath(); - assertEquals(path, cluster.chooseRandom(path).getNetworkFullPath()); - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - // test chooseRandom(String scope) - while (!path.equals(ROOT)) { - assertTrue(cluster.chooseRandom(path).getNetworkLocation() - .startsWith(path)); - Node node = cluster.chooseRandom("~" + path); - assertTrue(!node.getNetworkLocation() - .startsWith(path)); - path = path.substring(0, - path.lastIndexOf(PATH_SEPARATOR_STR)); - } - assertNotNull(cluster.chooseRandom(null)); - assertNotNull(cluster.chooseRandom("")); - assertNotNull(cluster.chooseRandom("/")); - assertNull(cluster.chooseRandom("~")); - assertNull(cluster.chooseRandom("~/")); - - // test chooseRandom(String scope, String excludedScope) - path = dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath(); - List pathList = new ArrayList<>(); - pathList.add(path); - assertNull(cluster.chooseRandom(path, pathList)); - assertNotNull(cluster.chooseRandom(null, pathList)); - assertNotNull(cluster.chooseRandom("", pathList)); - - // test chooseRandom(String scope, Collection excludedNodes) - assertNull(cluster.chooseRandom("", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("/", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("~", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom("~/", Arrays.asList(dataNodes))); - assertNull(cluster.chooseRandom(null, Arrays.asList(dataNodes))); - } - - /** - * Following test checks that chooseRandom works for an excluded scope. 
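The topology tests above build a NetworkTopologyImpl from a NodeSchemaManager and call chooseRandom with a "~"-prefixed excluded scope. A compact sketch of that flow using the same calls the test makes; the two-rack layout and the class name are invented for illustration.

    import org.apache.hadoop.hdds.scm.net.NetConstants;
    import org.apache.hadoop.hdds.scm.net.NetworkTopology;
    import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
    import org.apache.hadoop.hdds.scm.net.Node;
    import org.apache.hadoop.hdds.scm.net.NodeImpl;
    import org.apache.hadoop.hdds.scm.net.NodeSchema;
    import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;

    // Illustrative only: a two-rack topology and two chooseRandom calls.
    public final class TopologyChooseRandomExample {
      public static void main(String[] args) {
        NodeSchema[] schemas = {NetConstants.ROOT_SCHEMA,
            NetConstants.RACK_SCHEMA, NetConstants.LEAF_SCHEMA};
        NodeSchemaManager.getInstance().init(schemas, true);
        NetworkTopology cluster =
            new NetworkTopologyImpl(NodeSchemaManager.getInstance());

        cluster.add(new NodeImpl("1.1.1.1", "/r1", NetConstants.NODE_COST_DEFAULT));
        cluster.add(new NodeImpl("2.2.2.2", "/r1", NetConstants.NODE_COST_DEFAULT));
        cluster.add(new NodeImpl("3.3.3.3", "/r2", NetConstants.NODE_COST_DEFAULT));

        // Any leaf anywhere in the tree.
        Node any = cluster.chooseRandom(null);
        // Any leaf outside /r1 ("~" marks an excluded scope).
        Node notOnR1 = cluster.chooseRandom("~/r1");

        System.out.println(any.getNetworkFullPath() + " and " + notOnR1.getNetworkFullPath());
      }
    }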
- */ - @Test - public void testChooseRandomExcludedScope() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - String scope; - Map frequency; - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - frequency = pickNodesAtRandom(100, scope, null, 0); - for (Node key : dataNodes) { - if (key.getNetworkFullPath().startsWith(path)) { - assertTrue(frequency.get(key) == 0); - } - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - - // null excludedScope, every node should be chosen - frequency = pickNodes(100, null, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - - // "" excludedScope, no node will ever be chosen - List pathList = new ArrayList(); - pathList.add(""); - frequency = pickNodes(100, pathList, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - - // "~" scope, no node will ever be chosen - scope = "~"; - frequency = pickNodesAtRandom(100, scope, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - // out network topology excluded scope, every node should be chosen - pathList.clear(); - pathList.add("/city1"); - frequency = pickNodes( - cluster.getNumOfLeafNode(null), pathList, null, null, 0); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - } - - /** - * Following test checks that chooseRandom works for an excluded nodes. - */ - @Test - public void testChooseRandomExcludedNode() { - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - for(Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); - List ancestorList = NetUtils.getAncestorList(cluster, - excludedList, ancestorGen); - for (Node key : dataNodes) { - if (excludedList.contains(key) || - (ancestorList.size() > 0 && - ancestorList.stream() - .map(a -> (InnerNode) a) - .filter(a -> a.isAncestor(key)) - .collect(Collectors.toList()).size() > 0)) { - assertTrue(frequency.get(key) == 0); - } - } - ancestorGen++; - } - } - // all nodes excluded, no node will be picked - List excludedList = Arrays.asList(dataNodes); - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - // out scope excluded nodes, each node will be picked - excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1")); - ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - ancestorGen++; - } - } - - /** - * Following test checks that chooseRandom works for excluded nodes and scope. 
- */ - @Test - public void testChooseRandomExcludedNodeAndScope() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - String scope; - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { - for (Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - frequency = - pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen); - List ancestorList = NetUtils.getAncestorList(cluster, - excludedList, ancestorGen); - for (Node key : dataNodes) { - if (excludedList.contains(key) || - key.getNetworkFullPath().startsWith(path) || - (ancestorList.size() > 0 && - ancestorList.stream() - .map(a -> (InnerNode) a) - .filter(a -> a.isAncestor(key)) - .collect(Collectors.toList()).size() > 0)) { - assertTrue(frequency.get(key) == 0); - } - } - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - // all nodes excluded, no node will be picked - List excludedList = Arrays.asList(dataNodes); - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = - pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - - // no node excluded and no excluded scope, each node will be picked - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodes(leafNum, null, null, null, ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) != 0); - } - ancestorGen++; - } - } - - /** - * Following test checks that chooseRandom works for excluded nodes, scope - * and ancestor generation. 
- */ - @Test - public void testChooseRandomWithAffinityNode() { - int[] excludedNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedNodeLists = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int[] affinityNodeIndexs = {0, dataNodes.length - 1, - random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; - Node[][] excludedScopeIndexs = {{dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - int leafNum = cluster.getNumOfLeafNode(null); - Map frequency; - List pathList = new ArrayList<>(); - for (int k : affinityNodeIndexs) { - for (Node[] excludedScopes : excludedScopeIndexs) { - pathList.clear(); - pathList.addAll(Arrays.stream(excludedScopes) - .map(node -> node.getNetworkFullPath()) - .collect(Collectors.toList())); - while (!pathList.get(0).equals(ROOT)) { - int ancestorGen = cluster.getMaxLevel() - 1; - while (ancestorGen > 0) { - for (Node[] list : excludedNodeLists) { - List excludedList = Arrays.asList(list); - frequency = pickNodes(leafNum, pathList, excludedList, - dataNodes[k], ancestorGen); - Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen); - for (Node key : dataNodes) { - if (affinityAncestor != null) { - if (frequency.get(key) > 0) { - assertTrue(affinityAncestor.isAncestor(key)); - } else if (!affinityAncestor.isAncestor(key)) { - continue; - } else if (excludedList != null && - excludedList.contains(key)) { - continue; - } else if (pathList != null && - pathList.stream().anyMatch(path -> - key.getNetworkFullPath().startsWith(path))) { - continue; - } else { - fail("Node is not picked when sequentially going " + - "through ancestor node's leaf nodes. node:" + - key.getNetworkFullPath() + ", ancestor node:" + - affinityAncestor.getNetworkFullPath() + - ", excludedScope: " + pathList.toString() + ", " + - "excludedList:" + (excludedList == null ? 
"" : - excludedList.toString())); - } - } - } - } - ancestorGen--; - } - pathList = pathList.stream().map(path -> - path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR))) - .collect(Collectors.toList()); - } - } - } - - // all nodes excluded, no node will be picked - String scope; - List excludedList = Arrays.asList(dataNodes); - for (int k : affinityNodeIndexs) { - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { - scope = "~" + path; - int ancestorGen = 0; - while (ancestorGen < cluster.getMaxLevel()) { - frequency = pickNodesAtRandom(leafNum, scope, excludedList, - dataNodes[k], ancestorGen); - for (Node key : dataNodes) { - assertTrue(frequency.get(key) == 0); - } - ancestorGen++; - } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); - } - } - } - // no node excluded and no excluded scope, each node will be picked - int ancestorGen = cluster.getMaxLevel() - 1; - for (int k : affinityNodeIndexs) { - while (ancestorGen > 0) { - frequency = - pickNodes(leafNum, null, null, dataNodes[k], ancestorGen); - Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen); - for (Node key : dataNodes) { - if (frequency.get(key) > 0) { - if (affinityAncestor != null) { - assertTrue(affinityAncestor.isAncestor(key)); - } - } - } - ancestorGen--; - } - } - // check invalid ancestor generation - try { - cluster.chooseRandom(null, null, null, dataNodes[0], - cluster.getMaxLevel()); - fail("ancestor generation exceeds max level, should fail"); - } catch (Exception e) { - assertTrue(e.getMessage().startsWith("ancestorGen " + - cluster.getMaxLevel() + - " exceeds this network topology acceptable level")); - } - } - - @Test - public void testCost() { - // network topology with default cost - List schemas = new ArrayList<>(); - schemas.add(ROOT_SCHEMA); - schemas.add(RACK_SCHEMA); - schemas.add(NODEGROUP_SCHEMA); - schemas.add(LEAF_SCHEMA); - - NodeSchemaManager manager = NodeSchemaManager.getInstance(); - manager.init(schemas.toArray(new NodeSchema[0]), true); - NetworkTopology newCluster = - new NetworkTopologyImpl(manager); - Node[] nodeList = new Node[] { - createDatanode("1.1.1.1", "/r1/ng1"), - createDatanode("2.2.2.2", "/r1/ng1"), - createDatanode("3.3.3.3", "/r1/ng2"), - createDatanode("4.4.4.4", "/r2/ng1"), - }; - for (Node node: nodeList) { - newCluster.add(node); - } - Node outScopeNode1 = createDatanode("5.5.5.5", "/r2/ng2"); - Node outScopeNode2 = createDatanode("6.6.6.6", "/r2/ng2"); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], null)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(null, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], outScopeNode1)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, outScopeNode2)); - - assertEquals(0, newCluster.getDistanceCost(null, null)); - assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0])); - assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1])); - assertEquals(4, newCluster.getDistanceCost(nodeList[0], nodeList[2])); - assertEquals(6, newCluster.getDistanceCost(nodeList[0], nodeList[3])); - - // network topology with customized cost - schemas.clear(); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.ROOT).setCost(5).build()); - schemas.add(new NodeSchema.Builder() - 
.setType(NodeSchema.LayerType.INNER_NODE).setCost(3).build()); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.INNER_NODE).setCost(1).build()); - schemas.add(new NodeSchema.Builder() - .setType(NodeSchema.LayerType.LEAF_NODE).build()); - manager = NodeSchemaManager.getInstance(); - manager.init(schemas.toArray(new NodeSchema[0]), true); - newCluster = new NetworkTopologyImpl(manager); - for (Node node: nodeList) { - newCluster.add(node); - } - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], null)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(null, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, nodeList[0])); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(nodeList[0], outScopeNode1)); - assertEquals(Integer.MAX_VALUE, - newCluster.getDistanceCost(outScopeNode1, outScopeNode2)); - - assertEquals(0, newCluster.getDistanceCost(null, null)); - assertEquals(0, newCluster.getDistanceCost(nodeList[0], nodeList[0])); - assertEquals(2, newCluster.getDistanceCost(nodeList[0], nodeList[1])); - assertEquals(8, newCluster.getDistanceCost(nodeList[0], nodeList[2])); - assertEquals(18, newCluster.getDistanceCost(nodeList[0], nodeList[3])); - } - - @Test - public void testSortByDistanceCost() { - Node[][] nodes = { - {}, - {dataNodes[0]}, - {dataNodes[dataNodes.length - 1]}, - {dataNodes[random.nextInt(dataNodes.length)]}, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }, - {dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - }}; - Node[] readers = {null, dataNodes[0], dataNodes[dataNodes.length - 1], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)], - dataNodes[random.nextInt(dataNodes.length)] - }; - for (Node reader : readers) { - for (Node[] nodeList : nodes) { - int length = nodeList.length; - while (length > 0) { - List ret = cluster.sortByDistanceCost(reader, - Arrays.asList(nodeList), length); - for (int i = 0; i < ret.size(); i++) { - if ((i + 1) < ret.size()) { - int cost1 = cluster.getDistanceCost(reader, ret.get(i)); - int cost2 = cluster.getDistanceCost(reader, ret.get(i + 1)); - assertTrue("reader:" + (reader != null ? 
- reader.getNetworkFullPath() : "null") + - ",node1:" + ret.get(i).getNetworkFullPath() + - ",node2:" + ret.get(i + 1).getNetworkFullPath() + - ",cost1:" + cost1 + ",cost2:" + cost2, - cost1 == Integer.MAX_VALUE || cost1 <= cost2); - } - } - length--; - } - } - } - - // sort all nodes - List nodeList = Arrays.asList(dataNodes.clone()); - for (Node reader : readers) { - int length = nodeList.size(); - while (length >= 0) { - List sortedNodeList = - cluster.sortByDistanceCost(reader, nodeList, length); - for (int i = 0; i < sortedNodeList.size(); i++) { - if ((i + 1) < sortedNodeList.size()) { - int cost1 = cluster.getDistanceCost(reader, sortedNodeList.get(i)); - int cost2 = cluster.getDistanceCost( - reader, sortedNodeList.get(i + 1)); - // node can be removed when called in testConcurrentAccess - assertTrue("reader:" + (reader != null ? - reader.getNetworkFullPath() : "null") + - ",node1:" + sortedNodeList.get(i).getNetworkFullPath() + - ",node2:" + sortedNodeList.get(i + 1).getNetworkFullPath() + - ",cost1:" + cost1 + ",cost2:" + cost2, - cost1 == Integer.MAX_VALUE || cost1 <= cost2); - } - } - length--; - } - } - } - - private static Node createDatanode(String name, String path) { - return new NodeImpl(name, path, NetConstants.NODE_COST_DEFAULT); - } - - /** - * This picks a large number of nodes at random in order to ensure coverage. - * - * @param numNodes the number of nodes - * @param excludedScope the excluded scope - * @param excludedNodes the excluded node list - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodesAtRandom(int numNodes, - String excludedScope, Collection excludedNodes, int ancestorGen) { - Map frequency = new HashMap(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - for (int j = 0; j < numNodes; j++) { - Node node = cluster.chooseRandom(excludedScope, excludedNodes, - ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - LOG.info("Result:" + frequency); - return frequency; - } - - /** - * This picks a large number of nodes at random in order to ensure coverage. - * - * @param numNodes the number of nodes - * @param excludedScope the excluded scope - * @param excludedNodes the excluded node list - * @param affinityNode the chosen node should share the same ancestor at - * generation "ancestorGen" with this node - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodesAtRandom(int numNodes, - String excludedScope, Collection excludedNodes, Node affinityNode, - int ancestorGen) { - Map frequency = new HashMap(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - - List pathList = new ArrayList<>(); - pathList.add(excludedScope.substring(1)); - for (int j = 0; j < numNodes; j++) { - - Node node = cluster.chooseRandom("", pathList, excludedNodes, - affinityNode, ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - LOG.info("Result:" + frequency); - return frequency; - } - - /** - * This picks a large amount of nodes sequentially. 
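testCost above pins down the distance semantics: with default schema costs, two leaves under the same nodegroup are 2 apart, the same rack 4, different racks 6, and unknown or null nodes cost Integer.MAX_VALUE. A sketch of getDistanceCost and sortByDistanceCost on the same four-node layout; the class name is invented and this is illustration only.

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hdds.scm.net.NetConstants;
    import org.apache.hadoop.hdds.scm.net.NetworkTopology;
    import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
    import org.apache.hadoop.hdds.scm.net.Node;
    import org.apache.hadoop.hdds.scm.net.NodeImpl;
    import org.apache.hadoop.hdds.scm.net.NodeSchema;
    import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;

    // Illustrative only: distance costs and locality-aware sorting on the
    // /rack/nodegroup layout used by testCost above.
    public final class DistanceCostExample {
      public static void main(String[] args) {
        NodeSchema[] schemas = {NetConstants.ROOT_SCHEMA, NetConstants.RACK_SCHEMA,
            NetConstants.NODEGROUP_SCHEMA, NetConstants.LEAF_SCHEMA};
        NodeSchemaManager.getInstance().init(schemas, true);
        NetworkTopology cluster =
            new NetworkTopologyImpl(NodeSchemaManager.getInstance());

        Node n1 = new NodeImpl("1.1.1.1", "/r1/ng1", NetConstants.NODE_COST_DEFAULT);
        Node n2 = new NodeImpl("2.2.2.2", "/r1/ng1", NetConstants.NODE_COST_DEFAULT);
        Node n3 = new NodeImpl("3.3.3.3", "/r1/ng2", NetConstants.NODE_COST_DEFAULT);
        Node n4 = new NodeImpl("4.4.4.4", "/r2/ng1", NetConstants.NODE_COST_DEFAULT);
        for (Node n : Arrays.asList(n1, n2, n3, n4)) {
          cluster.add(n);
        }

        // 2 (same nodegroup), 4 (same rack), 6 (different rack) with default costs.
        System.out.println(cluster.getDistanceCost(n1, n2));
        System.out.println(cluster.getDistanceCost(n1, n3));
        System.out.println(cluster.getDistanceCost(n1, n4));

        // Nodes closest to the reader come first.
        List<? extends Node> sorted =
            cluster.sortByDistanceCost(n1, Arrays.asList(n4, n3, n2), 3);
        System.out.println(sorted);
      }
    }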
- * - * @param numNodes the number of nodes - * @param excludedScopes the excluded scopes, should not start with "~" - * @param excludedNodes the excluded node list - * @param affinityNode the chosen node should share the same ancestor at - * generation "ancestorGen" with this node - * @param ancestorGen the chosen node cannot share the same ancestor at - * this generation with excludedNodes - * @return the frequency that nodes were chosen - */ - private Map pickNodes(int numNodes, - List excludedScopes, Collection excludedNodes, - Node affinityNode, int ancestorGen) { - Map frequency = new HashMap<>(); - for (Node dnd : dataNodes) { - frequency.put(dnd, 0); - } - excludedNodes = excludedNodes == null ? null : - excludedNodes.stream().distinct().collect(Collectors.toList()); - for (int j = 0; j < numNodes; j++) { - Node node = cluster.getNode(j, null, excludedScopes, excludedNodes, - affinityNode, ancestorGen); - if (node != null) { - frequency.put(node, frequency.get(node) + 1); - } - } - - LOG.info("Result:" + frequency); - return frequency; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java deleted file mode 100644 index 0c20353a2ce..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Arrays; -import java.util.Collection; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. 
*/ -@RunWith(Parameterized.class) -public class TestNodeSchemaLoader { - private static final Logger LOG = - LoggerFactory.getLogger(TestNodeSchemaLoader.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - - public TestNodeSchemaLoader(String schemaFile, String errMsg) { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("expect exceptions"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains(errMsg)); - } - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Parameters - public static Collection getSchemaFiles() { - Object[][] schemaFiles = new Object[][]{ - {"enforce-error.xml", "layer without prefix defined"}, - {"invalid-cost.xml", "Cost should be positive number or 0"}, - {"multiple-leaf.xml", "Multiple LEAF layers are found"}, - {"multiple-root.xml", "Multiple ROOT layers are found"}, - {"no-leaf.xml", "No LEAF layer is found"}, - {"no-root.xml", "No ROOT layer is found"}, - {"path-layers-size-mismatch.xml", - "Topology path depth doesn't match layer element numbers"}, - {"path-with-id-reference-failure.xml", - "No layer found for id"}, - {"unknown-layer-type.xml", "Unsupported layer type"}, - {"wrong-path-order-1.xml", - "Topology path doesn't start with ROOT layer"}, - {"wrong-path-order-2.xml", "Topology path doesn't end with LEAF layer"}, - {"no-topology.xml", "no or multiple element"}, - {"multiple-topology.xml", "no or multiple element"}, - {"invalid-version.xml", "Bad layoutversion value"}, - }; - return Arrays.asList(schemaFiles); - } - - @Test - public void testGood() { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testNotExist() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; - try { - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("not found")); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java deleted file mode 100644 index 66980437276..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.net; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. */ -public class TestNodeSchemaManager { - private static final Logger LOG = - LoggerFactory.getLogger(TestNodeSchemaManager.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - private NodeSchemaManager manager; - private Configuration conf; - - public TestNodeSchemaManager() { - conf = new Configuration(); - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath(); - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - manager = NodeSchemaManager.getInstance(); - manager.init(conf); - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Test(expected = IllegalArgumentException.class) - public void testFailure1() { - manager.getCost(0); - } - - @Test(expected = IllegalArgumentException.class) - public void testFailure2() { - manager.getCost(manager.getMaxLevel() + 1); - } - - @Test - public void testPass() { - assertEquals(4, manager.getMaxLevel()); - for (int i = 1; i <= manager.getMaxLevel(); i++) { - assertTrue(manager.getCost(i) == 1 || manager.getCost(i) == 0); - } - } - - @Test - public void testInitFailure() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.xml").getPath() + ".backup"; - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); - try { - manager.init(conf); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("Failed to load schema file:" + - filePath)); - } - } - - @Test - public void testComplete() { - // successful complete action - String path = "/node1"; - assertEquals(DEFAULT_RACK + DEFAULT_NODEGROUP + path, - manager.complete(path)); - assertEquals("/rack" + DEFAULT_NODEGROUP + path, - manager.complete("/rack" + path)); - assertEquals(DEFAULT_RACK + "/nodegroup" + path, - manager.complete("/nodegroup" + path)); - - // failed complete action - assertEquals(null, manager.complete("/dc" + path)); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java deleted file mode 100644 index c38bf388363..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.net; - -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Arrays; -import java.util.Collection; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** Test the node schema loader. */ -@RunWith(Parameterized.class) -public class TestYamlSchemaLoader { - private static final Logger LOG = - LoggerFactory.getLogger(TestYamlSchemaLoader.class); - private ClassLoader classLoader = - Thread.currentThread().getContextClassLoader(); - - public TestYamlSchemaLoader(String schemaFile, String errMsg) { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/" + schemaFile).getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("expect exceptions"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains(errMsg)); - } - } - - @Rule - public Timeout testTimeout = new Timeout(30000); - - @Parameters - public static Collection getSchemaFiles() { - Object[][] schemaFiles = new Object[][]{ - {"multiple-root.yaml", "Multiple root"}, - {"middle-leaf.yaml", "Leaf node in the middle"}, - }; - return Arrays.asList(schemaFiles); - } - - - @Test - public void testGood() { - try { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.yaml").getPath(); - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - } catch (Throwable e) { - fail("should succeed"); - } - } - - @Test - public void testNotExist() { - String filePath = classLoader.getResource( - "./networkTopologyTestFiles/good.yaml").getPath() + ".backup"; - try { - NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath); - fail("should fail"); - } catch (Throwable e) { - assertTrue(e.getMessage().contains("not found")); - } - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 796694171f4..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; -/** - Test cases for SCM client classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java deleted file mode 100644 index 77a2cecd79a..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java +++ /dev/null @@ -1,313 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.security.token; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test class for OzoneManagerDelegationToken. 
- */ -public class TestOzoneBlockTokenIdentifier { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneBlockTokenIdentifier.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneBlockTokenIdentifier.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static long expiryTime; - private static KeyPair keyPair; - private static X509Certificate cert; - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. - cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - } - - @After - public void cleanUp() throws Exception { - // KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir); - } - - @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." 
: "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneBlockTokenIdentifier("", "", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - @Test - public void testTokenSerialization() throws GeneralSecurityException, - IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - long maxLength = 128L; - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), maxLength); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - - Token token = new Token(tokenId.getBytes(), - signedToken, tokenId.getKind(), new Text("host:port")); - - String encodeToUrlString = token.encodeToUrlString(); - - TokendecodedToken = new Token(); - decodedToken.decodeFromUrlString(encodeToUrlString); - - OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier(); - decodedTokenId.readFields(new DataInputStream( - new ByteArrayInputStream(decodedToken.getIdentifier()))); - - Assert.assertEquals(decodedTokenId, tokenId); - Assert.assertEquals(decodedTokenId.getMaxLength(), maxLength); - - // Verify a decoded signed Token with public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(decodedTokenId, decodedToken - .getPassword(), cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." 
: "invalid."); - } - - - public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - byte[] signature = rsaSignature.sign(); - return signature; - } - - public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - boolean isValid = rsaSignature.verify(signature); - return isValid; - } - - private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - OzoneBlockTokenIdentifier generateTestToken() { - return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6), - RandomStringUtils.randomAlphabetic(5), - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 1024768L); - } - - @Test - public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordAsym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate certificate; - certificate = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster", - kp, 30, "SHA256withRSA"); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordAsym.add( - signTokenAsymmetric(tokenIds.get(i), kp.getPrivate())); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration / testTokenCount); - - startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), - certificate); - } - duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration / testTokenCount); - } - - @Test - public void testSymmetricTokenPerf() { - String hmacSHA1 = "HmacSHA1"; - String hmacSHA256 = "HmacSHA256"; - - testSymmetricTokenPerfHelper(hmacSHA1, 64); - testSymmetricTokenPerfHelper(hmacSHA256, 1024); - } - - public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordSym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyGenerator keyGen; - try { - keyGen = KeyGenerator.getInstance(hmacAlgorithm); - keyGen.init(keyLen); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - Mac mac; - try { - mac = 
Mac.getInstance(hmacAlgorithm); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - SecretKey secretKey = keyGen.generateKey(); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordSym.add( - signTokenSymmetric(tokenIds.get(i), mac, secretKey)); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration / testTokenCount); - } - - // TODO: verify certificate with a trust store - public boolean verifyCert(Certificate certificate) { - return true; - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java deleted file mode 100644 index d0566557b54..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the block token related classes. - */ -package org.apache.hadoop.hdds.security.token; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java deleted file mode 100644 index a8fa0af7b51..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockApprover.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.PKIProfile; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; - -import java.io.IOException; -import java.security.PrivateKey; -import java.util.Date; -import java.util.concurrent.CompletableFuture; - -/** - * A test approver class that makes testing easier. - */ -public class MockApprover extends BaseApprover { - - public MockApprover(PKIProfile pkiProfile, SecurityConfig config) { - super(pkiProfile, config); - } - - @Override - public CompletableFuture - inspectCSR(PKCS10CertificationRequest csr) { - return super.inspectCSR(csr); - } - - @Override - public X509CertificateHolder sign(SecurityConfig config, PrivateKey caPrivate, - X509CertificateHolder caCertificate, - Date validFrom, Date validTill, - PKCS10CertificationRequest request, - String scmId, String clusterId) - throws IOException, OperatorCreationException { - return null; - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java deleted file mode 100644 index 1dea512e4c2..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.cert.X509Certificate; - -/** - * - */ -public class MockCAStore implements CertificateStore { - @Override - public void storeValidCertificate(BigInteger serialID, - X509Certificate certificate) - throws IOException { - - } - - @Override - public void revokeCertificate(BigInteger serialID) throws IOException { - - } - - @Override - public void removeExpiredCertificate(BigInteger serialID) - throws IOException { - - } - - @Override - public X509Certificate getCertificateByID(BigInteger serialID, - CertType certType) - throws IOException { - return null; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java deleted file mode 100644 index 64eb4bafd68..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.test.LambdaTestUtils; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.*; - -/** - * Tests the Default CA Server. 
- */ -public class TestDefaultCAServer { - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private MockCAStore caStore; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - caStore = new MockCAStore(); - } - - @Test - public void testInit() throws SCMSecurityException, CertificateException, - IOException { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA); - X509CertificateHolder first = testCA.getCACertificate(); - assertNotNull(first); - //Init is idempotent. - testCA.init(securityConfig, CertificateServer.CAType.SELF_SIGNED_CA); - X509CertificateHolder second = testCA.getCACertificate(); - assertEquals(first, second); - - // we only support Self Signed CA for now. - try { - testCA.init(securityConfig, CertificateServer.CAType.INTERMEDIARY_CA); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This is a run time exception, hence it is not caught by the junit - // expected Exception. - assertTrue(e.toString().contains("Not implemented")); - } - } - - @Test - public void testMissingCertificate() { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - Consumer caInitializer = - ((DefaultCAServer) testCA).processVerificationStatus( - DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertTrue(e.toString().contains("Missing Root Certs")); - } - } - - @Test - public void testMissingKey() { - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - Consumer caInitializer = - ((DefaultCAServer) testCA).processVerificationStatus( - DefaultCAServer.VerificationStatus.MISSING_KEYS); - try { - - caInitializer.accept(securityConfig); - fail("code should not reach here, exception should have been thrown."); - } catch (IllegalStateException e) { - // This also is a runtime exception. Hence not caught by junit expected - // exception. - assertTrue(e.toString().contains("Missing Keys")); - } - } - - /** - * The most important test of this test suite. This tests that we are able - * to create a Test CA, creates it own self-Signed CA and then issue a - * certificate based on a CSR. - * @throws SCMSecurityException - on ERROR. - * @throws ExecutionException - on ERROR. - * @throws InterruptedException - on ERROR. - * @throws NoSuchProviderException - on ERROR. - * @throws NoSuchAlgorithmException - on ERROR. 
- */ - @Test - public void testRequestCertificate() throws IOException, - ExecutionException, InterruptedException, - NoSuchProviderException, NoSuchAlgorithmException { - String scmId = RandomStringUtils.randomAlphabetic(4); - String clusterId = RandomStringUtils.randomAlphabetic(4); - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID(clusterId) - .setScmID(scmId) - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. - String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - clusterId, scmId, caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - Future holder = testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - // Right now our calls are synchronous. Eventually this will have to wait. - assertTrue(holder.isDone()); - assertNotNull(holder.get()); - } - - /** - * Tests that we are able - * to create a Test CA, creates it own self-Signed CA and then issue a - * certificate based on a CSR when scmId and clusterId are not set in - * csr subject. - * @throws SCMSecurityException - on ERROR. - * @throws ExecutionException - on ERROR. - * @throws InterruptedException - on ERROR. - * @throws NoSuchProviderException - on ERROR. - * @throws NoSuchAlgorithmException - on ERROR. - */ - @Test - public void testRequestCertificateWithInvalidSubject() throws IOException, - ExecutionException, InterruptedException, - NoSuchProviderException, NoSuchAlgorithmException { - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. - String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - Future holder = testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - // Right now our calls are synchronous. Eventually this will have to wait. - assertTrue(holder.isDone()); - assertNotNull(holder.get()); - } - - @Test - public void testRequestCertificateWithInvalidSubjectFailure() - throws Exception { - KeyPair keyPair = - new HDDSKeyGenerator(conf).generateKey(); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setScmID("wrong one") - .setClusterID("223432rf") - .setSubject("Ozone Cluster") - .setConfiguration(conf) - .setKey(keyPair) - .build(); - - // Let us convert this to a string to mimic the common use case. 
- String csrString = CertificateSignRequest.getEncodedString(csr); - - CertificateServer testCA = new DefaultCAServer("testCA", - RandomStringUtils.randomAlphabetic(4), - RandomStringUtils.randomAlphabetic(4), caStore); - testCA.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - - LambdaTestUtils.intercept(ExecutionException.class, "ScmId and " + - "ClusterId in CSR subject are incorrect", - () -> { - Future holder = - testCA.requestCertificate(csrString, - CertificateApprover.ApprovalType.TESTING_AUTOMATIC); - holder.isDone(); - holder.get(); - }); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java deleted file mode 100644 index f892b8d05df..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultProfile.java +++ /dev/null @@ -1,364 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.authority; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.PKIProfiles.DefaultProfile; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x500.X500NameBuilder; -import org.bouncycastle.asn1.x500.style.BCStyle; -import org.bouncycastle.asn1.x509.ExtendedKeyUsage; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.ExtensionsGenerator; -import org.bouncycastle.asn1.x509.GeneralName; -import org.bouncycastle.asn1.x509.GeneralNames; -import org.bouncycastle.asn1.x509.KeyPurposeId; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCS10CertificationRequestBuilder; -import org.bouncycastle.pkcs.PKCSException; -import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Tests for the default PKI Profile. - */ -public class TestDefaultProfile { - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - private OzoneConfiguration configuration; - private SecurityConfig securityConfig; - private DefaultProfile defaultProfile; - private MockApprover testApprover; - private KeyPair keyPair; - - @Before - public void setUp() throws Exception { - configuration = new OzoneConfiguration(); - configuration.set(OZONE_METADATA_DIRS, - temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(configuration); - defaultProfile = new DefaultProfile(); - testApprover = new MockApprover(defaultProfile, - securityConfig); - keyPair = new HDDSKeyGenerator(securityConfig).generateKey(); - } - - /** - * Tests the General Names that we support. The default profile supports only - * two names right now. - */ - @Test - public void testisSupportedGeneralName() { -// Positive tests - assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.iPAddress)); - assertTrue(defaultProfile.isSupportedGeneralName(GeneralName.dNSName)); -// Negative Tests - assertFalse(defaultProfile.isSupportedGeneralName( - GeneralName.directoryName)); - assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.rfc822Name)); - assertFalse(defaultProfile.isSupportedGeneralName(GeneralName.otherName)); - } - - /** - * Test valid keys are validated correctly. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. 
- */ - @Test - public void testVerifyCertificate() throws SCMSecurityException, - PKCSException, OperatorCreationException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertTrue(testApprover.verifyPkcs10Request(csr)); - } - - - - - /** - * Test invalid keys fail in the validation. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - */ - @Test - public void testVerifyCertificateInvalidKeys() throws SCMSecurityException, - PKCSException, OperatorCreationException, - NoSuchProviderException, NoSuchAlgorithmException { - KeyPair newKeyPair = new HDDSKeyGenerator(securityConfig).generateKey(); - KeyPair wrongKey = new KeyPair(keyPair.getPublic(), - newKeyPair.getPrivate()); - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("8.8.8.8") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(wrongKey) - .build(); - // Signature verification should fail here, since the public/private key - // does not match. - assertFalse(testApprover.verifyPkcs10Request(csr)); - } - - /** - * Tests that normal valid extensions work with the default profile. - * - * @throws SCMSecurityException - on Error. - * @throws PKCSException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testExtensions() throws SCMSecurityException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("192.10.234.6") - .setCA(false) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - /** - * Tests that invalid extensions cause a failure in validation. We will fail - * if CA extension is enabled. - * - * @throws SCMSecurityException - on Error. - */ - - @Test - public void testInvalidExtensionsWithCA() throws SCMSecurityException { - PKCS10CertificationRequest csr = new CertificateSignRequest.Builder() - .addDnsName("hadoop.apache.org") - .addIpAddress("192.10.234.6") - .setCA(true) - .setClusterID("ClusterID") - .setScmID("SCMID") - .setSubject("Ozone Cluster") - .setConfiguration(configuration) - .setKey(keyPair) - .build(); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - /** - * Tests that invalid extensions cause a failure in validation. We will fail - * if rfc222 type names are added, we also add the extension as both - * critical and non-critical fashion to verify that the we catch both cases. - * - * @throws SCMSecurityException - on Error. 
- */ - - @Test - public void testInvalidExtensionsWithEmail() - throws IOException, OperatorCreationException { - Extensions emailExtension = getSANExtension(GeneralName.rfc822Name, - "bilbo@apache.org", false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, emailExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - emailExtension = getSANExtension(GeneralName.rfc822Name, "bilbo" + - "@apache.org", true); - csr = getInvalidCSR(keyPair, emailExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - } - - /** - * Same test for URI. - * @throws IOException - On Error. - * @throws OperatorCreationException- on Error. - */ - @Test - public void testInvalidExtensionsWithURI() throws IOException, - OperatorCreationException { - Extensions oExtension = getSANExtension( - GeneralName.uniformResourceIdentifier, "s3g.ozone.org", false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, oExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - oExtension = getSANExtension(GeneralName.uniformResourceIdentifier, - "s3g.ozone.org", false); - csr = getInvalidCSR(keyPair, oExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - /** - * Assert that if DNS is marked critical our PKI profile will reject it. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testInvalidExtensionsWithCriticalDNS() throws IOException, - OperatorCreationException { - Extensions dnsExtension = getSANExtension(GeneralName.dNSName, - "ozone.hadoop.org", - true); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, dnsExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - // This tests should pass, hence the assertTrue - dnsExtension = getSANExtension(GeneralName.dNSName, - "ozone.hadoop.org", - false); - csr = getInvalidCSR(keyPair, dnsExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - - /** - * Verify that valid Extended Key usage works as expected. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testValidExtendedKeyUsage() throws IOException, - OperatorCreationException { - Extensions extendedExtension = - getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, false); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - - extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_serverAuth, - false); - csr = getInvalidCSR(keyPair, extendedExtension); - assertTrue(testApprover.verfiyExtensions(csr)); - } - - - /** - * Verify that Invalid Extended Key usage works as expected, that is rejected. - * @throws IOException - on Error. - * @throws OperatorCreationException - on Error. - */ - @Test - public void testInValidExtendedKeyUsage() throws IOException, - OperatorCreationException { - Extensions extendedExtension = - getKeyUsageExtension(KeyPurposeId.id_kp_clientAuth, true); - PKCS10CertificationRequest csr = getInvalidCSR(keyPair, extendedExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - - extendedExtension = getKeyUsageExtension(KeyPurposeId.id_kp_OCSPSigning, - false); - csr = getInvalidCSR(keyPair, extendedExtension); - assertFalse(testApprover.verfiyExtensions(csr)); - } - - - - /** - * Generates an CSR with the extension specified. 
- * This function is used to get an Invalid CSR and test that PKI profile - * rejects these invalid extensions, Hence the function name, by itself it - * is a well formed CSR, but our PKI profile will treat it as invalid CSR. - * - * @param kPair - Key Pair. - * @return CSR - PKCS10CertificationRequest - * @throws OperatorCreationException - on Error. - */ - private PKCS10CertificationRequest getInvalidCSR(KeyPair kPair, - Extensions extensions) throws OperatorCreationException { - X500NameBuilder namebuilder = - new X500NameBuilder(X500Name.getDefaultStyle()); - namebuilder.addRDN(BCStyle.CN, "invalidCert"); - PKCS10CertificationRequestBuilder p10Builder = - new JcaPKCS10CertificationRequestBuilder(namebuilder.build(), - keyPair.getPublic()); - p10Builder.addAttribute(PKCSObjectIdentifiers.pkcs_9_at_extensionRequest, - extensions); - JcaContentSignerBuilder csBuilder = - new JcaContentSignerBuilder(this.securityConfig.getSignatureAlgo()); - ContentSigner signer = csBuilder.build(keyPair.getPrivate()); - return p10Builder.build(signer); - } - - /** - * Generate an Extension with rfc822Name. - * @param extensionCode - Extension Code. - * @param value - email to be added to the certificate - * @param critical - boolean value that marks the extension as critical. - * @return - An Extension list with email address. - * @throws IOException - */ - private Extensions getSANExtension(int extensionCode, String value, - boolean critical) throws IOException { - GeneralName extn = new GeneralName(extensionCode, - value); - ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator(); - extensionsGenerator.addExtension(Extension.subjectAlternativeName, critical, - new GeneralNames(extn)); - return extensionsGenerator.generate(); - } - - /** - * Returns a extension with Extended Key usage. - * @param purposeId - Usage that we want to encode. - * @param critical - makes the extension critical. - * @return Extensions. - */ - private Extensions getKeyUsageExtension(KeyPurposeId purposeId, - boolean critical) throws IOException { - ExtendedKeyUsage extendedKeyUsage = new ExtendedKeyUsage(purposeId); - ExtensionsGenerator extensionsGenerator = new ExtensionsGenerator(); - extensionsGenerator.addExtension( - Extension.extendedKeyUsage, critical, extendedKeyUsage); - return extensionsGenerator.generate(); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java deleted file mode 100644 index 1d20a78dcc4..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for Default CA. - */ -package org.apache.hadoop.hdds.security.x509.certificate.authority; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java deleted file mode 100644 index dcd9898cbee..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameter; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.cert.X509Certificate; -import java.util.Arrays; -import java.util.Collection; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.GETCERT; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.RECOVER; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.SUCCESS; -import static org.junit.Assert.assertTrue; - -/** - * Test class for {@link DefaultCertificateClient}. 
- */ -@RunWith(Parameterized.class) -@SuppressWarnings("visibilitymodifier") -public class TestCertificateClientInit { - - private KeyPair keyPair; - private String certSerialId = "3284792342234"; - private CertificateClient dnCertificateClient; - private CertificateClient omCertificateClient; - private HDDSKeyGenerator keyGenerator; - private Path metaDirPath; - private SecurityConfig securityConfig; - private KeyCodec dnKeyCodec; - private KeyCodec omKeyCodec; - private X509Certificate x509Certificate; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME; - - @Parameter - public boolean pvtKeyPresent; - @Parameter(1) - public boolean pubKeyPresent; - @Parameter(2) - public boolean certPresent; - @Parameter(3) - public InitResponse expectedResult; - - @Parameterized.Parameters - public static Collection initData() { - return Arrays.asList(new Object[][]{ - {false, false, false, GETCERT}, - {false, false, true, FAILURE}, - {false, true, false, FAILURE}, - {true, false, false, FAILURE}, - {false, true, true, FAILURE}, - {true, true, false, GETCERT}, - {true, false, true, SUCCESS}, - {true, true, true, SUCCESS}}); - } - - @Before - public void setUp() throws Exception { - OzoneConfiguration config = new OzoneConfiguration(); - final String path = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - metaDirPath = Paths.get(path, "test"); - config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString()); - securityConfig = new SecurityConfig(config); - keyGenerator = new HDDSKeyGenerator(securityConfig); - keyPair = keyGenerator.generateKey(); - x509Certificate = getX509Certificate(); - certSerialId = x509Certificate.getSerialNumber().toString(); - dnCertificateClient = new DNCertificateClient(securityConfig, - certSerialId); - omCertificateClient = new OMCertificateClient(securityConfig, - certSerialId); - dnKeyCodec = new KeyCodec(securityConfig, DN_COMPONENT); - omKeyCodec = new KeyCodec(securityConfig, OM_COMPONENT); - - Files.createDirectories(securityConfig.getKeyLocation(DN_COMPONENT)); - Files.createDirectories(securityConfig.getKeyLocation(OM_COMPONENT)); - } - - @After - public void tearDown() { - dnCertificateClient = null; - omCertificateClient = null; - FileUtils.deleteQuietly(metaDirPath.toFile()); - } - - - @Test - public void testInitDatanode() throws Exception { - if (pvtKeyPresent) { - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - } - - if (pubKeyPresent) { - if (dnCertificateClient.getPublicKey() == null) { - dnKeyCodec.writePublicKey(keyPair.getPublic()); - } - } else { - FileUtils.deleteQuietly( - Paths.get(securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - } - - if (certPresent) { - CertificateCodec codec = new CertificateCodec(securityConfig, - DN_COMPONENT); - codec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - } - InitResponse response = dnCertificateClient.init(); - - assertTrue(response.equals(expectedResult)); - - if (!response.equals(FAILURE)) { - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(DN_COMPONENT), - 
securityConfig.getPrivateKeyFileName())); - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(DN_COMPONENT), - securityConfig.getPublicKeyFileName())); - } - } - - @Test - public void testInitOzoneManager() throws Exception { - if (pvtKeyPresent) { - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - } - - if (pubKeyPresent) { - if (omCertificateClient.getPublicKey() == null) { - omKeyCodec.writePublicKey(keyPair.getPublic()); - } - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - } - - if (certPresent) { - CertificateCodec codec = new CertificateCodec(securityConfig, - OM_COMPONENT); - codec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - } else { - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(OM_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - } - InitResponse response = omCertificateClient.init(); - - if (pvtKeyPresent && pubKeyPresent & !certPresent) { - assertTrue(response.equals(RECOVER)); - } else { - assertTrue(response.equals(expectedResult)); - } - - if (!response.equals(FAILURE)) { - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(OM_COMPONENT), - securityConfig.getPrivateKeyFileName())); - assertTrue(OzoneSecurityUtil.checkIfFileExist( - securityConfig.getKeyLocation(OM_COMPONENT), - securityConfig.getPublicKeyFileName())); - } - } - - private X509Certificate getX509Certificate() throws Exception { - return KeyStoreTestUtil.generateCertificate( - "CN=Test", keyPair, 10, securityConfig.getSignatureAlgo()); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java deleted file mode 100644 index f389cdb6d22..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java +++ /dev/null @@ -1,480 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package org.apache.hadoop.hdds.security.x509.certificate.client; - -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.cert.X509Certificate; -import java.util.UUID; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; - - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; -import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getPEMEncodedString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -/** - * Test class for {@link DefaultCertificateClient}. 
- */ -public class TestDefaultCertificateClient { - - private String certSerialId; - private X509Certificate x509Certificate; - private OMCertificateClient omCertClient; - private DNCertificateClient dnCertClient; - private HDDSKeyGenerator keyGenerator; - private Path omMetaDirPath; - private Path dnMetaDirPath; - private SecurityConfig omSecurityConfig; - private SecurityConfig dnSecurityConfig; - private final static String UTF = "UTF-8"; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - private final static String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME; - private KeyCodec omKeyCodec; - private KeyCodec dnKeyCodec; - - @Before - public void setUp() throws Exception { - OzoneConfiguration config = new OzoneConfiguration(); - config.setStrings(OZONE_SCM_NAMES, "localhost"); - config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2); - final String omPath = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - final String dnPath = GenericTestUtils - .getTempPath(UUID.randomUUID().toString()); - - omMetaDirPath = Paths.get(omPath, "test"); - dnMetaDirPath = Paths.get(dnPath, "test"); - - config.set(HDDS_METADATA_DIR_NAME, omMetaDirPath.toString()); - omSecurityConfig = new SecurityConfig(config); - config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString()); - dnSecurityConfig = new SecurityConfig(config); - - - keyGenerator = new HDDSKeyGenerator(omSecurityConfig); - omKeyCodec = new KeyCodec(omSecurityConfig, OM_COMPONENT); - dnKeyCodec = new KeyCodec(dnSecurityConfig, DN_COMPONENT); - - Files.createDirectories(omSecurityConfig.getKeyLocation(OM_COMPONENT)); - Files.createDirectories(dnSecurityConfig.getKeyLocation(DN_COMPONENT)); - x509Certificate = generateX509Cert(null); - certSerialId = x509Certificate.getSerialNumber().toString(); - getCertClient(); - } - - private void getCertClient() { - omCertClient = new OMCertificateClient(omSecurityConfig, certSerialId); - dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId); - } - - @After - public void tearDown() { - omCertClient = null; - dnCertClient = null; - FileUtils.deleteQuietly(omMetaDirPath.toFile()); - FileUtils.deleteQuietly(dnMetaDirPath.toFile()); - } - - /** - * Tests: 1. getPrivateKey 2. getPublicKey 3. storePrivateKey 4. 
- * storePublicKey - */ - @Test - public void testKeyOperations() throws Exception { - cleanupOldKeyPair(); - PrivateKey pvtKey = omCertClient.getPrivateKey(); - PublicKey publicKey = omCertClient.getPublicKey(); - assertNull(publicKey); - assertNull(pvtKey); - - KeyPair keyPair = generateKeyPairFiles(); - pvtKey = omCertClient.getPrivateKey(); - assertNotNull(pvtKey); - assertEquals(pvtKey, keyPair.getPrivate()); - - publicKey = dnCertClient.getPublicKey(); - assertNotNull(publicKey); - assertEquals(publicKey, keyPair.getPublic()); - } - - private KeyPair generateKeyPairFiles() throws Exception { - cleanupOldKeyPair(); - KeyPair keyPair = keyGenerator.generateKey(); - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - omKeyCodec.writePublicKey(keyPair.getPublic()); - - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair.getPublic()); - return keyPair; - } - - private void cleanupOldKeyPair() { - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - } - - /** - * Tests: 1. storeCertificate 2. getCertificate 3. verifyCertificate - */ - @Test - public void testCertificateOps() throws Exception { - X509Certificate cert = omCertClient.getCertificate(); - assertNull(cert); - omCertClient.storeCertificate(getPEMEncodedString(x509Certificate), - true); - - cert = omCertClient.getCertificate( - x509Certificate.getSerialNumber().toString()); - assertNotNull(cert); - assertTrue(cert.getEncoded().length > 0); - assertEquals(cert, x509Certificate); - - // TODO: test verifyCertificate once implemented. - } - - private X509Certificate generateX509Cert(KeyPair keyPair) throws Exception { - if (keyPair == null) { - keyPair = generateKeyPairFiles(); - } - return KeyStoreTestUtil.generateCertificate("CN=Test", keyPair, 30, - omSecurityConfig.getSignatureAlgo()); - } - - @Test - public void testSignDataStream() throws Exception { - String data = RandomStringUtils.random(100, UTF); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - - // Expect error when there is no private key to sign. - LambdaTestUtils.intercept(IOException.class, "Error while " + - "signing the stream", - () -> omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF))); - - generateKeyPairFiles(); - byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF)); - validateHash(sign, data.getBytes()); - } - - /** - * Validate hash using public key of KeyPair. 
- */ - private void validateHash(byte[] hash, byte[] data) - throws Exception { - Signature rsaSignature = - Signature.getInstance(omSecurityConfig.getSignatureAlgo(), - omSecurityConfig.getProvider()); - rsaSignature.initVerify(omCertClient.getPublicKey()); - rsaSignature.update(data); - Assert.assertTrue(rsaSignature.verify(hash)); - } - - /** - * Tests: 1. verifySignature - */ - @Test - public void verifySignatureStream() throws Exception { - String data = RandomStringUtils.random(500, UTF); - byte[] sign = omCertClient.signDataStream(IOUtils.toInputStream(data, - UTF)); - - // Positive tests. - assertTrue(omCertClient.verifySignature(data.getBytes(), sign, - x509Certificate)); - assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF), - sign, x509Certificate)); - - // Negative tests. - assertFalse(omCertClient.verifySignature(data.getBytes(), - "abc".getBytes(), x509Certificate)); - assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data, - UTF), "abc".getBytes(), x509Certificate)); - - } - - /** - * Tests: 1. verifySignature - */ - @Test - public void verifySignatureDataArray() throws Exception { - String data = RandomStringUtils.random(500, UTF); - byte[] sign = omCertClient.signData(data.getBytes()); - - // Positive tests. - assertTrue(omCertClient.verifySignature(data.getBytes(), sign, - x509Certificate)); - assertTrue(omCertClient.verifySignature(IOUtils.toInputStream(data, UTF), - sign, x509Certificate)); - - // Negative tests. - assertFalse(omCertClient.verifySignature(data.getBytes(), - "abc".getBytes(), x509Certificate)); - assertFalse(omCertClient.verifySignature(IOUtils.toInputStream(data, - UTF), "abc".getBytes(), x509Certificate)); - - } - - @Test - public void queryCertificate() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Operation not supported", - () -> omCertClient.queryCertificate("")); - } - - @Test - public void testCertificateLoadingOnInit() throws Exception { - KeyPair keyPair = keyGenerator.generateKey(); - X509Certificate cert1 = generateX509Cert(keyPair); - X509Certificate cert2 = generateX509Cert(keyPair); - X509Certificate cert3 = generateX509Cert(keyPair); - - Path certPath = dnSecurityConfig.getCertificateLocation(DN_COMPONENT); - CertificateCodec codec = new CertificateCodec(dnSecurityConfig, - DN_COMPONENT); - - // Certificate not found. - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - LambdaTestUtils.intercept(CertificateException.class, "Error while" + - " getting certificate", - () -> dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - codec.writeCertificate(certPath, "1.crt", - getPEMEncodedString(cert1), true); - codec.writeCertificate(certPath, "2.crt", - getPEMEncodedString(cert2), true); - codec.writeCertificate(certPath, "3.crt", - getPEMEncodedString(cert3), true); - - // Re instantiate DN client which will load certificates from filesystem. 
- dnCertClient = new DNCertificateClient(dnSecurityConfig, certSerialId); - - assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - - } - - @Test - public void testStoreCertificate() throws Exception { - KeyPair keyPair = keyGenerator.generateKey(); - X509Certificate cert1 = generateX509Cert(keyPair); - X509Certificate cert2 = generateX509Cert(keyPair); - X509Certificate cert3 = generateX509Cert(keyPair); - - dnCertClient.storeCertificate(getPEMEncodedString(cert1), true); - dnCertClient.storeCertificate(getPEMEncodedString(cert2), true); - dnCertClient.storeCertificate(getPEMEncodedString(cert3), true); - - assertNotNull(dnCertClient.getCertificate(cert1.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert2.getSerialNumber() - .toString())); - assertNotNull(dnCertClient.getCertificate(cert3.getSerialNumber() - .toString())); - } - - @Test - public void testInitCertAndKeypairValidationFailures() throws Exception { - - GenericTestUtils.LogCapturer dnClientLog = GenericTestUtils.LogCapturer - .captureLogs(dnCertClient.getLogger()); - GenericTestUtils.LogCapturer omClientLog = GenericTestUtils.LogCapturer - .captureLogs(omCertClient.getLogger()); - KeyPair keyPair = keyGenerator.generateKey(); - KeyPair keyPair2 = keyGenerator.generateKey(); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 1. Expect failure when keypair validation fails. - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - - - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - - omKeyCodec.writePrivateKey(keyPair.getPrivate()); - omKeyCodec.writePublicKey(keyPair2.getPublic()); - - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair2.getPublic()); - - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 2. Expect failure when certificate is generated from different - // private key and keypair validation fails. 
- getCertClient(); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getCertificateFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getCertificateFileName()).toFile()); - - CertificateCodec omCertCodec = new CertificateCodec(omSecurityConfig, - OM_COMPONENT); - omCertCodec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - - CertificateCodec dnCertCodec = new CertificateCodec(dnSecurityConfig, - DN_COMPONENT); - dnCertCodec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Keypair validation " + - "failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Keypair validation failed")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 3. Expect failure when certificate is generated from different - // private key and certificate validation fails. - - // Re write the correct public key. - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - getCertClient(); - omKeyCodec.writePublicKey(keyPair.getPublic()); - dnKeyCodec.writePublicKey(keyPair.getPublic()); - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Stored certificate is " + - "generated with different")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - //Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Stored certificate is " + - "generated with different")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - - // Case 4. Failure when public key recovery fails. - getCertClient(); - FileUtils.deleteQuietly(Paths.get( - omSecurityConfig.getKeyLocation(OM_COMPONENT).toString(), - omSecurityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), - dnSecurityConfig.getPublicKeyFileName()).toFile()); - - // Check for DN. - assertEquals(dnCertClient.init(), FAILURE); - assertTrue(dnClientLog.getOutput().contains("Can't recover public key")); - - // Check for OM. - assertEquals(omCertClient.init(), FAILURE); - assertTrue(omClientLog.getOutput().contains("Can't recover public key")); - dnClientLog.clearOutput(); - omClientLog.clearOutput(); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java deleted file mode 100644 index ded52068395..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCertificateCodec.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.certificate.utils; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -/** - * Tests the Certificate codecs. - */ -public class TestCertificateCodec { - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static final String COMPONENT = "test"; - private SecurityConfig securityConfig; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - /** - * This test converts a X509Certificate Holder object to a PEM encoded String, - * then creates a new X509Certificate object to verify that we are able to - * serialize and deserialize correctly. we follow up with converting these - * objects to standard JCA x509Certificate objects. - * - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. 
- */ - @Test - public void testGetPEMEncodedString() - throws NoSuchProviderException, NoSuchAlgorithmException, - IOException, SCMSecurityException, CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - String pemString = codec.getPEMEncodedString(cert); - assertTrue(pemString.startsWith(CertificateCodec.BEGIN_CERT)); - assertTrue(pemString.endsWith(CertificateCodec.END_CERT + "\n")); - - // Read back the certificate and verify that all the comparisons pass. - X509CertificateHolder newCert = - codec.getCertificateHolder(codec.getX509Certificate(pemString)); - assertEquals(cert, newCert); - - // Just make sure we can decode both these classes to Java Std. lIb classes. - X509Certificate firstCert = CertificateCodec.getX509Certificate(cert); - X509Certificate secondCert = CertificateCodec.getX509Certificate(newCert); - assertEquals(firstCert, secondCert); - } - - /** - * tests writing and reading certificates in PEM encoded form. - * - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - */ - @Test - public void testwriteCertificate() throws NoSuchProviderException, - NoSuchAlgorithmException, IOException, SCMSecurityException, - CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - String pemString = codec.getPEMEncodedString(cert); - File basePath = temporaryFolder.newFolder(); - if (!basePath.exists()) { - Assert.assertTrue(basePath.mkdirs()); - } - codec.writeCertificate(basePath.toPath(), "pemcertificate.crt", - pemString, false); - X509CertificateHolder certHolder = - codec.readCertificate(basePath.toPath(), "pemcertificate.crt"); - assertNotNull(certHolder); - assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber()); - } - - /** - * Tests reading and writing certificates in DER form. - * - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws CertificateException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. 
- */ - @Test - public void testwriteCertificateDefault() - throws IOException, SCMSecurityException, CertificateException, - NoSuchProviderException, NoSuchAlgorithmException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = new CertificateCodec(securityConfig, COMPONENT); - codec.writeCertificate(cert); - X509CertificateHolder certHolder = codec.readCertificate(); - assertNotNull(certHolder); - assertEquals(cert.getSerialNumber(), certHolder.getSerialNumber()); - } - - /** - * Tests writing to non-default certificate file name. - * - * @throws IOException - on Error. - * @throws SCMSecurityException - on Error. - * @throws NoSuchProviderException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws CertificateException - on Error. - */ - @Test - public void writeCertificate2() throws IOException, SCMSecurityException, - NoSuchProviderException, NoSuchAlgorithmException, CertificateException { - HDDSKeyGenerator keyGenerator = - new HDDSKeyGenerator(conf); - X509CertificateHolder cert = - SelfSignedCertificate.newBuilder() - .setSubject(RandomStringUtils.randomAlphabetic(4)) - .setClusterID(RandomStringUtils.randomAlphabetic(4)) - .setScmID(RandomStringUtils.randomAlphabetic(4)) - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(1, ChronoUnit.DAYS)) - .setConfiguration(keyGenerator.getSecurityConfig() - .getConfiguration()) - .setKey(keyGenerator.generateKey()) - .makeCA() - .build(); - CertificateCodec codec = - new CertificateCodec(keyGenerator.getSecurityConfig(), "ca"); - codec.writeCertificate(cert, "newcert.crt", false); - // Rewrite with force support - codec.writeCertificate(cert, "newcert.crt", true); - X509CertificateHolder x509CertificateHolder = - codec.readCertificate(codec.getLocation(), "newcert.crt"); - assertNotNull(x509CertificateHolder); - - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java deleted file mode 100644 index 4551f29ee24..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - Tests for Certificate helpers. - */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; - diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java deleted file mode 100644 index 5720d27b161..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.certificates; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.bouncycastle.asn1.ASN1Sequence; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.asn1.x509.Extensions; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.operator.ContentVerifierProvider; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.jcajce.JcaContentVerifierProviderBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.bouncycastle.pkcs.PKCSException; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Certificate Signing Request. 
- */ -public class TestCertificateSignRequest { - - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private SecurityConfig securityConfig; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - @Test - public void testGenerateCSR() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, - OperatorCreationException, PKCSException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. - String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - Extensions extensions = SecurityUtil.getPkcs9Extensions(csr); - - // Verify key usage extension - Extension keyUsageExt = extensions.getExtension(Extension.keyUsage); - Assert.assertEquals(true, keyUsageExt.isCritical()); - - - // Verify San extension not set - Assert.assertEquals(null, - extensions.getExtension(Extension.subjectAlternativeName)); - - // Verify signature in CSR - ContentVerifierProvider verifierProvider = - new JcaContentVerifierProviderBuilder().setProvider(securityConfig - .getProvider()).build(csr.getSubjectPublicKeyInfo()); - Assert.assertEquals(true, csr.isSignatureValid(verifierProvider)); - } - - @Test - public void testGenerateCSRwithSan() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, - OperatorCreationException, PKCSException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - // Multi-home - builder.addIpAddress("192.168.1.1"); - builder.addIpAddress("192.168.2.1"); - - builder.addDnsName("dn1.abc.com"); - - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. 
- String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - Extensions extensions = SecurityUtil.getPkcs9Extensions(csr); - - // Verify key usage extension - Extension sanExt = extensions.getExtension(Extension.keyUsage); - Assert.assertEquals(true, sanExt.isCritical()); - - - // Verify signature in CSR - ContentVerifierProvider verifierProvider = - new JcaContentVerifierProviderBuilder().setProvider(securityConfig - .getProvider()).build(csr.getSubjectPublicKeyInfo()); - Assert.assertEquals(true, csr.isSignatureValid(verifierProvider)); - } - - @Test - public void testGenerateCSRWithInvalidParams() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - try { - builder.setKey(null); - builder.build(); - Assert.fail("Null Key should have failed."); - } catch (NullPointerException | IllegalArgumentException e) { - builder.setKey(keyPair); - } - - // Now try with blank/null Subject. - try { - builder.setSubject(null); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - try { - builder.setSubject(""); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - // Now try with invalid IP address - try { - builder.addIpAddress("255.255.255.*"); - builder.build(); - Assert.fail("Invalid ip address"); - } catch (IllegalArgumentException e) { - } - - PKCS10CertificationRequest csr = builder.build(); - - // Check the Subject Name is in the expected format. 
- String dnName = String.format(SecurityUtil.getDistinguishedNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(csr.getSubject().toString(), dnName); - - // Verify the public key info match - byte[] encoded = keyPair.getPublic().getEncoded(); - SubjectPublicKeyInfo subjectPublicKeyInfo = - SubjectPublicKeyInfo.getInstance(ASN1Sequence.getInstance(encoded)); - SubjectPublicKeyInfo csrPublicKeyInfo = csr.getSubjectPublicKeyInfo(); - Assert.assertEquals(csrPublicKeyInfo, subjectPublicKeyInfo); - - // Verify CSR with attribute for extensions - Assert.assertEquals(1, csr.getAttributes().length); - } - - @Test - public void testCsrSerialization() throws NoSuchProviderException, - NoSuchAlgorithmException, SCMSecurityException, IOException { - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "DN001"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - CertificateSignRequest.Builder builder = - new CertificateSignRequest.Builder() - .setSubject(subject) - .setScmID(scmID) - .setClusterID(clusterID) - .setKey(keyPair) - .setConfiguration(conf); - - PKCS10CertificationRequest csr = builder.build(); - byte[] csrBytes = csr.getEncoded(); - - // Verify de-serialized CSR matches with the original CSR - PKCS10CertificationRequest dsCsr = new PKCS10CertificationRequest(csrBytes); - Assert.assertEquals(csr, dsCsr); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java deleted file mode 100644 index 02d00786442..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.security.x509.certificates; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.asn1.x509.Extension; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.SignatureException; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; -import java.util.Date; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Test Class for Root Certificate generation. - */ -public class TestRootCertificate { - private static OzoneConfiguration conf = new OzoneConfiguration(); - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private SecurityConfig securityConfig; - - @Before - public void init() throws IOException { - conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().toString()); - securityConfig = new SecurityConfig(conf); - } - - @Test - public void testAllFieldsAreExpected() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, CertificateException, - SignatureException, InvalidKeyException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setKey(keyPair) - .setConfiguration(conf); - - X509CertificateHolder certificateHolder = builder.build(); - - //Assert that we indeed have a self signed certificate. - Assert.assertEquals(certificateHolder.getIssuer(), - certificateHolder.getSubject()); - - - // Make sure that NotBefore is before the current Date - Date invalidDate = java.sql.Date.valueOf( - notBefore.minus(1, ChronoUnit.DAYS)); - Assert.assertFalse( - certificateHolder.getNotBefore() - .before(invalidDate)); - - //Make sure the end date is honored. - invalidDate = java.sql.Date.valueOf( - notAfter.plus(1, ChronoUnit.DAYS)); - Assert.assertFalse( - certificateHolder.getNotAfter() - .after(invalidDate)); - - // Check the Subject Name and Issuer Name is in the expected format. 
- String dnName = String.format(SelfSignedCertificate.getNameFormat(), - subject, scmID, clusterID); - Assert.assertEquals(certificateHolder.getIssuer().toString(), dnName); - Assert.assertEquals(certificateHolder.getSubject().toString(), dnName); - - // We did not ask for this Certificate to be a CertificateServer - // certificate, hence that - // extension should be null. - Assert.assertNull( - certificateHolder.getExtension(Extension.basicConstraints)); - - // Extract the Certificate and verify that certificate matches the public - // key. - X509Certificate cert = - new JcaX509CertificateConverter().getCertificate(certificateHolder); - cert.verify(keyPair.getPublic()); - } - - @Test - public void testCACert() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setKey(keyPair) - .setConfiguration(conf) - .makeCA(); - - X509CertificateHolder certificateHolder = builder.build(); - // This time we asked for a CertificateServer Certificate, make sure that - // extension is - // present and valid. - Extension basicExt = - certificateHolder.getExtension(Extension.basicConstraints); - - Assert.assertNotNull(basicExt); - Assert.assertTrue(basicExt.isCritical()); - - // Since this code assigns ONE for the root certificate, we check if the - // serial number is the expected number. - Assert.assertEquals(certificateHolder.getSerialNumber(), BigInteger.ONE); - } - - @Test - public void testInvalidParamFails() - throws SCMSecurityException, NoSuchProviderException, - NoSuchAlgorithmException, IOException { - LocalDate notBefore = LocalDate.now(); - LocalDate notAfter = notBefore.plus(365, ChronoUnit.DAYS); - String clusterID = UUID.randomUUID().toString(); - String scmID = UUID.randomUUID().toString(); - String subject = "testRootCert"; - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(notBefore) - .setEndDate(notAfter) - .setClusterID(clusterID) - .setScmID(scmID) - .setSubject(subject) - .setConfiguration(conf) - .setKey(keyPair) - .makeCA(); - try { - builder.setKey(null); - builder.build(); - Assert.fail("Null Key should have failed."); - } catch (NullPointerException | IllegalArgumentException e) { - builder.setKey(keyPair); - } - - // Now try with Blank Subject. 
- try { - builder.setSubject(""); - builder.build(); - Assert.fail("Null/Blank Subject should have thrown."); - } catch (IllegalArgumentException e) { - builder.setSubject(subject); - } - - // Now try with blank/null SCM ID - try { - builder.setScmID(null); - builder.build(); - Assert.fail("Null/Blank SCM ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setScmID(scmID); - } - - - // Now try with blank/null SCM ID - try { - builder.setClusterID(null); - builder.build(); - Assert.fail("Null/Blank Cluster ID should have thrown."); - } catch (IllegalArgumentException e) { - builder.setClusterID(clusterID); - } - - - // Swap the Begin and End Date and verify that we cannot create a - // certificate like that. - try { - builder.setBeginDate(notAfter); - builder.setEndDate(notBefore); - builder.build(); - Assert.fail("Illegal dates should have thrown."); - } catch (IllegalArgumentException e) { - builder.setBeginDate(notBefore); - builder.setEndDate(notAfter); - } - - try { - KeyPair newKey = keyGen.generateKey(); - KeyPair wrongKey = new KeyPair(newKey.getPublic(), keyPair.getPrivate()); - builder.setKey(wrongKey); - X509CertificateHolder certificateHolder = builder.build(); - X509Certificate cert = - new JcaX509CertificateConverter().getCertificate(certificateHolder); - cert.verify(wrongKey.getPublic()); - Assert.fail("Invalid Key, should have thrown."); - } catch (SCMSecurityException | CertificateException - | SignatureException | InvalidKeyException e) { - builder.setKey(keyPair); - } - // Assert that we can create a certificate with all sane params. - Assert.assertNotNull(builder.build()); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java deleted file mode 100644 index fffe1e5bf97..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Test classes for Certificate utilities. - */ -package org.apache.hadoop.hdds.security.x509.certificate.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java deleted file mode 100644 index 08761f48e88..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.keys; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PublicKey; -import java.security.interfaces.RSAPublicKey; -import java.security.spec.PKCS8EncodedKeySpec; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for HDDS Key Generator. - */ -public class TestHDDSKeyGenerator { - private SecurityConfig config; - - @Before - public void init() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, GenericTestUtils.getTempPath("testpath")); - config = new SecurityConfig(conf); - } - /** - * In this test we verify that we are able to create a key pair, then get - * bytes of that and use ASN1. parser to parse it back to a private key. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - */ - @Test - public void testGenerateKey() - throws NoSuchProviderException, NoSuchAlgorithmException { - HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(); - Assert.assertEquals(config.getKeyAlgo(), - keyPair.getPrivate().getAlgorithm()); - PKCS8EncodedKeySpec keySpec = - new PKCS8EncodedKeySpec(keyPair.getPrivate().getEncoded()); - Assert.assertEquals("PKCS#8", keySpec.getFormat()); - } - - /** - * In this test we assert that size that we specified is used for Key - * generation. - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. 
- */ - @Test - public void testGenerateKeyWithSize() throws NoSuchProviderException, - NoSuchAlgorithmException { - HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration()); - KeyPair keyPair = keyGen.generateKey(4096); - PublicKey publicKey = keyPair.getPublic(); - if(publicKey instanceof RSAPublicKey) { - Assert.assertEquals(4096, - ((RSAPublicKey)(publicKey)).getModulus().bitLength()); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java deleted file mode 100644 index d82b02f43c5..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.keys; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.junit.Assert.assertNotNull; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Test class for HDDS pem writer. 
- */ -public class TestKeyCodec { - - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private OzoneConfiguration configuration; - private SecurityConfig securityConfig; - private String component; - private HDDSKeyGenerator keyGenerator; - private String prefix; - - @Before - public void init() throws IOException { - configuration = new OzoneConfiguration(); - prefix = temporaryFolder.newFolder().toString(); - configuration.set(HDDS_METADATA_DIR_NAME, prefix); - keyGenerator = new HDDSKeyGenerator(configuration); - securityConfig = new SecurityConfig(configuration); - component = "test_component"; - } - - /** - * Assert basic things like we are able to create a file, and the names are - * in expected format etc. - * - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - * @throws IOException - On I/O failure. - */ - @Test - public void testWriteKey() - throws NoSuchProviderException, NoSuchAlgorithmException, - IOException, InvalidKeySpecException { - KeyPair keys = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.writeKey(keys); - - // Assert that locations have been created. - Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation(component); - Assert.assertTrue(keyLocation.toFile().exists()); - - // Assert that locations are created in the locations that we specified - // using the Config. - Assert.assertTrue(keyLocation.toString().startsWith(prefix)); - Path privateKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPrivateKeyFileName()); - Assert.assertTrue(privateKeyPath.toFile().exists()); - Path publicKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPublicKeyFileName()); - Assert.assertTrue(publicKeyPath.toFile().exists()); - - // Read the private key and test if the expected String in the PEM file - // format exists. - byte[] privateKey = Files.readAllBytes(privateKeyPath); - String privateKeydata = new String(privateKey, StandardCharsets.UTF_8); - Assert.assertTrue(privateKeydata.contains("PRIVATE KEY")); - - // Read the public key and test if the expected String in the PEM file - // format exists. - byte[] publicKey = Files.readAllBytes(publicKeyPath); - String publicKeydata = new String(publicKey, StandardCharsets.UTF_8); - Assert.assertTrue(publicKeydata.contains("PUBLIC KEY")); - - // Let us decode the PEM file and parse it back into binary. - KeyFactory kf = KeyFactory.getInstance( - pemWriter.getSecurityConfig().getKeyAlgo()); - - // Replace the PEM Human readable guards. - privateKeydata = - privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", ""); - privateKeydata = - privateKeydata.replace("-----END PRIVATE KEY-----", ""); - - // Decode the bas64 to binary format and then use an ASN.1 parser to - // parse the binary format. - - byte[] keyBytes = Base64.decodeBase64(privateKeydata); - PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes); - PrivateKey privateKeyDecoded = kf.generatePrivate(spec); - assertNotNull("Private Key should not be null", - privateKeyDecoded); - - // Let us decode the public key and veriy that we can parse it back into - // binary. 
- publicKeydata = - publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", ""); - publicKeydata = - publicKeydata.replace("-----END PUBLIC KEY-----", ""); - - keyBytes = Base64.decodeBase64(publicKeydata); - X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes); - PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec); - assertNotNull("Public Key should not be null", - publicKeyDecoded); - - // Now let us assert the permissions on the Directories and files are as - // expected. - Set expectedSet = pemWriter.getPermissionSet(); - Set currentSet = - Files.getPosixFilePermissions(privateKeyPath); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - - currentSet = - Files.getPosixFilePermissions(publicKeyPath); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - - currentSet = - Files.getPosixFilePermissions(keyLocation); - currentSet.removeAll(expectedSet); - Assert.assertEquals(0, currentSet.size()); - } - - /** - * Assert key rewrite fails without force option. - * - * @throws IOException - on I/O failure. - */ - @Test - public void testReWriteKey() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - SecurityConfig secConfig = pemWriter.getSecurityConfig(); - pemWriter.writeKey(kp); - - // Assert that rewriting of keys throws exception with valid messages. - LambdaTestUtils - .intercept(IOException.class, "Private Key file already exists.", - () -> pemWriter.writeKey(kp)); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPrivateKeyFileName()).toFile()); - LambdaTestUtils - .intercept(IOException.class, "Public Key file already exists.", - () -> pemWriter.writeKey(kp)); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPublicKeyFileName()).toFile()); - - // Should succeed now as both public and private key are deleted. - pemWriter.writeKey(kp); - // Should succeed with overwrite flag as true. - pemWriter.writeKey(kp, true); - - } - - /** - * Assert key rewrite fails in non Posix file system. - * - * @throws IOException - on I/O failure. - */ - @Test - public void testWriteKeyInNonPosixFS() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.setIsPosixFileSystem(() -> false); - - // Assert key rewrite fails in non Posix file system. 
- LambdaTestUtils - .intercept(IOException.class, "Unsupported File System for pem file.", - () -> pemWriter.writeKey(kp)); - } - - @Test - public void testReadWritePublicKeywithoutArgs() - throws NoSuchProviderException, NoSuchAlgorithmException, IOException, - InvalidKeySpecException { - - KeyPair kp = keyGenerator.generateKey(); - KeyCodec keycodec = new KeyCodec(securityConfig, component); - keycodec.writeKey(kp); - - PublicKey pubKey = keycodec.readPublicKey(); - assertNotNull(pubKey); - - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java deleted file mode 100644 index 49e40b4774a..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Test package for keys used in X.509 env. - */ -package org.apache.hadoop.hdds.security.x509.keys; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java deleted file mode 100644 index f5414686a25..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * X.509 Certificate and keys related tests. 
- */ -package org.apache.hadoop.hdds.security.x509; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java deleted file mode 100644 index 10724ab7c28..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; - -import io.jaegertracing.internal.JaegerSpanContext; -import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException; -import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.jupiter.api.Test; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -class TestStringCodec { - - @Test - void testExtract() throws Exception { - StringCodec codec = new StringCodec(); - - LambdaTestUtils.intercept(EmptyTracerStateStringException.class, - () -> codec.extract(null)); - - StringBuilder sb = new StringBuilder().append("123"); - LambdaTestUtils.intercept(MalformedTracerStateStringException.class, - "String does not match tracer state format", - () -> codec.extract(sb)); - - sb.append(":456:789"); - LambdaTestUtils.intercept(MalformedTracerStateStringException.class, - "String does not match tracer state format", - () -> codec.extract(sb)); - sb.append(":66"); - JaegerSpanContext context = codec.extract(sb); - String expectedContextString = new String("123:456:789:66"); - assertTrue(context.getTraceId().equals("123")); - assertTrue(context.toString().equals(expectedContextString)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java deleted file mode 100644 index 18e1200b4f5..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.tracing; -/** - Test cases for ozone tracing. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java deleted file mode 100644 index 11d0fad55ee..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestHddsIdFactory.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.junit.After; -import static org.junit.Assert.assertEquals; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test the JMX interface for the rocksdb metastore implementation. - */ -public class TestHddsIdFactory { - - private static final Set ID_SET = ConcurrentHashMap.newKeySet(); - private static final int IDS_PER_THREAD = 10000; - private static final int NUM_OF_THREADS = 5; - - @After - public void cleanup() { - ID_SET.clear(); - } - - @Test - public void testGetLongId() throws Exception { - - ExecutorService executor = Executors.newFixedThreadPool(5); - List> tasks = new ArrayList<>(5); - addTasks(tasks); - List> result = executor.invokeAll(tasks); - assertEquals(IDS_PER_THREAD * NUM_OF_THREADS, ID_SET.size()); - for (Future r : result) { - assertEquals(IDS_PER_THREAD, r.get().intValue()); - } - } - - private void addTasks(List> tasks) { - for (int i = 0; i < NUM_OF_THREADS; i++) { - Callable task = () -> { - for (int idNum = 0; idNum < IDS_PER_THREAD; idNum++) { - long var = HddsIdFactory.getLongId(); - if (ID_SET.contains(var)) { - Assert.fail("Duplicate id found"); - } - ID_SET.add(var); - } - return IDS_PER_THREAD; - }; - tasks.add(task); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java deleted file mode 100644 index d24fcf5c3b8..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ /dev/null @@ -1,590 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.utils; - -import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.tuple.ImmutablePair; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.DFSUtilClient; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter; - -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.slf4j.event.Level; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.runners.Parameterized.Parameters; - -/** - * Test class for ozone metadata store. - */ -@RunWith(Parameterized.class) -public class TestMetadataStore { - - private final static int MAX_GETRANGE_LENGTH = 100; - private final String storeImpl; - @Rule - public ExpectedException expectedException = ExpectedException.none(); - private MetadataStore store; - private File testDir; - - public TestMetadataStore(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB} - }); - } - - @Before - public void init() throws IOException { - if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) { - // The initialization of RocksDB fails on Windows - assumeNotWindows(); - } - - testDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase()); - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - store = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(testDir) - .build(); - - // Add 20 entries. 
- // {a0 : a-value0} to {a9 : a-value9} - // {b0 : b-value0} to {b9 : b-value9} - for (int i = 0; i < 10; i++) { - store.put(getBytes("a" + i), getBytes("a-value" + i)); - store.put(getBytes("b" + i), getBytes("b-value" + i)); - } - } - - @Test - public void testIterator() throws Exception { - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - File dbDir = GenericTestUtils.getRandomizedTestDir(); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - //As database is empty, check whether iterator is working as expected or - // not. - MetaStoreIterator metaStoreIterator = - dbStore.iterator(); - assertFalse(metaStoreIterator.hasNext()); - try { - metaStoreIterator.next(); - fail("testIterator failed"); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Store has no more elements", - ex); - } - - for (int i = 0; i < 10; i++) { - store.put(getBytes("a" + i), getBytes("a-value" + i)); - } - - metaStoreIterator = dbStore.iterator(); - - int i = 0; - while (metaStoreIterator.hasNext()) { - MetadataStore.KeyValue val = metaStoreIterator.next(); - assertEquals("a" + i, getString(val.getKey())); - assertEquals("a-value" + i, getString(val.getValue())); - i++; - } - - // As we have iterated all the keys in database, hasNext should return - // false and next() should throw NoSuchElement exception. - - assertFalse(metaStoreIterator.hasNext()); - try { - metaStoreIterator.next(); - fail("testIterator failed"); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Store has no more elements", - ex); - } - dbStore.close(); - dbStore.destroy(); - FileUtils.deleteDirectory(dbDir); - - } - - @Test - public void testMetaStoreConfigDifferentFromType() throws IOException { - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - String dbType; - GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) { - dbType = "RocksDB"; - } else { - dbType = "LevelDB"; - } - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + dbType.toLowerCase() + "-test"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbDir).setDBType(dbType).build(); - assertTrue(logCapturer.getOutput().contains("Using dbType " + dbType + "" + - " for metastore")); - dbStore.close(); - dbStore.destroy(); - FileUtils.deleteDirectory(dbDir); - - } - - @Test - public void testdbTypeNotSet() throws IOException { - - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-test"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbDir).build(); - assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" + - " " + storeImpl)); - dbStore.close(); - dbStore.destroy(); - 
FileUtils.deleteDirectory(dbDir); - - } - - @After - public void cleanup() throws IOException { - if (store != null) { - store.close(); - store.destroy(); - } - if (testDir != null) { - FileUtils.deleteDirectory(testDir); - } - } - - private byte[] getBytes(String str) { - return str == null ? null : - DFSUtilClient.string2Bytes(str); - } - - private String getString(byte[] bytes) { - return bytes == null ? null : - DFSUtilClient.bytes2String(bytes); - } - - @Test - public void testGetDelete() throws IOException { - for (int i = 0; i < 10; i++) { - byte[] va = store.get(getBytes("a" + i)); - assertEquals("a-value" + i, getString(va)); - - byte[] vb = store.get(getBytes("b" + i)); - assertEquals("b-value" + i, getString(vb)); - } - - String keyToDel = "del-" + UUID.randomUUID().toString(); - store.put(getBytes(keyToDel), getBytes(keyToDel)); - assertEquals(keyToDel, getString(store.get(getBytes(keyToDel)))); - store.delete(getBytes(keyToDel)); - assertEquals(null, store.get(getBytes(keyToDel))); - } - - @Test - public void testPeekFrom() throws IOException { - // Test peek from an element that has prev as well as next - testPeek("a3", "a2", "a4"); - - // Test peek from an element that only has prev - testPeek("b9", "b8", null); - - // Test peek from an element that only has next - testPeek("a0", null, "a1"); - } - - private String getExpectedValue(String key) { - if (key == null) { - return null; - } - char[] arr = key.toCharArray(); - return new StringBuilder().append(arr[0]).append("-value") - .append(arr[arr.length - 1]).toString(); - } - - private void testPeek(String peekKey, String prevKey, String nextKey) - throws IOException { - // Look for current - String k = null; - String v = null; - ImmutablePair current = - store.peekAround(0, getBytes(peekKey)); - if (current != null) { - k = getString(current.getKey()); - v = getString(current.getValue()); - } - assertEquals(peekKey, k); - assertEquals(v, getExpectedValue(peekKey)); - - // Look for prev - k = null; - v = null; - ImmutablePair prev = - store.peekAround(-1, getBytes(peekKey)); - if (prev != null) { - k = getString(prev.getKey()); - v = getString(prev.getValue()); - } - assertEquals(prevKey, k); - assertEquals(v, getExpectedValue(prevKey)); - - // Look for next - k = null; - v = null; - ImmutablePair next = - store.peekAround(1, getBytes(peekKey)); - if (next != null) { - k = getString(next.getKey()); - v = getString(next.getValue()); - } - assertEquals(nextKey, k); - assertEquals(v, getExpectedValue(nextKey)); - } - - @Test - public void testIterateKeys() throws IOException { - // iterate keys from b0 - ArrayList result = Lists.newArrayList(); - store.iterate(getBytes("b0"), (k, v) -> { - // b-value{i} - String value = getString(v); - char num = value.charAt(value.length() - 1); - // each value adds 1 - int i = Character.getNumericValue(num) + 1; - value = value.substring(0, value.length() - 1) + i; - result.add(value); - return true; - }); - - assertFalse(result.isEmpty()); - for (int i = 0; i < result.size(); i++) { - assertEquals("b-value" + (i + 1), result.get(i)); - } - - // iterate from a non exist key - result.clear(); - store.iterate(getBytes("xyz"), (k, v) -> { - result.add(getString(v)); - return true; - }); - assertTrue(result.isEmpty()); - - // iterate from the beginning - result.clear(); - store.iterate(null, (k, v) -> { - result.add(getString(v)); - return true; - }); - assertEquals(20, result.size()); - } - - @Test - public void testGetRangeKVs() throws IOException { - List> result = null; - - // Set empty 
startKey will return values from beginning. - result = store.getRangeKVs(null, 5); - assertEquals(5, result.size()); - assertEquals("a-value2", getString(result.get(2).getValue())); - - // Empty list if startKey doesn't exist. - result = store.getRangeKVs(getBytes("a12"), 5); - assertEquals(0, result.size()); - - // Returns max available entries after a valid startKey. - result = store.getRangeKVs(getBytes("b0"), MAX_GETRANGE_LENGTH); - assertEquals(10, result.size()); - assertEquals("b0", getString(result.get(0).getKey())); - assertEquals("b-value0", getString(result.get(0).getValue())); - result = store.getRangeKVs(getBytes("b0"), 5); - assertEquals(5, result.size()); - - // Both startKey and count are honored. - result = store.getRangeKVs(getBytes("a9"), 2); - assertEquals(2, result.size()); - assertEquals("a9", getString(result.get(0).getKey())); - assertEquals("a-value9", getString(result.get(0).getValue())); - assertEquals("b0", getString(result.get(1).getKey())); - assertEquals("b-value0", getString(result.get(1).getValue())); - - // Filter keys by prefix. - // It should returns all "b*" entries. - MetadataKeyFilter filter1 = new KeyPrefixFilter().addFilter("b"); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(10, result.size()); - assertTrue(result.stream().allMatch(entry -> - new String(entry.getKey(), UTF_8).startsWith("b") - )); - assertEquals(20, filter1.getKeysScannedNum()); - assertEquals(10, filter1.getKeysHintedNum()); - result = store.getRangeKVs(null, 3, filter1); - assertEquals(3, result.size()); - result = store.getRangeKVs(getBytes("b3"), 1, filter1); - assertEquals("b-value3", getString(result.get(0).getValue())); - - // Define a customized filter that filters keys by suffix. - // Returns all "*2" entries. - MetadataKeyFilter filter2 = (preKey, currentKey, nextKey) - -> getString(currentKey).endsWith("2"); - result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter2); - assertEquals(2, result.size()); - assertEquals("a2", getString(result.get(0).getKey())); - assertEquals("b2", getString(result.get(1).getKey())); - result = store.getRangeKVs(null, 1, filter2); - assertEquals(1, result.size()); - assertEquals("a2", getString(result.get(0).getKey())); - - // Apply multiple filters. - result = store.getRangeKVs(null, MAX_GETRANGE_LENGTH, filter1, filter2); - assertEquals(1, result.size()); - assertEquals("b2", getString(result.get(0).getKey())); - assertEquals("b-value2", getString(result.get(0).getValue())); - - // If filter is null, no effect. - result = store.getRangeKVs(null, 1, (MetadataKeyFilter[]) null); - assertEquals(1, result.size()); - assertEquals("a0", getString(result.get(0).getKey())); - } - - @Test - public void testGetSequentialRangeKVs() throws IOException { - MetadataKeyFilter suffixFilter = (preKey, currentKey, nextKey) - -> DFSUtil.bytes2String(currentKey).endsWith("2"); - // Suppose to return a2 and b2 - List> result = - store.getRangeKVs(null, MAX_GETRANGE_LENGTH, suffixFilter); - assertEquals(2, result.size()); - assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey())); - assertEquals("b2", DFSUtil.bytes2String(result.get(1).getKey())); - - // Suppose to return just a2, because when it iterates to a3, - // the filter no long matches and it should stop from there. 
- result = store.getSequentialRangeKVs(null, - MAX_GETRANGE_LENGTH, suffixFilter); - assertEquals(1, result.size()); - assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey())); - } - - @Test - public void testGetRangeLength() throws IOException { - List> result = null; - - result = store.getRangeKVs(null, 0); - assertEquals(0, result.size()); - - result = store.getRangeKVs(null, 1); - assertEquals(1, result.size()); - - // Count less than zero is invalid. - expectedException.expect(IllegalArgumentException.class); - expectedException.expectMessage("Invalid count given"); - store.getRangeKVs(null, -1); - } - - @Test - public void testInvalidStartKey() throws IOException { - // If startKey is invalid, the returned list should be empty. - List> kvs = - store.getRangeKVs(getBytes("unknownKey"), MAX_GETRANGE_LENGTH); - assertEquals(0, kvs.size()); - } - - @Test - public void testDestroyDB() throws IOException { - // create a new DB to test db destroy - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-toDestroy"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - dbStore.put(getBytes("key1"), getBytes("value1")); - dbStore.put(getBytes("key2"), getBytes("value2")); - - assertFalse(dbStore.isEmpty()); - assertTrue(dbDir.exists()); - assertTrue(dbDir.listFiles().length > 0); - - dbStore.destroy(); - - assertFalse(dbDir.exists()); - } - - @Test - public void testBatchWrite() throws IOException { - Configuration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); - - File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() - + "-" + storeImpl.toLowerCase() + "-batchWrite"); - MetadataStore dbStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setCreateIfMissing(true) - .setDbFile(dbDir) - .build(); - - List expectedResult = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i)); - expectedResult.add("batch-" + i); - } - - BatchOperation batch = new BatchOperation(); - batch.delete(getBytes("batch-2")); - batch.delete(getBytes("batch-3")); - batch.delete(getBytes("batch-4")); - batch.put(getBytes("batch-new-2"), getBytes("batch-new-value-2")); - - expectedResult.remove("batch-2"); - expectedResult.remove("batch-3"); - expectedResult.remove("batch-4"); - expectedResult.add("batch-new-2"); - - dbStore.writeBatch(batch); - - Iterator it = expectedResult.iterator(); - AtomicInteger count = new AtomicInteger(0); - dbStore.iterate(null, (key, value) -> { - count.incrementAndGet(); - return it.hasNext() && it.next().equals(getString(key)); - }); - - assertEquals(8, count.get()); - } - - @Test - public void testKeyPrefixFilter() throws IOException { - List> result = null; - RuntimeException exception = null; - - try { - new KeyPrefixFilter().addFilter("b0", true).addFilter("b"); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "rejected")); - } - - try { - new KeyPrefixFilter().addFilter("b0").addFilter("b", true); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b already " + - "accepted")); - } - - try { - new KeyPrefixFilter().addFilter("b", 
true).addFilter("b0"); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "rejected")); - } - - try { - new KeyPrefixFilter().addFilter("b").addFilter("b0", true); - } catch (IllegalArgumentException e) { - exception = e; - assertTrue(exception.getMessage().contains("KeyPrefix: b0 already " + - "accepted")); - } - - MetadataKeyFilter filter1 = new KeyPrefixFilter(true) - .addFilter("a0") - .addFilter("a1") - .addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(2, result.size()); - assertTrue(result.stream().anyMatch(entry -> new String(entry.getKey(), - UTF_8) - .startsWith("a0")) && result.stream().anyMatch(entry -> new String( - entry.getKey(), UTF_8).startsWith("a1"))); - - filter1 = new KeyPrefixFilter(true).addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(0, result.size()); - - filter1 = new KeyPrefixFilter().addFilter("b", true); - result = store.getRangeKVs(null, 100, filter1); - assertEquals(10, result.size()); - assertTrue(result.stream().allMatch(entry -> new String(entry.getKey(), - UTF_8) - .startsWith("a"))); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java deleted file mode 100644 index 148ccf94a1b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRetriableTask.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import static org.junit.Assert.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.zip.ZipException; - -/** - * Tests for {@link RetriableTask}. 
- */ -public class TestRetriableTask { - - @Test - public void returnsSuccessfulResult() throws Exception { - String result = "bilbo"; - RetriableTask task = new RetriableTask<>( - RetryPolicies.RETRY_FOREVER, "test", () -> result); - assertEquals(result, task.call()); - } - - @Test - public void returnsSuccessfulResultAfterFailures() throws Exception { - String result = "gandalf"; - AtomicInteger attempts = new AtomicInteger(); - RetriableTask task = new RetriableTask<>( - RetryPolicies.RETRY_FOREVER, "test", - () -> { - if (attempts.incrementAndGet() <= 2) { - throw new Exception("testing"); - } - return result; - }); - assertEquals(result, task.call()); - } - - @Test - public void respectsRetryPolicy() { - int expectedAttempts = 3; - AtomicInteger attempts = new AtomicInteger(); - RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( - expectedAttempts, 1, TimeUnit.MILLISECONDS); - RetriableTask task = new RetriableTask<>(retryPolicy, "thr", () -> { - attempts.incrementAndGet(); - throw new ZipException("testing"); - }); - - IOException e = assertThrows(IOException.class, task::call); - assertEquals(ZipException.class, e.getCause().getClass()); - assertEquals(expectedAttempts, attempts.get()); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java deleted file mode 100644 index 29c780304cb..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.utils; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import javax.management.MBeanServer; -import java.io.File; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.HashMap; -import java.util.Map; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test the JMX interface for the rocksdb metastore implementation. - */ -public class TestRocksDBStoreMBean { - - private Configuration conf; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB); - } - - - @Test - public void testJmxBeans() throws Exception { - - RocksDBStore metadataStore = getTestRocksDBStoreWithData(); - - MBeanServer platformMBeanServer = - ManagementFactory.getPlatformMBeanServer(); - Thread.sleep(2000); - - Object keysWritten = platformMBeanServer - .getAttribute(metadataStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN"); - - assertEquals(10L, keysWritten); - - Object dbWriteAverage = platformMBeanServer - .getAttribute(metadataStore.getStatMBeanName(), "DB_WRITE_AVERAGE"); - assertTrue((double) dbWriteAverage > 0); - - metadataStore.close(); - - } - - @Test() - public void testDisabledStat() throws Exception { - File testDir = GenericTestUtils - .getTestDir(getClass().getSimpleName() + "-withoutstat"); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, - OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF); - - RocksDBStore metadataStore = - (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(testDir).build(); - - Assert.assertNull(metadataStore.getStatMBeanName()); - } - - @Test - public void testMetricsSystemIntegration() throws Exception { - - RocksDBStore metadataStore = getTestRocksDBStoreWithData(); - Thread.sleep(2000); - - MetricsSystem ms = DefaultMetricsSystem.instance(); - MetricsSource rdbSource = - ms.getSource("Rocksdb_TestRocksDBStoreMBean-withstat"); - - BufferedMetricsCollector metricsCollector = new BufferedMetricsCollector(); - rdbSource.getMetrics(metricsCollector, true); - - Map metrics = metricsCollector.getMetricsRecordBuilder() - .getMetrics(); - assertTrue(10.0 == metrics.get("NUMBER_KEYS_WRITTEN")); - assertTrue(metrics.get("DB_WRITE_AVERAGE") > 0); - metadataStore.close(); - } - - private RocksDBStore getTestRocksDBStoreWithData() throws 
IOException { - File testDir = - GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat"); - - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - - RocksDBStore metadataStore = - (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(testDir).build(); - - for (int i = 0; i < 10; i++) { - metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8)); - } - - return metadataStore; - } -} - -/** - * Test class to buffer a single MetricsRecordBuilder instance. - */ -class BufferedMetricsCollector implements MetricsCollector { - - private BufferedMetricsRecordBuilderImpl metricsRecordBuilder; - - BufferedMetricsCollector() { - metricsRecordBuilder = new BufferedMetricsRecordBuilderImpl(); - } - - public BufferedMetricsRecordBuilderImpl getMetricsRecordBuilder() { - return metricsRecordBuilder; - } - - @Override - public MetricsRecordBuilder addRecord(String s) { - metricsRecordBuilder.setContext(s); - return metricsRecordBuilder; - } - - @Override - public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) { - return metricsRecordBuilder; - } - - /** - * Test class to buffer a single snapshot of metrics. - */ - class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder { - - private Map metrics = new HashMap<>(); - private String contextName; - - public Map getMetrics() { - return metrics; - } - - @Override - public MetricsRecordBuilder tag(MetricsInfo metricsInfo, String s) { - return null; - } - - @Override - public MetricsRecordBuilder add(MetricsTag metricsTag) { - return null; - } - - @Override - public MetricsRecordBuilder add(AbstractMetric abstractMetric) { - return null; - } - - @Override - public MetricsRecordBuilder setContext(String s) { - this.contextName = s; - return this; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, int i) { - return null; - } - - @Override - public MetricsRecordBuilder addCounter(MetricsInfo metricsInfo, long l) { - metrics.put(metricsInfo.name(), (double)l); - return this; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, int i) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, long l) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, float v) { - return null; - } - - @Override - public MetricsRecordBuilder addGauge(MetricsInfo metricsInfo, double v) { - metrics.put(metricsInfo.name(), v); - return this; - } - - @Override - public MetricsCollector parent() { - return null; - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java deleted file mode 100644 index 4ba54e98fca..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdfs.DFSUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.apache.hadoop.hdds.utils.db.DBConfigFromFile.getOptionsFileNameFromDB; - -/** - * DBConf tests. - */ -public class TestDBConfigFromFile { - private final static String DB_FILE = "test.db"; - private final static String INI_FILE = getOptionsFileNameFromDB(DB_FILE); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - System.setProperty(DBConfigFromFile.CONFIG_DIR, - folder.newFolder().toString()); - ClassLoader classLoader = getClass().getClassLoader(); - File testData = new File(classLoader.getResource(INI_FILE).getFile()); - File dest = Paths.get( - System.getProperty(DBConfigFromFile.CONFIG_DIR), INI_FILE).toFile(); - FileUtils.copyFile(testData, dest); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void readFromFile() throws IOException { - final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - final List columnFamilyDescriptors = - new ArrayList<>(); - for (String family : families) { - columnFamilyDescriptors.add( - new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8), - new ColumnFamilyOptions())); - } - - final DBOptions options = DBConfigFromFile.readFromFile(DB_FILE, - columnFamilyDescriptors); - - // Some Random Values Defined in the test.db.ini, we verify that we are - // able to get values that are defined in the test.db.ini. 
- Assert.assertNotNull(options); - Assert.assertEquals(551615L, options.maxManifestFileSize()); - Assert.assertEquals(1000L, options.keepLogFileNum()); - Assert.assertEquals(1048576, options.writableFileMaxBufferSize()); - } - - @Test - public void readFromFileInvalidConfig() throws IOException { - final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - final List columnFamilyDescriptors = - new ArrayList<>(); - for (String family : families) { - columnFamilyDescriptors.add( - new ColumnFamilyDescriptor(family.getBytes(StandardCharsets.UTF_8), - new ColumnFamilyOptions())); - } - - final DBOptions options = DBConfigFromFile.readFromFile("badfile.db.ini", - columnFamilyDescriptors); - - // This has to return a Null, since we have config defined for badfile.db - Assert.assertNull(options); - } -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java deleted file mode 100644 index d406060165f..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.StandardCharsets; - -/** - * Tests RDBStore creation. 
- */ -public class TestDBStoreBuilder { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Before - public void setUp() throws Exception { - System.setProperty(DBConfigFromFile.CONFIG_DIR, - folder.newFolder().toString()); - } - - @Test - public void builderWithoutAnyParams() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf).build(); - } - - @Test - public void builderWithOneParamV1() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .build(); - } - - @Test - public void builderWithOneParamV2() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setPath(newFolder.toPath()) - .build(); - } - - @Test - public void builderWithOpenClose() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .build(); - // Nothing to do just open and Close. - dbStore.close(); - } - - @Test - public void builderWithDoubleTableName() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("FIRST") - .addTable("FIRST") - .build(); - // Nothing to do , This will throw so we do not have to close. 
- - } - - @Test - public void builderWithDataWrites() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("First") - .addTable("Second") - .build()) { - try (Table firstTable = dbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - byte[] temp = firstTable.get(key); - Assert.assertArrayEquals(value, temp); - } - - try (Table secondTable = dbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - } - - @Test - public void builderWithDiskProfileWrites() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if(!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) - .setName("Test.db") - .setPath(newFolder.toPath()) - .addTable("First") - .addTable("Second") - .setProfile(DBProfile.DISK) - .build()) { - try (Table firstTable = dbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - byte[] temp = firstTable.get(key); - Assert.assertArrayEquals(value, temp); - } - - try (Table secondTable = dbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - } - - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java deleted file mode 100644 index 6084ae96cd5..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.utils.db; - -import javax.management.MBeanServer; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.codec.binary.StringUtils; -import org.apache.hadoop.hdfs.DFSUtil; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * RDBStore Tests. - */ -public class TestRDBStore { - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - @Rule - public ExpectedException thrown = ExpectedException.none(); - private RDBStore rdbStore = null; - private DBOptions options = null; - private Set configSet; - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - configSet = new HashSet<>(); - for(String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - private void insertRandomData(RDBStore dbStore, int familyIndex) - throws Exception { - try (Table firstTable = dbStore.getTable(families.get(familyIndex))) { - Assert.assertNotNull("Table cannot be null", firstTable); - for (int x = 0; x < 100; x++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - } - } - } - - @Test - public void compactDB() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - insertRandomData(newStore, 1); - // This test does not assert anything if there is any error this test - // will throw and fail. - newStore.compactDB(); - } - } - - @Test - public void close() throws Exception { - RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet); - Assert.assertNotNull("DBStore cannot be null", newStore); - // This test does not assert anything if there is any error this test - // will throw and fail. 
- newStore.close(); - } - - @Test - public void moveKey() throws Exception { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - - try (Table firstTable = rdbStore.getTable(families.get(1))) { - firstTable.put(key, value); - try (Table secondTable = rdbStore - .getTable(families.get(2))) { - rdbStore.move(key, firstTable, secondTable); - byte[] newvalue = secondTable.get(key); - // Make sure we have value in the second table - Assert.assertNotNull(newvalue); - //and it is same as what we wrote to the FirstTable - Assert.assertArrayEquals(value, newvalue); - } - // After move this key must not exist in the first table. - Assert.assertNull(firstTable.get(key)); - } - } - - @Test - public void moveWithValue() throws Exception { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - - byte[] nextValue = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - try (Table firstTable = rdbStore.getTable(families.get(1))) { - firstTable.put(key, value); - try (Table secondTable = rdbStore - .getTable(families.get(2))) { - rdbStore.move(key, nextValue, firstTable, secondTable); - byte[] newvalue = secondTable.get(key); - // Make sure we have value in the second table - Assert.assertNotNull(newvalue); - //and it is not same as what we wrote to the FirstTable, and equals - // the new value. - Assert.assertArrayEquals(nextValue, nextValue); - } - } - - } - - @Test - public void getEstimatedKeyCount() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - // Write 100 keys to the first table. - insertRandomData(newStore, 1); - - // Write 100 keys to the secondTable table. - insertRandomData(newStore, 2); - - // Let us make sure that our estimate is not off by 10% - Assert.assertTrue(newStore.getEstimatedKeyCount() > 180 - || newStore.getEstimatedKeyCount() < 220); - } - } - - @Test - public void getStatMBeanName() throws Exception { - - try (Table firstTable = rdbStore.getTable(families.get(1))) { - for (int y = 0; y < 100; y++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - firstTable.put(key, value); - } - } - MBeanServer platformMBeanServer = - ManagementFactory.getPlatformMBeanServer(); - Thread.sleep(2000); - - Object keysWritten = platformMBeanServer - .getAttribute(rdbStore.getStatMBeanName(), "NUMBER_KEYS_WRITTEN"); - - Assert.assertTrue(((Long) keysWritten) >= 99L); - - Object dbWriteAverage = platformMBeanServer - .getAttribute(rdbStore.getStatMBeanName(), "DB_WRITE_AVERAGE"); - Assert.assertTrue((double) dbWriteAverage > 0); - } - - @Test - public void getTable() throws Exception { - for (String tableName : families) { - try (Table table = rdbStore.getTable(tableName)) { - Assert.assertNotNull(tableName + "is null", table); - } - } - thrown.expect(IOException.class); - rdbStore.getTable("ATableWithNoName"); - } - - @Test - public void listTables() throws Exception { - List
<Table>
tableList = rdbStore.listTables(); - Assert.assertNotNull("Table list cannot be null", tableList); - Map hashTable = new HashMap<>(); - - for (Table t : tableList) { - hashTable.put(t.getName(), t); - } - - int count = families.size(); - // Assert that we have all the tables in the list and no more. - for (String name : families) { - Assert.assertTrue(hashTable.containsKey(name)); - count--; - } - Assert.assertEquals(0, count); - } - - @Test - public void testRocksDBCheckpoint() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - insertRandomData(newStore, 1); - DBCheckpoint checkpoint = - newStore.getCheckpoint(true); - Assert.assertNotNull(checkpoint); - - RDBStore restoredStoreFromCheckPoint = - new RDBStore(checkpoint.getCheckpointLocation().toFile(), - options, configSet); - - // Let us make sure that our estimate is not off by 10% - Assert.assertTrue( - restoredStoreFromCheckPoint.getEstimatedKeyCount() > 90 - || restoredStoreFromCheckPoint.getEstimatedKeyCount() < 110); - checkpoint.cleanupCheckpoint(); - } - - } - - @Test - public void testRocksDBCheckpointCleanup() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - Assert.assertNotNull("DB Store cannot be null", newStore); - - insertRandomData(newStore, 1); - DBCheckpoint checkpoint = - newStore.getCheckpoint(true); - Assert.assertNotNull(checkpoint); - - Assert.assertTrue(Files.exists( - checkpoint.getCheckpointLocation())); - checkpoint.cleanupCheckpoint(); - Assert.assertFalse(Files.exists( - checkpoint.getCheckpointLocation())); - } - } - - /** - * Not strictly a unit test. Just a confirmation of the expected behavior - * of RocksDB keyMayExist API. - * Expected behavior - On average, keyMayExist latency < key.get() latency - * for invalid keys. - * @throws Exception if unable to read from RocksDB. - */ - @Test - public void testRocksDBKeyMayExistApi() throws Exception { - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - RocksDB db = newStore.getDb(); - - //Test with 50 invalid keys. 
- long start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertTrue(db.get( - StringUtils.getBytesUtf16("key" + i))== null); - } - long end = System.nanoTime(); - long keyGetLatency = end - start; - - start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertFalse(db.keyMayExist( - StringUtils.getBytesUtf16("key" + i), new StringBuilder())); - } - end = System.nanoTime(); - long keyMayExistLatency = end - start; - - Assert.assertTrue(keyMayExistLatency < keyGetLatency); - } - } - - @Test - public void testGetDBUpdatesSince() throws Exception { - - try (RDBStore newStore = - new RDBStore(folder.newFolder(), options, configSet)) { - - try (Table firstTable = newStore.getTable(families.get(1))) { - firstTable.put(StringUtils.getBytesUtf16("Key1"), StringUtils - .getBytesUtf16("Value1")); - firstTable.put(StringUtils.getBytesUtf16("Key2"), StringUtils - .getBytesUtf16("Value2")); - } - Assert.assertTrue( - newStore.getDb().getLatestSequenceNumber() == 2); - - DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0); - Assert.assertEquals(2, dbUpdatesSince.getData().size()); - } - } - - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java deleted file mode 100644 index 788883dbbff..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hdfs.DFSUtil; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * Tests for RocksDBTable Store. 
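The keyMayExist test above leans on two RocksDB behaviours that are easier to see in isolation; a minimal sketch of the probe pattern it times (the key prefix is invented, db is the handle obtained via RDBStore#getDb() as in the test):

    static void probeAbsentKeys(RocksDB db) throws RocksDBException {
      for (int i = 0; i < 50; i++) {
        byte[] key = ("absent-" + i).getBytes(StandardCharsets.UTF_8);
        byte[] none = db.get(key);                     // full read; returns null when missing
        // keyMayExist consults memtable and filter state only, so a false answer
        // means "definitely not present" without paying for a full get()
        boolean mayExist = db.keyMayExist(key, new StringBuilder());
      }
    }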
- */ -public class TestRDBTableStore { - private static int count = 0; - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth", "Seventh", - "Eighth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private RDBStore rdbStore = null; - private DBOptions options = null; - - private static boolean consume(Table.KeyValue keyValue) { - count++; - try { - Assert.assertNotNull(keyValue.getKey()); - } catch(IOException ex) { - Assert.fail("Unexpected Exception " + ex.toString()); - } - return true; - } - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - - Set configSet = new HashSet<>(); - for(String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - @Test - public void toIOException() { - } - - @Test - public void getHandle() throws Exception { - try (Table testTable = rdbStore.getTable("First")) { - Assert.assertNotNull(testTable); - Assert.assertNotNull(((RDBTable) testTable).getHandle()); - } - } - - @Test - public void putGetAndEmpty() throws Exception { - try (Table testTable = rdbStore.getTable("First")) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertFalse(testTable.isEmpty()); - byte[] readValue = testTable.get(key); - Assert.assertArrayEquals(value, readValue); - } - try (Table secondTable = rdbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - - @Test - public void delete() throws Exception { - List deletedKeys = new ArrayList<>(); - List validKeys = new ArrayList<>(); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - for (int x = 0; x < 100; x++) { - deletedKeys.add( - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8)); - } - - for (int x = 0; x < 100; x++) { - validKeys.add( - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8)); - } - - // Write all the keys and delete the keys scheduled for delete. - //Assert we find only expected keys in the Table. 
- try (Table testTable = rdbStore.getTable("Fourth")) { - for (int x = 0; x < deletedKeys.size(); x++) { - testTable.put(deletedKeys.get(x), value); - testTable.delete(deletedKeys.get(x)); - } - - for (int x = 0; x < validKeys.size(); x++) { - testTable.put(validKeys.get(x), value); - } - - for (int x = 0; x < validKeys.size(); x++) { - Assert.assertNotNull(testTable.get(validKeys.get(0))); - } - - for (int x = 0; x < deletedKeys.size(); x++) { - Assert.assertNull(testTable.get(deletedKeys.get(0))); - } - } - } - - @Test - public void batchPut() throws Exception { - try (Table testTable = rdbStore.getTable("Fifth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - //given - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - Assert.assertNull(testTable.get(key)); - - //when - testTable.putWithBatch(batch, key, value); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNotNull(testTable.get(key)); - } - } - - @Test - public void batchDelete() throws Exception { - try (Table testTable = rdbStore.getTable("Fifth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - - //given - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertNotNull(testTable.get(key)); - - - //when - testTable.deleteWithBatch(batch, key); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNull(testTable.get(key)); - } - } - - @Test - public void forEachAndIterator() throws Exception { - final int iterCount = 100; - try (Table testTable = rdbStore.getTable("Sixth")) { - for (int x = 0; x < iterCount; x++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - } - int localCount = 0; - try (TableIterator iter = testTable.iterator()) { - while (iter.hasNext()) { - Table.KeyValue keyValue = iter.next(); - localCount++; - } - - Assert.assertEquals(iterCount, localCount); - iter.seekToFirst(); - iter.forEachRemaining(TestRDBTableStore::consume); - Assert.assertEquals(iterCount, count); - - } - } - } - - @Test - public void testIsExist() throws Exception { - try (Table testTable = rdbStore.getTable("Seventh")) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - Assert.assertTrue(testTable.isExist(key)); - - testTable.delete(key); - Assert.assertFalse(testTable.isExist(key)); - - byte[] invalidKey = - RandomStringUtils.random(5).getBytes(StandardCharsets.UTF_8); - Assert.assertFalse(testTable.isExist(invalidKey)); - } - } - - @Test - public void testCountEstimatedRowsInTable() throws Exception { - try (Table testTable = rdbStore.getTable("Eighth")) { - // Add a few keys - final int numKeys = 12345; - for (int i = 0; i < numKeys; i++) { - byte[] key = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - byte[] value = - RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8); - testTable.put(key, value); - } - long keyCount = testTable.getEstimatedKeyCount(); - // The result should be larger than zero but not exceed(?) 
numKeys - Assert.assertTrue(keyCount > 0 && keyCount <= numKeys); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java deleted file mode 100644 index 9ee0d19074a..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.utils.db; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; - -import com.google.common.base.Optional; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.DBOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; - -/** - * Tests for RocksDBTable Store. 
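batchPut and batchDelete above both follow the same stage-then-commit flow around BatchOperation; as a sketch (the method and table names are invented, the calls are the ones used in the tests):

    static void stagedWrite(RDBStore store, byte[] newKey, byte[] newValue,
        byte[] staleKey) throws Exception {
      try (Table<byte[], byte[]> table = store.getTable("Fifth");
           BatchOperation batch = store.initBatchOperation()) {
        table.putWithBatch(batch, newKey, newValue);    // staged, not yet readable
        table.deleteWithBatch(batch, staleKey);         // staged delete
        store.commitBatchOperation(batch);              // both changes land together
      }
    }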
- */ -public class TestTypedRDBTableStore { - private static int count = 0; - private final List families = - Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY), - "First", "Second", "Third", - "Fourth", "Fifth", - "Sixth", "Seven", "Eighth", - "Ninth"); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private RDBStore rdbStore = null; - private DBOptions options = null; - private CodecRegistry codecRegistry; - - @Before - public void setUp() throws Exception { - options = new DBOptions(); - options.setCreateIfMissing(true); - options.setCreateMissingColumnFamilies(true); - - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.ALL); - options = options.setStatistics(statistics); - - Set configSet = new HashSet<>(); - for (String name : families) { - TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); - configSet.add(newConfig); - } - rdbStore = new RDBStore(folder.newFolder(), options, configSet); - - codecRegistry = new CodecRegistry(); - - } - - @After - public void tearDown() throws Exception { - if (rdbStore != null) { - rdbStore.close(); - } - } - - @Test - public void toIOException() { - } - - @Test - public void putGetAndEmpty() throws Exception { - try (Table testTable = createTypedTable( - "First")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - Assert.assertFalse(testTable.isEmpty()); - String readValue = testTable.get(key); - Assert.assertEquals(value, readValue); - } - try (Table secondTable = rdbStore.getTable("Second")) { - Assert.assertTrue(secondTable.isEmpty()); - } - } - - private Table createTypedTable(String name) - throws IOException { - return new TypedTable( - rdbStore.getTable(name), - codecRegistry, - String.class, String.class); - } - - @Test - public void delete() throws Exception { - List deletedKeys = new LinkedList<>(); - List validKeys = new LinkedList<>(); - String value = - RandomStringUtils.random(10); - for (int x = 0; x < 100; x++) { - deletedKeys.add( - RandomStringUtils.random(10)); - } - - for (int x = 0; x < 100; x++) { - validKeys.add( - RandomStringUtils.random(10)); - } - - // Write all the keys and delete the keys scheduled for delete. - //Assert we find only expected keys in the Table. 
- try (Table testTable = createTypedTable( - "Fourth")) { - for (int x = 0; x < deletedKeys.size(); x++) { - testTable.put(deletedKeys.get(x), value); - testTable.delete(deletedKeys.get(x)); - } - - for (int x = 0; x < validKeys.size(); x++) { - testTable.put(validKeys.get(x), value); - } - - for (int x = 0; x < validKeys.size(); x++) { - Assert.assertNotNull(testTable.get(validKeys.get(0))); - } - - for (int x = 0; x < deletedKeys.size(); x++) { - Assert.assertNull(testTable.get(deletedKeys.get(0))); - } - } - } - - @Test - public void batchPut() throws Exception { - - try (Table testTable = createTypedTable( - "Fourth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - //given - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - - //when - testTable.putWithBatch(batch, key, value); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNotNull(testTable.get(key)); - } - } - - @Test - public void batchDelete() throws Exception { - try (Table testTable = createTypedTable( - "Fourth"); - BatchOperation batch = rdbStore.initBatchOperation()) { - - //given - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - testTable.put(key, value); - - //when - testTable.deleteWithBatch(batch, key); - rdbStore.commitBatchOperation(batch); - - //then - Assert.assertNull(testTable.get(key)); - } - } - - private static boolean consume(Table.KeyValue keyValue) { - count++; - try { - Assert.assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - Assert.fail(ex.toString()); - } - return true; - } - - @Test - public void forEachAndIterator() throws Exception { - final int iterCount = 100; - try (Table testTable = createTypedTable( - "Sixth")) { - for (int x = 0; x < iterCount; x++) { - String key = - RandomStringUtils.random(10); - String value = - RandomStringUtils.random(10); - testTable.put(key, value); - } - int localCount = 0; - - try (TableIterator> iter = - testTable.iterator()) { - while (iter.hasNext()) { - Table.KeyValue keyValue = iter.next(); - localCount++; - } - - Assert.assertEquals(iterCount, localCount); - iter.seekToFirst(); - iter.forEachRemaining(TestTypedRDBTableStore::consume); - Assert.assertEquals(iterCount, count); - - } - } - } - - @Test - public void testTypedTableWithCache() throws Exception { - int iterCount = 10; - try (Table testTable = createTypedTable( - "Seven")) { - - for (int x = 0; x < iterCount; x++) { - String key = Integer.toString(x); - String value = Integer.toString(x); - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), - x)); - } - - // As we have added to cache, so get should return value even if it - // does not exist in DB. - for (int x = 0; x < iterCount; x++) { - Assert.assertEquals(Integer.toString(1), - testTable.get(Integer.toString(1))); - } - - } - } - - @Test - public void testTypedTableWithCacheWithFewDeletedOperationType() - throws Exception { - int iterCount = 10; - try (Table testTable = createTypedTable( - "Seven")) { - - for (int x = 0; x < iterCount; x++) { - String key = Integer.toString(x); - String value = Integer.toString(x); - if (x % 2 == 0) { - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), x)); - } else { - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.absent(), - x)); - } - } - - // As we have added to cache, so get should return value even if it - // does not exist in DB. 
- for (int x = 0; x < iterCount; x++) { - if (x % 2 == 0) { - Assert.assertEquals(Integer.toString(x), - testTable.get(Integer.toString(x))); - } else { - Assert.assertNull(testTable.get(Integer.toString(x))); - } - } - - testTable.cleanupCache(5); - - GenericTestUtils.waitFor(() -> - ((TypedTable) testTable).getCache().size() == 4, - 100, 5000); - - - //Check remaining values - for (int x = 6; x < iterCount; x++) { - if (x % 2 == 0) { - Assert.assertEquals(Integer.toString(x), - testTable.get(Integer.toString(x))); - } else { - Assert.assertNull(testTable.get(Integer.toString(x))); - } - } - - - } - } - - @Test - public void testIsExist() throws Exception { - try (Table testTable = createTypedTable( - "Eighth")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - Assert.assertTrue(testTable.isExist(key)); - - String invalidKey = key + RandomStringUtils.random(1); - Assert.assertFalse(testTable.isExist(invalidKey)); - - testTable.delete(key); - Assert.assertFalse(testTable.isExist(key)); - } - } - - @Test - public void testIsExistCache() throws Exception { - try (Table testTable = createTypedTable( - "Eighth")) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.of(value), 1L)); - Assert.assertTrue(testTable.isExist(key)); - - testTable.addCacheEntry(new CacheKey<>(key), - new CacheValue<>(Optional.absent(), 1L)); - Assert.assertFalse(testTable.isExist(key)); - } - } - - @Test - public void testCountEstimatedRowsInTable() throws Exception { - try (Table testTable = createTypedTable( - "Ninth")) { - // Add a few keys - final int numKeys = 12345; - for (int i = 0; i < numKeys; i++) { - String key = - RandomStringUtils.random(10); - String value = RandomStringUtils.random(10); - testTable.put(key, value); - } - long keyCount = testTable.getEstimatedKeyCount(); - // The result should be larger than zero but not exceed(?) numKeys - Assert.assertTrue(keyCount > 0 && keyCount <= numKeys); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java deleted file mode 100644 index 42391297a0a..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCacheImpl.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
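The cache-backed TypedTable tests above rely on a small set of conventions: a CacheValue holding a present Optional overlays the on-disk value, an absent Optional acts as a delete marker, and the numeric argument is the epoch that cleanupCache later evicts by (eviction may complete asynchronously, which is why the test polls with waitFor). A sketch of those conventions (keys, values and epochs are invented; Optional is Guava's, as in the imports above):

    static void overlayCache(Table<String, String> table) throws Exception {
      table.addCacheEntry(new CacheKey<>("k1"),
          new CacheValue<>(Optional.of("v1"), 1));      // get() now served from cache
      table.addCacheEntry(new CacheKey<>("k2"),
          new CacheValue<>(Optional.absent(), 2));      // delete marker: get() -> null
      String hit = table.get("k1");                     // "v1", even before it reaches RocksDB
      String gone = table.get("k2");                    // null, masked by the marker
      table.cleanupCache(2);                            // evict cache entries up to epoch 2
    }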
- * - */ - -package org.apache.hadoop.hdds.utils.db.cache; - -import java.util.Arrays; -import java.util.Collection; -import java.util.concurrent.CompletableFuture; - -import com.google.common.base.Optional; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import static org.junit.Assert.fail; - -/** - * Class tests partial table cache. - */ -@RunWith(value = Parameterized.class) -public class TestTableCacheImpl { - private TableCache, CacheValue> tableCache; - - private final TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy; - - - @Parameterized.Parameters - public static Collection policy() { - Object[][] params = new Object[][] { - {TableCacheImpl.CacheCleanupPolicy.NEVER}, - {TableCacheImpl.CacheCleanupPolicy.MANUAL} - }; - return Arrays.asList(params); - } - - public TestTableCacheImpl( - TableCacheImpl.CacheCleanupPolicy cacheCleanupPolicy) { - this.cacheCleanupPolicy = cacheCleanupPolicy; - } - - - @Before - public void create() { - tableCache = - new TableCacheImpl<>(cacheCleanupPolicy); - } - @Test - public void testPartialTableCache() { - - - for (int i = 0; i< 10; i++) { - tableCache.put(new CacheKey<>(Integer.toString(i)), - new CacheValue<>(Optional.of(Integer.toString(i)), i)); - } - - - for (int i=0; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - - // On a full table cache if some one calls cleanup it is a no-op. - tableCache.cleanup(4); - - for (int i=5; i < 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - } - - - @Test - public void testPartialTableCacheParallel() throws Exception { - - int totalCount = 0; - CompletableFuture future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 1, 0); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - int value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - - future = - CompletableFuture.supplyAsync(() -> { - try { - return writeToCache(10, 11, 100); - } catch (InterruptedException ex) { - fail("writeToCache got interrupt exception"); - } - return 0; - }); - - // Check we have first 10 entries in cache. - for (int i=1; i <= 10; i++) { - Assert.assertEquals(Integer.toString(i), - tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - - - value = future.get(); - Assert.assertEquals(10, value); - - totalCount += value; - - if (cacheCleanupPolicy == TableCacheImpl.CacheCleanupPolicy.MANUAL) { - int deleted = 5; - - // cleanup first 5 entires - tableCache.cleanup(deleted); - - // We should totalCount - deleted entries in cache. - final int tc = totalCount; - GenericTestUtils.waitFor(() -> (tc - deleted == tableCache.size()), 100, - 5000); - // Check if we have remaining entries. - for (int i=6; i <= totalCount; i++) { - Assert.assertEquals(Integer.toString(i), tableCache.get( - new CacheKey<>(Integer.toString(i))).getCacheValue()); - } - tableCache.cleanup(10); - - tableCache.cleanup(totalCount); - - // Cleaned up all entries, so cache size should be zero. 
- GenericTestUtils.waitFor(() -> (0 == tableCache.size()), 100, - 5000); - } else { - tableCache.cleanup(totalCount); - Assert.assertEquals(totalCount, tableCache.size()); - } - - - } - - private int writeToCache(int count, int startVal, long sleep) - throws InterruptedException { - int counter = 1; - while (counter <= count){ - tableCache.put(new CacheKey<>(Integer.toString(startVal)), - new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal)); - startVal++; - counter++; - Thread.sleep(sleep); - } - return count; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java deleted file mode 100644 index f97fda2d81b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/cache/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the DB Cache Utilities. - */ -package org.apache.hadoop.hdds.utils.db.cache; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java deleted file mode 100644 index f1c7ce139a8..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the DB Utilities. 
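TestTableCacheImpl above drives the cache directly rather than through a table; its essentials, condensed into a sketch (keys, values and epochs are invented, the types come from org.apache.hadoop.hdds.utils.db.cache):

    static void cacheEpochSketch() {
      TableCache<CacheKey<String>, CacheValue<String>> cache =
          new TableCacheImpl<>(TableCacheImpl.CacheCleanupPolicy.MANUAL);
      for (int epoch = 1; epoch <= 10; epoch++) {
        cache.put(new CacheKey<>("key-" + epoch),
            new CacheValue<>(Optional.of("value-" + epoch), epoch));
      }
      // With the MANUAL policy, cleanup(n) schedules eviction of entries whose
      // epoch is <= n; with NEVER it is a no-op and the cache only grows.
      cache.cleanup(5);
      String kept = cache.get(new CacheKey<>("key-7")).getCacheValue();  // still present
    }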
- */ -package org.apache.hadoop.hdds.utils.db; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java deleted file mode 100644 index f93e3fd68d2..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * DB test Utils. - */ -package org.apache.hadoop.hdds.utils; \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java deleted file mode 100644 index 789560a2c3a..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Dummy AuditAction Type for test. - */ -public enum DummyAction implements AuditAction { - - CREATE_VOLUME, - CREATE_BUCKET, - READ_VOLUME, - READ_BUCKET, - READ_KEY, - UPDATE_VOLUME, - UPDATE_BUCKET, - UPDATE_KEY, - DELETE_VOLUME, - DELETE_BUCKET, - DELETE_KEY, - SET_OWNER, - SET_QUOTA; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java deleted file mode 100644 index 0c2d98fab29..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; - -import java.util.HashMap; -import java.util.Map; - -/** - * DummyEntity that implements Auditable for test purpose. - */ -public class DummyEntity implements Auditable { - - private String key1; - private String key2; - - public DummyEntity(){ - this.key1 = "value1"; - this.key2 = "value2"; - } - public String getKey1() { - return key1; - } - - public void setKey1(String key1) { - this.key1 = key1; - } - - public String getKey2() { - return key2; - } - - public void setKey2(String key2) { - this.key2 = key2; - } - - @Override - public Map toAuditMap() { - Map auditMap = new HashMap<>(); - auditMap.put("key1", this.key1); - auditMap.put("key2", this.key2); - return auditMap; - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java deleted file mode 100644 index 518ddaedcf7..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; - -import org.apache.commons.io.FileUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertTrue; - -/** - * Test Ozone Audit Logger. 
- */ -public class TestOzoneAuditLogger { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneAuditLogger.class.getName()); - - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.OMLOGGER); - - private static final String SUCCESS = AuditEventStatus.SUCCESS.name(); - private static final String FAILURE = AuditEventStatus.FAILURE.name(); - - private static final Map PARAMS = - new DummyEntity().toAuditMap(); - - private static final AuditMessage WRITE_FAIL_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.CREATE_VOLUME.name()) - .withParams(PARAMS) - .withResult(FAILURE) - .withException(null).build(); - - private static final AuditMessage WRITE_SUCCESS_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.CREATE_VOLUME.name()) - .withParams(PARAMS) - .withResult(SUCCESS) - .withException(null).build(); - - private static final AuditMessage READ_FAIL_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.READ_VOLUME.name()) - .withParams(PARAMS) - .withResult(FAILURE) - .withException(null).build(); - - private static final AuditMessage READ_SUCCESS_MSG = - new AuditMessage.Builder() - .setUser("john") - .atIp("192.168.0.1") - .forOperation(DummyAction.READ_VOLUME.name()) - .withParams(PARAMS) - .withResult(SUCCESS) - .withException(null).build(); - - @BeforeClass - public static void setUp(){ - System.setProperty("log4j.configurationFile", "log4j2.properties"); - } - - @AfterClass - public static void tearDown() { - File file = new File("audit.log"); - if (FileUtils.deleteQuietly(file)) { - LOG.info(file.getName() + - " has been deleted as all tests have completed."); - } else { - LOG.info("audit.log could not be deleted."); - } - } - - /** - * Test to verify default log level is INFO when logging success events. - */ - @Test - public void verifyDefaultLogLevelForSuccess() throws IOException { - AUDIT.logWriteSuccess(WRITE_SUCCESS_MSG); - String expected = - "INFO | OMAudit | " + WRITE_SUCCESS_MSG.getFormattedMessage(); - verifyLog(expected); - } - - /** - * Test to verify default log level is ERROR when logging failure events. - */ - @Test - public void verifyDefaultLogLevelForFailure() throws IOException { - AUDIT.logWriteFailure(WRITE_FAIL_MSG); - String expected = - "ERROR | OMAudit | " + WRITE_FAIL_MSG.getFormattedMessage(); - verifyLog(expected); - } - - /** - * Test to verify no READ event is logged. 
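The fixtures above cover the whole audit-message life cycle; condensed into a sketch (user, IP and the operation are illustrative values, the builder calls are the ones used above, classes come from org.apache.hadoop.ozone.audit):

    static void emitAuditEntry() {
      AuditLogger audit = new AuditLogger(AuditLoggerType.OMLOGGER);
      AuditMessage message = new AuditMessage.Builder()
          .setUser("exampleUser")
          .atIp("10.0.0.1")
          .forOperation(DummyAction.CREATE_VOLUME.name())
          .withParams(new DummyEntity().toAuditMap())   // key/value audit parameters
          .withResult(AuditEventStatus.SUCCESS.name())
          .withException(null)
          .build();
      audit.logWriteSuccess(message);   // INFO-level line in audit.log
      // logWriteFailure(...) logs at ERROR; with the test's log4j2 configuration,
      // logReadSuccess/logReadFailure produce no output at all
    }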
- */ - @Test - public void notLogReadEvents() throws IOException { - AUDIT.logReadSuccess(READ_SUCCESS_MSG); - AUDIT.logReadFailure(READ_FAIL_MSG); - verifyNoLog(); - } - - private void verifyLog(String expected) throws IOException { - File file = new File("audit.log"); - List lines = FileUtils.readLines(file, (String)null); - final int retry = 5; - int i = 0; - while (lines.isEmpty() && i < retry) { - lines = FileUtils.readLines(file, (String)null); - try { - Thread.sleep(500 * (i + 1)); - } catch(InterruptedException ie) { - Thread.currentThread().interrupt(); - break; - } - i++; - } - - // When log entry is expected, the log file will contain one line and - // that must be equal to the expected string - assertTrue(lines.size() != 0); - assertTrue(expected.equalsIgnoreCase(lines.get(0))); - //empty the file - lines.clear(); - FileUtils.writeLines(file, lines, false); - } - - private void verifyNoLog() throws IOException { - File file = new File("audit.log"); - List lines = FileUtils.readLines(file, (String)null); - // When no log entry is expected, the log file must be empty - assertTrue(lines.size() == 0); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index 1222ad04e08..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.audit; -/** - * Unit tests of Ozone Audit Logger. - * For test purpose, the log4j2 configuration is loaded from file at: - * src/test/resources/log4j2.properties - */ diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java deleted file mode 100644 index 819c29fd610..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.junit.Assert; -import org.junit.Test; - -/** - * Tests for {@link Checksum} class. - */ -public class TestChecksum { - - private static final int BYTES_PER_CHECKSUM = 10; - private static final ContainerProtos.ChecksumType CHECKSUM_TYPE_DEFAULT = - ContainerProtos.ChecksumType.SHA256; - - private Checksum getChecksum(ContainerProtos.ChecksumType type) { - if (type == null) { - type = CHECKSUM_TYPE_DEFAULT; - } - return new Checksum(type, BYTES_PER_CHECKSUM); - } - - /** - * Tests {@link Checksum#verifyChecksum(byte[], ChecksumData)}. - */ - @Test - public void testVerifyChecksum() throws Exception { - Checksum checksum = getChecksum(null); - int dataLen = 55; - byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(); - - ChecksumData checksumData = checksum.computeChecksum(data); - - // A checksum is calculate for each bytesPerChecksum number of bytes in - // the data. Since that value is 10 here and the data length is 55, we - // should have 6 checksums in checksumData. - Assert.assertEquals(6, checksumData.getChecksums().size()); - - // Checksum verification should pass - Assert.assertTrue("Checksum mismatch", - Checksum.verifyChecksum(data, checksumData)); - } - - /** - * Tests that if data is modified, then the checksums should not match. - */ - @Test - public void testIncorrectChecksum() throws Exception { - Checksum checksum = getChecksum(null); - byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(); - ChecksumData originalChecksumData = checksum.computeChecksum(data); - - // Change the data and check if new checksum matches the original checksum. - // Modifying one byte of data should be enough for the checksum data to - // mismatch - data[50] = (byte) (data[50]+1); - ChecksumData newChecksumData = checksum.computeChecksum(data); - Assert.assertNotEquals("Checksums should not match for different data", - originalChecksumData, newChecksumData); - } - - /** - * Tests that checksum calculated using two different checksumTypes should - * not match. 
- */ - @Test - public void testChecksumMismatchForDifferentChecksumTypes() throws Exception { - byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(); - - // Checksum1 of type SHA-256 - Checksum checksum1 = getChecksum(null); - ChecksumData checksumData1 = checksum1.computeChecksum(data); - - // Checksum2 of type CRC32 - Checksum checksum2 = getChecksum(ContainerProtos.ChecksumType.CRC32); - ChecksumData checksumData2 = checksum2.computeChecksum(data); - - // The two checksums should not match as they have different types - Assert.assertNotEquals( - "Checksums should not match for different checksum types", - checksum1, checksum2); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java deleted file mode 100644 index 2f466377b4b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.common; - -import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.PureJavaCrc32C; -import org.junit.Assert; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.Random; -import java.util.zip.Checksum; - -/** - * Test {@link ChecksumByteBuffer} implementations. 
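The three checksum tests above boil down to a short compute/verify cycle; a sketch of it (the payload and the 10-byte chunk size are illustrative, Checksum and ChecksumData come from org.apache.hadoop.ozone.common):

    static void checksumRoundTrip() throws Exception {
      byte[] data = "example block contents".getBytes(StandardCharsets.UTF_8);
      Checksum checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, 10);
      ChecksumData checksumData = checksum.computeChecksum(data);
      int chunks = checksumData.getChecksums().size();  // one checksum per 10-byte slice
      boolean matches = Checksum.verifyChecksum(data, checksumData);  // true for intact data
      data[0] = (byte) (data[0] + 1);                   // corrupt the first chunk
      ChecksumData recomputed = checksum.computeChecksum(data);
      // recomputed no longer equals checksumData, as testIncorrectChecksum asserts
    }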
- */ -public class TestChecksumByteBuffer { - @Test - public void testPureJavaCrc32ByteBuffer() { - final Checksum expected = new PureJavaCrc32(); - final ChecksumByteBuffer testee = new PureJavaCrc32ByteBuffer(); - new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); - } - - @Test - public void testPureJavaCrc32CByteBuffer() { - final Checksum expected = new PureJavaCrc32C(); - final ChecksumByteBuffer testee = new PureJavaCrc32CByteBuffer(); - new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); - } - - static class VerifyChecksumByteBuffer { - private final Checksum expected; - private final ChecksumByteBuffer testee; - - VerifyChecksumByteBuffer(Checksum expected, ChecksumByteBuffer testee) { - this.expected = expected; - this.testee = testee; - } - - void testCorrectness() { - checkSame(); - - checkBytes("hello world!".getBytes(StandardCharsets.UTF_8)); - - final Random random = new Random(); - final byte[] bytes = new byte[1 << 10]; - for (int i = 0; i < 1000; i++) { - random.nextBytes(bytes); - checkBytes(bytes, random.nextInt(bytes.length)); - } - } - - void checkBytes(byte[] bytes) { - checkBytes(bytes, bytes.length); - } - - void checkBytes(byte[] bytes, int length) { - expected.reset(); - testee.reset(); - checkSame(); - - for (byte b : bytes) { - expected.update(b); - testee.update(b); - checkSame(); - } - - expected.reset(); - testee.reset(); - - for (int i = 0; i < length; i++) { - expected.update(bytes, 0, i); - testee.update(bytes, 0, i); - checkSame(); - } - - expected.reset(); - testee.reset(); - checkSame(); - } - - private void checkSame() { - Assert.assertEquals(expected.getValue(), testee.getValue()); - } - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java deleted file mode 100644 index c1470bb2efc..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.common; - -import org.apache.commons.collections.SetUtils; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.HashSet; -import java.util.Set; - -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLEANUP; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLOSED; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CREATING; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.FINAL; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.INIT; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES - .OPERATIONAL; - -/** - * This class is to test ozone common state machine. - */ -public class TestStateMachine { - - /** - * STATES used by the test state machine. - */ - public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL}; - - /** - * EVENTS used by the test state machine. - */ - public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT}; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Test - public void testStateMachineStates() throws InvalidStateTransitionException { - Set finals = new HashSet<>(); - finals.add(FINAL); - - StateMachine stateMachine = - new StateMachine<>(INIT, finals); - - stateMachine.addTransition(INIT, CREATING, EVENTS.ALLOCATE); - stateMachine.addTransition(CREATING, OPERATIONAL, EVENTS.CREATE); - stateMachine.addTransition(OPERATIONAL, OPERATIONAL, EVENTS.UPDATE); - stateMachine.addTransition(OPERATIONAL, CLEANUP, EVENTS.DELETE); - stateMachine.addTransition(OPERATIONAL, CLOSED, EVENTS.CLOSE); - stateMachine.addTransition(CREATING, CLEANUP, EVENTS.TIMEOUT); - - // Initial and Final states - Assert.assertEquals("Initial State", INIT, stateMachine.getInitialState()); - Assert.assertTrue("Final States", SetUtils.isEqualSet(finals, - stateMachine.getFinalStates())); - - // Valid state transitions - Assert.assertEquals("STATE should be OPERATIONAL after being created", - OPERATIONAL, stateMachine.getNextState(CREATING, EVENTS.CREATE)); - Assert.assertEquals("STATE should be OPERATIONAL after being updated", - OPERATIONAL, stateMachine.getNextState(OPERATIONAL, EVENTS.UPDATE)); - Assert.assertEquals("STATE should be CLEANUP after being deleted", - CLEANUP, stateMachine.getNextState(OPERATIONAL, EVENTS.DELETE)); - Assert.assertEquals("STATE should be CLEANUP after being timeout", - CLEANUP, stateMachine.getNextState(CREATING, EVENTS.TIMEOUT)); - Assert.assertEquals("STATE should be CLOSED after being closed", - CLOSED, stateMachine.getNextState(OPERATIONAL, EVENTS.CLOSE)); - - // Negative cases: invalid transition - expectException(); - stateMachine.getNextState(OPERATIONAL, EVENTS.CREATE); - - expectException(); - stateMachine.getNextState(CREATING, EVENTS.CLOSE); - } - - /** - * We expect an InvalidStateTransitionException. 
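The state machine test above is largely declarative; the core API it exercises looks roughly like this in isolation (a sketch reusing the STATES/EVENTS enums declared above; StateMachine comes from org.apache.hadoop.ozone.common.statemachine):

    static STATES driveLifecycle() throws InvalidStateTransitionException {
      Set<STATES> finalStates = new HashSet<>();
      finalStates.add(STATES.FINAL);
      StateMachine<STATES, EVENTS> machine = new StateMachine<>(STATES.INIT, finalStates);
      machine.addTransition(STATES.INIT, STATES.CREATING, EVENTS.ALLOCATE);
      machine.addTransition(STATES.CREATING, STATES.OPERATIONAL, EVENTS.CREATE);
      machine.addTransition(STATES.OPERATIONAL, STATES.CLOSED, EVENTS.CLOSE);
      // any state/event pair without a declared transition makes getNextState
      // throw InvalidStateTransitionException ("Invalid event ...")
      return machine.getNextState(STATES.CREATING, EVENTS.CREATE);   // -> OPERATIONAL
    }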
- */ - private void expectException() { - exception.expect(InvalidStateTransitionException.class); - exception.expectMessage("Invalid event"); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java deleted file mode 100644 index 38878334f57..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java +++ /dev/null @@ -1,388 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * A generic lease management API which can be used if a service - * needs any kind of lease management. - */ - -package org.apache.hadoop.ozone.lease; - -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.HashMap; -import java.util.Map; - -/** - * Test class to check functionality and consistency of LeaseManager. - */ -public class TestLeaseManager { - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Dummy resource on which leases can be acquired. - */ - private static final class DummyResource { - - private final String name; - - private DummyResource(String name) { - this.name = name; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if(obj instanceof DummyResource) { - return name.equals(((DummyResource) obj).name); - } - return false; - } - - /** - * Adding to String method to fix the ErrorProne warning that this method - * is later used in String functions, which would print out (e.g. - * `org.apache.hadoop.ozone.lease.TestLeaseManager.DummyResource@ - * 4488aabb`) instead of useful information. - * - * @return Name of the Dummy object. - */ - @Override - public String toString() { - return "DummyResource{" + - "name='" + name + '\'' + - '}'; - } - } - - @Test - public void testLeaseAcquireAndRelease() throws LeaseException { - //It is assumed that the test case execution won't take more than 5 seconds, - //if it takes more time increase the defaultTimeout value of LeaseManager. 
- LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Lease leaseThree = manager.acquire(resourceThree); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseOne.hasExpired()); - Assert.assertFalse(leaseTwo.hasExpired()); - Assert.assertFalse(leaseThree.hasExpired()); - //The below releases should not throw LeaseNotFoundException. - manager.release(resourceOne); - manager.release(resourceTwo); - manager.release(resourceThree); - Assert.assertTrue(leaseOne.hasExpired()); - Assert.assertTrue(leaseTwo.hasExpired()); - Assert.assertTrue(leaseThree.hasExpired()); - manager.shutdown(); - } - - @Test - public void testLeaseAlreadyExist() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - - exception.expect(LeaseAlreadyExistException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.acquire(resourceOne); - - manager.release(resourceOne); - manager.release(resourceTwo); - manager.shutdown(); - } - - @Test - public void testLeaseNotFound() throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - - //Case 1: lease was never acquired. - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - - //Case 2: lease is acquired and released. - Lease leaseTwo = manager.acquire(resourceTwo); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertFalse(leaseTwo.hasExpired()); - manager.release(resourceTwo); - Assert.assertTrue(leaseTwo.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceTwo); - manager.get(resourceTwo); - - //Case 3: lease acquired and timed out. - Lease leaseThree = manager.acquire(resourceThree); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseThree.hasExpired()); - long sleepTime = leaseThree.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseThree.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceThree); - manager.get(resourceThree); - manager.shutdown(); - } - - @Test - public void testCustomLeaseTimeout() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo, 10000); - Lease leaseThree = manager.acquire(resourceThree, 50000); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertEquals(leaseTwo, manager.get(resourceTwo)); - Assert.assertEquals(leaseThree, manager.get(resourceThree)); - Assert.assertFalse(leaseOne.hasExpired()); - Assert.assertFalse(leaseTwo.hasExpired()); - Assert.assertFalse(leaseThree.hasExpired()); - Assert.assertEquals(5000, leaseOne.getLeaseLifeTime()); - Assert.assertEquals(10000, leaseTwo.getLeaseLifeTime()); - Assert.assertEquals(50000, leaseThree.getLeaseLifeTime()); - // Releasing of leases is done in shutdown, so don't have to worry about - // lease release - manager.shutdown(); - } - - @Test - public void testLeaseCallback() throws LeaseException, InterruptedException { - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - leaseStatus.put(resourceOne, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - // wait for lease to expire - long sleepTime = leaseOne.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - // check if callback has been executed - Assert.assertEquals("lease expired", leaseStatus.get(resourceOne)); - } - - @Test - public void testCallbackExecutionInCaseOfLeaseRelease() - throws LeaseException, InterruptedException { - // Callbacks should not be executed in case of lease release - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - leaseStatus.put(resourceOne, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - leaseStatus.put(resourceOne, "lease released"); - manager.release(resourceOne); - Assert.assertTrue(leaseOne.hasExpired()); - exception.expect(LeaseNotFoundException.class); - exception.expectMessage("Resource: " + resourceOne); - manager.get(resourceOne); - Assert.assertEquals("lease released", leaseStatus.get(resourceOne)); - } - - @Test - public void testLeaseCallbackWithMultipleLeases() - throws LeaseException, InterruptedException { - Map leaseStatus = new HashMap<>(); - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - DummyResource resourceTwo = new DummyResource("two"); - DummyResource resourceThree = new DummyResource("three"); - DummyResource resourceFour = new DummyResource("four"); - DummyResource resourceFive = new DummyResource("five"); - Lease leaseOne = manager.acquire(resourceOne); - Lease leaseTwo = manager.acquire(resourceTwo); - Lease leaseThree = manager.acquire(resourceThree); - Lease leaseFour = manager.acquire(resourceFour); - Lease leaseFive = manager.acquire(resourceFive); - leaseStatus.put(resourceOne, "lease in use"); - leaseStatus.put(resourceTwo, "lease in use"); - leaseStatus.put(resourceThree, "lease in use"); - leaseStatus.put(resourceFour, "lease in use"); - leaseStatus.put(resourceFive, "lease in use"); - leaseOne.registerCallBack(() -> { - leaseStatus.put(resourceOne, "lease expired"); - return null; - }); - leaseTwo.registerCallBack(() -> { - leaseStatus.put(resourceTwo, "lease expired"); - return null; - }); - leaseThree.registerCallBack(() -> { - leaseStatus.put(resourceThree, "lease expired"); - return null; - }); - leaseFour.registerCallBack(() -> { - leaseStatus.put(resourceFour, "lease expired"); - return null; - }); - leaseFive.registerCallBack(() -> { - leaseStatus.put(resourceFive, "lease expired"); - return null; - }); - - // release lease one, two and three - leaseStatus.put(resourceOne, "lease released"); - manager.release(resourceOne); - leaseStatus.put(resourceTwo, "lease released"); - manager.release(resourceTwo); - leaseStatus.put(resourceThree, "lease released"); - manager.release(resourceThree); - - // wait for other leases to expire - long sleepTime = leaseFive.getRemainingTime() + 1000; - - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. 
- Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - Assert.assertTrue(leaseTwo.hasExpired()); - Assert.assertTrue(leaseThree.hasExpired()); - Assert.assertTrue(leaseFour.hasExpired()); - Assert.assertTrue(leaseFive.hasExpired()); - - Assert.assertEquals("lease released", leaseStatus.get(resourceOne)); - Assert.assertEquals("lease released", leaseStatus.get(resourceTwo)); - Assert.assertEquals("lease released", leaseStatus.get(resourceThree)); - Assert.assertEquals("lease expired", leaseStatus.get(resourceFour)); - Assert.assertEquals("lease expired", leaseStatus.get(resourceFive)); - manager.shutdown(); - } - - @Test - public void testReuseReleasedLease() throws LeaseException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(leaseOne.hasExpired()); - - Lease sameResourceLease = manager.acquire(resourceOne); - Assert.assertEquals(sameResourceLease, manager.get(resourceOne)); - Assert.assertFalse(sameResourceLease.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(sameResourceLease.hasExpired()); - manager.shutdown(); - } - - @Test - public void testReuseTimedOutLease() - throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - // wait for lease to expire - long sleepTime = leaseOne.getRemainingTime() + 1000; - try { - Thread.sleep(sleepTime); - } catch (InterruptedException ex) { - //even in case of interrupt we have to wait till lease times out. - Thread.sleep(sleepTime); - } - Assert.assertTrue(leaseOne.hasExpired()); - - Lease sameResourceLease = manager.acquire(resourceOne); - Assert.assertEquals(sameResourceLease, manager.get(resourceOne)); - Assert.assertFalse(sameResourceLease.hasExpired()); - - manager.release(resourceOne); - Assert.assertTrue(sameResourceLease.hasExpired()); - manager.shutdown(); - } - - @Test - public void testRenewLease() throws LeaseException, InterruptedException { - LeaseManager manager = new LeaseManager<>("Test", 5000); - manager.start(); - DummyResource resourceOne = new DummyResource("one"); - Lease leaseOne = manager.acquire(resourceOne); - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - - // add 5 more seconds to the lease - leaseOne.renew(5000); - - Thread.sleep(5000); - - // lease should still be active - Assert.assertEquals(leaseOne, manager.get(resourceOne)); - Assert.assertFalse(leaseOne.hasExpired()); - manager.release(resourceOne); - manager.shutdown(); - } - -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java deleted file mode 100644 index 1071309c730..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
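
TestLeaseManager above covers acquire/release, timeouts, expiry callbacks and renewal. A hedged sketch of the same lifecycle outside a test follows; the LeaseManager&lt;T&gt;/Lease&lt;T&gt; generic parameters are assumed from the real classes, and the resource name is invented:

import org.apache.hadoop.ozone.lease.Lease;
import org.apache.hadoop.ozone.lease.LeaseManager;

/** Hypothetical, non-test walk-through of the LeaseManager API tested above. */
public final class LeaseSketch {
  public static void main(String[] args) throws Exception {
    // 5000 ms default timeout, the same value the deleted tests use.
    LeaseManager<String> manager = new LeaseManager<>("Sketch", 5000);
    manager.start();

    Lease<String> lease = manager.acquire("container-42");

    // Expiry callback; per the tests it only fires when the lease times out,
    // not when the holder releases it.
    lease.registerCallBack(() -> {
      System.out.println("lease timed out");
      return null;
    });

    lease.renew(5000);               // extend the lease by another 5 seconds
    manager.release("container-42"); // releasing marks the lease as expired
    manager.shutdown();
  }
}

Per the tests, release() and shutdown() expire leases without running their callbacks, while a timed-out lease both expires and triggers the registered callback.
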
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lease; -/* - This package contains lease management unit test classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java deleted file mode 100644 index e88b1bb121b..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Assert; -import org.junit.Test; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Test-cases to test LockManager. - */ -public class TestLockManager { - - @Test(timeout = 1000) - public void testWriteLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.writeLock("/resourceOne"); - // This should work, as they are different resource. - manager.writeLock("/resourceTwo"); - manager.writeUnlock("/resourceOne"); - manager.writeUnlock("/resourceTwo"); - Assert.assertTrue(true); - } - - @Test - public void testWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test(timeout = 1000) - public void testReadLockWithDifferentResource() { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - manager.readLock("/resourceOne"); - manager.readLock("/resourceTwo"); - manager.readUnlock("/resourceOne"); - manager.readUnlock("/resourceTwo"); - Assert.assertTrue(true); - } - - @Test - public void testReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the new thread is trying to get read lock, it should work. - Assert.assertTrue(gotLock.get()); - manager.readUnlock("/resourceOne"); - } - - @Test - public void testWriteReadLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.writeLock("/resourceOne"); - new Thread(() -> { - manager.readLock("/resourceOne"); - gotLock.set(true); - manager.readUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get read lock on same object, - // it will wait. 
- Assert.assertFalse(gotLock.get()); - manager.writeUnlock("/resourceOne"); - // Since we have released the write lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test - public void testReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - @Test - public void testMultiReadWriteLockWithSameResource() throws Exception { - final LockManager manager = - new LockManager<>(new OzoneConfiguration()); - final AtomicBoolean gotLock = new AtomicBoolean(false); - manager.readLock("/resourceOne"); - manager.readLock("/resourceOne"); - new Thread(() -> { - manager.writeLock("/resourceOne"); - gotLock.set(true); - manager.writeUnlock("/resourceOne"); - }).start(); - // Let's give some time for the other thread to run - Thread.sleep(100); - // Since the other thread is trying to get write lock on same object, - // it will wait. - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - //We have only released one read lock, we still hold another read lock. - Thread.sleep(100); - Assert.assertFalse(gotLock.get()); - manager.readUnlock("/resourceOne"); - // Since we have released the read lock, the other thread should have - // the lock now - // Let's give some time for the other thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java deleted file mode 100644 index a96bc16248c..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.lock; -/* - This package contains the lock related test classes. 
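
TestLockManager above verifies per-resource read/write locking keyed by string paths: writers exclude each other and readers, while readers of the same resource proceed concurrently. A minimal sketch of the lock/unlock pattern it checks; the resource name is invented and the LockManager&lt;String&gt; generic parameter is assumed:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.lock.LockManager;

/** Hypothetical read/write locking pattern matching the deleted TestLockManager. */
public final class LockSketch {
  public static void main(String[] args) {
    LockManager<String> manager = new LockManager<>(new OzoneConfiguration());

    manager.writeLock("/volume/bucket");
    try {
      // mutate state guarded by the per-resource write lock
    } finally {
      manager.writeUnlock("/volume/bucket");
    }

    manager.readLock("/volume/bucket");
    try {
      // multiple readers of the same resource may hold this concurrently
    } finally {
      manager.readUnlock("/volume/bucket");
    }
  }
}
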
- */ \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 0030d2e9e1c..00000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; -/** - * Ozone related test helper classes and tests of common utils. - */ diff --git a/hadoop-hdds/common/src/test/resources/log4j2.properties b/hadoop-hdds/common/src/test/resources/log4j2.properties deleted file mode 100644 index cef69e11b0e..00000000000 --- a/hadoop-hdds/common/src/test/resources/log4j2.properties +++ /dev/null @@ -1,76 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#

-# http://www.apache.org/licenses/LICENSE-2.0 -#

-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=5 - -filter=read, write -# filter.read.onMatch = DENY avoids logging all READ events -# filter.read.onMatch = ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch = NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type = MarkerFilter -filter.read.marker = READ -filter.read.onMatch = DENY -filter.read.onMismatch = NEUTRAL - -# filter.write.onMatch = DENY avoids logging all WRITE events -# filter.write.onMatch = ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type = MarkerFilter -filter.write.marker = WRITE -filter.write.onMatch = NEUTRAL -filter.write.onMismatch = NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -appenders = console, audit -appender.console.type = Console -appender.console.name = STDOUT -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %-5level | %c{1} | %msg%n - -appender.audit.type = File -appender.audit.name = AUDITLOG -appender.audit.fileName=audit.log -appender.audit.layout.type=PatternLayout -appender.audit.layout.pattern= %-5level | %c{1} | %msg%n - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=OMAudit -logger.audit.level = INFO -logger.audit.appenderRefs = audit -logger.audit.appenderRef.file.ref = AUDITLOG - -rootLogger.level = INFO -rootLogger.appenderRefs = stdout -rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml deleted file mode 100644 index 58c5802d0a8..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/enforce-error.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - - - - 1 - InnerNode - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - true - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml deleted file mode 100644 index 25be9c2c5d7..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.xml +++ /dev/null @@ -1,49 +0,0 @@ - - - - 1 - - - - 1 - Root - - - rack - 1 - InnerNode - /default-rack - - - nodegroup - 1 - InnerNode - /default-nodegroup - - - - 0 - Leaf - - - - /datacenter/rack/nodegroup/node - true - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml 
b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml deleted file mode 100644 index d5092ad0dbc..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/good.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml deleted file mode 100644 index cf934bc0191..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-cost.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - -1 - InnerNode - default-rack - - - - 0 - leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml deleted file mode 100644 index d69aab14f13..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/invalid-version.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - a - - - - 1 - ROOT - - - rack - -1 - InnerNode - default-rack - - - - 0 - leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml deleted file mode 100644 index 0a2d490d5fa..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/middle-leaf.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
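
The good.yaml fixture above defines one nested layer per level (root, datacenter, rack, nodegroup, node), each carrying prefix, cost, type and defaultName, with the next level under a single-element sublayer list. These schema files were consumed by the HDDS NodeSchemaLoader; purely as an illustration of the nesting they describe, and not the loader's actual code path, the structure can be walked with SnakeYAML (assumed to be on the classpath):

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.Map;

import org.yaml.snakeyaml.Yaml;

/** Illustrative walk of the layer nesting in good.yaml; not NodeSchemaLoader itself. */
public final class TopologySchemaSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    try (InputStream in = Files.newInputStream(Paths.get("good.yaml"))) {
      Map<String, Object> layer = (Map<String, Object>) new Yaml().load(in);
      // root -> datacenter -> rack -> nodegroup -> node
      while (layer != null) {
        System.out.printf("%s (%s)%n", layer.get("defaultName"), layer.get("type"));
        List<Map<String, Object>> sub =
            (List<Map<String, Object>>) layer.get("sublayer");
        layer = (sub == null || sub.isEmpty()) ? null : sub.get(0);
      }
    }
  }
}
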
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: dc - defaultName: datacenter - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: node - defaultName: rack - type: LEAF_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml deleted file mode 100644 index a4297af4763..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-leaf.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - Leaf - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml deleted file mode 100644 index afc78160d9e..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - ROOT - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml deleted file mode 100644 index 536ed23eb6c..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-root.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- -# Cost: The cost of crossing this layer. -# The value should be positive integer or 0. This field is optional. -# When it's not defined, it's value is default "1". -cost: 1 - -# The prefix of this layer. -# If the prefix is "dc", then every name in this layer should start with "dc", -# such as "dc1", "dc2". -# Note that unlike XML schema, the prefix must be specified explicitly if the type is InnerNode. -prefix: / - -# Layer type, optional field, default value InnerNode. -# Current value range : {ROOT, INNER_NODE, LEAF_NODE} -type: ROOT - -# Layer name -defaultName: root - -# The sub layer of current layer. We use list -sublayer: - - - cost: 1 - prefix: root - defaultName: root - type: ROOT - sublayer: - - - cost: 1 - prefix: rack - defaultName: rack - type: INNER_NODE - sublayer: - - - cost: 1 - prefix: ng - defaultName: nodegroup - type: INNER_NODE - sublayer: - - - defaultName: node - type: LEAF_NODE - prefix: node -... \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml deleted file mode 100644 index a7322ca9cd0..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/multiple-topology.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml deleted file mode 100644 index fcc697c875f..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-leaf.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - InnerNode - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml deleted file mode 100644 index 940696c9414..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-root.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - InnerNode - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml deleted file mode 100644 index c16e2165464..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/no-topology.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - LEAF - - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml 
b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml deleted file mode 100644 index 2c30219c189..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-layers-size-mismatch.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml deleted file mode 100644 index fac224be108..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/path-with-id-reference-failure.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/room/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml deleted file mode 100644 index d228eecbed1..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/unknown-layer-type.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - leaves - - - - /datacenter/rack/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml deleted file mode 100644 index 221e10b5ad1..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-1.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /rack/datacenter/node - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml b/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml deleted file mode 100644 index 51e579e3e60..00000000000 --- a/hadoop-hdds/common/src/test/resources/networkTopologyTestFiles/wrong-path-order-2.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - 1 - - - - 1 - ROOT - - - rack - 1 - InnerNode - default-rack - - - - 0 - Leaf - - - - /datacenter/node/rack - false - - \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/resources/test.db.ini b/hadoop-hdds/common/src/test/resources/test.db.ini deleted file mode 100644 index 6666cd28b2d..00000000000 --- a/hadoop-hdds/common/src/test/resources/test.db.ini +++ /dev/null @@ -1,145 +0,0 @@ -# This is a RocksDB option file. -# -# A typical RocksDB options file has four sections, which are -# Version section, DBOptions section, at least one CFOptions -# section, and one TableOptions section for each column family. -# The RocksDB options file in general follows the basic INI -# file format with the following extensions / modifications: -# -# * Escaped characters -# We escaped the following characters: -# - \n -- line feed - new line -# - \r -- carriage return -# - \\ -- backslash \ -# - \: -- colon symbol : -# - \# -- hash tag # -# * Comments -# We support # style comments. Comments can appear at the ending -# part of a line. -# * Statements -# A statement is of the form option_name = value. 
-# Each statement contains a '=', where extra white-spaces -# are supported. However, we don't support multi-lined statement. -# Furthermore, each line can only contain at most one statement. -# * Sections -# Sections are of the form [SecitonTitle "SectionArgument"], -# where section argument is optional. -# * List -# We use colon-separated string to represent a list. -# For instance, n1:n2:n3:n4 is a list containing four values. -# -# Below is an example of a RocksDB options file: - - -#----------------------IMPORTANT------------------------------------# -### FAKE VALUES FOR TESTING ONLY ### DO NOT USE THESE FOR PRODUCTION. -#----------------------IMPORTANT------------------------------------# -[DBOptions] - stats_dump_period_sec=600 - max_manifest_file_size=551615 - bytes_per_sync=8388608 - delayed_write_rate=2097152 - WAL_ttl_seconds=0 - WAL_size_limit_MB=0 - max_subcompactions=1 - wal_dir= - wal_bytes_per_sync=0 - db_write_buffer_size=0 - keep_log_file_num=1000 - table_cache_numshardbits=4 - max_file_opening_threads=1 - writable_file_max_buffer_size=1048576 - random_access_max_buffer_size=1048576 - use_fsync=false - max_total_wal_size=0 - max_open_files=-1 - skip_stats_update_on_db_open=false - max_background_compactions=16 - manifest_preallocation_size=4194304 - max_background_flushes=7 - is_fd_close_on_exec=true - max_log_file_size=0 - advise_random_on_open=true - create_missing_column_families=false - paranoid_checks=true - delete_obsolete_files_period_micros=21600000000 - log_file_time_to_roll=0 - compaction_readahead_size=0 - create_if_missing=false - use_adaptive_mutex=false - enable_thread_tracking=false - allow_fallocate=true - error_if_exists=false - recycle_log_file_num=0 - skip_log_error_on_recovery=false - db_log_dir= - new_table_reader_for_compaction_inputs=true - allow_mmap_reads=false - allow_mmap_writes=false - use_direct_reads=false - use_direct_writes=false - - -[CFOptions "default"] - compaction_style=kCompactionStyleLevel - compaction_filter=nullptr - num_levels=6 - table_factory=BlockBasedTable - comparator=leveldb.BytewiseComparator - max_sequential_skip_in_iterations=8 - soft_rate_limit=0.000000 - max_bytes_for_level_base=1073741824 - memtable_prefix_bloom_probes=6 - memtable_prefix_bloom_bits=0 - memtable_prefix_bloom_huge_page_tlb_size=0 - max_successive_merges=0 - arena_block_size=16777216 - min_write_buffer_number_to_merge=1 - target_file_size_multiplier=1 - source_compaction_factor=1 - max_bytes_for_level_multiplier=8 - max_bytes_for_level_multiplier_additional=2:3:5 - compaction_filter_factory=nullptr - max_write_buffer_number=8 - level0_stop_writes_trigger=20 - compression=kSnappyCompression - level0_file_num_compaction_trigger=4 - purge_redundant_kvs_while_flush=true - max_write_buffer_number_to_maintain=0 - memtable_factory=SkipListFactory - max_grandparent_overlap_factor=8 - expanded_compaction_factor=25 - hard_pending_compaction_bytes_limit=137438953472 - inplace_update_num_locks=10000 - level_compaction_dynamic_level_bytes=true - level0_slowdown_writes_trigger=12 - filter_deletes=false - verify_checksums_in_compaction=true - min_partial_merge_operands=2 - paranoid_file_checks=false - target_file_size_base=134217728 - optimize_filters_for_hits=false - merge_operator=PutOperator - compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression - compaction_measure_io_stats=false - prefix_extractor=nullptr - bloom_locality=0 - write_buffer_size=134217728 - disable_auto_compactions=false - 
inplace_update_support=false - -[TableOptions/BlockBasedTable "default"] - format_version=2 - whole_key_filtering=true - no_block_cache=false - checksum=kCRC32c - filter_policy=rocksdb.BuiltinBloomFilter - block_size_deviation=10 - block_size=8192 - block_restart_interval=16 - cache_index_and_filter_blocks=false - pin_l0_filter_and_index_blocks_in_cache=false - pin_top_level_index_and_filter=false - index_type=kBinarySearch - hash_index_allow_collision=true - flush_block_policy_factory=FlushBlockBySizePolicyFactory \ No newline at end of file diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml deleted file mode 100644 index a5955498fff..00000000000 --- a/hadoop-hdds/config/pom.xml +++ /dev/null @@ -1,45 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-config - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Config Tools - Apache Hadoop HDDS Config - jar - - - - - - - - - junit - junit - test - - - - - diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java deleted file mode 100644 index 70aa58d5417..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
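
The test.db.ini fixture above is fixture data in the RocksDB options-file format its header comments describe, with explicitly fake values. A hedged sketch of loading such a file with RocksJava, assuming a RocksJava version that exposes OptionsUtil.loadOptionsFromFile with this signature:

import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.OptionsUtil;
import org.rocksdb.RocksDB;

/** Illustrative loader for an options file like test.db.ini; not production code. */
public final class RocksOptionsSketch {
  public static void main(String[] args) throws Exception {
    RocksDB.loadLibrary();
    try (DBOptions dbOptions = new DBOptions()) {
      List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
      // Parses the [DBOptions], [CFOptions "..."] and [TableOptions/...] sections.
      OptionsUtil.loadOptionsFromFile(
          "test.db.ini", Env.getDefault(), dbOptions, cfDescs);
      System.out.println("column families found: " + cfDescs.size());
    }
  }
}
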

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; -import java.util.concurrent.TimeUnit; - -/** - * Mark field to be configurable from ozone-site.xml. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.METHOD) -public @interface Config { - - /** - * Configuration fragment relative to the prefix defined with @ConfigGroup. - */ - String key(); - - /** - * Default value to use if not set. - */ - String defaultValue(); - - /** - * Custom description as a help. - */ - String description(); - - /** - * Type of configuration. Use AUTO to decide it based on the java type. - */ - ConfigType type() default ConfigType.AUTO; - - /** - * If type == TIME the unit should be defined with this attribute. - */ - TimeUnit timeUnit() default TimeUnit.MILLISECONDS; - - ConfigTag[] tags(); -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java deleted file mode 100644 index 9463f429095..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileAppender.java +++ /dev/null @@ -1,127 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
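
The Config annotation removed here, together with the ConfigGroup, ConfigTag and ConfigType types removed further down, drives the hadoop-hdds-config annotation processor: classes marked @ConfigGroup expose set* methods marked @Config, and the processor turns them into entries in ozone-default-generated.xml. A sketch of such a configuration POJO; the class name, prefix, keys and default values are invented for illustration:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.ConfigType;

/** Hypothetical configuration POJO consumed by the @Config annotation processor. */
@ConfigGroup(prefix = "example.service")
public class ExampleServiceConfig {

  private int port;
  private long timeout;

  @Config(key = "port",
      defaultValue = "9876",
      description = "Port the example service listens on.",
      tags = {ConfigTag.OZONE, ConfigTag.MANAGEMENT})
  public void setPort(int port) {
    this.port = port;
  }

  @Config(key = "timeout",
      defaultValue = "30s",
      type = ConfigType.TIME,
      timeUnit = TimeUnit.MILLISECONDS,
      description = "How long the example service waits before giving up.",
      tags = {ConfigTag.OZONE, ConfigTag.CLIENT})
  public void setTimeout(long timeout) {
    this.timeout = timeout;
  }

  public int getPort() {
    return port;
  }

  public long getTimeout() {
    return timeout;
  }
}

The generator resolves each key against the group prefix, so these illustrative settings would surface as example.service.port and example.service.timeout in the generated defaults file.
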

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.transform.OutputKeys; -import javax.xml.transform.Transformer; -import javax.xml.transform.TransformerException; -import javax.xml.transform.TransformerFactory; -import javax.xml.transform.dom.DOMSource; -import javax.xml.transform.stream.StreamResult; -import java.io.InputStream; -import java.io.Writer; -import java.util.Arrays; -import java.util.stream.Collectors; - -import org.w3c.dom.Document; -import org.w3c.dom.Element; - -/** - * Simple DOM based config file writer. - *

- * This class can init/load existing ozone-default-generated.xml fragments - * and append new entries and write to the file system. - */ -public class ConfigFileAppender { - - private Document document; - - private final DocumentBuilder builder; - - public ConfigFileAppender() { - try { - DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance(); - builder = factory.newDocumentBuilder(); - } catch (Exception ex) { - throw new ConfigurationException("Can initialize new configuration", ex); - } - } - - /** - * Initialize a new ozone-site.xml structure with empty content. - */ - public void init() { - try { - document = builder.newDocument(); - document.appendChild(document.createElement("configuration")); - } catch (Exception ex) { - throw new ConfigurationException("Can initialize new configuration", ex); - } - } - - /** - * Load existing ozone-site.xml content and parse the DOM tree. - */ - public void load(InputStream stream) { - try { - document = builder.parse(stream); - } catch (Exception ex) { - throw new ConfigurationException("Can't load existing configuration", ex); - } - } - - /** - * Add configuration fragment. - */ - public void addConfig(String key, String defaultValue, String description, - ConfigTag[] tags) { - Element root = document.getDocumentElement(); - Element propertyElement = document.createElement("property"); - - addXmlElement(propertyElement, "name", key); - - addXmlElement(propertyElement, "value", defaultValue); - - addXmlElement(propertyElement, "description", description); - - String tagsAsString = Arrays.stream(tags).map(tag -> tag.name()) - .collect(Collectors.joining(", ")); - - addXmlElement(propertyElement, "tag", tagsAsString); - - root.appendChild(propertyElement); - } - - private void addXmlElement(Element parentElement, String tagValue, - String textValue) { - Element element = document.createElement(tagValue); - element.appendChild(document.createTextNode(textValue)); - parentElement.appendChild(element); - } - - /** - * Write out the XML content to a writer. - */ - public void write(Writer writer) { - try { - TransformerFactory transformerFactory = TransformerFactory.newInstance(); - Transformer transf = transformerFactory.newTransformer(); - - transf.setOutputProperty(OutputKeys.ENCODING, "UTF-8"); - transf.setOutputProperty(OutputKeys.INDENT, "yes"); - transf - .setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2"); - - transf.transform(new DOMSource(document), new StreamResult(writer)); - } catch (TransformerException e) { - throw new ConfigurationException("Can't write the configuration xml", e); - } - } -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java deleted file mode 100644 index 471b679f845..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
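
ConfigFileAppender above exposes init/load, addConfig and write. Normally only the ConfigFileGenerator annotation processor drives it, but a small sketch of direct use (with an invented key and description) shows what it produces:

import java.io.StringWriter;

import org.apache.hadoop.hdds.conf.ConfigFileAppender;
import org.apache.hadoop.hdds.conf.ConfigTag;

/** Hypothetical direct use of ConfigFileAppender outside the annotation processor. */
public final class AppenderSketch {
  public static void main(String[] args) {
    ConfigFileAppender appender = new ConfigFileAppender();
    appender.init();   // start from an empty <configuration/> document

    // addConfig emits one <property> element containing <name>, <value>,
    // <description> and a comma-joined <tag> child.
    appender.addConfig("example.service.port", "9876",
        "Port the example service listens on.",
        new ConfigTag[] {ConfigTag.OZONE, ConfigTag.MANAGEMENT});

    StringWriter out = new StringWriter();
    appender.write(out);
    System.out.println(out);
  }
}
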

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.Filer; -import javax.annotation.processing.RoundEnvironment; -import javax.annotation.processing.SupportedAnnotationTypes; -import javax.lang.model.element.Element; -import javax.lang.model.element.ElementKind; -import javax.lang.model.element.TypeElement; -import javax.tools.Diagnostic.Kind; -import javax.tools.FileObject; -import javax.tools.StandardLocation; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.nio.charset.StandardCharsets; -import java.nio.file.NoSuchFileException; -import java.util.Set; - -/** - * Annotation processor to generate config fragments from Config annotations. - */ -@SupportedAnnotationTypes("org.apache.hadoop.hdds.conf.ConfigGroup") -public class ConfigFileGenerator extends AbstractProcessor { - - public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml"; - - @Override - public boolean process(Set annotations, - RoundEnvironment roundEnv) { - if (roundEnv.processingOver()) { - return false; - } - - Filer filer = processingEnv.getFiler(); - - try { - - //load existing generated config (if exists) - ConfigFileAppender appender = new ConfigFileAppender(); - try (InputStream input = filer - .getResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME).openInputStream()) { - appender.load(input); - } catch (FileNotFoundException | NoSuchFileException ex) { - appender.init(); - } - - Set annotatedElements = - roundEnv.getElementsAnnotatedWith(ConfigGroup.class); - for (Element annotatedElement : annotatedElements) { - TypeElement configGroup = (TypeElement) annotatedElement; - - //check if any of the setters are annotated with @Config - for (Element element : configGroup.getEnclosedElements()) { - if (element.getKind() == ElementKind.METHOD) { - processingEnv.getMessager() - .printMessage(Kind.WARNING, element.getSimpleName().toString()); - if (element.getSimpleName().toString().startsWith("set") - && element.getAnnotation(Config.class) != null) { - - //update the ozone-site-generated.xml - Config configAnnotation = element.getAnnotation(Config.class); - ConfigGroup configGroupAnnotation = - configGroup.getAnnotation(ConfigGroup.class); - - String key = configGroupAnnotation.prefix() + "." 
- + configAnnotation.key(); - - appender.addConfig(key, - configAnnotation.defaultValue(), - configAnnotation.description(), - configAnnotation.tags()); - } - } - - } - } - FileObject resource = filer - .createResource(StandardLocation.CLASS_OUTPUT, "", - OUTPUT_FILE_NAME); - - try (Writer writer = new OutputStreamWriter( - resource.openOutputStream(), StandardCharsets.UTF_8)) { - appender.write(writer); - } - - } catch (IOException e) { - processingEnv.getMessager().printMessage(Kind.ERROR, - "Can't generate the config file from annotation: " + e); - } - return false; - } - -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java deleted file mode 100644 index dd24ccbf003..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigGroup.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Mark pojo which holds configuration variables. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.TYPE) -public @interface ConfigGroup { - String prefix(); -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java deleted file mode 100644 index de50d2afe9e..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Available config tags. - *

- * Note: the values are defined in ozone-default.xml by hadoop.tags.custom. - */ -public enum ConfigTag { - OZONE, - MANAGEMENT, - SECURITY, - PERFORMANCE, - DEBUG, - CLIENT, - SERVER, - OM, - SCM, - CRITICAL, - RATIS, - CONTAINER, - REQUIRED, - REST, - STORAGE, - PIPELINE, - STANDALONE, - S3GATEWAY -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java deleted file mode 100644 index 23a81042b26..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Possible type of injected configuration. - *

- * AUTO means that the exact type will be identified based on the java type of - * the configuration field. - */ -public enum ConfigType { - AUTO, - STRING, - BOOLEAN, - INT, - LONG, - TIME, - SIZE -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java deleted file mode 100644 index 2e680126a09..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationException.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
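As an illustration of what a ConfigType.TIME value looks like in practice, a small hypothetical sketch follows (the class name is illustrative; the key literal is derived from the prefix and key declared in ConfigurationExample further below). Hadoop's generic Configuration already parses suffixed durations such as "30m"; the injection code for TIME-typed setters is assumed, not shown in this hunk, to accept the same format and convert it to the declared timeUnit.

package org.apache.hadoop.hdds.conf;

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

/**
 * Hypothetical sketch of the suffixed duration format implied by
 * ConfigType.TIME.
 */
public final class TimeConfigExample {

  private TimeConfigExample() {
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "ozone.scm.client" prefix + "wait" key, as declared in
    // ConfigurationExample.
    conf.set("ozone.scm.client.wait", "30m");

    long waitSeconds = conf.getTimeDuration(
        "ozone.scm.client.wait", 1, TimeUnit.SECONDS);

    System.out.println(waitSeconds); // prints 1800
  }
}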

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -/** - * Exception to throw in case of a configuration problem. - */ -public class ConfigurationException extends RuntimeException { - public ConfigurationException() { - } - - public ConfigurationException(String message) { - super(message); - } - - public ConfigurationException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e789040d276..00000000000 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Generic configuration annotations, tools and generators. - */ -package org.apache.hadoop.hdds.conf; diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java deleted file mode 100644 index 2dd26696b27..00000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.util.concurrent.TimeUnit; - -/** - * Example configuration to test the configuration injection. - */ -@ConfigGroup(prefix = "ozone.scm.client") -public class ConfigurationExample { - - private String clientAddress; - - private String bindHost; - - private boolean compressionEnabled; - - private int port = 1234; - - private long waitTime = 1; - - @Config(key = "address", defaultValue = "localhost", description = "Client " - + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT) - public void setClientAddress(String clientAddress) { - this.clientAddress = clientAddress; - } - - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind " - + "host(To test string injection).", tags = ConfigTag.MANAGEMENT) - public void setBindHost(String bindHost) { - this.bindHost = bindHost; - } - - @Config(key = "compression.enabled", defaultValue = "true", description = - "Compression enabled. (Just to test boolean flag)", tags = - ConfigTag.MANAGEMENT) - public void setCompressionEnabled(boolean compressionEnabled) { - this.compressionEnabled = compressionEnabled; - } - - @Config(key = "port", defaultValue = "1234", description = "Port number " - + "config (To test in injection)", tags = ConfigTag.MANAGEMENT) - public void setPort(int port) { - this.port = port; - } - - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To " - + "test TIME config type)", tags = ConfigTag.MANAGEMENT) - public void setWaitTime(long waitTime) { - this.waitTime = waitTime; - } - - public String getClientAddress() { - return clientAddress; - } - - public String getBindHost() { - return bindHost; - } - - public boolean isCompressionEnabled() { - return compressionEnabled; - } - - public int getPort() { - return port; - } - - public long getWaitTime() { - return waitTime; - } -} diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java deleted file mode 100644 index 0edb01a02b4..00000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileAppender.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.conf; - -import java.io.StringWriter; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test the utility which loads/writes the config file fragments. - */ -public class TestConfigFileAppender { - - @Test - public void testInit() { - ConfigFileAppender appender = new ConfigFileAppender(); - - appender.init(); - - appender.addConfig("hadoop.scm.enabled", "true", "desc", - new ConfigTag[] {ConfigTag.OZONE, ConfigTag.SECURITY}); - - StringWriter builder = new StringWriter(); - appender.write(builder); - - Assert.assertTrue("Generated config should contain property key entry", - builder.toString().contains("hadoop.scm.enabled")); - - Assert.assertTrue("Generated config should contain tags", - builder.toString().contains("OZONE, SECURITY")); - } -} \ No newline at end of file diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java deleted file mode 100644 index e8b310d109c..00000000000 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * Testing configuration tools. - */ - -/** - * Testing configuration tools. - */ -package org.apache.hadoop.hdds.conf; diff --git a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor b/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor deleted file mode 100644 index f29efdab384..00000000000 --- a/hadoop-hdds/config/src/test/resources/META-INF/services/javax.annotation.processing.Processor +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.hdds.conf.ConfigFileGenerator diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 18128e89528..00000000000 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml deleted file mode 100644 index 0eef961733d..00000000000 --- a/hadoop-hdds/container-service/pom.xml +++ /dev/null @@ -1,103 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-container-service - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Container Service - Apache Hadoop HDDS Container Service - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-server-framework - - - io.dropwizard.metrics - metrics-core - - - - org.mockito - mockito-core - 2.2.0 - test - - - - org.yaml - snakeyaml - 1.16 - - - com.github.spotbugs - spotbugs - provided - - - - - - - org.apache.hadoop - hadoop-maven-plugins - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - - ${basedir}/../../hadoop-hdds/common/src/main/proto/ - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - StorageContainerDatanodeProtocol.proto - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java deleted file mode 100644 index c1997d6c899..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java +++ /dev/null @@ -1,384 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdds.HddsUtils.*; -import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs; - -/** - * Hdds stateless helper functions for server side components. - */ -public final class HddsServerUtil { - - private HddsServerUtil() { - } - - private static final Logger LOG = LoggerFactory.getLogger( - HddsServerUtil.class); - - /** - * Retrieve the socket address that should be used by DataNodes to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getScmAddressForDataNodes( - Configuration conf) { - // We try the following settings in decreasing priority to retrieve the - // target host. - // - OZONE_SCM_DATANODE_ADDRESS_KEY - // - OZONE_SCM_CLIENT_ADDRESS_KEY - // - OZONE_SCM_NAMES - // - Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - if (!host.isPresent()) { - // Fallback to Ozone SCM names. - Collection scmAddresses = getSCMAddresses(conf); - if (scmAddresses.size() > 1) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_NAMES + - " must contain a single hostname. Multiple SCM hosts are " + - "currently unsupported"); - } - host = Optional.of(scmAddresses.iterator().next().getHostName()); - } - - if (!host.isPresent()) { - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + - " must be defined. 
See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration " - + "for details on configuring Ozone."); - } - - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - - InetSocketAddress addr = NetUtils.createSocketAddr(host.get() + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - return addr; - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM client endpoint. - */ - public static InetSocketAddress getScmClientBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT) + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the SCM Block service. - * - * @param conf - * @return Target InetSocketAddress for the SCM block client endpoint. - */ - public static InetSocketAddress getScmBlockClientBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT) - + ":" - + port.orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT)); - } - - /** - * Retrieve the socket address that should be used by scm security server to - * service clients. - * - * @param conf - * @return Target InetSocketAddress for the SCM security service. - */ - public static InetSocketAddress getScmSecurityInetAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY); - - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT) - + ":" + port - .orElse(conf.getInt(ScmConfigKeys - .OZONE_SCM_SECURITY_SERVICE_PORT_KEY, - ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT))); - } - - /** - * Retrieve the socket address that should be used by DataNodes to connect - * to the SCM. - * - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getScmDataNodeBindAddress( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY); - - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - - return NetUtils.createSocketAddr( - host.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" + - port.orElse(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - } - - - /** - * Returns the interval in which the heartbeat processor thread runs. - * - * @param conf - Configuration - * @return long in Milliseconds. 
- */ - public static long getScmheartbeatCheckerInterval(Configuration conf) { - return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - } - - /** - * Heartbeat Interval - Defines the heartbeat frequency from a datanode to - * SCM. - * - * @param conf - Ozone Config - * @return - HB interval in milli seconds. - */ - public static long getScmHeartbeatInterval(Configuration conf) { - return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL, - HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - } - - /** - * Get the Stale Node interval, which is used by SCM to flag a datanode as - * stale, if the heartbeat from that node has been missing for this duration. - * - * @param conf - Configuration. - * @return - Long, Milliseconds to wait before flagging a node as stale. - */ - public static long getStaleNodeInterval(Configuration conf) { - - long staleNodeIntervalMs = - conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL, - OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - - long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf); - - long heartbeatIntervalMs = getScmHeartbeatInterval(conf); - - - // Make sure that StaleNodeInterval is configured way above the frequency - // at which we run the heartbeat thread. - // - // Here we check that staleNodeInterval is at least five times more than the - // frequency at which the accounting thread is going to run. - staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL, - staleNodeIntervalMs, OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - heartbeatThreadFrequencyMs, 5, 1000); - - // Make sure that stale node value is greater than configured value that - // datanodes are going to send HBs. - staleNodeIntervalMs = sanitizeUserArgs(OZONE_SCM_STALENODE_INTERVAL, - staleNodeIntervalMs, HDDS_HEARTBEAT_INTERVAL, heartbeatIntervalMs, 3, - 1000); - return staleNodeIntervalMs; - } - - /** - * Gets the interval for dead node flagging. This has to be a value that is - * greater than stale node value, and by transitive relation we also know - * that this value is greater than heartbeat interval and heartbeatProcess - * Interval. - * - * @param conf - Configuration. - * @return - the interval for dead node flagging. - */ - public static long getDeadNodeInterval(Configuration conf) { - long staleNodeIntervalMs = getStaleNodeInterval(conf); - long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, - OZONE_SCM_DEADNODE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - // Make sure that dead nodes Ms is at least twice the time for staleNodes - // with a max of 1000 times the staleNodes. - return sanitizeUserArgs(OZONE_SCM_DEADNODE_INTERVAL, deadNodeIntervalMs, - OZONE_SCM_STALENODE_INTERVAL, staleNodeIntervalMs, 2, 1000); - } - - /** - * Timeout value for the RPC from Datanode to SCM, primarily used for - * Heartbeats and container reports. - * - * @param conf - Ozone Config - * @return - Rpc timeout in Milliseconds. - */ - public static long getScmRpcTimeOutInMilliseconds(Configuration conf) { - return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, - OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - } - - /** - * Log Warn interval. - * - * @param conf - Ozone Config - * @return - Log warn interval. 
- */ - public static int getLogWarnInterval(Configuration conf) { - return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT, - OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT); - } - - /** - * returns the Container port. - * @param conf - Conf - * @return port number. - */ - public static int getContainerPort(Configuration conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - } - - - /** - * Return the list of service addresses for the Ozone SCM. This method is used - * by the DataNodes to determine the service instances to connect to. - * - * @param conf - * @return list of SCM service addresses. - */ - public static Map> - getScmServiceRpcAddresses(Configuration conf) { - - final Map serviceInstances = new HashMap<>(); - serviceInstances.put(OZONE_SCM_SERVICE_INSTANCE_ID, - getScmAddressForDataNodes(conf)); - - final Map> services = - new HashMap<>(); - services.put(OZONE_SCM_SERVICE_ID, serviceInstances); - return services; - } - - public static String getOzoneDatanodeRatisDirectory(Configuration conf) { - String storageDir = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); - - if (Strings.isNullOrEmpty(storageDir)) { - storageDir = getDefaultRatisDirectory(conf); - } - return storageDir; - } - - public static String getDefaultRatisDirectory(Configuration conf) { - LOG.warn("Storage directory for Ratis is not configured. It is a good " + - "idea to map this to an SSD disk. Falling back to {}", - HddsConfigKeys.OZONE_METADATA_DIRS); - File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); - return (new File(metaDirPath, "ratis")).getPath(); - } - - /** - * Get the path for datanode id file. - * - * @param conf - Configuration - * @return the path of datanode id as string - */ - public static String getDatanodeIdFilePath(Configuration conf) { - String dataNodeIDDirPath = - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - if (dataNodeIDDirPath == null) { - File metaDirPath = ServerUtils.getOzoneMetaDirPath(conf); - if (metaDirPath == null) { - // this means meta data is not found, in theory should not happen at - // this point because should've failed earlier. - throw new IllegalArgumentException("Unable to locate meta data" + - "directory when getting datanode id path"); - } - dataNodeIDDirPath = metaDirPath.toString(); - } - // Use default datanode id file name for file path - return new File(dataNodeIDDirPath, - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT).toString(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java deleted file mode 100644 index 4e520466254..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
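A minimal sketch of the datanode-to-SCM address fallback implemented by getScmAddressForDataNodes above (the class name and hostname are hypothetical): when neither OZONE_SCM_DATANODE_ADDRESS_KEY nor OZONE_SCM_CLIENT_ADDRESS_KEY is set, the host is taken from OZONE_SCM_NAMES, which must then contain a single host, and the port falls back to OZONE_SCM_DATANODE_PORT_DEFAULT.

package org.apache.hadoop.hdds.scm;

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/**
 * Hypothetical sketch of resolving the SCM endpoint the way a datanode does.
 */
public final class ScmAddressResolutionExample {

  private ScmAddressResolutionExample() {
  }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "scm.example.com");

    InetSocketAddress scmAddress =
        HddsServerUtil.getScmAddressForDataNodes(conf);

    // Host from OZONE_SCM_NAMES, port from OZONE_SCM_DATANODE_PORT_DEFAULT.
    System.out.println(scmAddress);
  }
}

Setting OZONE_SCM_NAMES to more than one host would make the same call throw IllegalArgumentException, as enforced in the method above.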
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -/** - * This is a class that tracks versions of SCM. - */ -public final class VersionInfo { - - // We will just be normal and use positive counting numbers for versions. - private final static VersionInfo[] VERSION_INFOS = - {new VersionInfo("First version of SCM", 1)}; - - - public static final String DESCRIPTION_KEY = "Description"; - private final String description; - private final int version; - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private VersionInfo(String description, int version) { - this.description = description; - this.version = version; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static VersionInfo[] getAllVersions() { - return VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. - * - * @return versionInfo - */ - public static VersionInfo getLatestVersion() { - return VERSION_INFOS[VERSION_INFOS.length - 1]; - } - - /** - * Return description. - * - * @return String - */ - public String getDescription() { - return description; - } - - /** - * Return the version. - * - * @return int. - */ - public int getVersion() { - return version; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 590546896a4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java deleted file mode 100644 index 3dcfcfe547c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -/** - * Simple http server to provide basic monitoring for hdds datanode. - *

- * This server is used to access default /conf /prom /prof endpoints. - */ -public class HddsDatanodeHttpServer extends BaseHttpServer { - - public HddsDatanodeHttpServer(Configuration conf) throws IOException { - super(conf, "hddsDatanode"); - } - - @Override - protected String getHttpAddressKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY; - } - - @Override - protected String getHttpBindHostKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_KEY; - } - - @Override - protected String getHttpsAddressKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_ADDRESS_KEY; - } - - @Override - protected String getHttpsBindHostKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_HOST_KEY; - } - - @Override - protected String getBindHostDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_HOST_DEFAULT; - } - - @Override - protected int getHttpBindPortDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_BIND_PORT_DEFAULT; - } - - @Override - protected int getHttpsBindPortDefault() { - return HddsConfigKeys.HDDS_DATANODE_HTTPS_BIND_PORT_DEFAULT; - } - - @Override - protected String getKeytabFile() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY; - } - - @Override - protected String getSpnegoPrincipal() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_KERBEROS_PRINCIPAL_KEY; - } - - @Override - protected String getEnabledKey() { - return HddsConfigKeys.HDDS_DATANODE_HTTP_ENABLED_KEY; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java deleted file mode 100644 index b13c37dd453..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ /dev/null @@ -1,495 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.util.ServicePlugin; -import org.apache.hadoop.util.StringUtils; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.security.KeyPair; -import java.security.cert.CertificateException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate; -import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; -import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY; -import static org.apache.hadoop.util.ExitUtil.terminate; - -/** - * Datanode service plugin to start the HDDS container services. 
- */ - -@Command(name = "ozone datanode", - hidden = true, description = "Start the datanode for ozone", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class HddsDatanodeService extends GenericCli implements ServicePlugin { - - private static final Logger LOG = LoggerFactory.getLogger( - HddsDatanodeService.class); - - private OzoneConfiguration conf; - private DatanodeDetails datanodeDetails; - private DatanodeStateMachine datanodeStateMachine; - private List plugins; - private CertificateClient dnCertClient; - private String component; - private HddsDatanodeHttpServer httpServer; - private boolean printBanner; - private String[] args; - private volatile AtomicBoolean isStopped = new AtomicBoolean(false); - - public HddsDatanodeService(boolean printBanner, String[] args) { - this.printBanner = printBanner; - this.args = args != null ? Arrays.copyOf(args, args.length) : null; - } - - /** - * Create an Datanode instance based on the supplied command-line arguments. - *

- * This method is intended for unit tests only. It suppresses the - * startup/shutdown message and skips registering Unix signal handlers. - * - * @param args command line arguments. - * @return Datanode instance - */ - @VisibleForTesting - public static HddsDatanodeService createHddsDatanodeService( - String[] args) { - return createHddsDatanodeService(args, false); - } - - /** - * Create an Datanode instance based on the supplied command-line arguments. - * - * @param args command line arguments. - * @param printBanner if true, then log a verbose startup message. - * @return Datanode instance - */ - private static HddsDatanodeService createHddsDatanodeService( - String[] args, boolean printBanner) { - return new HddsDatanodeService(printBanner, args); - } - - public static void main(String[] args) { - try { - HddsDatanodeService hddsDatanodeService = - createHddsDatanodeService(args, true); - hddsDatanodeService.run(args); - } catch (Throwable e) { - LOG.error("Exception in HddsDatanodeService.", e); - terminate(1, e); - } - } - - public static Logger getLogger() { - return LOG; - } - - @Override - public Void call() throws Exception { - if (printBanner) { - StringUtils - .startupShutdownMessage(HddsDatanodeService.class, args, LOG); - } - start(createOzoneConfiguration()); - join(); - return null; - } - - public void setConfiguration(OzoneConfiguration configuration) { - this.conf = configuration; - } - - /** - * Starts HddsDatanode services. - * - * @param service The service instance invoking this method - */ - @Override - public void start(Object service) { - if (service instanceof Configurable) { - start(new OzoneConfiguration(((Configurable) service).getConf())); - } else { - start(new OzoneConfiguration()); - } - } - - public void start(OzoneConfiguration configuration) { - setConfiguration(configuration); - start(); - } - - public void start() { - OzoneConfiguration.activate(); - HddsUtils.initializeMetrics(conf, "HddsDatanode"); - if (HddsUtils.isHddsEnabled(conf)) { - try { - String hostname = HddsUtils.getHostName(conf); - String ip = InetAddress.getByName(hostname).getHostAddress(); - datanodeDetails = initializeDatanodeDetails(); - datanodeDetails.setHostName(hostname); - datanodeDetails.setIpAddress(ip); - TracingUtil.initTracing( - "HddsDatanodeService." + datanodeDetails.getUuidString() - .substring(0, 8)); - LOG.info("HddsDatanodeService host:{} ip:{}", hostname, ip); - // Authenticate Hdds Datanode service if security is enabled - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - component = "dn-" + datanodeDetails.getUuidString(); - - dnCertClient = new DNCertificateClient(new SecurityConfig(conf), - datanodeDetails.getCertSerialId()); - - if (SecurityUtil.getAuthenticationMethod(conf).equals( - UserGroupInformation.AuthenticationMethod.KERBEROS)) { - LOG.info("Ozone security is enabled. Attempting login for Hdds " + - "Datanode user. Principal: {},keytab: {}", conf.get( - DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY), - conf.get(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY)); - - UserGroupInformation.setConfiguration(conf); - - SecurityUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, - DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hostname); - } else { - throw new AuthenticationException(SecurityUtil. - getAuthenticationMethod(conf) + " authentication method not " + - "supported. 
Datanode user" + " login " + "failed."); - } - LOG.info("Hdds Datanode login successful."); - } - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - initializeCertificateClient(conf); - } - datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf, - dnCertClient, this::terminateDatanode); - try { - httpServer = new HddsDatanodeHttpServer(conf); - httpServer.start(); - } catch (Exception ex) { - LOG.error("HttpServer failed to start.", ex); - } - startPlugins(); - // Starting HDDS Daemons - datanodeStateMachine.startDaemon(); - } catch (IOException e) { - throw new RuntimeException("Can't start the HDDS datanode plugin", e); - } catch (AuthenticationException ex) { - throw new RuntimeException("Fail to authentication when starting" + - " HDDS datanode plugin", ex); - } - } - } - - /** - * Initializes secure Datanode. - * */ - @VisibleForTesting - public void initializeCertificateClient(OzoneConfiguration config) - throws IOException { - LOG.info("Initializing secure Datanode."); - - CertificateClient.InitResponse response = dnCertClient.init(); - LOG.info("Init response: {}", response); - switch (response) { - case SUCCESS: - LOG.info("Initialization successful, case:{}.", response); - break; - case GETCERT: - getSCMSignedCert(config); - LOG.info("Successfully stored SCM signed certificate, case:{}.", - response); - break; - case FAILURE: - LOG.error("DN security initialization failed, case:{}.", response); - throw new RuntimeException("DN security initialization failed."); - case RECOVER: - LOG.error("DN security initialization failed, case:{}. OM certificate " + - "is missing.", response); - throw new RuntimeException("DN security initialization failed."); - default: - LOG.error("DN security initialization failed. Init response: {}", - response); - throw new RuntimeException("DN security initialization failed."); - } - } - - /** - * Get SCM signed certificate and store it using certificate client. - * @param config - * */ - private void getSCMSignedCert(OzoneConfiguration config) { - try { - PKCS10CertificationRequest csr = getCSR(config); - // TODO: For SCM CA we should fetch certificate from multiple SCMs. - SCMSecurityProtocolClientSideTranslatorPB secureScmClient = - HddsUtils.getScmSecurityClient(config); - SCMGetCertResponseProto response = secureScmClient. - getDataNodeCertificateChain(datanodeDetails.getProtoBufMessage(), - getEncodedString(csr)); - // Persist certificates. - if(response.hasX509CACertificate()) { - String pemEncodedCert = response.getX509Certificate(); - dnCertClient.storeCertificate(pemEncodedCert, true); - dnCertClient.storeCertificate(response.getX509CACertificate(), true, - true); - datanodeDetails.setCertSerialId(getX509Certificate(pemEncodedCert). - getSerialNumber().toString()); - persistDatanodeDetails(datanodeDetails); - } else { - throw new RuntimeException("Unable to retrieve datanode certificate " + - "chain"); - } - } catch (IOException | CertificateException e) { - LOG.error("Error while storing SCM signed certificate.", e); - throw new RuntimeException(e); - } - } - - /** - * Creates CSR for DN. 
- * @param config - * */ - @VisibleForTesting - public PKCS10CertificationRequest getCSR(Configuration config) - throws IOException { - CertificateSignRequest.Builder builder = dnCertClient.getCSRBuilder(); - KeyPair keyPair = new KeyPair(dnCertClient.getPublicKey(), - dnCertClient.getPrivateKey()); - - String hostname = InetAddress.getLocalHost().getCanonicalHostName(); - String subject = UserGroupInformation.getCurrentUser() - .getShortUserName() + "@" + hostname; - - builder.setCA(false) - .setKey(keyPair) - .setConfiguration(config) - .setSubject(subject); - - LOG.info("Creating csr for DN-> subject:{}", subject); - return builder.build(); - } - - /** - * Returns DatanodeDetails or null in case of Error. - * - * @return DatanodeDetails - */ - private DatanodeDetails initializeDatanodeDetails() - throws IOException { - String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (idFilePath == null || idFilePath.isEmpty()) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR + - " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration" + - " for details on configuring Ozone."); - } - - Preconditions.checkNotNull(idFilePath); - File idFile = new File(idFilePath); - if (idFile.exists()) { - return ContainerUtils.readDatanodeDetailsFrom(idFile); - } else { - // There is no datanode.id file, this might be the first time datanode - // is started. - String datanodeUuid = UUID.randomUUID().toString(); - return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build(); - } - } - - /** - * Persist DatanodeDetails to file system. - * @param dnDetails - * - * @return DatanodeDetails - */ - private void persistDatanodeDetails(DatanodeDetails dnDetails) - throws IOException { - String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (idFilePath == null || idFilePath.isEmpty()) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR + - " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration" + - " for details on configuring Ozone."); - } - - Preconditions.checkNotNull(idFilePath); - File idFile = new File(idFilePath); - ContainerUtils.writeDatanodeDetailsTo(dnDetails, idFile); - } - - /** - * Starts all the service plugins which are configured using - * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY. - */ - private void startPlugins() { - try { - plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY, - ServicePlugin.class); - } catch (RuntimeException e) { - String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY); - LOG.error("Unable to load HDDS DataNode plugins. " + - "Specified list of plugins: {}", - pluginsValue, e); - throw e; - } - for (ServicePlugin plugin : plugins) { - try { - plugin.start(this); - LOG.info("Started plug-in {}", plugin); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be started", plugin, t); - } - } - } - - /** - * Returns the OzoneConfiguration used by this HddsDatanodeService. - * - * @return OzoneConfiguration - */ - public OzoneConfiguration getConf() { - return conf; - } - - /** - * Return DatanodeDetails if set, return null otherwise. 
- * - * @return DatanodeDetails - */ - @VisibleForTesting - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - @VisibleForTesting - public DatanodeStateMachine getDatanodeStateMachine() { - return datanodeStateMachine; - } - - public void join() { - if (datanodeStateMachine != null) { - try { - datanodeStateMachine.join(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.info("Interrupted during StorageContainerManager join."); - } - } - } - - public void terminateDatanode() { - stop(); - terminate(1); - } - - - @Override - public void stop() { - if (!isStopped.get()) { - isStopped.set(true); - if (plugins != null) { - for (ServicePlugin plugin : plugins) { - try { - plugin.stop(); - LOG.info("Stopped plug-in {}", plugin); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be stopped", plugin, t); - } - } - } - if (datanodeStateMachine != null) { - datanodeStateMachine.stopDaemon(); - } - if (httpServer != null) { - try { - httpServer.stop(); - } catch (Exception e) { - LOG.error("Stopping HttpServer is failed.", e); - } - } - } - } - - @Override - public void close() { - if (plugins != null) { - for (ServicePlugin plugin : plugins) { - try { - plugin.close(); - } catch (Throwable t) { - LOG.warn("ServicePlugin {} could not be closed", plugin, t); - } - } - } - } - - @VisibleForTesting - public String getComponent() { - return component; - } - - public CertificateClient getCertificateClient() { - return dnCertClient; - } - - @VisibleForTesting - public void setCertificateClient(CertificateClient client) { - dnCertClient = client; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java deleted file mode 100644 index 02c1431fb3a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeStopService.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
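Because startPlugins() above instantiates whatever classes are listed under OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY and calls start(this) on each of them, a datanode extension only needs to implement Hadoop's ServicePlugin interface. A minimal hypothetical sketch (the class name is illustrative, not part of the deleted sources):

package org.apache.hadoop.ozone;

import org.apache.hadoop.util.ServicePlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Hypothetical datanode plugin that only logs lifecycle events; it would be
 * activated by listing its class name under
 * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY.
 */
public class LoggingDatanodePlugin implements ServicePlugin {

  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingDatanodePlugin.class);

  @Override
  public void start(Object service) {
    // The service handed in here is the running HddsDatanodeService.
    LOG.info("Plugin started for {}", service.getClass().getSimpleName());
  }

  @Override
  public void stop() {
    LOG.info("Plugin stopped");
  }

  @Override
  public void close() {
    // Nothing to release in this sketch.
  }
}

A plugin failure during start is only logged as a warning by startPlugins(), so the datanode itself keeps running.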

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -/** - * Interface which declares a method to stop HddsDatanodeService. - */ -public interface HddsDatanodeStopService { - - void stopService(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java deleted file mode 100644 index 2d58c39a151..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common; - -/** - * Datanode layout version which describes information about the layout version - * on the datanode. - */ -public final class DataNodeLayoutVersion { - - // We will just be normal and use positive counting numbers for versions. - private final static DataNodeLayoutVersion[] VERSION_INFOS = - {new DataNodeLayoutVersion(1, "HDDS Datanode LayOut Version 1")}; - - private final String description; - private final int version; - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private DataNodeLayoutVersion(int version, String description) { - this.description = description; - this.version = version; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static DataNodeLayoutVersion[] getAllVersions() { - return VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. - * - * @return versionInfo - */ - public static DataNodeLayoutVersion getLatestVersion() { - return VERSION_INFOS[VERSION_INFOS.length - 1]; - } - - /** - * Return description. - * - * @return String - */ - public String getDescription() { - return description; - } - - /** - * Return the version. - * - * @return int. 
- */ - public int getVersion() { - return version; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java deleted file mode 100644 index 9ea4adf8a72..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableQuantiles; -import org.apache.hadoop.metrics2.lib.MutableRate; - -/** - * - * This class is for maintaining the various Storage Container - * DataNode statistics and publishing them through the metrics interfaces. - * This also registers the JMX MBean for RPC. - *

- * This class has a number of metrics variables that are publicly accessible; - * these variables (objects) have methods to update their values; - * for example: - *

{@link #numOps}.inc() - * - */ -@InterfaceAudience.Private -@Metrics(about="Storage Container DataNode Metrics", context="dfs") -public class ContainerMetrics { - public static final String STORAGE_CONTAINER_METRICS = - "StorageContainerMetrics"; - @Metric private MutableCounterLong numOps; - private MutableCounterLong[] numOpsArray; - private MutableCounterLong[] opsBytesArray; - private MutableRate[] opsLatency; - private MutableQuantiles[][] opsLatQuantiles; - private MetricsRegistry registry = null; - - public ContainerMetrics(int[] intervals) { - int numEnumEntries = ContainerProtos.Type.values().length; - final int len = intervals.length; - this.numOpsArray = new MutableCounterLong[numEnumEntries]; - this.opsBytesArray = new MutableCounterLong[numEnumEntries]; - this.opsLatency = new MutableRate[numEnumEntries]; - this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len]; - this.registry = new MetricsRegistry("StorageContainerMetrics"); - for (int i = 0; i < numEnumEntries; i++) { - numOpsArray[i] = registry.newCounter( - "num" + ContainerProtos.Type.forNumber(i + 1), - "number of " + ContainerProtos.Type.forNumber(i + 1) + " ops", - (long) 0); - opsBytesArray[i] = registry.newCounter( - "bytes" + ContainerProtos.Type.forNumber(i + 1), - "bytes used by " + ContainerProtos.Type.forNumber(i + 1) + "op", - (long) 0); - opsLatency[i] = registry.newRate( - "latency" + ContainerProtos.Type.forNumber(i + 1), - ContainerProtos.Type.forNumber(i + 1) + " op"); - - for (int j = 0; j < len; j++) { - int interval = intervals[j]; - String quantileName = ContainerProtos.Type.forNumber(i + 1) + "Nanos" - + interval + "s"; - opsLatQuantiles[i][j] = registry.newQuantiles(quantileName, - "latency of Container ops", "ops", "latency", interval); - } - } - } - - public static ContainerMetrics create(Configuration conf) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - // Percentile measurement is off by default, by watching no intervals - int[] intervals = - conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY); - return ms.register(STORAGE_CONTAINER_METRICS, - "Storage Container Node Metrics", - new ContainerMetrics(intervals)); - } - - public static void remove() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(STORAGE_CONTAINER_METRICS); - } - - public void incContainerOpsMetrics(ContainerProtos.Type type) { - numOps.incr(); - numOpsArray[type.ordinal()].incr(); - } - - public long getContainerOpsMetrics(ContainerProtos.Type type){ - return numOpsArray[type.ordinal()].value(); - } - - public void incContainerOpsLatencies(ContainerProtos.Type type, - long latencyNanos) { - opsLatency[type.ordinal()].add(latencyNanos); - for (MutableQuantiles q: opsLatQuantiles[type.ordinal()]) { - q.add(latencyNanos); - } - } - - public void incContainerBytesStats(ContainerProtos.Type type, long bytes) { - opsBytesArray[type.ordinal()].incr(bytes); - } - - public long getContainerBytesMetrics(ContainerProtos.Type type){ - return opsBytesArray[type.ordinal()].value(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java deleted file mode 100644 index ff6dec83cd0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the 
Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import static org.apache.commons.io.FilenameUtils.removeExtension; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_CHECKSUM_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM; -import static org.apache.hadoop.ozone.container.common.impl.ContainerData.CHARSET_ENCODING; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.nio.file.Paths; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; - -import com.google.common.base.Preconditions; - -/** - * A set of helper functions to create proper responses. - */ -public final class ContainerUtils { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerUtils.class); - - private ContainerUtils() { - //never constructed. - } - - /** - * Returns a Container Command Response Builder with the specified result - * and message. - * @param request requestProto message. - * @param result result of the command. - * @param message response message. - * @return ContainerCommand Response Builder. - */ - public static ContainerCommandResponseProto.Builder - getContainerCommandResponse( - ContainerCommandRequestProto request, Result result, String message) { - return ContainerCommandResponseProto.newBuilder() - .setCmdType(request.getCmdType()) - .setTraceID(request.getTraceID()) - .setResult(result) - .setMessage(message); - } - - /** - * Returns a Container Command Response Builder. This call is used to build - * success responses. Calling function can add other fields to the response - * as required. - * @param request requestProto message. 
- * @return ContainerCommand Response Builder with result as SUCCESS. - */ - public static ContainerCommandResponseProto.Builder getSuccessResponseBuilder( - ContainerCommandRequestProto request) { - return - ContainerCommandResponseProto.newBuilder() - .setCmdType(request.getCmdType()) - .setTraceID(request.getTraceID()) - .setResult(Result.SUCCESS); - } - - /** - * Returns a Container Command Response. This call is used for creating null - * success responses. - * @param request requestProto message. - * @return ContainerCommand Response with result as SUCCESS. - */ - public static ContainerCommandResponseProto getSuccessResponse( - ContainerCommandRequestProto request) { - ContainerCommandResponseProto.Builder builder = - getContainerCommandResponse(request, Result.SUCCESS, ""); - return builder.build(); - } - - /** - * We found a command type but no associated payload for the command. Hence - * return malformed Command as response. - * - * @param request - Protobuf message. - * @return ContainerCommandResponseProto - MALFORMED_REQUEST. - */ - public static ContainerCommandResponseProto malformedRequest( - ContainerCommandRequestProto request) { - return getContainerCommandResponse(request, Result.MALFORMED_REQUEST, - "Cmd type does not match the payload.").build(); - } - - /** - * We found a command type that is not supported yet. - * - * @param request - Protobuf message. - * @return ContainerCommandResponseProto - UNSUPPORTED_REQUEST. - */ - public static ContainerCommandResponseProto unsupportedRequest( - ContainerCommandRequestProto request) { - return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST, - "Server does not support this command yet.").build(); - } - - /** - * Logs the error and returns a response to the caller. - * - * @param log - Logger - * @param ex - Exception - * @param request - Request Object - * @return Response - */ - public static ContainerCommandResponseProto logAndReturnError( - Logger log, StorageContainerException ex, - ContainerCommandRequestProto request) { - log.info("Operation: {} : Trace ID: {} : Message: {} : Result: {}", - request.getCmdType().name(), request.getTraceID(), - ex.getMessage(), ex.getResult().getValueDescriptor().getName()); - return getContainerCommandResponse(request, ex.getResult(), ex.getMessage()) - .build(); - } - - /** - * get containerName from a container file. - * - * @param containerFile - File - * @return Name of the container. - */ - public static String getContainerNameFromFile(File containerFile) { - Preconditions.checkNotNull(containerFile); - return Paths.get(containerFile.getParent()).resolve( - removeExtension(containerFile.getName())).toString(); - } - - public static long getContainerIDFromFile(File containerFile) { - Preconditions.checkNotNull(containerFile); - String containerID = getContainerNameFromFile(containerFile); - return Long.parseLong(containerID); - } - - /** - * Verifies that this is indeed a new container. - * - * @param containerFile - Container File to verify - * @throws FileAlreadyExistsException - */ - public static void verifyIsNewContainer(File containerFile) throws - FileAlreadyExistsException { - Logger log = LoggerFactory.getLogger(ContainerSet.class); - Preconditions.checkNotNull(containerFile, "containerFile Should not be " + - "null"); - if (containerFile.getParentFile().exists()) { - log.error("Container already exists on disk. 
File: {}", containerFile - .toPath()); - throw new FileAlreadyExistsException("container already exists on " + - "disk."); - } - } - - public static String getContainerDbFileName(String containerName) { - return containerName + OzoneConsts.DN_CONTAINER_DB; - } - - /** - * Persistent a {@link DatanodeDetails} to a local file. - * - * @throws IOException when read/write error occurs - */ - public synchronized static void writeDatanodeDetailsTo( - DatanodeDetails datanodeDetails, File path) throws IOException { - if (path.exists()) { - if (!path.delete() || !path.createNewFile()) { - throw new IOException("Unable to overwrite the datanode ID file."); - } - } else { - if (!path.getParentFile().exists() && - !path.getParentFile().mkdirs()) { - throw new IOException("Unable to create datanode ID directories."); - } - } - DatanodeIdYaml.createDatanodeIdFile(datanodeDetails, path); - } - - /** - * Read {@link DatanodeDetails} from a local ID file. - * - * @param path ID file local path - * @return {@link DatanodeDetails} - * @throws IOException If the id file is malformed or other I/O exceptions - */ - public synchronized static DatanodeDetails readDatanodeDetailsFrom(File path) - throws IOException { - if (!path.exists()) { - throw new IOException("Datanode ID file not found."); - } - try { - return DatanodeIdYaml.readDatanodeIdFile(path); - } catch (IOException e) { - LOG.warn("Error loading DatanodeDetails yaml from " + - path.getAbsolutePath(), e); - // Try to load as protobuf before giving up - try (FileInputStream in = new FileInputStream(path)) { - return DatanodeDetails.getFromProtoBuf( - HddsProtos.DatanodeDetailsProto.parseFrom(in)); - } catch (IOException io) { - throw new IOException("Failed to parse DatanodeDetails from " - + path.getAbsolutePath(), io); - } - } - } - - /** - * Verify that the checksum stored in containerData is equal to the - * computed checksum. - * @param containerData - * @throws IOException - */ - public static void verifyChecksum(ContainerData containerData) - throws IOException { - String storedChecksum = containerData.getChecksum(); - - Yaml yaml = ContainerDataYaml.getYamlForContainerType( - containerData.getContainerType()); - containerData.computeAndSetChecksum(yaml); - String computedChecksum = containerData.getChecksum(); - - if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) { - throw new StorageContainerException("Container checksum error for " + - "ContainerID: " + containerData.getContainerID() + ". " + - "\nStored Checksum: " + storedChecksum + - "\nExpected Checksum: " + computedChecksum, - CONTAINER_CHECKSUM_ERROR); - } - } - - /** - * Return the SHA-256 chesksum of the containerData. - * @param containerDataYamlStr ContainerData as a Yaml String - * @return Checksum of the container data - * @throws StorageContainerException - */ - public static String getChecksum(String containerDataYamlStr) - throws StorageContainerException { - MessageDigest sha; - try { - sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING)); - return DigestUtils.sha256Hex(sha.digest()); - } catch (NoSuchAlgorithmException e) { - throw new StorageContainerException("Unable to create Message Digest, " + - "usually this is a java configuration issue.", NO_SUCH_ALGORITHM); - } - } - - /** - * Get the .container file from the containerBaseDir. - * @param containerBaseDir container base directory. 
The name of this - * directory is same as the containerID - * @return the .container file - */ - public static File getContainerFile(File containerBaseDir) { - // Container file layout is - // .../<>/metadata/<>.container - String containerFilePath = OzoneConsts.CONTAINER_META_PATH + File.separator - + getContainerID(containerBaseDir) + OzoneConsts.CONTAINER_EXTENSION; - return new File(containerBaseDir, containerFilePath); - } - - /** - * ContainerID can be decoded from the container base directory name. - */ - public static long getContainerID(File containerBaseDir) { - return Long.parseLong(containerBaseDir.getName()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java deleted file mode 100644 index d3efa98795a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
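For reference, the ContainerUtils layout helpers deleted above resolve everything from the container base directory, whose name is the containerID. A minimal usage sketch in Java; the base-directory path and the id 123 are hypothetical, and the metadata subdirectory and ".container" extension come from OzoneConsts:

import java.io.File;

import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;

public final class ContainerFileLookupSketch {
  public static void main(String[] args) {
    // Hypothetical base directory; by convention its name is the containerID.
    File containerBaseDir = new File("/data/hdds/containers/123");

    long containerId = ContainerUtils.getContainerID(containerBaseDir);
    File containerFile = ContainerUtils.getContainerFile(containerBaseDir);

    // containerFile points at <baseDir>/<CONTAINER_META_PATH>/<id>.container,
    // the yaml descriptor that ContainerDataYaml reads and writes.
    System.out.println("container " + containerId + " -> " + containerFile);
  }
}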

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.LinkedHashMap; -import java.util.Map; - -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; - -/** - * Class for creating datanode.id file in yaml format. - */ -public final class DatanodeIdYaml { - - private DatanodeIdYaml() { - // static helper methods only, no state. - } - - /** - * Creates a yaml file using DatnodeDetails. This method expects the path - * validation to be performed by the caller. - * - * @param datanodeDetails {@link DatanodeDetails} - * @param path Path to datnode.id file - */ - public static void createDatanodeIdFile(DatanodeDetails datanodeDetails, - File path) throws IOException { - DumperOptions options = new DumperOptions(); - options.setPrettyFlow(true); - options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW); - Yaml yaml = new Yaml(options); - - try (Writer writer = new OutputStreamWriter( - new FileOutputStream(path), "UTF-8")) { - yaml.dump(getDatanodeDetailsYaml(datanodeDetails), writer); - } - } - - /** - * Read datanode.id from file. - */ - public static DatanodeDetails readDatanodeIdFile(File path) - throws IOException { - DatanodeDetails datanodeDetails; - try (FileInputStream inputFileStream = new FileInputStream(path)) { - Yaml yaml = new Yaml(); - DatanodeDetailsYaml datanodeDetailsYaml; - try { - datanodeDetailsYaml = - yaml.loadAs(inputFileStream, DatanodeDetailsYaml.class); - } catch (Exception e) { - throw new IOException("Unable to parse yaml file.", e); - } - - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(datanodeDetailsYaml.getUuid()) - .setIpAddress(datanodeDetailsYaml.getIpAddress()) - .setHostName(datanodeDetailsYaml.getHostName()) - .setCertSerialId(datanodeDetailsYaml.getCertSerialId()); - - if (!MapUtils.isEmpty(datanodeDetailsYaml.getPortDetails())) { - for (Map.Entry portEntry : - datanodeDetailsYaml.getPortDetails().entrySet()) { - builder.addPort(DatanodeDetails.newPort( - DatanodeDetails.Port.Name.valueOf(portEntry.getKey()), - portEntry.getValue())); - } - } - datanodeDetails = builder.build(); - } - - return datanodeDetails; - } - - /** - * Datanode details bean to be written to the yaml file. - */ - public static class DatanodeDetailsYaml { - private String uuid; - private String ipAddress; - private String hostName; - private String certSerialId; - private Map portDetails; - - public DatanodeDetailsYaml() { - // Needed for snake-yaml introspection. 
- } - - private DatanodeDetailsYaml(String uuid, String ipAddress, - String hostName, String certSerialId, - Map portDetails) { - this.uuid = uuid; - this.ipAddress = ipAddress; - this.hostName = hostName; - this.certSerialId = certSerialId; - this.portDetails = portDetails; - } - - public String getUuid() { - return uuid; - } - - public String getIpAddress() { - return ipAddress; - } - - public String getHostName() { - return hostName; - } - - public String getCertSerialId() { - return certSerialId; - } - - public Map getPortDetails() { - return portDetails; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public void setHostName(String hostName) { - this.hostName = hostName; - } - - public void setCertSerialId(String certSerialId) { - this.certSerialId = certSerialId; - } - - public void setPortDetails(Map portDetails) { - this.portDetails = portDetails; - } - } - - private static DatanodeDetailsYaml getDatanodeDetailsYaml( - DatanodeDetails datanodeDetails) { - - Map portDetails = new LinkedHashMap<>(); - if (!CollectionUtils.isEmpty(datanodeDetails.getPorts())) { - for (DatanodeDetails.Port port : datanodeDetails.getPorts()) { - portDetails.put(port.getName().toString(), port.getValue()); - } - } - - return new DatanodeDetailsYaml( - datanodeDetails.getUuid().toString(), - datanodeDetails.getIpAddress(), - datanodeDetails.getHostName(), - datanodeDetails.getCertSerialId(), - portDetails); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java deleted file mode 100644 index 4db6d3120fd..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
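The DatanodeIdYaml helper deleted above round-trips a DatanodeDetails instance through the datanode.id yaml file; ContainerUtils.writeDatanodeDetailsTo and readDatanodeDetailsFrom delegate to it. A minimal sketch, assuming a writable /tmp path and using only the builder calls that appear in readDatanodeIdFile:

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.common.helpers.DatanodeIdYaml;

public final class DatanodeIdFileSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical datanode identity; only fields persisted by the yaml bean are set.
    DatanodeDetails details = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID().toString())
        .setIpAddress("127.0.0.1")
        .setHostName("localhost")
        .build();

    File idFile = new File("/tmp/datanode.id");   // hypothetical location
    DatanodeIdYaml.createDatanodeIdFile(details, idFile);

    DatanodeDetails reloaded = DatanodeIdYaml.readDatanodeIdFile(idFile);
    System.out.println("reloaded datanode uuid: " + reloaded.getUuid());
  }
}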

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.ozone.OzoneConsts; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.util.Properties; - -/** - * This is a utility class which helps to create the version file on datanode - * and also validate the content of the version file. - */ -public class DatanodeVersionFile { - - private final String storageId; - private final String clusterId; - private final String datanodeUuid; - private final long cTime; - private final int layOutVersion; - - public DatanodeVersionFile(String storageId, String clusterId, - String datanodeUuid, long cTime, int layOutVersion) { - this.storageId = storageId; - this.clusterId = clusterId; - this.datanodeUuid = datanodeUuid; - this.cTime = cTime; - this.layOutVersion = layOutVersion; - } - - private Properties createProperties() { - Properties properties = new Properties(); - properties.setProperty(OzoneConsts.STORAGE_ID, storageId); - properties.setProperty(OzoneConsts.CLUSTER_ID, clusterId); - properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid); - properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime)); - properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf( - layOutVersion)); - return properties; - } - - /** - * Creates a version File in specified path. - * @param path - * @throws IOException - */ - public void createVersionFile(File path) throws - IOException { - try (RandomAccessFile file = new RandomAccessFile(path, "rws"); - FileOutputStream out = new FileOutputStream(file.getFD())) { - file.getChannel().truncate(0); - Properties properties = createProperties(); - /* - * If server is interrupted before this line, - * the version file will remain unchanged. - */ - properties.store(out, null); - } - } - - - /** - * Creates a property object from the specified file content. - * @param versionFile - * @return Properties - * @throws IOException - */ - public static Properties readFrom(File versionFile) throws IOException { - try (RandomAccessFile file = new RandomAccessFile(versionFile, "rws"); - FileInputStream in = new FileInputStream(file.getFD())) { - Properties props = new Properties(); - props.load(in); - return props; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java deleted file mode 100644 index 9d0ec957f28..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
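DatanodeVersionFile, deleted above, persists the datanode VERSION properties (storage id, cluster id, datanode uuid, creation time, layout version) and reads them back for validation. A minimal round-trip sketch with hypothetical ids and a hypothetical /tmp path:

import java.io.File;
import java.io.IOException;
import java.util.Properties;
import java.util.UUID;

import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;

public final class DatanodeVersionFileSketch {
  public static void main(String[] args) throws IOException {
    DatanodeVersionFile versionFile = new DatanodeVersionFile(
        UUID.randomUUID().toString(),                        // storageId
        UUID.randomUUID().toString(),                        // clusterId
        UUID.randomUUID().toString(),                        // datanodeUuid
        System.currentTimeMillis(),                          // cTime
        DataNodeLayoutVersion.getLatestVersion().getVersion());

    File path = new File("/tmp/VERSION");                    // hypothetical path
    versionFile.createVersionFile(path);

    Properties props = DatanodeVersionFile.readFrom(path);
    System.out.println("layout version = "
        + props.getProperty(OzoneConsts.LAYOUTVERSION));
  }
}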
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; - -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.util.StringUtils; - -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * A helper class to wrap the info about under deletion container blocks. - */ -public final class DeletedContainerBlocksSummary { - - private final List blocks; - // key : txID - // value : times of this tx has been processed - private final Map txSummary; - // key : container name - // value : the number of blocks need to be deleted in this container - // if the message contains multiple entries for same block, - // blocks will be merged - private final Map blockSummary; - // total number of blocks in this message - private int numOfBlocks; - - private DeletedContainerBlocksSummary(List blocks) { - this.blocks = blocks; - txSummary = Maps.newHashMap(); - blockSummary = Maps.newHashMap(); - blocks.forEach(entry -> { - txSummary.put(entry.getTxID(), entry.getCount()); - if (blockSummary.containsKey(entry.getContainerID())) { - blockSummary.put(entry.getContainerID(), - blockSummary.get(entry.getContainerID()) - + entry.getLocalIDCount()); - } else { - blockSummary.put(entry.getContainerID(), entry.getLocalIDCount()); - } - numOfBlocks += entry.getLocalIDCount(); - }); - } - - public static DeletedContainerBlocksSummary getFrom( - List blocks) { - return new DeletedContainerBlocksSummary(blocks); - } - - public int getNumOfBlocks() { - return numOfBlocks; - } - - public int getNumOfContainers() { - return blockSummary.size(); - } - - public String getTXIDs() { - return String.join(",", txSummary.keySet() - .stream().map(String::valueOf).collect(Collectors.toList())); - } - - public String getTxIDSummary() { - List txSummaryEntry = txSummary.entrySet().stream() - .map(entry -> entry.getKey() + "(" + entry.getValue() + ")") - .collect(Collectors.toList()); - return "[" + String.join(",", txSummaryEntry) + "]"; - } - - @Override public String toString() { - StringBuffer sb = new StringBuffer(); - for (DeletedBlocksTransaction blks : blocks) { - sb.append(" ") - .append("TXID=") - .append(blks.getTxID()) - .append(", ") - .append("TimesProceed=") - .append(blks.getCount()) - .append(", ") - .append(blks.getContainerID()) - .append(" : [") - .append(StringUtils.join(',', blks.getLocalIDList())).append("]") - .append("\n"); - } - return sb.toString(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java deleted file mode 100644 index 21f31e1bbe3..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
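DeletedContainerBlocksSummary, deleted above, aggregates the block-deletion transactions a datanode receives so they can be logged compactly per transaction and per container. A small sketch; the protobuf setter names (setTxID, setContainerID, addLocalID, setCount) are assumed from the getters the summary reads:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.ozone.container.common.helpers.DeletedContainerBlocksSummary;

public final class DeletedBlocksSummarySketch {
  public static void main(String[] args) {
    // One hypothetical transaction: delete local blocks 5 and 6 of container 100.
    DeletedBlocksTransaction tx = DeletedBlocksTransaction.newBuilder()
        .setTxID(1L)
        .setContainerID(100L)
        .addLocalID(5L)
        .addLocalID(6L)
        .setCount(0)                  // times this tx has been processed so far
        .build();

    List<DeletedBlocksTransaction> txs = Collections.singletonList(tx);
    DeletedContainerBlocksSummary summary =
        DeletedContainerBlocksSummary.getFrom(txs);

    System.out.println(summary.getNumOfContainers() + " container(s), "
        + summary.getNumOfBlocks() + " block(s), TXIDs=" + summary.getTXIDs());
  }
}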
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.helpers; -/** - Contains protocol buffer helper classes and utilites used in - impl. - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java deleted file mode 100644 index d1b1bd66493..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - - -import com.google.common.base.Preconditions; - -/** - * Defines layout versions for the Chunks. - */ - -public final class ChunkLayOutVersion { - - private final static ChunkLayOutVersion[] CHUNK_LAYOUT_VERSION_INFOS = - {new ChunkLayOutVersion(1, "Data without checksums.")}; - - private int version; - private String description; - - - /** - * Never created outside this class. - * - * @param description -- description - * @param version -- version number - */ - private ChunkLayOutVersion(int version, String description) { - this.version = version; - this.description = description; - } - - /** - * Return ChunkLayOutVersion object for the chunkVersion. - * @param chunkVersion - * @return ChunkLayOutVersion - */ - public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) { - Preconditions.checkArgument((chunkVersion <= ChunkLayOutVersion - .getLatestVersion().getVersion())); - for(ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSION_INFOS) { - if(chunkLayOutVersion.getVersion() == chunkVersion) { - return chunkLayOutVersion; - } - } - return null; - } - - /** - * Returns all versions. - * - * @return Version info array. - */ - public static ChunkLayOutVersion[] getAllVersions() { - return CHUNK_LAYOUT_VERSION_INFOS.clone(); - } - - /** - * Returns the latest version. 
- * - * @return versionInfo - */ - public static ChunkLayOutVersion getLatestVersion() { - return CHUNK_LAYOUT_VERSION_INFOS[CHUNK_LAYOUT_VERSION_INFOS.length - 1]; - } - - /** - * Return version. - * - * @return int - */ - public int getVersion() { - return version; - } - - /** - * Returns description. - * @return String - */ - public String getDescription() { - return description; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java deleted file mode 100644 index 85738e24097..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ /dev/null @@ -1,560 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; - -import java.util.Collections; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicLong; -import org.yaml.snakeyaml.Yaml; - -import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION; -import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA; -import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_NODE_ID; -import static org.apache.hadoop.ozone.OzoneConsts.ORIGIN_PIPELINE_ID; -import static org.apache.hadoop.ozone.OzoneConsts.STATE; - -/** - * ContainerData is the in-memory representation of container metadata and is - * represented on disk by the .container file. - */ -public abstract class ContainerData { - - //Type of the container. - // For now, we support only KeyValueContainer. - private final ContainerType containerType; - - // Unique identifier for the container - private final long containerID; - - // Layout version of the container data - private final int layOutVersion; - - // Metadata of the container will be a key value pair. 
- // This can hold information like volume name, owner etc., - private final Map metadata; - - // State of the Container - private ContainerDataProto.State state; - - private final long maxSize; - - private boolean committedSpace; - - //ID of the pipeline where this container is created - private String originPipelineId; - //ID of the datanode where this container is created - private String originNodeId; - - /** parameters for read/write statistics on the container. **/ - private final AtomicLong readBytes; - private final AtomicLong writeBytes; - private final AtomicLong readCount; - private final AtomicLong writeCount; - private final AtomicLong bytesUsed; - private final AtomicLong keyCount; - - private HddsVolume volume; - - private String checksum; - public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8"); - private static final String DUMMY_CHECKSUM = new String(new byte[64], - CHARSET_ENCODING); - - // Common Fields need to be stored in .container file. - protected static final List YAML_FIELDS = - Collections.unmodifiableList(Lists.newArrayList( - CONTAINER_TYPE, - CONTAINER_ID, - LAYOUTVERSION, - STATE, - METADATA, - MAX_SIZE, - CHECKSUM, - ORIGIN_PIPELINE_ID, - ORIGIN_NODE_ID)); - - /** - * Creates a ContainerData Object, which holds metadata of the container. - * @param type - ContainerType - * @param containerId - ContainerId - * @param size - container maximum size in bytes - * @param originPipelineId - Pipeline Id where this container is/was created - * @param originNodeId - Node Id where this container is/was created - */ - protected ContainerData(ContainerType type, long containerId, long size, - String originPipelineId, String originNodeId) { - this(type, containerId, ChunkLayOutVersion.getLatestVersion().getVersion(), - size, originPipelineId, originNodeId); - } - - /** - * Creates a ContainerData Object, which holds metadata of the container. - * @param type - ContainerType - * @param containerId - ContainerId - * @param layOutVersion - Container layOutVersion - * @param size - Container maximum size in bytes - * @param originPipelineId - Pipeline Id where this container is/was created - * @param originNodeId - Node Id where this container is/was created - */ - protected ContainerData(ContainerType type, long containerId, - int layOutVersion, long size, String originPipelineId, - String originNodeId) { - Preconditions.checkNotNull(type); - - this.containerType = type; - this.containerID = containerId; - this.layOutVersion = layOutVersion; - this.metadata = new TreeMap<>(); - this.state = ContainerDataProto.State.OPEN; - this.readCount = new AtomicLong(0L); - this.readBytes = new AtomicLong(0L); - this.writeCount = new AtomicLong(0L); - this.writeBytes = new AtomicLong(0L); - this.bytesUsed = new AtomicLong(0L); - this.keyCount = new AtomicLong(0L); - this.maxSize = size; - this.originPipelineId = originPipelineId; - this.originNodeId = originNodeId; - setChecksumTo0ByteArray(); - } - - /** - * Returns the containerID. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the path to base dir of the container. - * @return Path to base dir. - */ - public abstract String getContainerPath(); - - /** - * Returns the type of the container. - * @return ContainerType - */ - public ContainerType getContainerType() { - return containerType; - } - - - /** - * Returns the state of the container. 
- * @return ContainerLifeCycleState - */ - public synchronized ContainerDataProto.State getState() { - return state; - } - - /** - * Set the state of the container. - * @param state - */ - public synchronized void setState(ContainerDataProto.State state) { - ContainerDataProto.State oldState = this.state; - this.state = state; - - if ((oldState == ContainerDataProto.State.OPEN) && - (state != oldState)) { - releaseCommitSpace(); - } - - /** - * commit space when container transitions (back) to Open. - * when? perhaps closing a container threw an exception - */ - if ((state == ContainerDataProto.State.OPEN) && - (state != oldState)) { - Preconditions.checkState(getMaxSize() > 0); - commitSpace(); - } - } - - /** - * Return's maximum size of the container in bytes. - * @return maxSize in bytes - */ - public long getMaxSize() { - return maxSize; - } - - /** - * Returns the layOutVersion of the actual container data format. - * @return layOutVersion - */ - public int getLayOutVersion() { - return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion).getVersion(); - } - - /** - * Add/Update metadata. - * We should hold the container lock before updating the metadata as this - * will be persisted on disk. Unless, we are reconstructing ContainerData - * from protoBuf or from on disk .container file in which case lock is not - * required. - */ - public void addMetadata(String key, String value) { - metadata.put(key, value); - } - - /** - * Retuns metadata of the container. - * @return metadata - */ - public Map getMetadata() { - return Collections.unmodifiableMap(this.metadata); - } - - /** - * Set metadata. - * We should hold the container lock before updating the metadata as this - * will be persisted on disk. Unless, we are reconstructing ContainerData - * from protoBuf or from on disk .container file in which case lock is not - * required. - */ - public void setMetadata(Map metadataMap) { - metadata.clear(); - metadata.putAll(metadataMap); - } - - /** - * checks if the container is open. - * @return - boolean - */ - public synchronized boolean isOpen() { - return ContainerDataProto.State.OPEN == state; - } - - /** - * checks if the container is invalid. - * @return - boolean - */ - public synchronized boolean isValid() { - return !(ContainerDataProto.State.INVALID == state); - } - - /** - * checks if the container is closed. - * @return - boolean - */ - public synchronized boolean isClosed() { - return ContainerDataProto.State.CLOSED == state; - } - - /** - * checks if the container is quasi closed. - * @return - boolean - */ - public synchronized boolean isQuasiClosed() { - return ContainerDataProto.State.QUASI_CLOSED == state; - } - - /** - * checks if the container is unhealthy. - * @return - boolean - */ - public synchronized boolean isUnhealthy() { - return ContainerDataProto.State.UNHEALTHY == state; - } - - /** - * Marks this container as quasi closed. - */ - public synchronized void quasiCloseContainer() { - setState(ContainerDataProto.State.QUASI_CLOSED); - } - - /** - * Marks this container as closed. - */ - public synchronized void closeContainer() { - setState(ContainerDataProto.State.CLOSED); - } - - private void releaseCommitSpace() { - long unused = getMaxSize() - getBytesUsed(); - - // only if container size < max size - if (unused > 0 && committedSpace) { - getVolume().incCommittedBytes(0 - unused); - } - committedSpace = false; - } - - /** - * add available space in the container to the committed space in the volume. 
- * available space is the number of bytes remaining till max capacity. - */ - public void commitSpace() { - long unused = getMaxSize() - getBytesUsed(); - ContainerDataProto.State myState = getState(); - HddsVolume cVol; - - //we don't expect duplicate calls - Preconditions.checkState(!committedSpace); - - // Only Open Containers have Committed Space - if (myState != ContainerDataProto.State.OPEN) { - return; - } - - // junit tests do not always set up volume - cVol = getVolume(); - if (unused > 0 && (cVol != null)) { - cVol.incCommittedBytes(unused); - committedSpace = true; - } - } - - /** - * Get the number of bytes read from the container. - * @return the number of bytes read from the container. - */ - public long getReadBytes() { - return readBytes.get(); - } - - /** - * Increase the number of bytes read from the container. - * @param bytes number of bytes read. - */ - public void incrReadBytes(long bytes) { - this.readBytes.addAndGet(bytes); - } - - /** - * Get the number of times the container is read. - * @return the number of times the container is read. - */ - public long getReadCount() { - return readCount.get(); - } - - /** - * Increase the number of container read count by 1. - */ - public void incrReadCount() { - this.readCount.incrementAndGet(); - } - - /** - * Get the number of bytes write into the container. - * @return the number of bytes write into the container. - */ - public long getWriteBytes() { - return writeBytes.get(); - } - - /** - * Increase the number of bytes write into the container. - * Also decrement committed bytes against the bytes written. - * @param bytes the number of bytes write into the container. - */ - public void incrWriteBytes(long bytes) { - long unused = getMaxSize() - getBytesUsed(); - - this.writeBytes.addAndGet(bytes); - - // only if container size < max size - if (committedSpace && unused > 0) { - //with this write, container size might breach max size - long decrement = Math.min(bytes, unused); - this.getVolume().incCommittedBytes(0 - decrement); - } - } - - /** - * Get the number of writes into the container. - * @return the number of writes into the container. - */ - public long getWriteCount() { - return writeCount.get(); - } - - /** - * Increase the number of writes into the container by 1. - */ - public void incrWriteCount() { - this.writeCount.incrementAndGet(); - } - - /** - * Sets the number of bytes used by the container. - * @param used - */ - public void setBytesUsed(long used) { - this.bytesUsed.set(used); - } - - /** - * Get the number of bytes used by the container. - * @return the number of bytes used by the container. - */ - public long getBytesUsed() { - return bytesUsed.get(); - } - - /** - * Increase the number of bytes used by the container. - * @param used number of bytes used by the container. - * @return the current number of bytes used by the container afert increase. - */ - public long incrBytesUsed(long used) { - return this.bytesUsed.addAndGet(used); - } - - /** - * Decrease the number of bytes used by the container. - * @param reclaimed the number of bytes reclaimed from the container. - * @return the current number of bytes used by the container after decrease. - */ - public long decrBytesUsed(long reclaimed) { - return this.bytesUsed.addAndGet(-1L * reclaimed); - } - - /** - * Set the Volume for the Container. - * This should be called only from the createContainer. 
- * @param hddsVolume - */ - public void setVolume(HddsVolume hddsVolume) { - this.volume = hddsVolume; - } - - /** - * Returns the volume of the Container. - * @return HddsVolume - */ - public HddsVolume getVolume() { - return volume; - } - - /** - * Increments the number of keys in the container. - */ - public void incrKeyCount() { - this.keyCount.incrementAndGet(); - } - - /** - * Decrements number of keys in the container. - */ - public void decrKeyCount() { - this.keyCount.decrementAndGet(); - } - - /** - * Returns number of keys in the container. - * @return key count - */ - public long getKeyCount() { - return this.keyCount.get(); - } - - /** - * Set's number of keys in the container. - * @param count - */ - public void setKeyCount(long count) { - this.keyCount.set(count); - } - - public void setChecksumTo0ByteArray() { - this.checksum = DUMMY_CHECKSUM; - } - - public void setChecksum(String checkSum) { - this.checksum = checkSum; - } - - public String getChecksum() { - return this.checksum; - } - - - /** - * Returns the origin pipeline Id of this container. - * @return origin node Id - */ - public String getOriginPipelineId() { - return originPipelineId; - } - - /** - * Returns the origin node Id of this container. - * @return origin node Id - */ - public String getOriginNodeId() { - return originNodeId; - } - - /** - * Compute the checksum for ContainerData using the specified Yaml (based - * on ContainerType) and set the checksum. - * - * Checksum of ContainerData is calculated by setting the - * {@link ContainerData#checksum} field to a 64-byte array with all 0's - - * {@link ContainerData#DUMMY_CHECKSUM}. After the checksum is calculated, - * the checksum field is updated with this value. - * - * @param yaml Yaml for ContainerType to get the ContainerData as Yaml String - * @throws IOException - */ - public void computeAndSetChecksum(Yaml yaml) throws IOException { - // Set checksum to dummy value - 0 byte array, to calculate the checksum - // of rest of the data. - setChecksumTo0ByteArray(); - - // Dump yaml data into a string to compute its checksum - String containerDataYamlStr = yaml.dump(this); - - this.checksum = ContainerUtils.getChecksum(containerDataYamlStr); - } - - /** - * Returns a ProtoBuf Message from ContainerData. - * - * @return Protocol Buffer Message - */ - public abstract ContainerProtos.ContainerDataProto getProtoBufMessage(); - - /** - * Returns the blockCommitSequenceId. - */ - public abstract long getBlockCommitSequenceId(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java deleted file mode 100644 index 1f9966c1a76..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
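The checksum handling removed above spans three of these classes: ContainerData carries the checksum field, ContainerDataYaml supplies the per-type Yaml, and ContainerUtils recomputes and compares the value on load. A minimal sketch of that flow, assuming a hypothetical KeyValueContainerData with id 1, layout version 1, and a 5 GB max size (the constructor arguments mirror the ones used by the yaml constructor in ContainerDataYaml):

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.yaml.snakeyaml.Yaml;

public final class ContainerChecksumSketch {
  public static void main(String[] args) throws IOException {
    KeyValueContainerData data = new KeyValueContainerData(1L, 1,
        5L * 1024 * 1024 * 1024,
        UUID.randomUUID().toString(),     // origin pipeline id (hypothetical)
        UUID.randomUUID().toString());    // origin node id (hypothetical)

    // Dump the data as yaml with the dummy all-zero checksum, hash the dump,
    // and store the result back in the checksum field.
    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
        ContainerType.KeyValueContainer);
    data.computeAndSetChecksum(yaml);

    // On reload the stored value is compared against a fresh computation;
    // a mismatch surfaces as CONTAINER_CHECKSUM_ERROR.
    ContainerUtils.verifyChecksum(data);
    System.out.println("container checksum: " + data.getChecksum());
  }
}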
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import java.beans.IntrospectionException; -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.container.keyvalue - .KeyValueContainerData.KEYVALUE_YAML_TAG; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.constructor.AbstractConstruct; -import org.yaml.snakeyaml.constructor.Constructor; -import org.yaml.snakeyaml.introspector.BeanAccess; -import org.yaml.snakeyaml.introspector.Property; -import org.yaml.snakeyaml.introspector.PropertyUtils; -import org.yaml.snakeyaml.nodes.MappingNode; -import org.yaml.snakeyaml.nodes.Node; -import org.yaml.snakeyaml.nodes.ScalarNode; -import org.yaml.snakeyaml.nodes.Tag; -import org.yaml.snakeyaml.representer.Representer; - -/** - * Class for creating and reading .container files. - */ - -public final class ContainerDataYaml { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerDataYaml.class); - - private ContainerDataYaml() { - - } - - /** - * Creates a .container file in yaml format. - * - * @param containerFile - * @param containerData - * @throws IOException - */ - public static void createContainerFile(ContainerType containerType, - ContainerData containerData, File containerFile) throws IOException { - Writer writer = null; - FileOutputStream out = null; - try { - // Create Yaml for given container type - Yaml yaml = getYamlForContainerType(containerType); - // Compute Checksum and update ContainerData - containerData.computeAndSetChecksum(yaml); - - // Write the ContainerData with checksum to Yaml file. - out = new FileOutputStream( - containerFile); - writer = new OutputStreamWriter(out, "UTF-8"); - yaml.dump(containerData, writer); - - } finally { - try { - if (writer != null) { - writer.flush(); - // make sure the container metadata is synced to disk. - out.getFD().sync(); - writer.close(); - } - } catch (IOException ex) { - LOG.warn("Error occurred during closing the writer. ContainerID: " + - containerData.getContainerID()); - } - } - } - - /** - * Read the yaml file, and return containerData. 
- * - * @throws IOException - */ - public static ContainerData readContainerFile(File containerFile) - throws IOException { - Preconditions.checkNotNull(containerFile, "containerFile cannot be null"); - try (FileInputStream inputFileStream = new FileInputStream(containerFile)) { - return readContainer(inputFileStream); - } - - } - - /** - * Read the yaml file content, and return containerData. - * - * @throws IOException - */ - public static ContainerData readContainer(byte[] containerFileContent) - throws IOException { - return readContainer( - new ByteArrayInputStream(containerFileContent)); - } - - /** - * Read the yaml content, and return containerData. - * - * @throws IOException - */ - public static ContainerData readContainer(InputStream input) - throws IOException { - - ContainerData containerData; - PropertyUtils propertyUtils = new PropertyUtils(); - propertyUtils.setBeanAccess(BeanAccess.FIELD); - propertyUtils.setAllowReadOnlyProperties(true); - - Representer representer = new ContainerDataRepresenter(); - representer.setPropertyUtils(propertyUtils); - - Constructor containerDataConstructor = new ContainerDataConstructor(); - - Yaml yaml = new Yaml(containerDataConstructor, representer); - yaml.setBeanAccess(BeanAccess.FIELD); - - containerData = (ContainerData) - yaml.load(input); - - return containerData; - } - - /** - * Given a ContainerType this method returns a Yaml representation of - * the container properties. - * - * @param containerType type of container - * @return Yamal representation of container properties - * - * @throws StorageContainerException if the type is unrecognized - */ - public static Yaml getYamlForContainerType(ContainerType containerType) - throws StorageContainerException { - PropertyUtils propertyUtils = new PropertyUtils(); - propertyUtils.setBeanAccess(BeanAccess.FIELD); - propertyUtils.setAllowReadOnlyProperties(true); - - switch (containerType) { - case KeyValueContainer: - Representer representer = new ContainerDataRepresenter(); - representer.setPropertyUtils(propertyUtils); - representer.addClassTag( - KeyValueContainerData.class, - KeyValueContainerData.KEYVALUE_YAML_TAG); - - Constructor keyValueDataConstructor = new ContainerDataConstructor(); - - return new Yaml(keyValueDataConstructor, representer); - default: - throw new StorageContainerException("Unrecognized container Type " + - "format " + containerType, ContainerProtos.Result - .UNKNOWN_CONTAINER_TYPE); - } - } - - /** - * Representer class to define which fields need to be stored in yaml file. - */ - private static class ContainerDataRepresenter extends Representer { - @Override - protected Set getProperties(Class type) - throws IntrospectionException { - Set set = super.getProperties(type); - Set filtered = new TreeSet(); - - // When a new Container type is added, we need to add what fields need - // to be filtered here - if (type.equals(KeyValueContainerData.class)) { - List yamlFields = KeyValueContainerData.getYamlFields(); - // filter properties - for (Property prop : set) { - String name = prop.getName(); - if (yamlFields.contains(name)) { - filtered.add(prop); - } - } - } - return filtered; - } - } - - /** - * Constructor class for KeyValueData, which will be used by Yaml. - */ - private static class ContainerDataConstructor extends Constructor { - ContainerDataConstructor() { - //Adding our own specific constructors for tags. 
- // When a new Container type is added, we need to add yamlConstructor - // for that - this.yamlConstructors.put( - KEYVALUE_YAML_TAG, new ConstructKeyValueContainerData()); - this.yamlConstructors.put(Tag.INT, new ConstructLong()); - } - - private class ConstructKeyValueContainerData extends AbstractConstruct { - public Object construct(Node node) { - MappingNode mnode = (MappingNode) node; - Map nodes = constructMapping(mnode); - - //Needed this, as TAG.INT type is by default converted to Long. - long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION); - int lv = (int) layOutVersion; - - long size = (long) nodes.get(OzoneConsts.MAX_SIZE); - - String originPipelineId = (String) nodes.get( - OzoneConsts.ORIGIN_PIPELINE_ID); - String originNodeId = (String) nodes.get(OzoneConsts.ORIGIN_NODE_ID); - - //When a new field is added, it needs to be added here. - KeyValueContainerData kvData = new KeyValueContainerData( - (long) nodes.get(OzoneConsts.CONTAINER_ID), lv, size, - originPipelineId, originNodeId); - - kvData.setContainerDBType((String)nodes.get( - OzoneConsts.CONTAINER_DB_TYPE)); - kvData.setMetadataPath((String) nodes.get( - OzoneConsts.METADATA_PATH)); - kvData.setChunksPath((String) nodes.get(OzoneConsts.CHUNKS_PATH)); - Map meta = (Map) nodes.get(OzoneConsts.METADATA); - kvData.setMetadata(meta); - kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM)); - String state = (String) nodes.get(OzoneConsts.STATE); - kvData - .setState(ContainerProtos.ContainerDataProto.State.valueOf(state)); - return kvData; - } - } - - //Below code is taken from snake yaml, as snakeyaml tries to fit the - // number if it fits in integer, otherwise returns long. So, slightly - // modified the code to return long in all cases. - private class ConstructLong extends AbstractConstruct { - public Object construct(Node node) { - String value = constructScalar((ScalarNode) node).toString() - .replaceAll("_", ""); - int sign = +1; - char first = value.charAt(0); - if (first == '-') { - sign = -1; - value = value.substring(1); - } else if (first == '+') { - value = value.substring(1); - } - int base = 10; - if ("0".equals(value)) { - return Long.valueOf(0); - } else if (value.startsWith("0b")) { - value = value.substring(2); - base = 2; - } else if (value.startsWith("0x")) { - value = value.substring(2); - base = 16; - } else if (value.startsWith("0")) { - value = value.substring(1); - base = 8; - } else if (value.indexOf(':') != -1) { - String[] digits = value.split(":"); - int bes = 1; - int val = 0; - for (int i = 0, j = digits.length; i < j; i++) { - val += (Long.parseLong(digits[(j - i) - 1]) * bes); - bes *= 60; - } - return createNumber(sign, String.valueOf(val), 10); - } else { - return createNumber(sign, value, 10); - } - return createNumber(sign, value, base); - } - } - - private Number createNumber(int sign, String number, int radix) { - Number result; - if (sign < 0) { - number = "-" + number; - } - result = Long.valueOf(number, radix); - return result; - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java deleted file mode 100644 index 41415ebe0ac..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Set; -import java.util.List; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ConcurrentNavigableMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ConcurrentSkipListSet; - - -/** - * Class that manages Containers created on the datanode. - */ -public class ContainerSet { - - private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class); - - private final ConcurrentSkipListMap> containerMap = new - ConcurrentSkipListMap<>(); - private final ConcurrentSkipListSet missingContainerSet = - new ConcurrentSkipListSet<>(); - /** - * Add Container to container map. - * @param container container to be added - * @return If container is added to containerMap returns true, otherwise - * false - */ - public boolean addContainer(Container container) throws - StorageContainerException { - Preconditions.checkNotNull(container, "container cannot be null"); - - long containerId = container.getContainerData().getContainerID(); - if (containerMap.putIfAbsent(containerId, container) == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Container with container Id {} is added to containerMap", - containerId); - } - // wish we could have done this from ContainerData.setState - container.getContainerData().commitSpace(); - return true; - } else { - LOG.warn("Container already exists with container Id {}", containerId); - throw new StorageContainerException("Container already exists with " + - "container Id " + containerId, - ContainerProtos.Result.CONTAINER_EXISTS); - } - } - - /** - * Returns the Container with specified containerId. - * @param containerId ID of the container to get - * @return Container - */ - public Container getContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - return containerMap.get(containerId); - } - - /** - * Removes the Container matching with specified containerId. 
- * @param containerId ID of the container to remove - * @return If container is removed from containerMap returns true, otherwise - * false - */ - public boolean removeContainer(long containerId) { - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - Container removed = containerMap.remove(containerId); - if (removed == null) { - LOG.debug("Container with containerId {} is not present in " + - "containerMap", containerId); - return false; - } else { - LOG.debug("Container with containerId {} is removed from containerMap", - containerId); - return true; - } - } - - /** - * Return number of containers in container map. - * @return container count - */ - @VisibleForTesting - public int containerCount() { - return containerMap.size(); - } - - /** - * Return an container Iterator over {@link ContainerSet#containerMap}. - * @return {@literal Iterator>} - */ - public Iterator> getContainerIterator() { - return containerMap.values().iterator(); - } - - /** - * Return an iterator of containers associated with the specified volume. - * - * @param volume the HDDS volume which should be used to filter containers - * @return {@literal Iterator>} - */ - public Iterator> getContainerIterator(HddsVolume volume) { - Preconditions.checkNotNull(volume); - Preconditions.checkNotNull(volume.getStorageID()); - String volumeUuid = volume.getStorageID(); - return containerMap.values().stream() - .filter(x -> volumeUuid.equals(x.getContainerData().getVolume() - .getStorageID())) - .iterator(); - } - - /** - * Return an containerMap iterator over {@link ContainerSet#containerMap}. - * @return containerMap Iterator - */ - public Iterator>> getContainerMapIterator() { - return containerMap.entrySet().iterator(); - } - - /** - * Return a copy of the containerMap. - * @return containerMap - */ - @VisibleForTesting - public Map> getContainerMapCopy() { - return ImmutableMap.copyOf(containerMap); - } - - public Map> getContainerMap() { - return Collections.unmodifiableMap(containerMap); - } - - /** - * A simple interface for container Iterations. - *

- * This call makes no guarantees about consistency of the data between - * different list calls. It just returns the best known data at that point of - * time. It is possible that using this iteration you can miss certain - * containers from the listing. - * - * @param startContainerId - Return containers with Id >= startContainerId. - * @param count - how many to return - * @param data - Actual containerData - */ - public void listContainer(long startContainerId, long count, - List data) throws - StorageContainerException { - Preconditions.checkNotNull(data, - "Internal assertion: data cannot be null"); - Preconditions.checkState(startContainerId >= 0, - "Start container Id cannot be negative"); - Preconditions.checkState(count > 0, - "max number of containers returned " + - "must be positive"); - LOG.debug("listContainer returns containerData starting from {} of count " + - "{}", startContainerId, count); - ConcurrentNavigableMap> map; - if (startContainerId == 0) { - map = containerMap.tailMap(containerMap.firstKey(), true); - } else { - map = containerMap.tailMap(startContainerId, true); - } - int currentCount = 0; - for (Container entry : map.values()) { - if (currentCount < count) { - data.add(entry.getContainerData()); - currentCount++; - } else { - return; - } - } - } - - /** - * Get container report. - * - * @return The container report. - */ - public ContainerReportsProto getContainerReport() throws IOException { - LOG.debug("Starting container report iteration."); - - // No need for locking since containerMap is a ConcurrentSkipListMap - // And we can never get the exact state since close might happen - // after we iterate a point. - List> containers = new ArrayList<>(containerMap.values()); - - ContainerReportsProto.Builder crBuilder = - ContainerReportsProto.newBuilder(); - - for (Container container: containers) { - crBuilder.addReports(container.getContainerReport()); - } - - return crBuilder.build(); - } - - public Set getMissingContainerSet() { - return missingContainerSet; - } - - /** - * Builds the missing container set by taking a diff between the total number - * of containers actually found and the number of containers which actually - * got created. It also validates the BCSID stored in the snapshot file - * for each container as against what is reported in containerScan. - * This will only be called during the initialization of Datanode Service - * when it is still not a part of any write Pipeline. - * @param container2BCSIDMap Map of containerId to BCSID persisted in the - * Ratis snapshot - */ - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - container2BCSIDMap.entrySet().parallelStream().forEach((mapEntry) -> { - long id = mapEntry.getKey(); - if (!containerMap.containsKey(id)) { - LOG.warn("Adding container {} to missing container set.", id); - missingContainerSet.add(id); - } else { - Container container = containerMap.get(id); - long containerBCSID = container.getBlockCommitSequenceId(); - long snapshotBCSID = mapEntry.getValue(); - if (containerBCSID < snapshotBCSID) { - LOG.warn( - "Marking container {} unhealthy as reported BCSID {} is smaller" - + " than ratis snapshot recorded value {}", id, - containerBCSID, snapshotBCSID); - // just mark the container unhealthy.
Once the DatanodeStateMachine - // thread starts it will send container report to SCM where these - // unhealthy containers would be detected - try { - container.markContainerUnhealthy(); - } catch (StorageContainerException sce) { - // The container will still be marked unhealthy in memory even if - // exception occurs. It won't accept any new transactions and will - // be handled by SCM. Even if dn restarts, it will still be detected - // as unhealthy as its BCSID won't change. - LOG.error("Unable to persist unhealthy state for container {}", id); - } - } - } - }); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java deleted file mode 100644 index 76f6b3cd2f1..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ /dev/null @@ -1,597 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers - .InvalidContainerStateException; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMarker; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.container.common.helpers - .ContainerCommandRequestPBHelper; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; - -import io.opentracing.Scope; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -/** - * Ozone Container dispatcher takes a call from the netty server and routes it - * to the right handler function. - */ -public class HddsDispatcher implements ContainerDispatcher, Auditor { - - static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class); - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.DNLOGGER); - private final Map handlers; - private final Configuration conf; - private final ContainerSet containerSet; - private final VolumeSet volumeSet; - private final StateContext context; - private final float containerCloseThreshold; - private String scmID; - private ContainerMetrics metrics; - - /** - * Constructs an OzoneContainer that receives calls from - * XceiverServerHandler. 
- */ - public HddsDispatcher(Configuration config, ContainerSet contSet, - VolumeSet volumes, Map handlers, - StateContext context, ContainerMetrics metrics) { - this.conf = config; - this.containerSet = contSet; - this.volumeSet = volumes; - this.context = context; - this.handlers = handlers; - this.metrics = metrics; - this.containerCloseThreshold = conf.getFloat( - HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD, - HddsConfigKeys.HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT); - } - - @Override - public void init() { - } - - @Override - public void shutdown() { - } - - /** - * Returns true for exceptions which can be ignored for marking the container - * unhealthy. - * @param result ContainerCommandResponse error code. - * @return true if exception can be ignored, false otherwise. - */ - private boolean canIgnoreException(Result result) { - switch (result) { - case SUCCESS: - case CONTAINER_UNHEALTHY: - case CLOSED_CONTAINER_IO: - case DELETE_ON_OPEN_CONTAINER: - return true; - default: - return false; - } - } - - @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - containerSet - .buildMissingContainerSetAndValidate(container2BCSIDMap); - } - - @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { - String spanName = "HddsDispatcher." + msg.getCmdType().name(); - try (Scope scope = TracingUtil - .importAndCreateScope(spanName, msg.getTraceID())) { - return dispatchRequest(msg, dispatcherContext); - } - } - - @SuppressWarnings("methodlength") - private ContainerCommandResponseProto dispatchRequest( - ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) { - Preconditions.checkNotNull(msg); - if (LOG.isTraceEnabled()) { - LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(), - msg.getTraceID()); - } - - AuditAction action = ContainerCommandRequestPBHelper.getAuditAction( - msg.getCmdType()); - EventType eventType = getEventType(msg); - Map params = - ContainerCommandRequestPBHelper.getAuditParams(msg); - - Container container; - ContainerType containerType; - ContainerCommandResponseProto responseProto = null; - long startTime = System.nanoTime(); - ContainerProtos.Type cmdType = msg.getCmdType(); - long containerID = msg.getContainerID(); - metrics.incContainerOpsMetrics(cmdType); - container = getContainer(containerID); - boolean isWriteStage = - (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.WRITE_DATA); - boolean isWriteCommitStage = - (cmdType == ContainerProtos.Type.WriteChunk && dispatcherContext != null - && dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMMIT_DATA); - - // if the command gets executed other than Ratis, the default write stage - // is WriteChunkStage.COMBINED - boolean isCombinedStage = - cmdType == ContainerProtos.Type.WriteChunk && (dispatcherContext == null - || dispatcherContext.getStage() - == DispatcherContext.WriteChunkStage.COMBINED); - Map container2BCSIDMap = null; - if (dispatcherContext != null) { - container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap(); - } - if (isWriteCommitStage) { - // check if the container Id exists in the loaded snapshot file. if - // it does not, it infers that this is a restart of dn where - // we are reapplying the transaction which was not captured in the - // snapshot.
- // just add it to the list, and remove it from missing container set - // as it might have been added in the list during "init". - Preconditions.checkNotNull(container2BCSIDMap); - if (container2BCSIDMap.get(containerID) == null) { - container2BCSIDMap - .put(containerID, container.getBlockCommitSequenceId()); - containerSet.getMissingContainerSet().remove(containerID); - } - } - if (getMissingContainerSet().contains(containerID)) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID - + " has been lost and and cannot be recreated on this DataNode", - ContainerProtos.Result.CONTAINER_MISSING); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - - if (cmdType != ContainerProtos.Type.CreateContainer) { - /** - * Create Container should happen only as part of Write_Data phase of - * writeChunk. - */ - if (container == null && ((isWriteStage || isCombinedStage) - || cmdType == ContainerProtos.Type.PutSmallFile)) { - // If container does not exist, create one for WriteChunk and - // PutSmallFile request - responseProto = createContainer(msg); - if (responseProto.getResult() != Result.SUCCESS) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID + " creation failed", - responseProto.getResult()); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - Preconditions.checkArgument(isWriteStage && container2BCSIDMap != null - || dispatcherContext == null); - if (container2BCSIDMap != null) { - // adds this container to list of containers created in the pipeline - // with initial BCSID recorded as 0. - container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0)); - } - container = getContainer(containerID); - } - - // if container not found return error - if (container == null) { - StorageContainerException sce = new StorageContainerException( - "ContainerID " + containerID + " does not exist", - ContainerProtos.Result.CONTAINER_NOT_FOUND); - audit(action, eventType, params, AuditEventStatus.FAILURE, sce); - return ContainerUtils.logAndReturnError(LOG, sce, msg); - } - containerType = getContainerType(container); - } else { - if (!msg.hasCreateContainer()) { - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception("MALFORMED_REQUEST")); - return ContainerUtils.malformedRequest(msg); - } - containerType = msg.getCreateContainer().getContainerType(); - } - // Small performance optimization. We check if the operation is of type - // write before trying to send CloseContainerAction. - if (!HddsUtils.isReadOnly(msg)) { - sendCloseContainerActionIfNeeded(container); - } - Handler handler = getHandler(containerType); - if (handler == null) { - StorageContainerException ex = new StorageContainerException("Invalid " + - "ContainerType " + containerType, - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); - // log failure - audit(action, eventType, params, AuditEventStatus.FAILURE, ex); - return ContainerUtils.logAndReturnError(LOG, ex, msg); - } - responseProto = handler.handle(msg, container, dispatcherContext); - if (responseProto != null) { - metrics.incContainerOpsLatencies(cmdType, System.nanoTime() - startTime); - - // If the request is of Write Type and the container operation - // is unsuccessful, it implies the applyTransaction on the container - // failed. 
All subsequent transactions on the container should fail and - // hence replica will be marked unhealthy here. In this case, a close - // container action will be sent to SCM to close the container. - - // ApplyTransaction called on closed Container will fail with Closed - // container exception. In such cases, ignore the exception here - // If the container is already marked unhealthy, no need to change the - // state here. - - Result result = responseProto.getResult(); - if (cmdType == ContainerProtos.Type.CreateContainer - && result == Result.SUCCESS && dispatcherContext != null) { - Preconditions.checkNotNull(dispatcherContext.getContainer2BCSIDMap()); - container2BCSIDMap.putIfAbsent(containerID, Long.valueOf(0)); - } - if (!HddsUtils.isReadOnly(msg) && !canIgnoreException(result)) { - // If the container is open/closing and the container operation - // has failed, it should be first marked unhealthy and the initiate the - // close container action. This also implies this is the first - // transaction which has failed, so the container is marked unhealthy - // right here. - // Once container is marked unhealthy, all the subsequent write - // transactions will fail with UNHEALTHY_CONTAINER exception. - - // For container to be moved to unhealthy state here, the container can - // only be in open or closing state. - State containerState = container.getContainerData().getState(); - Preconditions.checkState( - containerState == State.OPEN || containerState == State.CLOSING); - // mark and persist the container state to be unhealthy - try { - handler.markContainerUnhealthy(container); - } catch (IOException ioe) { - // just log the error here in case marking the container fails, - // Return the actual failure response to the client - LOG.error("Failed to mark container " + containerID + " UNHEALTHY. ", - ioe); - } - // in any case, the in memory state of the container should be unhealthy - Preconditions.checkArgument( - container.getContainerData().getState() == State.UNHEALTHY); - sendCloseContainerActionIfNeeded(container); - } - - if (result == Result.SUCCESS) { - updateBCSID(container, dispatcherContext, cmdType); - audit(action, eventType, params, AuditEventStatus.SUCCESS, null); - } else { - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception(responseProto.getMessage())); - } - - return responseProto; - } else { - // log failure - audit(action, eventType, params, AuditEventStatus.FAILURE, - new Exception("UNSUPPORTED_REQUEST")); - return ContainerUtils.unsupportedRequest(msg); - } - } - - private void updateBCSID(Container container, - DispatcherContext dispatcherContext, ContainerProtos.Type cmdType) { - if (dispatcherContext != null && (cmdType == ContainerProtos.Type.PutBlock - || cmdType == ContainerProtos.Type.PutSmallFile)) { - Preconditions.checkNotNull(container); - long bcsID = container.getBlockCommitSequenceId(); - long containerId = container.getContainerData().getContainerID(); - Map container2BCSIDMap; - container2BCSIDMap = dispatcherContext.getContainer2BCSIDMap(); - Preconditions.checkNotNull(container2BCSIDMap); - Preconditions.checkArgument(container2BCSIDMap.containsKey(containerId)); - // updates the latest BCSID on every putBlock or putSmallFile - // transaction over Ratis. - container2BCSIDMap.computeIfPresent(containerId, (u, v) -> v = bcsID); - } - } - /** - * Create a container using the input container request. - * @param containerRequest - the container request which requires container - * to be created. 
- * @return ContainerCommandResponseProto container command response. - */ - @VisibleForTesting - ContainerCommandResponseProto createContainer( - ContainerCommandRequestProto containerRequest) { - ContainerProtos.CreateContainerRequestProto.Builder createRequest = - ContainerProtos.CreateContainerRequestProto.newBuilder(); - ContainerType containerType = - ContainerProtos.ContainerType.KeyValueContainer; - createRequest.setContainerType(containerType); - - ContainerCommandRequestProto.Builder requestBuilder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CreateContainer) - .setContainerID(containerRequest.getContainerID()) - .setCreateContainer(createRequest.build()) - .setPipelineID(containerRequest.getPipelineID()) - .setDatanodeUuid(containerRequest.getDatanodeUuid()) - .setTraceID(containerRequest.getTraceID()); - - // TODO: Assuming the container type to be KeyValueContainer for now. - // We need to get container type from the containerRequest. - Handler handler = getHandler(containerType); - return handler.handle(requestBuilder.build(), null, null); - } - - /** - * This will be called as a part of creating the log entry during - * startTransaction in Ratis on the leader node. In such cases, if the - * container is not in open state for writing we should just fail. - * Leader will propagate the exception to client. - * @param msg container command proto - * @throws StorageContainerException In case container state is open for write - * requests and in invalid state for read requests. - */ - @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { - long containerID = msg.getContainerID(); - Container container = getContainer(containerID); - if (container == null) { - return; - } - ContainerType containerType = container.getContainerType(); - ContainerProtos.Type cmdType = msg.getCmdType(); - AuditAction action = - ContainerCommandRequestPBHelper.getAuditAction(cmdType); - EventType eventType = getEventType(msg); - Map params = - ContainerCommandRequestPBHelper.getAuditParams(msg); - Handler handler = getHandler(containerType); - if (handler == null) { - StorageContainerException ex = new StorageContainerException( - "Invalid " + "ContainerType " + containerType, - ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); - audit(action, eventType, params, AuditEventStatus.FAILURE, ex); - throw ex; - } - - State containerState = container.getContainerState(); - if (!HddsUtils.isReadOnly(msg) && containerState != State.OPEN) { - switch (cmdType) { - case CreateContainer: - // Create Container is idempotent. There is nothing to validate. - break; - case CloseContainer: - // If the container is unhealthy, closeContainer will be rejected - // while execution. Nothing to validate here. - break; - default: - // if the container is not open, no updates can happen. 
Just throw - // an exception - ContainerNotOpenException cex = new ContainerNotOpenException( - "Container " + containerID + " in " + containerState + " state"); - audit(action, eventType, params, AuditEventStatus.FAILURE, cex); - throw cex; - } - } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) { - InvalidContainerStateException iex = new InvalidContainerStateException( - "Container " + containerID + " in " + containerState + " state"); - audit(action, eventType, params, AuditEventStatus.FAILURE, iex); - throw iex; - } - } - - /** - * If the container usage reaches the close threshold or the container is - * marked unhealthy we send Close ContainerAction to SCM. - * @param container current state of container - */ - private void sendCloseContainerActionIfNeeded(Container container) { - // We have to find a more efficient way to close a container. - boolean isSpaceFull = isContainerFull(container); - boolean shouldClose = isSpaceFull || isContainerUnhealthy(container); - if (shouldClose) { - ContainerData containerData = container.getContainerData(); - ContainerAction.Reason reason = - isSpaceFull ? ContainerAction.Reason.CONTAINER_FULL : - ContainerAction.Reason.CONTAINER_UNHEALTHY; - ContainerAction action = ContainerAction.newBuilder() - .setContainerID(containerData.getContainerID()) - .setAction(ContainerAction.Action.CLOSE).setReason(reason).build(); - context.addContainerActionIfAbsent(action); - } - } - - private boolean isContainerFull(Container container) { - boolean isOpen = Optional.ofNullable(container) - .map(cont -> cont.getContainerState() == ContainerDataProto.State.OPEN) - .orElse(Boolean.FALSE); - if (isOpen) { - ContainerData containerData = container.getContainerData(); - double containerUsedPercentage = - 1.0f * containerData.getBytesUsed() / containerData.getMaxSize(); - return containerUsedPercentage >= containerCloseThreshold; - } else { - return false; - } - } - - private boolean isContainerUnhealthy(Container container) { - return Optional.ofNullable(container).map( - cont -> (cont.getContainerState() == - ContainerDataProto.State.UNHEALTHY)) - .orElse(Boolean.FALSE); - } - - @Override - public Handler getHandler(ContainerProtos.ContainerType containerType) { - return handlers.get(containerType); - } - - @Override - public void setScmId(String scmId) { - Preconditions.checkNotNull(scmId, "scmId Cannot be null"); - if (this.scmID == null) { - this.scmID = scmId; - for (Map.Entry handlerMap : handlers.entrySet()) { - handlerMap.getValue().setScmID(scmID); - } - } - } - - @VisibleForTesting - public Container getContainer(long containerID) { - return containerSet.getContainer(containerID); - } - - @VisibleForTesting - public Set getMissingContainerSet() { - return containerSet.getMissingContainerSet(); - } - - private ContainerType getContainerType(Container container) { - return container.getContainerType(); - } - - @VisibleForTesting - public void setMetricsForTesting(ContainerMetrics containerMetrics) { - this.metrics = containerMetrics; - } - - private EventType getEventType(ContainerCommandRequestProto msg) { - return HddsUtils.isReadOnly(msg) ? 
EventType.READ : EventType.WRITE; - } - - private void audit(AuditAction action, EventType eventType, - Map params, AuditEventStatus result, Throwable exception){ - AuditMessage amsg; - switch (result) { - case SUCCESS: - if(eventType == EventType.READ && - AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) { - amsg = buildAuditMessageForSuccess(action, params); - AUDIT.logReadSuccess(amsg); - } else if(eventType == EventType.WRITE && - AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) { - amsg = buildAuditMessageForSuccess(action, params); - AUDIT.logWriteSuccess(amsg); - } - break; - - case FAILURE: - if(eventType == EventType.READ && - AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) { - amsg = buildAuditMessageForFailure(action, params, exception); - AUDIT.logReadFailure(amsg); - } else if(eventType == EventType.WRITE && - AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) { - amsg = buildAuditMessageForFailure(action, params, exception); - AUDIT.logWriteFailure(amsg); - } - break; - - default: - if (LOG.isDebugEnabled()) { - LOG.debug("Invalid audit event status - " + result); - } - } - } - - //TODO: use GRPC to fetch user and ip details - @Override - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap) { - return new AuditMessage.Builder() - .setUser(null) - .atIp(null) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - //TODO: use GRPC to fetch user and ip details - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, - Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser(null) - .atIp(null) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - enum EventType { - READ, - WRITE - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java deleted file mode 100644 index b736eb536ed..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.Function; - -/** - * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}). - * The outer container map does not entail locking for a better performance. - * The inner {@link BlockDataMap} is synchronized. - * - * This class will maintain list of open keys per container when closeContainer - * command comes, it should autocommit all open keys of a open container before - * marking the container as closed. - */ -public class OpenContainerBlockMap { - /** - * Map: localId {@literal ->} BlockData. - * - * In order to support {@link #getAll()}, the update operations are - * synchronized. - */ - static class BlockDataMap { - private final ConcurrentMap blocks = - new ConcurrentHashMap<>(); - - BlockData get(long localId) { - return blocks.get(localId); - } - - synchronized int removeAndGetSize(long localId) { - blocks.remove(localId); - return blocks.size(); - } - - synchronized BlockData computeIfAbsent( - long localId, Function f) { - return blocks.computeIfAbsent(localId, f); - } - - synchronized List getAll() { - return new ArrayList<>(blocks.values()); - } - } - - /** - * TODO : We may construct the openBlockMap by reading the Block Layout - * for each block inside a container listing all chunk files and reading the - * sizes. This will help to recreate the openKeys Map once the DataNode - * restarts. - * - * For now, we will track all open blocks of a container in the blockMap. - */ - private final ConcurrentMap containers = - new ConcurrentHashMap<>(); - - /** - * Removes the Container matching with specified containerId. - * @param containerId containerId - */ - public void removeContainer(long containerId) { - Preconditions - .checkState(containerId >= 0, "Container Id cannot be negative."); - containers.remove(containerId); - } - - public void addChunk(BlockID blockID, ChunkInfo info) { - Preconditions.checkNotNull(info); - containers.computeIfAbsent(blockID.getContainerID(), - id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(), - id -> new BlockData(blockID)).addChunk(info); - } - - /** - * Removes the chunk from the chunkInfo list for the given block. - * @param blockID id of the block - * @param chunkInfo chunk info. - */ - public void removeChunk(BlockID blockID, ChunkInfo chunkInfo) { - Preconditions.checkNotNull(chunkInfo); - Preconditions.checkNotNull(blockID); - Optional.ofNullable(containers.get(blockID.getContainerID())) - .map(blocks -> blocks.get(blockID.getLocalID())) - .ifPresent(keyData -> keyData.removeChunk(chunkInfo)); - } - - /** - * Returns the list of open blocks to the openContainerBlockMap. - * @param containerId container id - * @return List of open blocks - */ - public List getOpenBlocks(long containerId) { - return Optional.ofNullable(containers.get(containerId)) - .map(BlockDataMap::getAll) - .orElseGet(Collections::emptyList); - } - - /** - * removes the block from the block map. 
- * @param blockID - block ID - */ - public void removeFromBlockMap(BlockID blockID) { - Preconditions.checkNotNull(blockID); - containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks) - -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks); - } - - /** - * Returns true if the block exists in the map, false otherwise. - * - * @param blockID - Block ID. - * @return True, if it exists, false otherwise - */ - public boolean checkIfBlockExists(BlockID blockID) { - BlockDataMap keyDataMap = containers.get(blockID.getContainerID()); - return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null; - } - - @VisibleForTesting - BlockDataMap getBlockDataMap(long containerId) { - return containers.get(containerId); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java deleted file mode 100644 index 4dde3d6cb71..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * Randomly choosing containers for block deletion. 
- */ -public class RandomContainerDeletionChoosingPolicy - implements ContainerDeletionChoosingPolicy { - private static final Logger LOG = - LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class); - - @Override - public List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException { - Preconditions.checkNotNull(candidateContainers, - "Internal assertion: candidate containers cannot be null"); - - int currentCount = 0; - List result = new LinkedList<>(); - ContainerData[] values = new ContainerData[candidateContainers.size()]; - // to get a shuffle list - for (ContainerData entry : DFSUtil.shuffle( - candidateContainers.values().toArray(values))) { - if (currentCount < count) { - result.add(entry); - currentCount++; - if (LOG.isDebugEnabled()) { - LOG.debug("Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()); - } - } else { - break; - } - } - - return result; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java deleted file mode 100644 index 061d09bd4a5..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.ozone.container.common.interfaces - .StorageLocationReportMXBean; - -import java.io.IOException; - -/** - * Storage location stats of datanodes that provide back store for containers. 
- * - */ -public final class StorageLocationReport implements - StorageLocationReportMXBean { - - private final String id; - private final boolean failed; - private final long capacity; - private final long scmUsed; - private final long remaining; - private final StorageType storageType; - private final String storageLocation; - - private StorageLocationReport(String id, boolean failed, long capacity, - long scmUsed, long remaining, StorageType storageType, - String storageLocation) { - this.id = id; - this.failed = failed; - this.capacity = capacity; - this.scmUsed = scmUsed; - this.remaining = remaining; - this.storageType = storageType; - this.storageLocation = storageLocation; - } - - public String getId() { - return id; - } - - public boolean isFailed() { - return failed; - } - - public long getCapacity() { - return capacity; - } - - public long getScmUsed() { - return scmUsed; - } - - public long getRemaining() { - return remaining; - } - - public String getStorageLocation() { - return storageLocation; - } - - @Override - public String getStorageTypeName() { - return storageType.name(); - } - - public StorageType getStorageType() { - return storageType; - } - - - private StorageTypeProto getStorageTypeProto() throws - IOException { - StorageTypeProto storageTypeProto; - switch (getStorageType()) { - case SSD: - storageTypeProto = StorageTypeProto.SSD; - break; - case DISK: - storageTypeProto = StorageTypeProto.DISK; - break; - case ARCHIVE: - storageTypeProto = StorageTypeProto.ARCHIVE; - break; - case PROVIDED: - storageTypeProto = StorageTypeProto.PROVIDED; - break; - case RAM_DISK: - storageTypeProto = StorageTypeProto.RAM_DISK; - break; - default: - throw new IOException("Illegal Storage Type specified"); - } - return storageTypeProto; - } - - private static StorageType getStorageType(StorageTypeProto proto) throws - IOException { - StorageType storageType; - switch (proto) { - case SSD: - storageType = StorageType.SSD; - break; - case DISK: - storageType = StorageType.DISK; - break; - case ARCHIVE: - storageType = StorageType.ARCHIVE; - break; - case PROVIDED: - storageType = StorageType.PROVIDED; - break; - case RAM_DISK: - storageType = StorageType.RAM_DISK; - break; - default: - throw new IOException("Illegal Storage Type specified"); - } - return storageType; - } - - /** - * Returns the SCMStorageReport protoBuf message for the Storage Location - * report. - * @return SCMStorageReport - * @throws IOException In case, the storage type specified is invalid. - */ - public StorageReportProto getProtoBufMessage() throws IOException{ - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - return srb.setStorageUuid(getId()) - .setCapacity(getCapacity()) - .setScmUsed(getScmUsed()) - .setRemaining(getRemaining()) - .setStorageType(getStorageTypeProto()) - .setStorageLocation(getStorageLocation()) - .setFailed(isFailed()) - .build(); - } - - /** - * Returns the StorageLocationReport from the protoBuf message. 
- * @param report SCMStorageReport - * @return StorageLocationReport - * @throws IOException in case of invalid storage type - */ - - public static StorageLocationReport getFromProtobuf(StorageReportProto report) - throws IOException { - StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); - builder.setId(report.getStorageUuid()) - .setStorageLocation(report.getStorageLocation()); - if (report.hasCapacity()) { - builder.setCapacity(report.getCapacity()); - } - if (report.hasScmUsed()) { - builder.setScmUsed(report.getScmUsed()); - } - if (report.hasStorageType()) { - builder.setStorageType(getStorageType(report.getStorageType())); - } - if (report.hasRemaining()) { - builder.setRemaining(report.getRemaining()); - } - - if (report.hasFailed()) { - builder.setFailed(report.getFailed()); - } - return builder.build(); - } - - /** - * Returns StorageLocation.Builder instance. - * - * @return StorageLocation.Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for building StorageLocationReport. - */ - public static class Builder { - private String id; - private boolean failed; - private long capacity; - private long scmUsed; - private long remaining; - private StorageType storageType; - private String storageLocation; - - /** - * Sets the storageId. - * - * @param idValue storageId - * @return StorageLocationReport.Builder - */ - public Builder setId(String idValue) { - this.id = idValue; - return this; - } - - /** - * Sets whether the volume failed or not. - * - * @param failedValue whether volume failed or not - * @return StorageLocationReport.Builder - */ - public Builder setFailed(boolean failedValue) { - this.failed = failedValue; - return this; - } - - /** - * Sets the capacity of volume. - * - * @param capacityValue capacity - * @return StorageLocationReport.Builder - */ - public Builder setCapacity(long capacityValue) { - this.capacity = capacityValue; - return this; - } - /** - * Sets the scmUsed Value. - * - * @param scmUsedValue storage space used by scm - * @return StorageLocationReport.Builder - */ - public Builder setScmUsed(long scmUsedValue) { - this.scmUsed = scmUsedValue; - return this; - } - - /** - * Sets the remaining free space value. - * - * @param remainingValue remaining free space - * @return StorageLocationReport.Builder - */ - public Builder setRemaining(long remainingValue) { - this.remaining = remainingValue; - return this; - } - - /** - * Sets the storageType. - * - * @param storageTypeValue type of the storage used - * @return StorageLocationReport.Builder - */ - public Builder setStorageType(StorageType storageTypeValue) { - this.storageType = storageTypeValue; - return this; - } - - /** - * Sets the storageLocation. - * - * @param storageLocationValue location of the volume - * @return StorageLocationReport.Builder - */ - public Builder setStorageLocation(String storageLocationValue) { - this.storageLocation = storageLocationValue; - return this; - } - - /** - * Builds and returns StorageLocationReport instance. 
- * - * @return StorageLocationReport - */ - public StorageLocationReport build() { - return new StorageLocationReport(id, failed, capacity, scmUsed, - remaining, storageType, storageLocation); - } - - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java deleted file mode 100644 index 41fc26716c1..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.Comparator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * TopN Ordered choosing policy that chooses containers based on pending - * deletion blocks' number. - */ -public class TopNOrderedContainerDeletionChoosingPolicy - implements ContainerDeletionChoosingPolicy { - private static final Logger LOG = - LoggerFactory.getLogger(TopNOrderedContainerDeletionChoosingPolicy.class); - - /** customized comparator used to compare and differentiate container data.
**/ - private static final Comparator - KEY_VALUE_CONTAINER_DATA_COMPARATOR = (KeyValueContainerData c1, - KeyValueContainerData c2) -> - Integer.compare(c2.getNumPendingDeletionBlocks(), - c1.getNumPendingDeletionBlocks()); - - @Override - public List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException { - Preconditions.checkNotNull(candidateContainers, - "Internal assertion: candidate containers cannot be null"); - - List result = new LinkedList<>(); - List orderedList = new LinkedList<>(); - for (ContainerData entry : candidateContainers.values()) { - orderedList.add((KeyValueContainerData)entry); - } - Collections.sort(orderedList, KEY_VALUE_CONTAINER_DATA_COMPARATOR); - - // get top N list ordered by pending deletion blocks' number - int currentCount = 0; - for (KeyValueContainerData entry : orderedList) { - if (currentCount < count) { - if (entry.getNumPendingDeletionBlocks() > 0) { - result.add(entry); - currentCount++; - if (LOG.isDebugEnabled()) { - LOG.debug( - "Select container {} for block deletion, " - + "pending deletion blocks num: {}.", - entry.getContainerID(), - entry.getNumPendingDeletionBlocks()); - } - } else { - LOG.debug("Stop looking for next container, there is no" - + " pending deletion block contained in remaining containers."); - break; - } - } else { - break; - } - } - - return result; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java deleted file mode 100644 index 16da5d9deec..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -/** - This package is contains Ozone container implementation. -**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java deleted file mode 100644 index f6931e37a4c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - - -import java.io.IOException; -import java.util.NoSuchElementException; - -/** - * Block Iterator for container. Each container type need to implement this - * interface. - * @param - */ -public interface BlockIterator { - - /** - * This checks if iterator has next element. If it has returns true, - * otherwise false. - * @return boolean - */ - boolean hasNext() throws IOException; - - /** - * Seek to first entry. - */ - void seekToFirst(); - - /** - * Seek to last entry. - */ - void seekToLast(); - - /** - * Get next block in the container. - * @return next block or null if there are no blocks - * @throws IOException - */ - T nextBlock() throws IOException, NoSuchElementException; - - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java deleted file mode 100644 index 7f7deaf9206..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; - -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.hdfs.util.RwLock; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; - -/** - * Interface for Container Operations. - */ -public interface Container extends RwLock { - - /** - * Creates a container. 
- * - * @throws StorageContainerException - */ - void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy, - String scmId) throws StorageContainerException; - - /** - * Deletes the container. - * - * @throws StorageContainerException - */ - void delete() throws StorageContainerException; - - /** - * Update the container. - * - * @param metaData - * @param forceUpdate if true, update container forcibly. - * @throws StorageContainerException - */ - void update(Map metaData, boolean forceUpdate) - throws StorageContainerException; - - /** - * Get metadata about the container. - * - * @return ContainerData - Container Data. - */ - CONTAINERDATA getContainerData(); - - /** - * Get the Container Lifecycle state. - * - * @return ContainerLifeCycleState - Container State. - */ - ContainerProtos.ContainerDataProto.State getContainerState(); - - /** - * Marks the container for closing. Moves the container to CLOSING state. - */ - void markContainerForClose() throws StorageContainerException; - - /** - * Marks the container replica as unhealthy. - */ - void markContainerUnhealthy() throws StorageContainerException; - - /** - * Quasi Closes a open container, if it is already closed or does not exist a - * StorageContainerException is thrown. - * - * @throws StorageContainerException - */ - void quasiClose() throws StorageContainerException; - - /** - * Closes a open/quasi closed container, if it is already closed or does not - * exist a StorageContainerException is thrown. - * - * @throws StorageContainerException - */ - void close() throws StorageContainerException; - - /** - * Return the ContainerType for the container. - */ - ContainerProtos.ContainerType getContainerType(); - - /** - * Returns containerFile. - */ - File getContainerFile(); - - /** - * updates the DeleteTransactionId. - * @param deleteTransactionId - */ - void updateDeleteTransactionId(long deleteTransactionId); - - /** - * Returns blockIterator for the container. - * @return BlockIterator - * @throws IOException - */ - BlockIterator blockIterator() throws IOException; - - /** - * Import the container from an external archive. - */ - void importContainerData(InputStream stream, - ContainerPacker packer) throws IOException; - - /** - * Export all the data of the container to one output archive with the help - * of the packer. - * - */ - void exportContainerData(OutputStream stream, - ContainerPacker packer) throws IOException; - - /** - * Returns containerReport for the container. - */ - ContainerReplicaProto getContainerReport() - throws StorageContainerException; - - /** - * updates the blockCommitSequenceId. - */ - void updateBlockCommitSequenceId(long blockCommitSequenceId); - - /** - * Returns the blockCommitSequenceId. - */ - long getBlockCommitSequenceId(); - - /** - * check and report the structural integrity of the container. - * @return true if the integrity checks pass - * Scan the container metadata to detect corruption. - */ - boolean scanMetaData(); - - /** - * Return if the container data should be checksum verified to detect - * corruption. The result depends upon the current state of the container - * (e.g. if a container is accepting writes, it may not be a good idea to - * perform checksum verification to avoid concurrency issues). - */ - boolean shouldScanData(); - - /** - * Perform checksum verification for the container data. 
- * - * @param throttler A reference of {@link DataTransferThrottler} used to - * perform I/O bandwidth throttling - * @param canceler A reference of {@link Canceler} used to cancel the - * I/O bandwidth throttling (e.g. for shutdown purpose). - * @return true if the checksum verification succeeds - * false otherwise - */ - boolean scanData(DataTransferThrottler throttler, Canceler canceler); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java deleted file mode 100644 index 84c4f903f37..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; - -import java.util.List; -import java.util.Map; - -/** - * This interface is used for choosing desired containers for - * block deletion. - */ -public interface ContainerDeletionChoosingPolicy { - - /** - * Chooses desired containers for block deletion. - * @param count - * how many to return - * @param candidateContainers - * candidate containers collection - * @return container data list - * @throws StorageContainerException - */ - List chooseContainerForBlockDeletion(int count, - Map candidateContainers) - throws StorageContainerException; - - /** - * Determine if the container has suitable type for this policy. - * @param type type of the container - * @return whether the container type suitable for this policy. - */ - default boolean isValidContainerType(ContainerProtos.ContainerType type) { - if (type == ContainerProtos.ContainerType.KeyValueContainer) { - return true; - } - return false; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java deleted file mode 100644 index ee0b6bcb200..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
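To make the ContainerDeletionChoosingPolicy contract just above concrete, the sketch below simply takes the first count candidates in map-iteration order, in contrast to the pending-deletes ordering used by the implementation shown at the start of this hunk. It is illustrative only; the Map<Long, ContainerData> key type and the class name are assumptions.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.ozone.container.common.impl.ContainerData;
    import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;

    /**
     * Illustrative policy: take the first N candidates in iteration order.
     * isValidContainerType() keeps its default KeyValueContainer-only behaviour.
     */
    public class FirstNContainerDeletionChoosingPolicy
        implements ContainerDeletionChoosingPolicy {

      @Override
      public List<ContainerData> chooseContainerForBlockDeletion(int count,
          Map<Long, ContainerData> candidateContainers)
          throws StorageContainerException {
        List<ContainerData> result = new ArrayList<>();
        for (ContainerData candidate : candidateContainers.values()) {
          if (result.size() >= count) {
            break;                        // enough containers selected
          }
          result.add(candidate);
        }
        return result;
      }
    }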
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; - -import java.util.Map; - -/** - * Dispatcher acts as the bridge between the transport layer and - * the actual container layer. This layer is capable of transforming - * protobuf objects into corresponding class and issue the function call - * into the lower layers. - * - * The reply from the request is dispatched to the client. - */ -public interface ContainerDispatcher { - /** - * Dispatches commands to container layer. - * @param msg - Command Request - * @param context - Context info related to ContainerStateMachine - * @return Command Response - */ - ContainerCommandResponseProto dispatch(ContainerCommandRequestProto msg, - DispatcherContext context); - - /** - * Validates whether the container command should be executed on the pipeline - * or not. Will be invoked by the leader node in the Ratis pipeline - * @param msg containerCommand - * @throws StorageContainerException - */ - void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException; - - /** - * Initialize the Dispatcher. - */ - void init(); - - /** - * finds and builds the missing containers in case of a lost disk etc - * in the ContainerSet. It also validates the BCSID of the containers found. - */ - void buildMissingContainerSetAndValidate(Map container2BCSIDMap); - - /** - * Shutdown Dispatcher services. - */ - void shutdown(); - - /** - * Returns the handler for the specified containerType. - * @param containerType - * @return - */ - Handler getHandler(ContainerProtos.ContainerType containerType); - - /** - * If scmId is not set, this will set scmId, otherwise it is a no-op. - * @param scmId - */ - void setScmId(String scmId); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java deleted file mode 100644 index 9c5fcea1639..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
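As a rough usage sketch of the ContainerDispatcher contract above: the transport layer validates a command (on the Ratis leader) and then dispatches it to the container layer. The ExampleTransport class name and the null DispatcherContext for the non-Ratis path are assumptions.

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;

    /** Illustrative transport-side driver for a ContainerDispatcher. */
    public final class ExampleTransport {
      private ExampleTransport() {
      }

      public static ContainerCommandResponseProto submit(
          ContainerDispatcher dispatcher, ContainerCommandRequestProto request)
          throws StorageContainerException {
        // Leader-side sanity check before the command enters the pipeline.
        dispatcher.validateContainerCommand(request);
        // A null context is assumed here for commands outside the ContainerStateMachine.
        return dispatcher.dispatch(request, null);
      }
    }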
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * Returns physical path locations, where the containers will be created. - */ -public interface ContainerLocationManager { - /** - * Returns the path where the container should be placed from a set of - * locations. - * - * @return A path where we should place this container and metadata. - * @throws IOException - */ - Path getContainerPath() throws IOException; - - /** - * Returns the path where the container Data file are stored. - * - * @return a path where we place the LevelDB and data files of a container. - * @throws IOException - */ - Path getDataPath(String containerName) throws IOException; - - /** - * Returns an array of storage location usage report. - * @return storage location usage report. - */ - StorageLocationReport[] getLocationReport() throws IOException; - - /** - * Supports clean shutdown of container. - * - * @throws IOException - */ - void shutdown() throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java deleted file mode 100644 index 97d2dc3f202..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.IOException; - -/** - * Returns physical path locations, where the containers will be created. - */ -public interface ContainerLocationManagerMXBean { - - /** - * Returns an array of storage location usage report. - * - * @return storage location usage report. 
- */ - StorageLocationReportMXBean[] getLocationReport() throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java deleted file mode 100644 index 8308c23866b..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerPacker.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.hadoop.ozone.container.common.impl.ContainerData; - -/** - * Service to pack/unpack ContainerData container data to/from a single byte - * stream. - */ -public interface ContainerPacker { - - /** - * Extract the container data to the path defined by the container. - *

- * This doesn't contain the extraction of the container descriptor file. - * - * @return the byte content of the descriptor (which won't be written to a - * file but returned). - */ - byte[] unpackContainerData(Container container, - InputStream inputStream) - throws IOException; - - /** - * Compress all the container data (chunk data, metadata db AND container - * descriptor) to one single archive. - */ - void pack(Container container, OutputStream destination) - throws IOException; - - /** - * Read the descriptor from the finished archive to get the data before - * importing the container. - */ - byte[] unpackContainerDescriptor(InputStream inputStream) - throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java deleted file mode 100644 index 8c3b981a093..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -/** - * Dispatcher sends ContainerCommandRequests to Handler. Each Container Type - * should have an implementation for Handler. 
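A short sketch of how the ContainerPacker contract above pairs with the Container export/import methods from earlier in this hunk. The archive file name, the no-argument TarContainerPacker constructor and the ExamplePackerUsage class are assumptions for illustration.

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.apache.hadoop.ozone.container.common.interfaces.Container;
    import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;

    public final class ExamplePackerUsage {
      private ExamplePackerUsage() {
      }

      /** Exports one container (chunks, metadata DB and descriptor) into a tar archive. */
      public static Path exportToArchive(Container container, Path targetDir)
          throws IOException {
        TarContainerPacker packer = new TarContainerPacker();   // assumed no-arg constructor
        Path archive = targetDir.resolve("container.tar.gz");   // assumed file name
        try (OutputStream out = Files.newOutputStream(archive)) {
          container.exportContainerData(out, packer);
        }
        return archive;
      }

      /** Reads only the descriptor back, e.g. to inspect an archive before importing it. */
      public static byte[] readDescriptor(Path archive) throws IOException {
        TarContainerPacker packer = new TarContainerPacker();
        try (InputStream in = Files.newInputStream(archive)) {
          return packer.unpackContainerDescriptor(in);
        }
      }
    }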
- */ -@SuppressWarnings("visibilitymodifier") -public abstract class Handler { - - protected final Configuration conf; - protected final ContainerSet containerSet; - protected final VolumeSet volumeSet; - protected String scmID; - protected final ContainerMetrics metrics; - - private final StateContext context; - private final DatanodeDetails datanodeDetails; - - protected Handler(Configuration config, StateContext context, - ContainerSet contSet, VolumeSet volumeSet, - ContainerMetrics containerMetrics) { - this.conf = config; - this.context = context; - this.containerSet = contSet; - this.volumeSet = volumeSet; - this.metrics = containerMetrics; - this.datanodeDetails = context.getParent().getDatanodeDetails(); - } - - public static Handler getHandlerForContainerType( - final ContainerType containerType, final Configuration config, - final StateContext context, final ContainerSet contSet, - final VolumeSet volumeSet, final ContainerMetrics metrics) { - switch (containerType) { - case KeyValueContainer: - return new KeyValueHandler(config, context, contSet, volumeSet, metrics); - default: - throw new IllegalArgumentException("Handler for ContainerType: " + - containerType + "doesn't exist."); - } - } - - /** - * Returns the Id of this datanode. - * @return datanode Id - */ - protected DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - /** - * This should be called whenever there is state change. It will trigger - * an ICR to SCM. - * - * @param container Container for which ICR has to be sent - */ - protected void sendICR(final Container container) - throws StorageContainerException { - IncrementalContainerReportProto icr = IncrementalContainerReportProto - .newBuilder() - .addReport(container.getContainerReport()) - .build(); - context.addReport(icr); - context.getParent().triggerHeartbeat(); - } - - public abstract ContainerCommandResponseProto handle( - ContainerCommandRequestProto msg, Container container, - DispatcherContext dispatcherContext); - - /** - * Imports container from a raw input stream. - */ - public abstract Container importContainer( - long containerID, - long maxSize, - String originPipelineId, - String originNodeId, - InputStream rawContainerStream, - TarContainerPacker packer) - throws IOException; - - /** - * Exports container to the output stream. - */ - public abstract void exportContainer( - Container container, - OutputStream outputStream, - TarContainerPacker packer) - throws IOException; - - /** - * Stop the Handler. - */ - public abstract void stop(); - - /** - * Marks the container for closing. Moves the container to CLOSING state. - * - * @param container container to update - * @throws IOException in case of exception - */ - public abstract void markContainerForClose(Container container) - throws IOException; - - /** - * Marks the container Unhealthy. Moves the container to UHEALTHY state. - * - * @param container container to update - * @throws IOException in case of exception - */ - public abstract void markContainerUnhealthy(Container container) - throws IOException; - - /** - * Moves the Container to QUASI_CLOSED state. - * - * @param container container to be quasi closed - * @throws IOException - */ - public abstract void quasiCloseContainer(Container container) - throws IOException; - - /** - * Moves the Container to CLOSED state. - * - * @param container container to be closed - * @throws IOException - */ - public abstract void closeContainer(Container container) - throws IOException; - - /** - * Deletes the given container. 
- * - * @param container container to be deleted - * @param force if this is set to true, we delete container without checking - * state of the container. - * @throws IOException - */ - public abstract void deleteContainer(Container container, boolean force) - throws IOException; - - public void setScmID(String scmId) { - this.scmID = scmId; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java deleted file mode 100644 index fd063678137..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -/** - * Contract to define properties available on the JMX interface. - */ -public interface StorageLocationReportMXBean { - - String getId(); - - boolean isFailed(); - - long getCapacity(); - - long getScmUsed(); - - long getRemaining(); - - String getStorageLocation(); - - String getStorageTypeName(); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java deleted file mode 100644 index 7de0e2a967d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/VolumeChoosingPolicy.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
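For reference, a dispatcher obtains its per-type handler through the static factory on the Handler class above; the wiring below is a sketch only, and the placeholder SCM id is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.common.interfaces.Handler;
    import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;

    public final class ExampleHandlerWiring {
      private ExampleHandlerWiring() {
      }

      /** Builds the KeyValue handler the same way a dispatcher would. */
      public static Handler keyValueHandler(Configuration conf, StateContext context,
          ContainerSet containerSet, VolumeSet volumeSet, ContainerMetrics metrics) {
        Handler handler = Handler.getHandlerForContainerType(
            ContainerType.KeyValueContainer, conf, context, containerSet,
            volumeSet, metrics);
        handler.setScmID("example-scm-id");   // placeholder; normally set once the SCM is known
        return handler;
      }
    }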
- */ -package org.apache.hadoop.ozone.container.common.interfaces; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; - -import java.io.IOException; -import java.util.List; - -/** - * This interface specifies the policy for choosing volumes to store replicas. - */ -@InterfaceAudience.Private -public interface VolumeChoosingPolicy { - - /** - * Choose a volume to place a container, - * given a list of volumes and the max container size sought for storage. - * - * The implementations of this interface must be thread-safe. - * - * @param volumes - a list of available volumes. - * @param maxContainerSize - the maximum size of the container for which a - * volume is sought. - * @return the chosen volume. - * @throws IOException when disks are unavailable or are full. - */ - HddsVolume chooseVolume(List volumes, long maxContainerSize) - throws IOException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java deleted file mode 100644 index d83bf95c362..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
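To illustrate the VolumeChoosingPolicy contract above, the sketch below returns the first volume with enough free space. It is stateless, so the thread-safety requirement is met trivially; HddsVolume#getAvailable() is an assumption about how free space is queried.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    /** Illustrative policy: the first volume with enough free space wins. */
    public class FirstFitVolumeChoosingPolicy implements VolumeChoosingPolicy {

      @Override
      public HddsVolume chooseVolume(List<HddsVolume> volumes, long maxContainerSize)
          throws IOException {
        for (HddsVolume volume : volumes) {
          // getAvailable() is assumed to report the free bytes on this volume.
          if (volume.getAvailable() >= maxContainerSize) {
            return volume;
          }
        }
        throw new IOException(
            "No volume has " + maxContainerSize + " bytes available");
      }
    }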

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.interfaces; -/** - This package contains common ozone container interfaces. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index 1638a36a13d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common; -/** - Common Container Layer. At this layer the abstractions are: - - 1. Containers - Both data and metadata containers. - 2. Keys - Key/Value pairs that live inside a container. - 3. Chunks - Keys can be composed of many chunks. - - Ozone uses these abstractions to build Volumes, Buckets and Keys. - - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java deleted file mode 100644 index f52387be19c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/CommandStatusReportPublisher.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone.container.common.report; - -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT; - -/** - * Publishes CommandStatusReport which will be sent to SCM as part of - * heartbeat. CommandStatusReport consist of the following information: - * - type : type of command. - * - status : status of command execution (PENDING, EXECUTED, FAILURE). - * - cmdId : Command id. - * - msg : optional message. - */ -public class CommandStatusReportPublisher extends - ReportPublisher { - - private long cmdStatusReportInterval = -1; - - @Override - protected long getReportFrequency() { - if (cmdStatusReportInterval == -1) { - cmdStatusReportInterval = getConf().getTimeDuration( - HDDS_COMMAND_STATUS_REPORT_INTERVAL, - HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= cmdStatusReportInterval, - HDDS_COMMAND_STATUS_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - return cmdStatusReportInterval; - } - - @Override - protected CommandStatusReportsProto getReport() { - Map map = this.getContext() - .getCommandStatusMap(); - Iterator iterator = map.keySet().iterator(); - CommandStatusReportsProto.Builder builder = CommandStatusReportsProto - .newBuilder(); - - iterator.forEachRemaining(key -> { - CommandStatus cmdStatus = map.get(key); - // If status is still pending then don't remove it from map as - // CommandHandler will change its status when it works on this command. - if (!cmdStatus.getStatus().equals(Status.PENDING)) { - builder.addCmdStatus(cmdStatus.getProtoBufMessage()); - map.remove(key); - } - }); - return builder.getCmdStatusCount() > 0 ? builder.build() : null; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java deleted file mode 100644 index b92e3b0e1f4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ContainerReportPublisher.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT; - - -/** - * Publishes ContainerReport which will be sent to SCM as part of heartbeat. - * ContainerReport consist of the following information about each containers: - * - containerID - * - size - * - used - * - keyCount - * - readCount - * - writeCount - * - readBytes - * - writeBytes - * - finalHash - * - LifeCycleState - * - */ -public class ContainerReportPublisher extends - ReportPublisher { - - private Long containerReportInterval = null; - - @Override - protected long getReportFrequency() { - if (containerReportInterval == null) { - containerReportInterval = getConf().getTimeDuration( - HDDS_CONTAINER_REPORT_INTERVAL, - HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= containerReportInterval, - HDDS_CONTAINER_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - // Add a random delay (0~30s) on top of the container report - // interval (60s) so tha the SCM is overwhelmed by the container reports - // sent in sync. - return containerReportInterval + getRandomReportDelay(); - } - - private long getRandomReportDelay() { - return RandomUtils.nextLong(0, containerReportInterval); - } - - @Override - protected ContainerReportsProto getReport() throws IOException { - return getContext().getParent().getContainer() - .getController().getContainerReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java deleted file mode 100644 index 6ac99dd4d32..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/NodeReportPublisher.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL_DEFAULT; - -/** - * Publishes NodeReport which will be sent to SCM as part of heartbeat. - * NodeReport consist of: - * - NodeIOStats - * - VolumeReports - */ -public class NodeReportPublisher extends ReportPublisher { - - private Long nodeReportInterval; - - @Override - protected long getReportFrequency() { - if (nodeReportInterval == null) { - nodeReportInterval = getConf().getTimeDuration( - HDDS_NODE_REPORT_INTERVAL, - HDDS_NODE_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= nodeReportInterval, - HDDS_NODE_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - return nodeReportInterval; - } - - @Override - protected NodeReportProto getReport() throws IOException { - return getContext().getParent().getContainer().getNodeReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java deleted file mode 100644 index e7f4347e9e4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; - -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT; - - -/** - * Publishes Pipeline which will be sent to SCM as part of heartbeat. - * PipelineReport consist of the following information about each containers: - * - pipelineID - * - */ -public class PipelineReportPublisher extends - ReportPublisher { - - private Long pipelineReportInterval = null; - - @Override - protected long getReportFrequency() { - if (pipelineReportInterval == null) { - pipelineReportInterval = getConf().getTimeDuration( - HDDS_PIPELINE_REPORT_INTERVAL, - HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval( - getConf()); - - Preconditions.checkState( - heartbeatFrequency <= pipelineReportInterval, - HDDS_PIPELINE_REPORT_INTERVAL + - " cannot be configured lower than heartbeat frequency."); - } - // Add a random delay (0~30s) on top of the pipeline report - // interval (60s) so tha the SCM is overwhelmed by the pipeline reports - // sent in sync. - return pipelineReportInterval + getRandomReportDelay(); - } - - private long getRandomReportDelay() { - return RandomUtils.nextLong(0, pipelineReportInterval); - } - - @Override - protected PipelineReportsProto getReport() { - return getContext().getParent().getContainer().getPipelineReport(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java deleted file mode 100644 index 536d4cc06b3..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * ReportManager is responsible for managing all the {@link ReportPublisher} - * and also provides {@link ScheduledExecutorService} to ReportPublisher - * which should be used for scheduling the reports. - */ -public final class ReportManager { - private static final Logger LOG = - LoggerFactory.getLogger(ReportManager.class); - - private final StateContext context; - private final List publishers; - private final ScheduledExecutorService executorService; - - /** - * Construction of {@link ReportManager} should be done via - * {@link ReportManager.Builder}. - * - * @param context StateContext which holds the report - * @param publishers List of publishers which generates report - */ - private ReportManager(StateContext context, - List publishers) { - this.context = context; - this.publishers = publishers; - this.executorService = HadoopExecutors.newScheduledThreadPool( - publishers.size(), - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode ReportManager Thread - %d").build()); - } - - /** - * Initializes ReportManager, also initializes all the configured - * report publishers. - */ - public void init() { - for (ReportPublisher publisher : publishers) { - publisher.init(context, executorService); - } - } - - /** - * Shutdown the ReportManager. - */ - public void shutdown() { - executorService.shutdown(); - try { - executorService.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("Failed to shutdown Report Manager", e); - } - } - - /** - * Returns new {@link ReportManager.Builder} which can be used to construct. - * {@link ReportManager} - * @param conf - Conf - * @return builder - Builder. - */ - public static Builder newBuilder(Configuration conf) { - return new Builder(conf); - } - - /** - * Builder to construct {@link ReportManager}. - */ - public static final class Builder { - - private StateContext stateContext; - private List reportPublishers; - private ReportPublisherFactory publisherFactory; - - - private Builder(Configuration conf) { - this.reportPublishers = new ArrayList<>(); - this.publisherFactory = new ReportPublisherFactory(conf); - } - - /** - * Sets the {@link StateContext}. - * - * @param context StateContext - - * @return ReportManager.Builder - */ - public Builder setStateContext(StateContext context) { - stateContext = context; - return this; - } - - /** - * Adds publisher for the corresponding report. - * - * @param report report for which publisher needs to be added - * - * @return ReportManager.Builder - */ - public Builder addPublisherFor(Class report) { - reportPublishers.add(publisherFactory.getPublisherFor(report)); - return this; - } - - /** - * Adds new ReportPublisher to the ReportManager. 
- * - * @param publisher ReportPublisher - * - * @return ReportManager.Builder - */ - public Builder addPublisher(ReportPublisher publisher) { - reportPublishers.add(publisher); - return this; - } - - /** - * Build and returns ReportManager. - * - * @return {@link ReportManager} - */ - public ReportManager build() { - Preconditions.checkNotNull(stateContext); - return new ReportManager(stateContext, reportPublishers); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java deleted file mode 100644 index e3910dbda1a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * Abstract class responsible for scheduling the reports based on the - * configured interval. All the ReportPublishers should extend this class. - */ -public abstract class ReportPublisher - implements Configurable, Runnable { - - private static final Logger LOG = LoggerFactory.getLogger( - ReportPublisher.class); - - private Configuration config; - private StateContext context; - private ScheduledExecutorService executor; - - /** - * Initializes ReportPublisher with stateContext and executorService. 
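For orientation, the ReportManager.Builder deleted above is driven roughly as in the sketch below when wiring a datanode's heartbeat reports; the ExampleReportWiring class and the variable names are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
    import org.apache.hadoop.ozone.container.common.report.ReportManager;
    import org.apache.hadoop.ozone.container.common.statemachine.StateContext;

    public final class ExampleReportWiring {
      private ExampleReportWiring() {
      }

      /** Builds and starts a ReportManager covering the four standard reports. */
      public static ReportManager start(Configuration conf, StateContext context) {
        ReportManager reportManager = ReportManager.newBuilder(conf)
            .setStateContext(context)
            .addPublisherFor(NodeReportProto.class)
            .addPublisherFor(ContainerReportsProto.class)
            .addPublisherFor(CommandStatusReportsProto.class)
            .addPublisherFor(PipelineReportsProto.class)
            .build();
        reportManager.init();   // schedules every configured publisher
        return reportManager;
      }
    }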
- * - * @param stateContext Datanode state context - * @param executorService ScheduledExecutorService to schedule reports - */ - public void init(StateContext stateContext, - ScheduledExecutorService executorService) { - this.context = stateContext; - this.executor = executorService; - this.executor.schedule(this, - getReportFrequency(), TimeUnit.MILLISECONDS); - } - - @Override - public void setConf(Configuration conf) { - config = conf; - } - - @Override - public Configuration getConf() { - return config; - } - - @Override - public void run() { - publishReport(); - if (!executor.isShutdown() || - !(context.getState() == DatanodeStates.SHUTDOWN)) { - executor.schedule(this, - getReportFrequency(), TimeUnit.MILLISECONDS); - } - } - - /** - * Generates and publishes the report to datanode state context. - */ - private void publishReport() { - try { - context.addReport(getReport()); - } catch (IOException e) { - LOG.error("Exception while publishing report.", e); - } - } - - /** - * Returns the frequency in which this particular report has to be scheduled. - * - * @return report interval in milliseconds - */ - protected abstract long getReportFrequency(); - - /** - * Generate and returns the report which has to be sent as part of heartbeat. - * - * @return datanode report - */ - protected abstract T getReport() throws IOException; - - /** - * Returns {@link StateContext}. - * - * @return stateContext report - */ - protected StateContext getContext() { - return context; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java deleted file mode 100644 index 1c456a0519b..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.util.ReflectionUtils; - -import java.util.HashMap; -import java.util.Map; - -/** - * Factory class to construct {@link ReportPublisher} for a report. 
- */ -public class ReportPublisherFactory { - - private final Configuration conf; - private final Map, - Class> report2publisher; - - /** - * Constructs {@link ReportPublisherFactory} instance. - * - * @param conf Configuration to be passed to the {@link ReportPublisher} - */ - public ReportPublisherFactory(Configuration conf) { - this.conf = conf; - this.report2publisher = new HashMap<>(); - - report2publisher.put(NodeReportProto.class, NodeReportPublisher.class); - report2publisher.put(ContainerReportsProto.class, - ContainerReportPublisher.class); - report2publisher.put(CommandStatusReportsProto.class, - CommandStatusReportPublisher.class); - report2publisher.put(PipelineReportsProto.class, - PipelineReportPublisher.class); - } - - /** - * Returns the ReportPublisher for the corresponding report. - * - * @param report report - * - * @return report publisher - */ - public ReportPublisher getPublisherFor( - Class report) { - Class publisherClass = - report2publisher.get(report); - if (publisherClass == null) { - throw new RuntimeException("No publisher found for report " + report); - } - return ReflectionUtils.newInstance(publisherClass, conf); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java deleted file mode 100644 index 404b37a7b08..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/package-info.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.report; -/** - * Datanode Reports: As part of heartbeat, datanode has to share its current - * state with SCM. The state of datanode is split into multiple reports which - * are sent along with heartbeat in a configured frequency. - * - * This package contains code which is responsible for sending reports from - * datanode to SCM. - * - * ReportPublisherFactory: Given a report this constructs corresponding - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. - * - * ReportManager: Manages and initializes all the available ReportPublishers. - * - * ReportPublisher: Abstract class responsible for scheduling the reports - * based on the configured interval. All the ReportPublishers should extend - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher} - * - * How to add new report: - * - * 1. Create a new ReportPublisher class which extends - * {@link org.apache.hadoop.ozone.container.common.report.ReportPublisher}. - * - * 2. Add a mapping Report to ReportPublisher entry in ReportPublisherFactory. - * - * 3. 
In DatanodeStateMachine add the report to ReportManager instance. - * - * - * - * Datanode Reports State Diagram: - * - * DatanodeStateMachine ReportManager ReportPublisher SCM - * | | | | - * | | | | - * | construct | | | - * |----------------->| | | - * | | | | - * | init | | | - * |----------------->| | | - * | | init | | - * | |------------->| | - * | | | | - * +--------+------------------+--------------+--------------------+------+ - * |loop | | | | | - * | | | publish | | | - * | |<-----------------+--------------| | | - * | | | report | | | - * | | | | | | - * | | | | | | - * | | heartbeat(rpc) | | | | - * | |------------------+--------------+------------------->| | - * | | | | | | - * | | | | | | - * +--------+------------------+--------------+--------------------+------+ - * | | | | - * | | | | - * | | | | - * | shutdown | | | - * |----------------->| | | - * | | | | - * | | | | - * - - - - - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java deleted file mode 100644 index c9eb7024eaf..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ /dev/null @@ -1,489 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ozone.HddsDatanodeStopService; -import org.apache.hadoop.ozone.container.common.report.ReportManager; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CloseContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CommandDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .DeleteBlocksCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .DeleteContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .ReplicateContainerCommandHandler; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.replication.ContainerReplicator; -import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator; -import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; -import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * State Machine Class. - */ -public class DatanodeStateMachine implements Closeable { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(DatanodeStateMachine.class); - private final ExecutorService executorService; - private final Configuration conf; - private final SCMConnectionManager connectionManager; - private StateContext context; - private final OzoneContainer container; - private DatanodeDetails datanodeDetails; - private final CommandDispatcher commandDispatcher; - private final ReportManager reportManager; - private long commandsHandled; - private AtomicLong nextHB; - private Thread stateMachineThread = null; - private Thread cmdProcessThread = null; - private final ReplicationSupervisor supervisor; - - private JvmPauseMonitor jvmPauseMonitor; - private CertificateClient dnCertClient; - private final HddsDatanodeStopService hddsDatanodeStopService; - - /** - * Constructs a a datanode state machine. - * @param datanodeDetails - DatanodeDetails used to identify a datanode - * @param conf - Configuration. 
- * @param certClient - Datanode Certificate client, required if security is - * enabled - */ - public DatanodeStateMachine(DatanodeDetails datanodeDetails, - Configuration conf, CertificateClient certClient, - HddsDatanodeStopService hddsDatanodeStopService) throws IOException { - this.hddsDatanodeStopService = hddsDatanodeStopService; - this.conf = conf; - this.datanodeDetails = datanodeDetails; - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d").build()); - connectionManager = new SCMConnectionManager(conf); - context = new StateContext(this.conf, DatanodeStates.getInitState(), this); - container = new OzoneContainer(this.datanodeDetails, - new OzoneConfiguration(conf), context, certClient); - dnCertClient = certClient; - nextHB = new AtomicLong(Time.monotonicNow()); - - ContainerReplicator replicator = - new DownloadAndImportReplicator(container.getContainerSet(), - container.getController(), - new SimpleContainerDownloader(conf), new TarContainerPacker()); - - supervisor = - new ReplicationSupervisor(container.getContainerSet(), replicator, 10); - - // When we add new handlers just adding a new handler here should do the - // trick. - commandDispatcher = CommandDispatcher.newBuilder() - .addHandler(new CloseContainerCommandHandler()) - .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(), - conf)) - .addHandler(new ReplicateContainerCommandHandler(conf, supervisor)) - .addHandler(new DeleteContainerCommandHandler()) - .setConnectionManager(connectionManager) - .setContainer(container) - .setContext(context) - .build(); - - reportManager = ReportManager.newBuilder(conf) - .setStateContext(context) - .addPublisherFor(NodeReportProto.class) - .addPublisherFor(ContainerReportsProto.class) - .addPublisherFor(CommandStatusReportsProto.class) - .addPublisherFor(PipelineReportsProto.class) - .build(); - } - - /** - * - * Return DatanodeDetails if set, return null otherwise. - * - * @return DatanodeDetails - */ - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - - /** - * Returns the Connection manager for this state machine. - * - * @return - SCMConnectionManager. - */ - public SCMConnectionManager getConnectionManager() { - return connectionManager; - } - - public OzoneContainer getContainer() { - return this.container; - } - - /** - * Runs the state machine at a fixed frequency. - */ - private void start() throws IOException { - long now = 0; - - reportManager.init(); - initCommandHandlerThread(conf); - - // Start jvm monitor - jvmPauseMonitor = new JvmPauseMonitor(); - jvmPauseMonitor.init(conf); - jvmPauseMonitor.start(); - - while (context.getState() != DatanodeStates.SHUTDOWN) { - try { - LOG.debug("Executing cycle Number : {}", context.getExecutionCount()); - long heartbeatFrequency = context.getHeartbeatFrequency(); - nextHB.set(Time.monotonicNow() + heartbeatFrequency); - context.execute(executorService, heartbeatFrequency, - TimeUnit.MILLISECONDS); - now = Time.monotonicNow(); - if (now < nextHB.get()) { - if(!Thread.interrupted()) { - Thread.sleep(nextHB.get() - now); - } - } - } catch (InterruptedException e) { - // Some one has sent interrupt signal, this could be because - // 1. Trigger heartbeat immediately - // 2. Shutdown has be initiated. 
- } catch (Exception e) { - LOG.error("Unable to finish the execution.", e); - } - } - - // If we have got some exception in stateMachine we set the state to - // shutdown to stop the stateMachine thread. Along with this we should - // also stop the datanode. - if (context.getShutdownOnError()) { - LOG.error("DatanodeStateMachine Shutdown due to an critical error"); - hddsDatanodeStopService.stopService(); - } - } - - /** - * Gets the current context. - * - * @return StateContext - */ - public StateContext getContext() { - return context; - } - - /** - * Sets the current context. - * - * @param context - Context - */ - public void setContext(StateContext context) { - this.context = context; - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *

- *

As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - if (stateMachineThread != null) { - stateMachineThread.interrupt(); - } - if (cmdProcessThread != null) { - cmdProcessThread.interrupt(); - } - context.setState(DatanodeStates.getLastState()); - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown state machine properly."); - } - } catch (InterruptedException e) { - LOG.error("Error attempting to shutdown.", e); - executorService.shutdownNow(); - Thread.currentThread().interrupt(); - } - - if (connectionManager != null) { - connectionManager.close(); - } - - if(container != null) { - container.stop(); - } - - if (jvmPauseMonitor != null) { - jvmPauseMonitor.stop(); - } - } - - /** - * States that a datanode can be in. GetNextState will move this enum from - * getInitState to getLastState. - */ - public enum DatanodeStates { - INIT(1), - RUNNING(2), - SHUTDOWN(3); - private final int value; - - /** - * Constructs states. - * - * @param value Enum Value - */ - DatanodeStates(int value) { - this.value = value; - } - - /** - * Returns the first State. - * - * @return First State. - */ - public static DatanodeStates getInitState() { - return INIT; - } - - /** - * The last state of endpoint states. - * - * @return last state. - */ - public static DatanodeStates getLastState() { - return SHUTDOWN; - } - - /** - * returns the numeric value associated with the endPoint. - * - * @return int. - */ - public int getValue() { - return value; - } - - /** - * Returns the next logical state that endPoint should move to. This - * function assumes the States are sequentially numbered. - * - * @return NextState. - */ - public DatanodeStates getNextState() { - if (this.value < getLastState().getValue()) { - int stateValue = this.getValue() + 1; - for (DatanodeStates iter : values()) { - if (stateValue == iter.getValue()) { - return iter; - } - } - } - return getLastState(); - } - } - - /** - * Start datanode state machine as a single thread daemon. - */ - public void startDaemon() { - Runnable startStateMachineTask = () -> { - try { - start(); - LOG.info("Ozone container server started."); - } catch (Exception ex) { - LOG.error("Unable to start the DatanodeState Machine", ex); - } - }; - stateMachineThread = new ThreadFactoryBuilder() - .setDaemon(true) - .setNameFormat("Datanode State Machine Thread - %d") - .build().newThread(startStateMachineTask); - stateMachineThread.start(); - } - - /** - * Calling this will immediately trigger a heartbeat to the SCMs. - * This heartbeat will also include all the reports which are ready to - * be sent by datanode. - */ - public void triggerHeartbeat() { - stateMachineThread.interrupt(); - } - - /** - * Waits for DatanodeStateMachine to exit. - * - * @throws InterruptedException - */ - public void join() throws InterruptedException { - if (stateMachineThread != null) { - stateMachineThread.join(); - } - - if (cmdProcessThread != null) { - cmdProcessThread.join(); - } - } - - /** - * Stop the daemon thread of the datanode state machine. 
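Because the enum values are numbered sequentially, getNextState() simply advances one step at a time and then sticks at the last value. A short illustrative fragment, not taken from the deleted code:

DatanodeStateMachine.DatanodeStates s =
    DatanodeStateMachine.DatanodeStates.getInitState();   // INIT
s = s.getNextState();   // RUNNING
s = s.getNextState();   // SHUTDOWN, i.e. getLastState()
s = s.getNextState();   // remains SHUTDOWN once the last state is reached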
- */ - public synchronized void stopDaemon() { - try { - supervisor.stop(); - context.setState(DatanodeStates.SHUTDOWN); - reportManager.shutdown(); - this.close(); - LOG.info("Ozone container server stopped."); - } catch (IOException e) { - LOG.error("Stop ozone container server failed.", e); - } - } - - /** - * - * Check if the datanode state machine daemon is stopped. - * - * @return True if datanode state machine daemon is stopped - * and false otherwise. - */ - @VisibleForTesting - public boolean isDaemonStopped() { - return this.executorService.isShutdown() - && this.getContext().getState() == DatanodeStates.SHUTDOWN; - } - - /** - * Create a command handler thread. - * - * @param config - */ - private void initCommandHandlerThread(Configuration config) { - - /** - * Task that periodically checks if we have any outstanding commands. - * It is assumed that commands can be processed slowly and in order. - * This assumption might change in future. Right now due to this assumption - * we have single command queue process thread. - */ - Runnable processCommandQueue = () -> { - long now; - while (getContext().getState() != DatanodeStates.SHUTDOWN) { - SCMCommand command = getContext().getNextCommand(); - if (command != null) { - commandDispatcher.handle(command); - commandsHandled++; - } else { - try { - // Sleep till the next HB + 1 second. - now = Time.monotonicNow(); - if (nextHB.get() > now) { - Thread.sleep((nextHB.get() - now) + 1000L); - } - } catch (InterruptedException e) { - // Ignore this exception. - } - } - } - }; - - // We will have only one thread for command processing in a datanode. - cmdProcessThread = getCommandHandlerThread(processCommandQueue); - cmdProcessThread.start(); - } - - private Thread getCommandHandlerThread(Runnable processCommandQueue) { - Thread handlerThread = new Thread(processCommandQueue); - handlerThread.setDaemon(true); - handlerThread.setName("Command processor thread"); - handlerThread.setUncaughtExceptionHandler((Thread t, Throwable e) -> { - // Let us just restart this thread after logging a critical error. - // if this thread is not running we cannot handle commands from SCM. - LOG.error("Critical Error : Command processor thread encountered an " + - "error. Thread: {}", t.toString(), e); - getCommandHandlerThread(processCommandQueue).start(); - }); - return handlerThread; - } - - /** - * Returns the number of commands handled by the datanode. - * @return count - */ - @VisibleForTesting - public long getCommandHandled() { - return commandsHandled; - } - - /** - * returns the Command Dispatcher. - * @return CommandDispatcher - */ - @VisibleForTesting - public CommandDispatcher getCommandDispatcher() { - return commandDispatcher; - } - - @VisibleForTesting - public ReplicationSupervisor getSupervisor() { - return supervisor; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java deleted file mode 100644 index f0064ec5d74..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ /dev/null @@ -1,296 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.time.ZonedDateTime; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getLogWarnInterval; -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; - -/** - * Endpoint is used as holder class that keeps state around the RPC endpoint. - */ -public class EndpointStateMachine - implements Closeable, EndpointStateMachineMBean { - static final Logger - LOG = LoggerFactory.getLogger(EndpointStateMachine.class); - private final StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint; - private final AtomicLong missedCount; - private final InetSocketAddress address; - private final Lock lock; - private final Configuration conf; - private EndPointStates state; - private VersionResponse version; - private ZonedDateTime lastSuccessfulHeartbeat; - - /** - * Constructs RPC Endpoints. - * - * @param endPoint - RPC endPoint. - */ - public EndpointStateMachine(InetSocketAddress address, - StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint, - Configuration conf) { - this.endPoint = endPoint; - this.missedCount = new AtomicLong(0); - this.address = address; - state = EndPointStates.getInitState(); - lock = new ReentrantLock(); - this.conf = conf; - } - - /** - * Takes a lock on this EndPoint so that other threads don't use this while we - * are trying to communicate via this endpoint. - */ - public void lock() { - lock.lock(); - } - - /** - * Unlocks this endpoint. - */ - public void unlock() { - lock.unlock(); - } - - /** - * Returns the version that we read from the server if anyone asks . - * - * @return - Version Response. - */ - public VersionResponse getVersion() { - return version; - } - - /** - * Sets the Version reponse we recieved from the SCM. - * - * @param version VersionResponse - */ - public void setVersion(VersionResponse version) { - this.version = version; - } - - /** - * Returns the current State this end point is in. - * - * @return - getState. - */ - public EndPointStates getState() { - return state; - } - - @Override - public int getVersionNumber() { - if (version != null) { - return version.getProtobufMessage().getSoftwareVersion(); - } else { - return -1; - } - } - - /** - * Sets the endpoint state. - * - * @param epState - end point state. - */ - public EndPointStates setState(EndPointStates epState) { - this.state = epState; - return this.state; - } - - /** - * Closes the connection. 
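Callers are expected to hold the per-endpoint lock for the whole RPC exchange so that concurrent tasks do not interleave on the same SCM connection. A rough usage sketch follows; the sendHeartbeat call and the heartbeatRequest variable are assumptions for illustration, since the endpoint tasks that drive this class are not part of this hunk.

endpoint.lock();
try {
  // RPC through the wrapped client-side translator (assumed call and argument).
  endpoint.getEndPoint().sendHeartbeat(heartbeatRequest);
  endpoint.zeroMissedCount();
  endpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
} catch (IOException e) {
  endpoint.logIfNeeded(e);   // bumps missedCount and rate-limits the warning
} finally {
  endpoint.unlock();
}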
- * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (endPoint != null) { - endPoint.close(); - } - } - - /** - * We maintain a count of how many times we missed communicating with a - * specific SCM. This is not made atomic since the access to this is always - * guarded by the read or write lock. That is, it is serialized. - */ - public void incMissed() { - this.missedCount.incrementAndGet(); - } - - /** - * Returns the value of the missed count. - * - * @return int - */ - public long getMissedCount() { - return this.missedCount.get(); - } - - @Override - public String getAddressString() { - return getAddress().toString(); - } - - public void zeroMissedCount() { - this.missedCount.set(0); - } - - /** - * Returns the InetAddress of the endPoint. - * - * @return - EndPoint. - */ - public InetSocketAddress getAddress() { - return this.address; - } - - /** - * Returns real RPC endPoint. - * - * @return rpc client. - */ - public StorageContainerDatanodeProtocolClientSideTranslatorPB - getEndPoint() { - return endPoint; - } - - /** - * Returns the string that represents this endpoint. - * - * @return - String - */ - public String toString() { - return address.toString(); - } - - /** - * Logs exception if needed. - * @param ex - Exception - */ - public void logIfNeeded(Exception ex) { - if (this.getMissedCount() % getLogWarnInterval(conf) == 0) { - LOG.error( - "Unable to communicate to SCM server at {} for past {} seconds.", - this.getAddress().getHostString() + ":" + this.getAddress().getPort(), - TimeUnit.MILLISECONDS.toSeconds( - this.getMissedCount() * getScmHeartbeatInterval(this.conf)), ex); - } - if (LOG.isTraceEnabled()) { - LOG.trace("Incrementing the Missed count. Ex : {}", ex); - } - this.incMissed(); - } - - - /** - * States that an Endpoint can be in. - *

- * This is a sorted list of states that EndPoint will traverse. - *

- * GetNextState will move this enum from getInitState to getLastState. - */ - public enum EndPointStates { - GETVERSION(1), - REGISTER(2), - HEARTBEAT(3), - SHUTDOWN(4); // if you add value after this please edit getLastState too. - private final int value; - - /** - * Constructs endPointStates. - * - * @param value state. - */ - EndPointStates(int value) { - this.value = value; - } - - /** - * Returns the first State. - * - * @return First State. - */ - public static EndPointStates getInitState() { - return GETVERSION; - } - - /** - * The last state of endpoint states. - * - * @return last state. - */ - public static EndPointStates getLastState() { - return SHUTDOWN; - } - - /** - * returns the numeric value associated with the endPoint. - * - * @return int. - */ - public int getValue() { - return value; - } - - /** - * Returns the next logical state that endPoint should move to. - * The next state is computed by adding 1 to the current state. - * - * @return NextState. - */ - public EndPointStates getNextState() { - if (this.getValue() < getLastState().getValue()) { - int stateValue = this.getValue() + 1; - for (EndPointStates iter : values()) { - if (stateValue == iter.getValue()) { - return iter; - } - } - } - return getLastState(); - } - } - - public long getLastSuccessfulHeartbeat() { - return lastSuccessfulHeartbeat == null ? - 0 : - lastSuccessfulHeartbeat.toEpochSecond(); - } - - public void setLastSuccessfulHeartbeat( - ZonedDateTime lastSuccessfulHeartbeat) { - this.lastSuccessfulHeartbeat = lastSuccessfulHeartbeat; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java deleted file mode 100644 index 4f64bde0b3e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - - -/** - * JMX representation of an EndpointStateMachine. - */ -public interface EndpointStateMachineMBean { - - long getMissedCount(); - - String getAddressString(); - - EndpointStateMachine.EndPointStates getState(); - - int getVersionNumber(); - - long getLastSuccessfulHeartbeat(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java deleted file mode 100644 index ce31ebdf4d6..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static java.util.Collections.unmodifiableList; -import static org.apache.hadoop.hdds.scm.HddsServerUtil - .getScmRpcTimeOutInMilliseconds; - -/** - * SCMConnectionManager - Acts as a class that manages the membership - * information of the SCMs that we are working with. - */ -public class SCMConnectionManager - implements Closeable, SCMConnectionManagerMXBean { - private static final Logger LOG = - LoggerFactory.getLogger(SCMConnectionManager.class); - - private final ReadWriteLock mapLock; - private final Map scmMachines; - - private final int rpcTimeout; - private final Configuration conf; - private ObjectName jmxBean; - - public SCMConnectionManager(Configuration conf) { - this.mapLock = new ReentrantReadWriteLock(); - Long timeOut = getScmRpcTimeOutInMilliseconds(conf); - this.rpcTimeout = timeOut.intValue(); - this.scmMachines = new HashMap<>(); - this.conf = conf; - jmxBean = MBeans.register("HddsDatanode", - "SCMConnectionManager", - this); - } - - - /** - * Returns Config. - * - * @return ozoneConfig. - */ - public Configuration getConf() { - return conf; - } - - /** - * Get RpcTimeout. - * - * @return - Return RPC timeout. - */ - public int getRpcTimeout() { - return rpcTimeout; - } - - - /** - * Takes a read lock. - */ - public void readLock() { - this.mapLock.readLock().lock(); - } - - /** - * Releases the read lock. - */ - public void readUnlock() { - this.mapLock.readLock().unlock(); - } - - /** - * Takes the write lock. - */ - public void writeLock() { - this.mapLock.writeLock().lock(); - } - - /** - * Releases the write lock. - */ - public void writeUnlock() { - this.mapLock.writeLock().unlock(); - } - - /** - * adds a new SCM machine to the target set. - * - * @param address - Address of the SCM machine to send heatbeat to. - * @throws IOException - */ - public void addSCMServer(InetSocketAddress address) throws IOException { - writeLock(); - try { - if (scmMachines.containsKey(address)) { - LOG.warn("Trying to add an existing SCM Machine to Machines group. 
" + - "Ignoring the request."); - return; - } - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - long version = - RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class); - - RetryPolicy retryPolicy = - RetryPolicies.retryForeverWithFixedSleep( - 1000, TimeUnit.MILLISECONDS); - StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( - StorageContainerDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(), - retryPolicy).getProxy(); - - StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = - new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); - - EndpointStateMachine endPoint = - new EndpointStateMachine(address, rpcClient, conf); - scmMachines.put(address, endPoint); - } finally { - writeUnlock(); - } - } - - /** - * Removes a SCM machine for the target set. - * - * @param address - Address of the SCM machine to send heatbeat to. - * @throws IOException - */ - public void removeSCMServer(InetSocketAddress address) throws IOException { - writeLock(); - try { - if (!scmMachines.containsKey(address)) { - LOG.warn("Trying to remove a non-existent SCM machine. " + - "Ignoring the request."); - return; - } - - EndpointStateMachine endPoint = scmMachines.get(address); - endPoint.close(); - scmMachines.remove(address); - } finally { - writeUnlock(); - } - } - - /** - * Returns all known RPCEndpoints. - * - * @return - List of RPC Endpoints. - */ - public Collection getValues() { - readLock(); - try { - return unmodifiableList(new ArrayList<>(scmMachines.values())); - } finally { - readUnlock(); - } - } - - @Override - public void close() throws IOException { - getValues().forEach(endpointStateMachine - -> IOUtils.cleanupWithLogger(LOG, endpointStateMachine)); - if (jmxBean != null) { - MBeans.unregister(jmxBean); - jmxBean = null; - } - } - - @Override - public List getSCMServers() { - readLock(); - try { - return unmodifiableList(new ArrayList<>(scmMachines.values())); - } finally { - readUnlock(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java deleted file mode 100644 index 25ef16379a6..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import java.util.List; - -/** - * JMX information about the connected SCM servers. - */ -public interface SCMConnectionManagerMXBean { - - List getSCMServers(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java deleted file mode 100644 index 2c01f3a73d0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ /dev/null @@ -1,502 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; - -import com.google.common.base.Preconditions; -import com.google.protobuf.GeneratedMessage; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands - .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import static java.lang.Math.min; -import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import java.util.ArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; - -/** - * Current Context of State Machine. - */ -public class StateContext { - static final Logger LOG = - LoggerFactory.getLogger(StateContext.class); - private final Queue commandQueue; - private final Map cmdStatusMap; - private final Lock lock; - private final DatanodeStateMachine parent; - private final AtomicLong stateExecutionCount; - private final Configuration conf; - private final List reports; - private final Queue containerActions; - private final Queue pipelineActions; - private DatanodeStateMachine.DatanodeStates state; - private boolean shutdownOnError = false; - - /** - * Starting with a 2 sec heartbeat frequency which will be updated to the - * real HB frequency after scm registration. With this method the - * initial registration could be significant faster. - */ - private AtomicLong heartbeatFrequency = new AtomicLong(2000); - - /** - * Constructs a StateContext. 
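In practice this means the first heartbeats go out on the two-second bootstrap interval, and the interval is widened to the configured SCM value once registration completes, via the accessors defined further down in this class. Illustrative fragment only:

long interval = context.getHeartbeatFrequency();   // 2000 ms until registration completes
context.configureHeartbeatFrequency();             // re-read the SCM heartbeat interval from conf
interval = context.getHeartbeatFrequency();        // configured value from here on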
- * - * @param conf - Configration - * @param state - State - * @param parent Parent State Machine - */ - public StateContext(Configuration conf, DatanodeStateMachine.DatanodeStates - state, DatanodeStateMachine parent) { - this.conf = conf; - this.state = state; - this.parent = parent; - commandQueue = new LinkedList<>(); - cmdStatusMap = new ConcurrentHashMap<>(); - reports = new LinkedList<>(); - containerActions = new LinkedList<>(); - pipelineActions = new LinkedList<>(); - lock = new ReentrantLock(); - stateExecutionCount = new AtomicLong(0); - } - - /** - * Returns the ContainerStateMachine class that holds this state. - * - * @return ContainerStateMachine. - */ - public DatanodeStateMachine getParent() { - return parent; - } - - /** - * Returns true if we are entering a new state. - * - * @return boolean - */ - boolean isEntering() { - return stateExecutionCount.get() == 0; - } - - /** - * Returns true if we are exiting from the current state. - * - * @param newState - newState. - * @return boolean - */ - boolean isExiting(DatanodeStateMachine.DatanodeStates newState) { - boolean isExiting = state != newState && stateExecutionCount.get() > 0; - if(isExiting) { - stateExecutionCount.set(0); - } - return isExiting; - } - - /** - * Returns the current state the machine is in. - * - * @return state. - */ - public DatanodeStateMachine.DatanodeStates getState() { - return state; - } - - /** - * Sets the current state of the machine. - * - * @param state state. - */ - public void setState(DatanodeStateMachine.DatanodeStates state) { - this.state = state; - } - - /** - * Sets the shutdownOnError. This method needs to be called when we - * set DatanodeState to SHUTDOWN when executing a task of a DatanodeState. - * @param value - */ - private void setShutdownOnError(boolean value) { - this.shutdownOnError = value; - } - - /** - * Get shutdownStateMachine. - * @return boolean - */ - public boolean getShutdownOnError() { - return shutdownOnError; - } - /** - * Adds the report to report queue. - * - * @param report report to be added - */ - public void addReport(GeneratedMessage report) { - if (report != null) { - synchronized (reports) { - reports.add(report); - } - } - } - - /** - * Adds the reports which could not be sent by heartbeat back to the - * reports list. - * - * @param reportsToPutBack list of reports which failed to be sent by - * heartbeat. - */ - public void putBackReports(List reportsToPutBack) { - synchronized (reports) { - reports.addAll(0, reportsToPutBack); - } - } - - /** - * Returns all the available reports from the report queue, or empty list if - * the queue is empty. - * - * @return List of reports - */ - public List getAllAvailableReports() { - return getReports(Integer.MAX_VALUE); - } - - /** - * Returns available reports from the report queue with a max limit on - * list size, or empty list if the queue is empty. - * - * @return List of reports - */ - public List getReports(int maxLimit) { - List reportsToReturn = new LinkedList<>(); - synchronized (reports) { - List tempList = reports.subList( - 0, min(reports.size(), maxLimit)); - reportsToReturn.addAll(tempList); - tempList.clear(); - } - return reportsToReturn; - } - - - /** - * Adds the ContainerAction to ContainerAction queue. 
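The report queue methods above let a heartbeat task drain a bounded batch and hand back anything that could not be delivered, so reports are not lost on a failed heartbeat. A rough sketch, in which maxReportsPerHeartbeat and sendViaHeartbeat are placeholders rather than names from the deleted code:

List<GeneratedMessage> batch = context.getReports(maxReportsPerHeartbeat);
try {
  sendViaHeartbeat(batch);         // hypothetical helper wrapping the SCM heartbeat RPC
} catch (IOException e) {
  context.putBackReports(batch);   // re-queued at the head for the next heartbeat
}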
- * - * @param containerAction ContainerAction to be added - */ - public void addContainerAction(ContainerAction containerAction) { - synchronized (containerActions) { - containerActions.add(containerAction); - } - } - - /** - * Add ContainerAction to ContainerAction queue if it's not present. - * - * @param containerAction ContainerAction to be added - */ - public void addContainerActionIfAbsent(ContainerAction containerAction) { - synchronized (containerActions) { - if (!containerActions.contains(containerAction)) { - containerActions.add(containerAction); - } - } - } - - /** - * Returns all the pending ContainerActions from the ContainerAction queue, - * or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getAllPendingContainerActions() { - return getPendingContainerAction(Integer.MAX_VALUE); - } - - /** - * Returns pending ContainerActions from the ContainerAction queue with a - * max limit on list size, or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getPendingContainerAction(int maxLimit) { - List containerActionList = new ArrayList<>(); - synchronized (containerActions) { - if (!containerActions.isEmpty()) { - int size = containerActions.size(); - int limit = size > maxLimit ? maxLimit : size; - for (int count = 0; count < limit; count++) { - // we need to remove the action from the containerAction queue - // as well - ContainerAction action = containerActions.poll(); - Preconditions.checkNotNull(action); - containerActionList.add(action); - } - } - return containerActionList; - } - } - - /** - * Add PipelineAction to PipelineAction queue if it's not present. - * - * @param pipelineAction PipelineAction to be added - */ - public void addPipelineActionIfAbsent(PipelineAction pipelineAction) { - synchronized (pipelineActions) { - /** - * If pipelineAction queue already contains entry for the pipeline id - * with same action, we should just return. - * Note: We should not use pipelineActions.contains(pipelineAction) here - * as, pipelineAction has a msg string. So even if two msgs differ though - * action remains same on the given pipeline, it will end up adding it - * multiple times here. - */ - for (PipelineAction pipelineActionIter : pipelineActions) { - if (pipelineActionIter.getAction() == pipelineAction.getAction() - && pipelineActionIter.hasClosePipeline() && pipelineAction - .hasClosePipeline() - && pipelineActionIter.getClosePipeline().getPipelineID() - .equals(pipelineAction.getClosePipeline().getPipelineID())) { - return; - } - } - pipelineActions.add(pipelineAction); - } - } - - /** - * Returns pending PipelineActions from the PipelineAction queue with a - * max limit on list size, or empty list if the queue is empty. - * - * @return {@literal List} - */ - public List getPendingPipelineAction(int maxLimit) { - List pipelineActionList = new ArrayList<>(); - synchronized (pipelineActions) { - if (!pipelineActions.isEmpty()) { - int size = pipelineActions.size(); - int limit = size > maxLimit ? maxLimit : size; - for (int count = 0; count < limit; count++) { - pipelineActionList.add(pipelineActions.poll()); - } - } - return pipelineActionList; - } - } - - /** - * Returns the next task to get executed by the datanode state machine. 
- * @return A callable that will be executed by the - * {@link DatanodeStateMachine} - */ - @SuppressWarnings("unchecked") - public DatanodeState getTask() { - switch (this.state) { - case INIT: - return new InitDatanodeState(this.conf, parent.getConnectionManager(), - this); - case RUNNING: - return new RunningDatanodeState(this.conf, parent.getConnectionManager(), - this); - case SHUTDOWN: - return null; - default: - throw new IllegalArgumentException("Not Implemented yet."); - } - } - - /** - * Executes the required state function. - * - * @param service - Executor Service - * @param time - seconds to wait - * @param unit - Seconds. - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - */ - public void execute(ExecutorService service, long time, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - stateExecutionCount.incrementAndGet(); - DatanodeState task = getTask(); - - // Adding not null check, in a case where datanode is still starting up, but - // we called stop DatanodeStateMachine, this sets state to SHUTDOWN, and - // there is a chance of getting task as null. - if (task != null) { - if (this.isEntering()) { - task.onEnter(); - } - task.execute(service); - DatanodeStateMachine.DatanodeStates newState = task.await(time, unit); - if (this.state != newState) { - if (LOG.isDebugEnabled()) { - LOG.debug("Task {} executed, state transited from {} to {}", - task.getClass().getSimpleName(), this.state, newState); - } - if (isExiting(newState)) { - task.onExit(); - } - this.setState(newState); - } - - if (this.state == DatanodeStateMachine.DatanodeStates.SHUTDOWN) { - LOG.error("Critical error occurred in StateMachine, setting " + - "shutDownMachine"); - // When some exception occurred, set shutdownStateMachine to true, so - // that we can terminate the datanode. - setShutdownOnError(true); - } - } - } - - /** - * Returns the next command or null if it is empty. - * - * @return SCMCommand or Null. - */ - public SCMCommand getNextCommand() { - lock.lock(); - try { - return commandQueue.poll(); - } finally { - lock.unlock(); - } - } - - /** - * Adds a command to the State Machine queue. - * - * @param command - SCMCommand. - */ - public void addCommand(SCMCommand command) { - lock.lock(); - try { - commandQueue.add(command); - } finally { - lock.unlock(); - } - this.addCmdStatus(command); - } - - /** - * Returns the count of the Execution. - * @return long - */ - public long getExecutionCount() { - return stateExecutionCount.get(); - } - - /** - * Returns the next {@link CommandStatus} or null if it is empty. - * - * @return {@link CommandStatus} or Null. - */ - public CommandStatus getCmdStatus(Long key) { - return cmdStatusMap.get(key); - } - - /** - * Adds a {@link CommandStatus} to the State Machine. - * - * @param status - {@link CommandStatus}. - */ - public void addCmdStatus(Long key, CommandStatus status) { - cmdStatusMap.put(key, status); - } - - /** - * Adds a {@link CommandStatus} to the State Machine for given SCMCommand. - * - * @param cmd - {@link SCMCommand}. - */ - public void addCmdStatus(SCMCommand cmd) { - if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) { - addCmdStatus(cmd.getId(), - DeleteBlockCommandStatusBuilder.newBuilder() - .setCmdId(cmd.getId()) - .setStatus(Status.PENDING) - .setType(cmd.getType()) - .build()); - } - } - - /** - * Get map holding all {@link CommandStatus} objects. 
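Taken together, addCommand() enqueues the command and, for command types that track completion (currently only deleteBlocksCommand), registers a PENDING CommandStatus, while a consumer thread polls getNextCommand() and hands the result to the dispatcher, as the command handler thread in DatanodeStateMachine above does. A condensed sketch, with dispatcher and scmCommand assumed to exist:

context.addCommand(scmCommand);          // enqueue and, if applicable, record a PENDING status

SCMCommand next = context.getNextCommand();
if (next != null) {
  dispatcher.handle(next);               // routed to the handler registered for next.getType()
}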
- * - */ - public Map getCommandStatusMap() { - return cmdStatusMap; - } - - /** - * Updates status of a pending status command. - * @param cmdId command id - * @param cmdStatusUpdater Consumer to update command status. - * @return true if command status updated successfully else false. - */ - public boolean updateCommandStatus(Long cmdId, - Consumer cmdStatusUpdater) { - if(cmdStatusMap.containsKey(cmdId)) { - cmdStatusUpdater.accept(cmdStatusMap.get(cmdId)); - return true; - } - return false; - } - - public void configureHeartbeatFrequency(){ - heartbeatFrequency.set(getScmHeartbeatInterval(conf)); - } - - /** - * Return current heartbeat frequency in ms. - */ - public long getHeartbeatFrequency() { - return heartbeatFrequency.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java deleted file mode 100644 index 2dec08fe83c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.apache.ratis.protocol.NotLeaderException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler for close container command received from SCM. - */ -public class CloseContainerCommandHandler implements CommandHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(CloseContainerCommandHandler.class); - - private int invocationCount; - private long totalTime; - - /** - * Constructs a ContainerReport handler. - */ - public CloseContainerCommandHandler() { - } - - /** - * Handles a given SCM command. - * - * @param command - SCM Command - * @param ozoneContainer - Ozone Container. - * @param context - Current Context. - * @param connectionManager - The SCMs that we are talking to. - */ - @Override - public void handle(SCMCommand command, OzoneContainer ozoneContainer, - StateContext context, SCMConnectionManager connectionManager) { - LOG.debug("Processing Close Container command."); - invocationCount++; - final long startTime = Time.monotonicNow(); - final DatanodeDetails datanodeDetails = context.getParent() - .getDatanodeDetails(); - final CloseContainerCommandProto closeCommand = - ((CloseContainerCommand)command).getProto(); - final ContainerController controller = ozoneContainer.getController(); - final long containerId = closeCommand.getContainerID(); - try { - final Container container = controller.getContainer(containerId); - - if (container == null) { - LOG.error("Container #{} does not exist in datanode. 
" - + "Container close failed.", containerId); - return; - } - - // move the container to CLOSING if in OPEN state - controller.markContainerForClose(containerId); - - switch (container.getContainerState()) { - case OPEN: - case CLOSING: - // If the container is part of open pipeline, close it via write channel - if (ozoneContainer.getWriteChannel() - .isExist(closeCommand.getPipelineID())) { - ContainerCommandRequestProto request = - getContainerCommandRequestProto(datanodeDetails, - closeCommand.getContainerID()); - ozoneContainer.getWriteChannel() - .submitRequest(request, closeCommand.getPipelineID()); - } else { - // Container should not exist in CLOSING state without a pipeline - controller.markContainerUnhealthy(containerId); - } - break; - case QUASI_CLOSED: - if (closeCommand.getForce()) { - controller.closeContainer(containerId); - break; - } - case CLOSED: - break; - case UNHEALTHY: - case INVALID: - if (LOG.isDebugEnabled()) { - LOG.debug("Cannot close the container #{}, the container is" - + " in {} state.", containerId, container.getContainerState()); - } - default: - break; - } - } catch (NotLeaderException e) { - LOG.debug("Follower cannot close container #{}.", containerId); - } catch (IOException e) { - LOG.error("Can't close container #{}", containerId, e); - } finally { - long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; - } - } - - private ContainerCommandRequestProto getContainerCommandRequestProto( - final DatanodeDetails datanodeDetails, final long containerId) { - final ContainerCommandRequestProto.Builder command = - ContainerCommandRequestProto.newBuilder(); - command.setCmdType(ContainerProtos.Type.CloseContainer); - command.setTraceID(TracingUtil.exportCurrentSpan()); - command.setContainerID(containerId); - command.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - command.setDatanodeUuid(datanodeDetails.getUuidString()); - return command.build(); - } - - /** - * Returns the command type that this command handler handles. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.closeContainerCommand; - } - - /** - * Returns number of times this handler has been invoked. - * - * @return int - */ - @Override - public int getInvocationCount() { - return invocationCount; - } - - /** - * Returns the average time this function takes to run. - * - * @return long - */ - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java deleted file mode 100644 index af854ec3d61..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -/** - * Dispatches command to the correct handler. - */ -public final class CommandDispatcher { - static final Logger LOG = - LoggerFactory.getLogger(CommandDispatcher.class); - private final StateContext context; - private final Map handlerMap; - private final OzoneContainer container; - private final SCMConnectionManager connectionManager; - - /** - * Constructs a command Dispatcher. - * @param context - Context. - */ - /** - * Constructs a command dispatcher. - * - * @param container - Ozone Container - * @param context - Context - * @param handlers - Set of handlers. - */ - private CommandDispatcher(OzoneContainer container, SCMConnectionManager - connectionManager, StateContext context, - CommandHandler... handlers) { - Preconditions.checkNotNull(context); - Preconditions.checkNotNull(handlers); - Preconditions.checkArgument(handlers.length > 0); - Preconditions.checkNotNull(container); - Preconditions.checkNotNull(connectionManager); - this.context = context; - this.container = container; - this.connectionManager = connectionManager; - handlerMap = new HashMap<>(); - for (CommandHandler h : handlers) { - if(handlerMap.containsKey(h.getCommandType())){ - LOG.error("Duplicate handler for the same command. Exiting. Handle " + - "key : { }", h.getCommandType().getDescriptorForType().getName()); - throw new IllegalArgumentException("Duplicate handler for the same " + - "command."); - } - handlerMap.put(h.getCommandType(), h); - } - } - - public CommandHandler getCloseContainerHandler() { - return handlerMap.get(Type.closeContainerCommand); - } - - @VisibleForTesting - public CommandHandler getDeleteBlocksCommandHandler() { - return handlerMap.get(Type.deleteBlocksCommand); - } - - /** - * Dispatch the command to the correct handler. - * - * @param command - SCM Command. - */ - public void handle(SCMCommand command) { - Preconditions.checkNotNull(command); - CommandHandler handler = handlerMap.get(command.getType()); - if (handler != null) { - handler.handle(command, container, context, connectionManager); - } else { - LOG.error("Unknown SCM Command queued. There is no handler for this " + - "command. Command: {}", command.getType().getDescriptorForType() - .getName()); - } - } - - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Helper class to construct command dispatcher. 
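The Builder below insists on at least one handler plus the container, context and connection manager before build() succeeds. A minimal usage sketch, assuming those collaborators are already constructed (as they are in the DatanodeStateMachine constructor earlier in this patch):

CommandDispatcher dispatcher = CommandDispatcher.newBuilder()
    .addHandler(new CloseContainerCommandHandler())
    .setConnectionManager(connectionManager)
    .setContainer(ozoneContainer)
    .setContext(stateContext)
    .build();

dispatcher.handle(command);   // dispatched to the handler registered for command.getType()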
- */ - public static class Builder { - private final List handlerList; - private OzoneContainer container; - private StateContext context; - private SCMConnectionManager connectionManager; - - public Builder() { - handlerList = new LinkedList<>(); - } - - /** - * Adds a handler. - * - * @param handler - handler - * @return Builder - */ - public Builder addHandler(CommandHandler handler) { - Preconditions.checkNotNull(handler); - handlerList.add(handler); - return this; - } - - /** - * Add the OzoneContainer. - * - * @param ozoneContainer - ozone container. - * @return Builder - */ - public Builder setContainer(OzoneContainer ozoneContainer) { - Preconditions.checkNotNull(ozoneContainer); - this.container = ozoneContainer; - return this; - } - - /** - * Set the Connection Manager. - * - * @param scmConnectionManager - * @return this - */ - public Builder setConnectionManager(SCMConnectionManager - scmConnectionManager) { - Preconditions.checkNotNull(scmConnectionManager); - this.connectionManager = scmConnectionManager; - return this; - } - - /** - * Sets the Context. - * - * @param stateContext - StateContext - * @return this - */ - public Builder setContext(StateContext stateContext) { - Preconditions.checkNotNull(stateContext); - this.context = stateContext; - return this; - } - - /** - * Builds a command Dispatcher. - * @return Command Dispatcher. - */ - public CommandDispatcher build() { - Preconditions.checkNotNull(this.connectionManager, "Missing connection" + - " manager."); - Preconditions.checkNotNull(this.container, "Missing container."); - Preconditions.checkNotNull(this.context, "Missing context."); - Preconditions.checkArgument(this.handlerList.size() > 0); - return new CommandDispatcher(this.container, this.connectionManager, - this.context, handlerList.toArray( - new CommandHandler[handlerList.size()])); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java deleted file mode 100644 index 1ea0ea84515..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
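
For reference, the removed CommandDispatcher and its Builder reduce to a type-keyed handler map that rejects duplicate registrations and routes each command to the matching handler. A minimal sketch with simplified stand-in types (the real code keys on SCMCommandProto.Type and also passes the OzoneContainer, StateContext and SCMConnectionManager into each handler):

    import java.util.EnumMap;
    import java.util.Map;

    class DispatchSketch {
      enum Type { closeContainerCommand, deleteBlocksCommand, deleteContainerCommand }

      interface Handler {
        Type type();
        void handle(Object command);
      }

      static final class Dispatcher {
        private final Map<Type, Handler> handlers = new EnumMap<>(Type.class);

        Dispatcher(Handler... all) {
          for (Handler h : all) {
            // Two handlers for one command type is a wiring error, as in the original.
            if (handlers.putIfAbsent(h.type(), h) != null) {
              throw new IllegalArgumentException("Duplicate handler for " + h.type());
            }
          }
        }

        void dispatch(Type type, Object command) {
          Handler h = handlers.get(type);
          if (h != null) {
            h.handle(command);  // route to the registered handler
          }                     // else: unknown command, the original logs an error
        }
      }
    }
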

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.slf4j.Logger; - -import java.util.function.Consumer; - -/** - * Generic interface for handlers. - */ -public interface CommandHandler { - - /** - * Handles a given SCM command. - * @param command - SCM Command - * @param container - Ozone Container. - * @param context - Current Context. - * @param connectionManager - The SCMs that we are talking to. - */ - void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager); - - /** - * Returns the command type that this command handler handles. - * @return Type - */ - SCMCommandProto.Type getCommandType(); - - /** - * Returns number of times this handler has been invoked. - * @return int - */ - int getInvocationCount(); - - /** - * Returns the average time this function takes to run. - * @return long - */ - long getAverageRunTime(); - - /** - * Default implementation for updating command status. - */ - default void updateCommandStatus(StateContext context, SCMCommand command, - Consumer cmdStatusUpdater, Logger log) { - if (!context.updateCommandStatus(command.getId(), cmdStatusUpdater)) { - log.debug("{} with Id:{} not found.", command.getType(), - command.getId()); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java deleted file mode 100644 index cdecf5d7ed4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ /dev/null @@ -1,281 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
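
Every handler deleted in this patch implements the same bookkeeping behind getInvocationCount() and getAverageRunTime(): count invocations, accumulate elapsed time, and divide. A minimal sketch of that accounting (the real code uses Hadoop's monotonic Time.monotonicNow() rather than System.currentTimeMillis()):

    class HandlerMetricsSketch {
      private int invocationCount;
      private long totalTimeMs;

      void handle(Runnable work) {
        long start = System.currentTimeMillis(); // real code: Time.monotonicNow()
        invocationCount++;
        try {
          work.run();
        } finally {
          totalTimeMs += System.currentTimeMillis() - start;
        }
      }

      int invocationCount() {
        return invocationCount;
      }

      long averageRunTimeMs() {
        // Guard against division by zero before the first invocation.
        return invocationCount > 0 ? totalTimeMs / invocationCount : 0;
      }
    }
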

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers - .DeletedContainerBlocksSummary; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.function.Consumer; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_NOT_FOUND; - -/** - * Handle block deletion commands. 
- */ -public class DeleteBlocksCommandHandler implements CommandHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(DeleteBlocksCommandHandler.class); - - private final ContainerSet containerSet; - private final Configuration conf; - private int invocationCount; - private long totalTime; - private boolean cmdExecuted; - - public DeleteBlocksCommandHandler(ContainerSet cset, - Configuration conf) { - this.containerSet = cset; - this.conf = conf; - } - - @Override - public void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager) { - cmdExecuted = false; - long startTime = Time.monotonicNow(); - ContainerBlocksDeletionACKProto blockDeletionACK = null; - try { - if (command.getType() != SCMCommandProto.Type.deleteBlocksCommand) { - LOG.warn("Skipping handling command, expected command " - + "type {} but found {}", - SCMCommandProto.Type.deleteBlocksCommand, command.getType()); - return; - } - LOG.debug("Processing block deletion command."); - invocationCount++; - - // move blocks to deleting state. - // this is a metadata update, the actual deletion happens in another - // recycling thread. - DeleteBlocksCommand cmd = (DeleteBlocksCommand) command; - List containerBlocks = cmd.blocksTobeDeleted(); - - DeletedContainerBlocksSummary summary = - DeletedContainerBlocksSummary.getFrom(containerBlocks); - LOG.info("Start to delete container blocks, TXIDs={}, " - + "numOfContainers={}, numOfBlocks={}", - summary.getTxIDSummary(), - summary.getNumOfContainers(), - summary.getNumOfBlocks()); - - ContainerBlocksDeletionACKProto.Builder resultBuilder = - ContainerBlocksDeletionACKProto.newBuilder(); - containerBlocks.forEach(entry -> { - DeleteBlockTransactionResult.Builder txResultBuilder = - DeleteBlockTransactionResult.newBuilder(); - txResultBuilder.setTxID(entry.getTxID()); - long containerId = entry.getContainerID(); - try { - Container cont = containerSet.getContainer(containerId); - if (cont == null) { - throw new StorageContainerException("Unable to find the container " - + containerId, CONTAINER_NOT_FOUND); - } - ContainerProtos.ContainerType containerType = cont.getContainerType(); - switch (containerType) { - case KeyValueContainer: - KeyValueContainerData containerData = (KeyValueContainerData) - cont.getContainerData(); - cont.writeLock(); - try { - deleteKeyValueContainerBlocks(containerData, entry); - } finally { - cont.writeUnlock(); - } - txResultBuilder.setContainerID(containerId) - .setSuccess(true); - break; - default: - LOG.error( - "Delete Blocks Command Handler is not implemented for " + - "containerType {}", containerType); - } - } catch (IOException e) { - LOG.warn("Failed to delete blocks for container={}, TXID={}", - entry.getContainerID(), entry.getTxID(), e); - txResultBuilder.setContainerID(containerId) - .setSuccess(false); - } - resultBuilder.addResults(txResultBuilder.build()) - .setDnId(context.getParent().getDatanodeDetails() - .getUuid().toString()); - }); - blockDeletionACK = resultBuilder.build(); - - // Send ACK back to SCM as long as meta updated - // TODO Or we should wait until the blocks are actually deleted? 
- if (!containerBlocks.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Sending following block deletion ACK to SCM"); - for (DeleteBlockTransactionResult result : blockDeletionACK - .getResultsList()) { - LOG.debug(result.getTxID() + " : " + result.getSuccess()); - } - } - } - cmdExecuted = true; - } finally { - final ContainerBlocksDeletionACKProto deleteAck = - blockDeletionACK; - Consumer statusUpdater = (cmdStatus) -> { - cmdStatus.setStatus(cmdExecuted); - ((DeleteBlockCommandStatus) cmdStatus).setBlocksDeletionAck(deleteAck); - }; - updateCommandStatus(context, command, statusUpdater, LOG); - long endTime = Time.monotonicNow(); - totalTime += endTime - startTime; - } - } - - /** - * Move a bunch of blocks from a container to deleting state. This is a meta - * update, the actual deletes happen in async mode. - * - * @param containerData - KeyValueContainerData - * @param delTX a block deletion transaction. - * @throws IOException if I/O error occurs. - */ - private void deleteKeyValueContainerBlocks( - KeyValueContainerData containerData, DeletedBlocksTransaction delTX) - throws IOException { - long containerId = delTX.getContainerID(); - if (LOG.isDebugEnabled()) { - LOG.debug("Processing Container : {}, DB path : {}", containerId, - containerData.getMetadataPath()); - } - - if (delTX.getTxID() < containerData.getDeleteTransactionId()) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format("Ignoring delete blocks for containerId: %d." - + " Outdated delete transactionId %d < %d", containerId, - delTX.getTxID(), containerData.getDeleteTransactionId())); - } - return; - } - - int newDeletionBlocks = 0; - try(ReferenceCountedDB containerDB = - BlockUtils.getDB(containerData, conf)) { - for (Long blk : delTX.getLocalIDList()) { - BatchOperation batch = new BatchOperation(); - byte[] blkBytes = Longs.toByteArray(blk); - byte[] blkInfo = containerDB.getStore().get(blkBytes); - if (blkInfo != null) { - byte[] deletingKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETING_KEY_PREFIX + blk); - byte[] deletedKeyBytes = - DFSUtil.string2Bytes(OzoneConsts.DELETED_KEY_PREFIX + blk); - if (containerDB.getStore().get(deletingKeyBytes) != null - || containerDB.getStore().get(deletedKeyBytes) != null) { - if (LOG.isDebugEnabled()) { - LOG.debug(String.format( - "Ignoring delete for block %d in container %d." - + " Entry already added.", blk, containerId)); - } - continue; - } - // Found the block in container db, - // use an atomic update to change its state to deleting. - batch.put(deletingKeyBytes, blkInfo); - batch.delete(blkBytes); - try { - containerDB.getStore().writeBatch(batch); - newDeletionBlocks++; - if (LOG.isDebugEnabled()) { - LOG.debug("Transited Block {} to DELETING state in container {}", - blk, containerId); - } - } catch (IOException e) { - // if some blocks failed to delete, we fail this TX, - // without sending this ACK to SCM, SCM will resend the TX - // with a certain number of retries. 
- throw new IOException( - "Failed to delete blocks for TXID = " + delTX.getTxID(), e); - } - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Block {} not found or already under deletion in" - + " container {}, skip deleting it.", blk, containerId); - } - } - } - - containerDB.getStore() - .put(DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX), - Longs.toByteArray(delTX.getTxID())); - containerData - .updateDeleteTransactionId(delTX.getTxID()); - // update pending deletion blocks count in in-memory container status - containerData.incrPendingDeletionBlocks(newDeletionBlocks); - } - } - - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.deleteBlocksCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java deleted file mode 100644 index b54fb1a17ac..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteContainerCommandHandler.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
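
The block-deletion handler above never removes chunk data itself; it only re-keys block metadata under a deleting prefix so a background service can reclaim the data later. A self-contained sketch of that metadata transition, with a HashMap standing in for the container's key-value store and a placeholder constant for OzoneConsts.DELETING_KEY_PREFIX:

    import java.util.Map;

    class DeletingTransitionSketch {
      // Placeholder for OzoneConsts.DELETING_KEY_PREFIX.
      static final String DELETING_PREFIX = "#deleting#";

      /** Re-keys known blocks under the deleting prefix; returns how many were marked. */
      static int markDeleting(Map<String, byte[]> store, Iterable<Long> blockIds) {
        int marked = 0;
        for (Long blk : blockIds) {
          byte[] info = store.get(String.valueOf(blk));
          // Skip blocks that are unknown or already marked for deletion.
          if (info == null || store.containsKey(DELETING_PREFIX + blk)) {
            continue;
          }
          // The real handler performs both writes through one BatchOperation so the
          // transition is atomic in the container DB.
          store.put(DELETING_PREFIX + blk, info);
          store.remove(String.valueOf(blk));
          marked++;
        }
        return marked;
      }
    }
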

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handler to process the DeleteContainerCommand from SCM. - */ -public class DeleteContainerCommandHandler implements CommandHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(DeleteContainerCommandHandler.class); - - private int invocationCount; - private long totalTime; - - @Override - public void handle(final SCMCommand command, - final OzoneContainer ozoneContainer, - final StateContext context, - final SCMConnectionManager connectionManager) { - final long startTime = Time.monotonicNow(); - invocationCount++; - try { - final DeleteContainerCommand deleteContainerCommand = - (DeleteContainerCommand) command; - final ContainerController controller = ozoneContainer.getController(); - controller.deleteContainer(deleteContainerCommand.getContainerID(), - deleteContainerCommand.isForce()); - } catch (IOException e) { - LOG.error("Exception occurred while deleting the container.", e); - } finally { - totalTime += Time.monotonicNow() - startTime; - } - - } - - @Override - public SCMCommandProto.Type getCommandType() { - return SCMCommandProto.Type.deleteContainerCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - return invocationCount == 0 ? 0 : totalTime / invocationCount; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java deleted file mode 100644 index a028041b196..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; -import org.apache.hadoop.ozone.container.replication.ReplicationTask; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Command handler to copy containers from sources. - */ -public class ReplicateContainerCommandHandler implements CommandHandler { - - static final Logger LOG = - LoggerFactory.getLogger(ReplicateContainerCommandHandler.class); - - private int invocationCount; - - private long totalTime; - - private Configuration conf; - - private ReplicationSupervisor supervisor; - - public ReplicateContainerCommandHandler( - Configuration conf, - ReplicationSupervisor supervisor) { - this.conf = conf; - this.supervisor = supervisor; - } - - @Override - public void handle(SCMCommand command, OzoneContainer container, - StateContext context, SCMConnectionManager connectionManager) { - - final ReplicateContainerCommand replicateCommand = - (ReplicateContainerCommand) command; - final List sourceDatanodes = - replicateCommand.getSourceDatanodes(); - final long containerID = replicateCommand.getContainerID(); - - Preconditions.checkArgument(sourceDatanodes.size() > 0, - String.format("Replication command is received for container %d " - + "but the size of source datanodes was 0.", containerID)); - - supervisor.addTask(new ReplicationTask(containerID, sourceDatanodes)); - } - - @Override - public SCMCommandProto.Type getCommandType() { - return Type.replicateContainerCommand; - } - - @Override - public int getInvocationCount() { - return this.invocationCount; - } - - @Override - public long getAverageRunTime() { - if (invocationCount > 0) { - return totalTime / invocationCount; - } - return 0; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java deleted file mode 100644 index 1e9c8dc5eee..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java deleted file mode 100644 index feb2f812ac8..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine; -/** - - State machine class is used by the container to denote various states a - container can be in and also is used for command processing. - - Container has the following states. - - Start - > getVersion -> Register -> Running -> Shutdown - - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java deleted file mode 100644 index 25be207dcd9..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states; - -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * State Interface that allows tasks to maintain states. - */ -public interface DatanodeState { - /** - * Called before entering this state. - */ - void onEnter(); - - /** - * Called After exiting this state. - */ - void onExit(); - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - void execute(ExecutorService executor); - - /** - * Wait for execute to finish. - * - * @param time - Time - * @param timeUnit - Unit of time. - * @throws InterruptedException - * @throws ExecutionException - * @throws TimeoutException - */ - T await(long time, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java deleted file mode 100644 index 273886228f6..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
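
The DatanodeState interface above pairs execute(ExecutorService) with a timed await(): a state submits its work and the caller later collects the result with a timeout. A minimal runnable sketch of that contract using plain JDK futures:

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    class AwaitableStateSketch {
      private Future<String> result;

      /** execute(): hand the state's work to the shared executor. */
      void execute(ExecutorService executor) {
        result = executor.submit(() -> "NEXT_STATE");
      }

      /** await(): block for at most the given time and return the computed next state. */
      String await(long time, TimeUnit unit)
          throws InterruptedException, ExecutionException, TimeoutException {
        return result.get(time, unit);
      }

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        AwaitableStateSketch state = new AwaitableStateSketch();
        state.execute(pool);
        System.out.println(state.await(1, TimeUnit.SECONDS));
        pool.shutdown();
      }
    }
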

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; - -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; - -/** - * Init Datanode State is the task that gets run when we are in Init State. - */ -public class InitDatanodeState implements DatanodeState, - Callable { - static final Logger LOG = LoggerFactory.getLogger(InitDatanodeState.class); - private final SCMConnectionManager connectionManager; - private final Configuration conf; - private final StateContext context; - private Future result; - - /** - * Create InitDatanodeState Task. - * - * @param conf - Conf - * @param connectionManager - Connection Manager - * @param context - Current Context - */ - public InitDatanodeState(Configuration conf, - SCMConnectionManager connectionManager, - StateContext context) { - this.conf = conf; - this.connectionManager = connectionManager; - this.context = context; - } - - /** - * Computes a result, or throws an exception if unable to do so. - * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public DatanodeStateMachine.DatanodeStates call() throws Exception { - Collection addresses = null; - try { - addresses = getSCMAddresses(conf); - } catch (IllegalArgumentException e) { - if(!Strings.isNullOrEmpty(e.getMessage())) { - LOG.error("Failed to get SCM addresses: " + e.getMessage()); - } - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } - - if (addresses == null || addresses.isEmpty()) { - LOG.error("Null or empty SCM address list found."); - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } else { - for (InetSocketAddress addr : addresses) { - if (addr.isUnresolved()) { - LOG.warn("One SCM address ({}) can't (yet?) be resolved. Postpone " - + "initialization.", addr); - - //skip any further initialization. DatanodeStateMachine will try it - // again after the hb frequency - return this.context.getState(); - } - } - for (InetSocketAddress addr : addresses) { - connectionManager.addSCMServer(addr); - } - } - - // If datanode ID is set, persist it to the ID file. 
- persistContainerDatanodeDetails(); - - return this.context.getState().getNextState(); - } - - /** - * Persist DatanodeDetails to datanode.id file. - */ - private void persistContainerDatanodeDetails() { - String dataNodeIDPath = HddsServerUtil.getDatanodeIdFilePath(conf); - if (Strings.isNullOrEmpty(dataNodeIDPath)) { - LOG.error("A valid path is needed for config setting {}", - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); - this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - return; - } - File idPath = new File(dataNodeIDPath); - DatanodeDetails datanodeDetails = this.context.getParent() - .getDatanodeDetails(); - if (datanodeDetails != null && !idPath.exists()) { - try { - ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath); - } catch (IOException ex) { - // As writing DatanodeDetails in to datanodeid file failed, which is - // a critical thing, so shutting down the state machine. - LOG.error("Writing to {} failed {}", dataNodeIDPath, ex.getMessage()); - this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - return; - } - LOG.info("DatanodeDetails is persisted to {}", dataNodeIDPath); - } - } - - /** - * Called before entering this state. - */ - @Override - public void onEnter() { - LOG.trace("Entering init container state"); - } - - /** - * Called After exiting this state. - */ - @Override - public void onExit() { - LOG.trace("Exiting init container state"); - } - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - @Override - public void execute(ExecutorService executor) { - result = executor.submit(this); - } - - /** - * Wait for execute to finish. - * - * @param time - Time - * @param timeUnit - Unit of time. - */ - @Override - public DatanodeStateMachine.DatanodeStates await(long time, - TimeUnit timeUnit) throws InterruptedException, - ExecutionException, TimeoutException { - return result.get(time, timeUnit); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java deleted file mode 100644 index 6b596fe14f4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
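
persistContainerDatanodeDetails() above writes the datanode identity file only when it does not already exist and treats a failed write as fatal. A small sketch of that write-once behaviour, with java.nio.file calls standing in for the real ContainerUtils helper:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class IdFilePersistSketch {
      /** Returns true if persistence failed and the caller should shut down. */
      static boolean persistIfAbsent(Path idFile, byte[] details) {
        if (Files.exists(idFile)) {
          return false; // identity already persisted by an earlier start; keep it
        }
        try {
          if (idFile.getParent() != null) {
            Files.createDirectories(idFile.getParent());
          }
          Files.write(idFile, details);
          return false;
        } catch (IOException e) {
          // The identity file is needed for later re-registration, so a failed write
          // is treated as fatal (the original moves the state machine to SHUTDOWN).
          return true;
        }
      }
    }
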

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint.RegisterEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Class that implements handshake with SCM. - */ -public class RunningDatanodeState implements DatanodeState { - static final Logger - LOG = LoggerFactory.getLogger(RunningDatanodeState.class); - private final SCMConnectionManager connectionManager; - private final Configuration conf; - private final StateContext context; - private CompletionService ecs; - - public RunningDatanodeState(Configuration conf, - SCMConnectionManager connectionManager, - StateContext context) { - this.connectionManager = connectionManager; - this.conf = conf; - this.context = context; - } - - /** - * Called before entering this state. - */ - @Override - public void onEnter() { - LOG.trace("Entering handshake task."); - } - - /** - * Called After exiting this state. - */ - @Override - public void onExit() { - LOG.trace("Exiting handshake task."); - } - - /** - * Executes one or more tasks that is needed by this state. - * - * @param executor - ExecutorService - */ - @Override - public void execute(ExecutorService executor) { - ecs = new ExecutorCompletionService<>(executor); - for (EndpointStateMachine endpoint : connectionManager.getValues()) { - Callable endpointTask - = getEndPointTask(endpoint); - if (endpointTask != null) { - ecs.submit(endpointTask); - } else { - // This can happen if a task is taking more time than the timeOut - // specified for the task in await, and when it is completed the task - // has set the state to Shutdown, we may see the state as shutdown - // here. So, we need to Shutdown DatanodeStateMachine. - LOG.error("State is Shutdown in RunningDatanodeState"); - context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN); - } - } - } - //TODO : Cache some of these tasks instead of creating them - //all the time. 
-  private Callable<EndpointStateMachine.EndPointStates>
-      getEndPointTask(EndpointStateMachine endpoint) {
-    switch (endpoint.getState()) {
-    case GETVERSION:
-      return new VersionEndpointTask(endpoint, conf, context.getParent()
-          .getContainer());
-    case REGISTER:
-      return RegisterEndpointTask.newBuilder()
-          .setConfig(conf)
-          .setEndpointStateMachine(endpoint)
-          .setContext(context)
-          .setDatanodeDetails(context.getParent().getDatanodeDetails())
-          .setOzoneContainer(context.getParent().getContainer())
-          .build();
-    case HEARTBEAT:
-      return HeartbeatEndpointTask.newBuilder()
-          .setConfig(conf)
-          .setEndpointStateMachine(endpoint)
-          .setDatanodeDetails(context.getParent().getDatanodeDetails())
-          .setContext(context)
-          .build();
-    case SHUTDOWN:
-      break;
-    default:
-      throw new IllegalArgumentException("Illegal Argument.");
-    }
-    return null;
-  }
-
-  /**
-   * Computes the next state the container state machine must move to by looking
-   * at all the state of endpoints.
-   *

- * if any endpoint state has moved to Shutdown, either we have an - * unrecoverable error or we have been told to shutdown. Either case the - * datanode state machine should move to Shutdown state, otherwise we - * remain in the Running state. - * - * @return next container state. - */ - private DatanodeStateMachine.DatanodeStates - computeNextContainerState( - List> results) { - for (Future state : results) { - try { - if (state.get() == EndpointStateMachine.EndPointStates.SHUTDOWN) { - // if any endpoint tells us to shutdown we move to shutdown state. - return DatanodeStateMachine.DatanodeStates.SHUTDOWN; - } - } catch (InterruptedException | ExecutionException e) { - LOG.error("Error in executing end point task.", e); - } - } - return DatanodeStateMachine.DatanodeStates.RUNNING; - } - - /** - * Wait for execute to finish. - * - * @param duration - Time - * @param timeUnit - Unit of duration. - */ - @Override - public DatanodeStateMachine.DatanodeStates - await(long duration, TimeUnit timeUnit) - throws InterruptedException, ExecutionException, TimeoutException { - int count = connectionManager.getValues().size(); - int returned = 0; - long timeLeft = timeUnit.toMillis(duration); - long startTime = Time.monotonicNow(); - List> results = new - LinkedList<>(); - - while (returned < count && timeLeft > 0) { - Future result = - ecs.poll(timeLeft, TimeUnit.MILLISECONDS); - if (result != null) { - results.add(result); - returned++; - } - timeLeft = timeLeft - (Time.monotonicNow() - startTime); - } - return computeNextContainerState(results); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java deleted file mode 100644 index 6b8d16c6d39..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
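
The await() method above drains an ExecutorCompletionService under a fixed time budget: poll with whatever time remains, collect the futures that finished, and stop when the budget runs out. A runnable sketch of the pattern; it tracks an absolute deadline rather than repeatedly subtracting elapsed time, which is easier to get right:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    class BudgetedPollSketch {
      static List<Future<Integer>> awaitAll(CompletionService<Integer> ecs, int count,
          long budgetMs) throws InterruptedException {
        List<Future<Integer>> done = new ArrayList<>();
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(budgetMs);
        while (done.size() < count) {
          long leftMs = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime());
          if (leftMs <= 0) {
            break; // budget exhausted; unfinished tasks are simply not collected
          }
          Future<Integer> f = ecs.poll(leftMs, TimeUnit.MILLISECONDS);
          if (f != null) {
            done.add(f);
          }
        }
        return done;
      }

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletionService<Integer> ecs = new ExecutorCompletionService<>(pool);
        ecs.submit(() -> 1);
        ecs.submit(() -> 2);
        System.out.println(awaitAll(ecs, 2, 500).size()); // normally prints 2
        pool.shutdown();
      }
    }
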

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.datanode; -/** - This package contians files that guide the state transitions from - Init->Running->Shutdown for the datanode. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java deleted file mode 100644 index c50f4573d07..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ /dev/null @@ -1,402 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.base.Preconditions; -import com.google.protobuf.Descriptors; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.ozone.container.common.helpers - .DeletedContainerBlocksSummary; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine.EndPointStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.time.ZonedDateTime; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Callable; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_ACTION_MAX_LIMIT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_ACTION_MAX_LIMIT; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT; - -/** - * Heartbeat class for SCMs. - */ -public class HeartbeatEndpointTask - implements Callable { - static final Logger LOG = - LoggerFactory.getLogger(HeartbeatEndpointTask.class); - private final EndpointStateMachine rpcEndpoint; - private final Configuration conf; - private DatanodeDetailsProto datanodeDetailsProto; - private StateContext context; - private int maxContainerActionsPerHB; - private int maxPipelineActionsPerHB; - - /** - * Constructs a SCM heart beat. - * - * @param conf Config. - */ - public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint, - Configuration conf, StateContext context) { - this.rpcEndpoint = rpcEndpoint; - this.conf = conf; - this.context = context; - this.maxContainerActionsPerHB = conf.getInt(HDDS_CONTAINER_ACTION_MAX_LIMIT, - HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT); - this.maxPipelineActionsPerHB = conf.getInt(HDDS_PIPELINE_ACTION_MAX_LIMIT, - HDDS_PIPELINE_ACTION_MAX_LIMIT_DEFAULT); - } - - /** - * Get the container Node ID proto. 
- * - * @return ContainerNodeIDProto - */ - public DatanodeDetailsProto getDatanodeDetailsProto() { - return datanodeDetailsProto; - } - - /** - * Set container node ID proto. - * - * @param datanodeDetailsProto - the node id. - */ - public void setDatanodeDetailsProto(DatanodeDetailsProto - datanodeDetailsProto) { - this.datanodeDetailsProto = datanodeDetailsProto; - } - - /** - * Computes a result, or throws an exception if unable to do so. - * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - rpcEndpoint.lock(); - SCMHeartbeatRequestProto.Builder requestBuilder = null; - try { - Preconditions.checkState(this.datanodeDetailsProto != null); - - requestBuilder = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetailsProto); - addReports(requestBuilder); - addContainerActions(requestBuilder); - addPipelineActions(requestBuilder); - SCMHeartbeatRequestProto request = requestBuilder.build(); - if (LOG.isDebugEnabled()) { - LOG.debug("Sending heartbeat message :: {}", request.toString()); - } - SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint() - .sendHeartbeat(request); - processResponse(reponse, datanodeDetailsProto); - rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now()); - rpcEndpoint.zeroMissedCount(); - } catch (IOException ex) { - // put back the reports which failed to be sent - if (requestBuilder != null) { - putBackReports(requestBuilder); - } - rpcEndpoint.logIfNeeded(ex); - } finally { - rpcEndpoint.unlock(); - } - return rpcEndpoint.getState(); - } - - // TODO: Make it generic. - private void putBackReports(SCMHeartbeatRequestProto.Builder requestBuilder) { - List reports = new LinkedList<>(); - if (requestBuilder.hasContainerReport()) { - reports.add(requestBuilder.getContainerReport()); - } - if (requestBuilder.hasNodeReport()) { - reports.add(requestBuilder.getNodeReport()); - } - if (requestBuilder.getCommandStatusReportsCount() != 0) { - reports.addAll(requestBuilder.getCommandStatusReportsList()); - } - if (requestBuilder.getIncrementalContainerReportCount() != 0) { - reports.addAll(requestBuilder.getIncrementalContainerReportList()); - } - context.putBackReports(reports); - } - - /** - * Adds all the available reports to heartbeat. - * - * @param requestBuilder builder to which the report has to be added. - */ - private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) { - for (GeneratedMessage report : context.getAllAvailableReports()) { - String reportName = report.getDescriptorForType().getFullName(); - for (Descriptors.FieldDescriptor descriptor : - SCMHeartbeatRequestProto.getDescriptor().getFields()) { - String heartbeatFieldName = descriptor.getMessageType().getFullName(); - if (heartbeatFieldName.equals(reportName)) { - if (descriptor.isRepeated()) { - requestBuilder.addRepeatedField(descriptor, report); - } else { - requestBuilder.setField(descriptor, report); - } - } - } - } - } - - /** - * Adds all the pending ContainerActions to the heartbeat. - * - * @param requestBuilder builder to which the report has to be added. 
- */ - private void addContainerActions( - SCMHeartbeatRequestProto.Builder requestBuilder) { - List actions = context.getPendingContainerAction( - maxContainerActionsPerHB); - if (!actions.isEmpty()) { - ContainerActionsProto cap = ContainerActionsProto.newBuilder() - .addAllContainerActions(actions) - .build(); - requestBuilder.setContainerActions(cap); - } - } - - /** - * Adds all the pending PipelineActions to the heartbeat. - * - * @param requestBuilder builder to which the report has to be added. - */ - private void addPipelineActions( - SCMHeartbeatRequestProto.Builder requestBuilder) { - List actions = context.getPendingPipelineAction( - maxPipelineActionsPerHB); - if (!actions.isEmpty()) { - PipelineActionsProto pap = PipelineActionsProto.newBuilder() - .addAllPipelineActions(actions) - .build(); - requestBuilder.setPipelineActions(pap); - } - } - - /** - * Returns a builder class for HeartbeatEndpointTask task. - * @return Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Add this command to command processing Queue. - * - * @param response - SCMHeartbeat response. - */ - private void processResponse(SCMHeartbeatResponseProto response, - final DatanodeDetailsProto datanodeDetails) { - Preconditions.checkState(response.getDatanodeUUID() - .equalsIgnoreCase(datanodeDetails.getUuid()), - "Unexpected datanode ID in the response."); - // Verify the response is indeed for this datanode. - for (SCMCommandProto commandResponseProto : response - .getCommandsList()) { - switch (commandResponseProto.getCommandType()) { - case reregisterCommand: - if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) { - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM notification to register." - + " Interrupt HEARTBEAT and transit to REGISTER state."); - } - rpcEndpoint.setState(EndPointStates.REGISTER); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Illegal state {} found, expecting {}.", - rpcEndpoint.getState().name(), EndPointStates.HEARTBEAT); - } - } - break; - case deleteBlocksCommand: - DeleteBlocksCommand db = DeleteBlocksCommand - .getFromProtobuf( - commandResponseProto.getDeleteBlocksCommandProto()); - if (!db.blocksTobeDeleted().isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug(DeletedContainerBlocksSummary - .getFrom(db.blocksTobeDeleted()) - .toString()); - } - this.context.addCommand(db); - } - break; - case closeContainerCommand: - CloseContainerCommand closeContainer = - CloseContainerCommand.getFromProtobuf( - commandResponseProto.getCloseContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM container close request for container {}", - closeContainer.getContainerID()); - } - this.context.addCommand(closeContainer); - break; - case replicateContainerCommand: - ReplicateContainerCommand replicateContainerCommand = - ReplicateContainerCommand.getFromProtobuf( - commandResponseProto.getReplicateContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM container replicate request for container {}", - replicateContainerCommand.getContainerID()); - } - this.context.addCommand(replicateContainerCommand); - break; - case deleteContainerCommand: - DeleteContainerCommand deleteContainerCommand = - DeleteContainerCommand.getFromProtobuf( - commandResponseProto.getDeleteContainerCommandProto()); - if (LOG.isDebugEnabled()) { - LOG.debug("Received SCM delete container request for container {}", - deleteContainerCommand.getContainerID()); - } - 
this.context.addCommand(deleteContainerCommand); - break; - default: - throw new IllegalArgumentException("Unknown response : " - + commandResponseProto.getCommandType().name()); - } - } - } - - /** - * Builder class for HeartbeatEndpointTask. - */ - public static class Builder { - private EndpointStateMachine endPointStateMachine; - private Configuration conf; - private DatanodeDetails datanodeDetails; - private StateContext context; - - /** - * Constructs the builder class. - */ - public Builder() { - } - - /** - * Sets the endpoint state machine. - * - * @param rpcEndPoint - Endpoint state machine. - * @return Builder - */ - public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { - this.endPointStateMachine = rpcEndPoint; - return this; - } - - /** - * Sets the Config. - * - * @param config - config - * @return Builder - */ - public Builder setConfig(Configuration config) { - this.conf = config; - return this; - } - - /** - * Sets the NodeID. - * - * @param dnDetails - NodeID proto - * @return Builder - */ - public Builder setDatanodeDetails(DatanodeDetails dnDetails) { - this.datanodeDetails = dnDetails; - return this; - } - - /** - * Sets the context. - * @param stateContext - State context. - * @return this. - */ - public Builder setContext(StateContext stateContext) { - this.context = stateContext; - return this; - } - - public HeartbeatEndpointTask build() { - if (endPointStateMachine == null) { - LOG.error("No endpoint specified."); - throw new IllegalArgumentException("A valid endpoint state machine is" + - " needed to construct HeartbeatEndpointTask task"); - } - - if (conf == null) { - LOG.error("No config specified."); - throw new IllegalArgumentException("A valid configration is needed to" + - " construct HeartbeatEndpointTask task"); - } - - if (datanodeDetails == null) { - LOG.error("No datanode specified."); - throw new IllegalArgumentException("A vaild Node ID is needed to " + - "construct HeartbeatEndpointTask task"); - } - - HeartbeatEndpointTask task = new HeartbeatEndpointTask(this - .endPointStateMachine, this.conf, this.context); - task.setDatanodeDetailsProto(datanodeDetails.getProtoBufMessage()); - return task; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java deleted file mode 100644 index b94b1cfc85d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
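
The heartbeat task above drains queued reports into the request and, if the RPC to SCM fails, puts them back so the next heartbeat can resend them. A simplified sketch of that drain/put-back pattern, with strings standing in for the protobuf report messages:

    import java.io.IOException;
    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    class HeartbeatPutBackSketch {
      private final Deque<String> pendingReports = new ArrayDeque<>();

      void queueReport(String report) {
        pendingReports.add(report);
      }

      void heartbeat(ReportSender sender) {
        // Drain everything queued since the last heartbeat into this request.
        List<String> sending = new ArrayList<>(pendingReports);
        pendingReports.clear();
        try {
          sender.send(sending);
        } catch (IOException e) {
          // Failed to reach SCM: put the reports back so nothing is lost.
          pendingReports.addAll(sending);
        }
      }

      interface ReportSender {
        void send(List<String> reports) throws IOException;
      }
    }
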

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.Future; - -/** - * Register a datanode with SCM. - */ -public final class RegisterEndpointTask implements - Callable { - static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class); - - private final EndpointStateMachine rpcEndPoint; - private final Configuration conf; - private Future result; - private DatanodeDetails datanodeDetails; - private final OzoneContainer datanodeContainerManager; - private StateContext stateContext; - - /** - * Creates a register endpoint task. - * - * @param rpcEndPoint - endpoint - * @param conf - conf - * @param ozoneContainer - container - */ - @VisibleForTesting - public RegisterEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer ozoneContainer, - StateContext context) { - this.rpcEndPoint = rpcEndPoint; - this.conf = conf; - this.datanodeContainerManager = ozoneContainer; - this.stateContext = context; - - } - - /** - * Get the DatanodeDetails. - * - * @return DatanodeDetailsProto - */ - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - /** - * Set the contiainerNodeID Proto. - * - * @param datanodeDetails - Container Node ID. - */ - public void setDatanodeDetails( - DatanodeDetails datanodeDetails) { - this.datanodeDetails = datanodeDetails; - } - - /** - * Computes a result, or throws an exception if unable to do so. 
- * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - - if (getDatanodeDetails() == null) { - LOG.error("DatanodeDetails cannot be null in RegisterEndpoint task, " + - "shutting down the endpoint."); - return rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); - } - - rpcEndPoint.lock(); - try { - - ContainerReportsProto containerReport = datanodeContainerManager - .getController().getContainerReport(); - NodeReportProto nodeReport = datanodeContainerManager.getNodeReport(); - PipelineReportsProto pipelineReportsProto = - datanodeContainerManager.getPipelineReport(); - // TODO : Add responses to the command Queue. - SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint() - .register(datanodeDetails.getProtoBufMessage(), nodeReport, - containerReport, pipelineReportsProto); - Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()) - .equals(datanodeDetails.getUuid()), - "Unexpected datanode ID in the response."); - Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()), - "Invalid cluster ID in the response."); - if (response.hasHostname() && response.hasIpAddress()) { - datanodeDetails.setHostName(response.getHostname()); - datanodeDetails.setIpAddress(response.getIpAddress()); - } - if (response.hasNetworkName() && response.hasNetworkLocation()) { - datanodeDetails.setNetworkName(response.getNetworkName()); - datanodeDetails.setNetworkLocation(response.getNetworkLocation()); - } - EndpointStateMachine.EndPointStates nextState = - rpcEndPoint.getState().getNextState(); - rpcEndPoint.setState(nextState); - rpcEndPoint.zeroMissedCount(); - this.stateContext.configureHeartbeatFrequency(); - } catch (IOException ex) { - rpcEndPoint.logIfNeeded(ex); - } finally { - rpcEndPoint.unlock(); - } - - return rpcEndPoint.getState(); - } - - /** - * Returns a builder class for RegisterEndPoint task. - * - * @return Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder class for RegisterEndPoint task. - */ - public static class Builder { - private EndpointStateMachine endPointStateMachine; - private Configuration conf; - private DatanodeDetails datanodeDetails; - private OzoneContainer container; - private StateContext context; - - /** - * Constructs the builder class. - */ - public Builder() { - } - - /** - * Sets the endpoint state machine. - * - * @param rpcEndPoint - Endpoint state machine. - * @return Builder - */ - public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { - this.endPointStateMachine = rpcEndPoint; - return this; - } - - /** - * Sets the Config. - * - * @param config - config - * @return Builder. - */ - public Builder setConfig(Configuration config) { - this.conf = config; - return this; - } - - /** - * Sets the NodeID. - * - * @param dnDetails - NodeID proto - * @return Builder - */ - public Builder setDatanodeDetails(DatanodeDetails dnDetails) { - this.datanodeDetails = dnDetails; - return this; - } - - /** - * Sets the ozonecontainer. 
- * @param ozoneContainer - * @return Builder - */ - public Builder setOzoneContainer(OzoneContainer ozoneContainer) { - this.container = ozoneContainer; - return this; - } - - public Builder setContext(StateContext stateContext) { - this.context = stateContext; - return this; - } - - public RegisterEndpointTask build() { - if (endPointStateMachine == null) { - LOG.error("No endpoint specified."); - throw new IllegalArgumentException("A valid endpoint state machine is" + - " needed to construct RegisterEndPoint task"); - } - - if (conf == null) { - LOG.error("No config specified."); - throw new IllegalArgumentException( - "A valid configuration is needed to construct RegisterEndpoint " - + "task"); - } - - if (datanodeDetails == null) { - LOG.error("No datanode specified."); - throw new IllegalArgumentException("A vaild Node ID is needed to " + - "construct RegisterEndpoint task"); - } - - if (container == null) { - LOG.error("Container is not specified"); - throw new IllegalArgumentException("Container is not specified to " + - "construct RegisterEndpoint task"); - } - - if (context == null) { - LOG.error("StateContext is not specified"); - throw new IllegalArgumentException("Container is not specified to " + - "construct RegisterEndpoint task"); - } - - RegisterEndpointTask task = new RegisterEndpointTask(this - .endPointStateMachine, this.conf, this.container, this.context); - task.setDatanodeDetails(datanodeDetails); - return task; - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java deleted file mode 100644 index 04eaa05f44c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
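
RegisterEndpointTask.call() above sends the node, container and pipeline reports to SCM and then sanity-checks the registration reply before advancing the endpoint state. A hedged sketch of those checks follows, with RegisterReply as a hypothetical stand-in for SCMRegisteredResponseProto.

import java.util.UUID;

public final class RegisterReplyCheckSketch {

  // Hypothetical stand-in for SCMRegisteredResponseProto.
  record RegisterReply(String datanodeUuid, String clusterId) { }

  static void validate(UUID localDatanodeId, RegisterReply reply) {
    if (!localDatanodeId.equals(UUID.fromString(reply.datanodeUuid()))) {
      throw new IllegalStateException("Unexpected datanode ID in the response.");
    }
    if (reply.clusterId() == null || reply.clusterId().isBlank()) {
      throw new IllegalStateException("Invalid cluster ID in the response.");
    }
  }

  public static void main(String[] args) {
    UUID id = UUID.randomUUID();
    validate(id, new RegisterReply(id.toString(), "CID-example"));
    System.out.println("registration reply accepted");
  }
}
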

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.Callable; - -/** - * Task that returns version. - */ -public class VersionEndpointTask implements - Callable { - public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask - .class); - private final EndpointStateMachine rpcEndPoint; - private final Configuration configuration; - private final OzoneContainer ozoneContainer; - - public VersionEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer container) { - this.rpcEndPoint = rpcEndPoint; - this.configuration = conf; - this.ozoneContainer = container; - } - - /** - * Computes a result, or throws an exception if unable to do so. 
- * - * @return computed result - * @throws Exception if unable to compute a result - */ - @Override - public EndpointStateMachine.EndPointStates call() throws Exception { - rpcEndPoint.lock(); - try{ - if (rpcEndPoint.getState().equals( - EndpointStateMachine.EndPointStates.GETVERSION)) { - SCMVersionResponseProto versionResponse = - rpcEndPoint.getEndPoint().getVersion(null); - VersionResponse response = VersionResponse.getFromProtobuf( - versionResponse); - rpcEndPoint.setVersion(response); - - String scmId = response.getValue(OzoneConsts.SCM_ID); - String clusterId = response.getValue(OzoneConsts.CLUSTER_ID); - - // Check volumes - VolumeSet volumeSet = ozoneContainer.getVolumeSet(); - volumeSet.writeLock(); - try { - Map volumeMap = volumeSet.getVolumeMap(); - - Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " + - "null"); - Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " + - "cannot be null"); - - // If version file does not exist - // create version file and also set scmId - - for (Map.Entry entry : volumeMap.entrySet()) { - HddsVolume hddsVolume = entry.getValue(); - boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId, - clusterId, LOG); - if (!result) { - volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath()); - } - } - if (volumeSet.getVolumesList().size() == 0) { - // All volumes are in inconsistent state - throw new DiskOutOfSpaceException("All configured Volumes are in " + - "Inconsistent State"); - } - } finally { - volumeSet.writeUnlock(); - } - - // Start the container services after getting the version information - ozoneContainer.start(scmId); - - EndpointStateMachine.EndPointStates nextState = - rpcEndPoint.getState().getNextState(); - rpcEndPoint.setState(nextState); - rpcEndPoint.zeroMissedCount(); - } else { - LOG.debug("Cannot execute GetVersion task as endpoint state machine " + - "is in {} state", rpcEndPoint.getState()); - } - } catch (DiskOutOfSpaceException ex) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); - } catch(IOException ex) { - rpcEndPoint.logIfNeeded(ex); - } finally { - rpcEndPoint.unlock(); - } - return rpcEndPoint.getState(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java deleted file mode 100644 index 112259834dd..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
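
VersionEndpointTask.call() above takes the scmId and clusterId returned by SCM, checks every configured volume against them, fails the inconsistent ones, and refuses to proceed when no usable volume remains. A simplified sketch of that loop; Volume and checkVolumes are illustrative stand-ins, not the HddsVolumeUtil API.

import java.util.ArrayList;
import java.util.List;

public final class VolumeCheckSketch {

  // Hypothetical stand-in for an HddsVolume plus its version-file contents.
  record Volume(String path, String scmId, String clusterId) { }

  /** Returns only the volumes that match the identifiers handed out by SCM. */
  static List<Volume> checkVolumes(List<Volume> volumes, String scmId, String clusterId) {
    List<Volume> usable = new ArrayList<>();
    for (Volume v : volumes) {
      if (scmId.equals(v.scmId()) && clusterId.equals(v.clusterId())) {
        usable.add(v);
      }
      // else: the real code marks the volume failed via volumeSet.failVolume(path)
    }
    if (usable.isEmpty()) {
      throw new IllegalStateException("All configured Volumes are in Inconsistent State");
    }
    return usable;
  }

  public static void main(String[] args) {
    List<Volume> vols = List.of(
        new Volume("/data/disk1", "scm-1", "CID-1"),
        new Volume("/data/disk2", "scm-2", "CID-1"));
    System.out.println("usable volumes: " + checkVolumes(vols, "scm-1", "CID-1").size());
  }
}
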

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; -/** - This package contains code for RPC endpoints transitions. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java deleted file mode 100644 index 92c953ff410..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java deleted file mode 100644 index dc5f5bc8547..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.transport.server; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .XceiverClientProtocolServiceGrpc; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Grpc Service for handling Container Commands on datanode. - */ -public class GrpcXceiverService extends - XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceImplBase { - public static final Logger - LOG = LoggerFactory.getLogger(GrpcXceiverService.class); - - private final ContainerDispatcher dispatcher; - private final boolean isGrpcTokenEnabled; - private final TokenVerifier tokenVerifier; - - public GrpcXceiverService(ContainerDispatcher dispatcher) { - this(dispatcher, false, null); - } - - public GrpcXceiverService(ContainerDispatcher dispatcher, - boolean grpcTokenEnabled, TokenVerifier tokenVerifier) { - this.dispatcher = dispatcher; - this.isGrpcTokenEnabled = grpcTokenEnabled; - this.tokenVerifier = tokenVerifier; - } - - @Override - public StreamObserver send( - StreamObserver responseObserver) { - return new StreamObserver() { - private final AtomicBoolean isClosed = new AtomicBoolean(false); - - @Override - public void onNext(ContainerCommandRequestProto request) { - try { - if(isGrpcTokenEnabled) { - // ServerInterceptors intercepts incoming request and creates ugi. - tokenVerifier.verify(UserGroupInformation.getCurrentUser() - .getShortUserName(), request.getEncodedToken()); - } - ContainerCommandResponseProto resp = - dispatcher.dispatch(request, null); - responseObserver.onNext(resp); - } catch (Throwable e) { - LOG.error("{} got exception when processing" - + " ContainerCommandRequestProto {}: {}", request, e); - responseObserver.onError(e); - } - } - - @Override - public void onError(Throwable t) { - // for now we just log a msg - LOG.error("{}: ContainerCommand send on error. Exception: {}", t); - } - - @Override - public void onCompleted() { - if (isClosed.compareAndSet(false, true)) { - LOG.debug("{}: ContainerCommand send completed"); - responseObserver.onCompleted(); - } - } - }; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java deleted file mode 100644 index 968f0c80711..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ServerCredentialInterceptor.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  
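
GrpcXceiverService above answers a bidirectional stream: each incoming ContainerCommandRequestProto is dispatched synchronously and either the response or the failure is pushed back to the caller. The sketch below reproduces that per-message flow with plain JDK functional types instead of the gRPC StreamObserver, so the shape is illustrative rather than the real service API.

import java.util.function.Consumer;
import java.util.function.Function;

public final class StreamDispatchSketch {

  /**
   * Handles one request from the stream: dispatch it, then push the response
   * or the error to the caller, mirroring onNext() in the service above.
   */
  static <REQ, RESP> void onNext(REQ request,
      Function<REQ, RESP> dispatcher,
      Consumer<RESP> responseObserver,
      Consumer<Throwable> errorObserver) {
    try {
      responseObserver.accept(dispatcher.apply(request));
    } catch (RuntimeException e) {
      errorObserver.accept(e);
    }
  }

  public static void main(String[] args) {
    onNext("ReadChunk",
        cmd -> cmd + " -> SUCCESS",            // stand-in for dispatcher.dispatch()
        resp -> System.out.println(resp),      // stand-in for responseObserver.onNext()
        err -> System.err.println("failed: " + err));
  }
}
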
You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.thirdparty.io.grpc.Context; -import org.apache.ratis.thirdparty.io.grpc.Contexts; -import org.apache.ratis.thirdparty.io.grpc.Metadata; -import org.apache.ratis.thirdparty.io.grpc.ServerCall; -import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptor; -import org.apache.ratis.thirdparty.io.grpc.Status; - -import static org.apache.hadoop.ozone.OzoneConsts.OBT_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.USER_METADATA_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.UGI_CTX_KEY; -/** - * Grpc Server Interceptor for Ozone Block token. - */ -public class ServerCredentialInterceptor implements ServerInterceptor { - - - private static final ServerCall.Listener NOOP_LISTENER = - new ServerCall.Listener() { - }; - - private final TokenVerifier verifier; - - ServerCredentialInterceptor(TokenVerifier verifier) { - this.verifier = verifier; - } - - @Override - public ServerCall.Listener interceptCall( - ServerCall call, Metadata headers, - ServerCallHandler next) { - String token = headers.get(OBT_METADATA_KEY); - String user = headers.get(USER_METADATA_KEY); - Context ctx = Context.current(); - try { - UserGroupInformation ugi = verifier.verify(user, token); - if (ugi == null) { - call.close(Status.UNAUTHENTICATED.withDescription("Missing Block " + - "Token from headers when block token is required."), headers); - return NOOP_LISTENER; - } else { - ctx = ctx.withValue(UGI_CTX_KEY, ugi); - } - } catch (SCMSecurityException e) { - call.close(Status.UNAUTHENTICATED.withDescription(e.getMessage()) - .withCause(e), headers); - return NOOP_LISTENER; - } - return Contexts.interceptCall(ctx, call, headers, next); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java deleted file mode 100644 index c6b0d9238bc..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
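
ServerCredentialInterceptor above rejects calls whose block token cannot be verified by closing them with UNAUTHENTICATED before any handler code runs. A comparable interceptor written against the plain io.grpc API is sketched below (the deleted class uses the Ratis-shaded copy of these classes); the header key name and the Verifier interface here are assumptions made for illustration, not the Ozone constants.

import io.grpc.Context;
import io.grpc.Contexts;
import io.grpc.Metadata;
import io.grpc.ServerCall;
import io.grpc.ServerCallHandler;
import io.grpc.ServerInterceptor;
import io.grpc.Status;

public final class TokenCheckInterceptor implements ServerInterceptor {

  // Hypothetical header key; the real one lives in OzoneConsts.
  private static final Metadata.Key<String> TOKEN_KEY =
      Metadata.Key.of("ozone-block-token", Metadata.ASCII_STRING_MARSHALLER);

  /** Minimal stand-in for the TokenVerifier used above. */
  public interface Verifier {
    boolean verify(String token);
  }

  private final Verifier verifier;

  public TokenCheckInterceptor(Verifier verifier) {
    this.verifier = verifier;
  }

  @Override
  public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
      ServerCall<ReqT, RespT> call, Metadata headers,
      ServerCallHandler<ReqT, RespT> next) {
    String token = headers.get(TOKEN_KEY);
    if (token == null || !verifier.verify(token)) {
      // Close the call before any application code sees it.
      call.close(Status.UNAUTHENTICATED.withDescription("Missing or invalid block token"),
          new Metadata());
      return new ServerCall.Listener<ReqT>() { };
    }
    return Contexts.interceptCall(Context.current(), call, headers, next);
  }
}
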
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.MISSING_BLOCK_TOKEN; - -/** - * A server endpoint that acts as the communication layer for Ozone containers. - */ -public abstract class XceiverServer implements XceiverServerSpi { - - private final SecurityConfig secConfig; - private final TokenVerifier tokenVerifier; - private final CertificateClient caClient; - - public XceiverServer(Configuration conf, CertificateClient client) { - Preconditions.checkNotNull(conf); - this.secConfig = new SecurityConfig(conf); - this.caClient = client; - tokenVerifier = new BlockTokenVerifier(secConfig, getCaClient()); - } - - /** - * Default implementation which just validates security token if security is - * enabled. - * - * @param request ContainerCommandRequest - */ - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - if (secConfig.isSecurityEnabled()) { - String encodedToken = request.getEncodedToken(); - if (encodedToken == null) { - throw new SCMSecurityException("Security is enabled but client " + - "request is missing block token.", MISSING_BLOCK_TOKEN); - } - tokenVerifier.verify(encodedToken, encodedToken); - } - } - - @VisibleForTesting - protected CertificateClient getCaClient() { - return caClient; - } - - protected SecurityConfig getSecurityConfig() { - return secConfig; - } - - protected TokenVerifier getBlockTokenVerifier() { - return tokenVerifier; - } - - public SecurityConfig getSecConfig() { - return secConfig; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java deleted file mode 100644 index bb352ea5165..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.common.helpers. - StorageContainerException; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; - -import io.opentracing.Scope; -import org.apache.ratis.thirdparty.io.grpc.BindableService; -import org.apache.ratis.thirdparty.io.grpc.Server; -import org.apache.ratis.thirdparty.io.grpc.ServerBuilder; -import org.apache.ratis.thirdparty.io.grpc.ServerInterceptors; -import org.apache.ratis.thirdparty.io.grpc.netty.GrpcSslContexts; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyServerBuilder; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -/** - * Creates a Grpc server endpoint that acts as the communication layer for - * Ozone containers. - */ -public final class XceiverServerGrpc extends XceiverServer { - private static final Logger - LOG = LoggerFactory.getLogger(XceiverServerGrpc.class); - private static final String COMPONENT = "dn"; - private int port; - private UUID id; - private Server server; - private final ContainerDispatcher storageContainer; - private boolean isStarted; - private DatanodeDetails datanodeDetails; - - - /** - * Constructs a Grpc server class. - * - * @param conf - Configuration - */ - public XceiverServerGrpc(DatanodeDetails datanodeDetails, Configuration conf, - ContainerDispatcher dispatcher, CertificateClient caClient, - BindableService... 
additionalServices) { - super(conf, caClient); - Preconditions.checkNotNull(conf); - - this.id = datanodeDetails.getUuid(); - this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { - this.port = 0; - } - - NettyServerBuilder nettyServerBuilder = - ((NettyServerBuilder) ServerBuilder.forPort(port)) - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - - ServerCredentialInterceptor credInterceptor = - new ServerCredentialInterceptor(getBlockTokenVerifier()); - GrpcServerInterceptor tracingInterceptor = new GrpcServerInterceptor(); - nettyServerBuilder.addService(ServerInterceptors.intercept( - new GrpcXceiverService(dispatcher, - getSecurityConfig().isBlockTokenEnabled(), - getBlockTokenVerifier()), credInterceptor, - tracingInterceptor)); - - for (BindableService service : additionalServices) { - nettyServerBuilder.addService(service); - } - - if (getSecConfig().isGrpcTlsEnabled()) { - try { - SslContextBuilder sslClientContextBuilder = SslContextBuilder.forServer( - caClient.getPrivateKey(), caClient.getCertificate()); - SslContextBuilder sslContextBuilder = GrpcSslContexts.configure( - sslClientContextBuilder, getSecurityConfig().getGrpcSslProvider()); - nettyServerBuilder.sslContext(sslContextBuilder.build()); - } catch (Exception ex) { - LOG.error("Unable to setup TLS for secure datanode GRPC endpoint.", ex); - } - } - server = nettyServerBuilder.build(); - storageContainer = dispatcher; - } - - @Override - public int getIPCPort() { - return this.port; - } - - /** - * Returns the Replication type supported by this end-point. - * - * @return enum -- {Stand_Alone, Ratis, Grpc, Chained} - */ - @Override - public HddsProtos.ReplicationType getServerType() { - return HddsProtos.ReplicationType.STAND_ALONE; - } - - @Override - public void start() throws IOException { - if (!isStarted) { - server.start(); - int realPort = server.getPort(); - - if (port == 0) { - LOG.info("{} {} is started using port {}", getClass().getSimpleName(), - this.id, realPort); - port = realPort; - } - - //register the real port to the datanode details. - datanodeDetails.setPort(DatanodeDetails - .newPort(Name.STANDALONE, - realPort)); - - isStarted = true; - } - } - - @Override - public void stop() { - if (isStarted) { - server.shutdown(); - try { - server.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("failed to shutdown XceiverServerGrpc", e); - } - isStarted = false; - } - } - - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - try (Scope scope = TracingUtil - .importAndCreateScope( - "XceiverServerGrpc." 
+ request.getCmdType().name(), - request.getTraceID())) { - - super.submitRequest(request, pipelineID); - ContainerProtos.ContainerCommandResponseProto response = - storageContainer.dispatch(request, null); - if (response.getResult() != ContainerProtos.Result.SUCCESS) { - throw new StorageContainerException(response.getMessage(), - response.getResult()); - } - } - } - - @Override - public boolean isExist(HddsProtos.PipelineID pipelineId) { - return PipelineID.valueOf(id).getProtobuf().equals(pipelineId); - } - - @Override - public List getPipelineReport() { - return Collections.singletonList( - PipelineReport.newBuilder() - .setPipelineID(PipelineID.valueOf(id).getProtobuf()) - .build()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java deleted file mode 100644 index 4e0d34384ce..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; - -import java.io.IOException; -import java.util.List; - -/** A server endpoint that acts as the communication layer for Ozone - * containers. */ -public interface XceiverServerSpi { - /** Starts the server. */ - void start() throws IOException; - - /** Stops a running server. */ - void stop(); - - /** Get server IPC port. */ - int getIPCPort(); - - /** - * Returns the Replication type supported by this end-point. - * @return enum -- {Stand_Alone, Ratis, Chained} - */ - HddsProtos.ReplicationType getServerType(); - - /** - * submits a containerRequest to be performed by the replication pipeline. - * @param request ContainerCommandRequest - */ - void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) - throws IOException; - - /** - * Returns true if the given pipeline exist. - * - * @return true if pipeline present, else false - */ - boolean isExist(HddsProtos.PipelineID pipelineId); - - /** - * Get pipeline report for the XceiverServer instance. - * @return list of report for each pipeline. 
- */ - List getPipelineReport(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java deleted file mode 100644 index 59c96f13496..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server; - -/** - * This package contains classes for the server of the storage container - * protocol. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java deleted file mode 100644 index 9893ae48347..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
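
XceiverServerSpi above defines the lifecycle every container transport server implements: start, stop, expose its IPC port, accept container commands for a pipeline, and report its pipelines. The toy in-memory implementation below makes that contract concrete under simplified, hypothetical types (plain strings and UUIDs instead of the Hadoop protobuf classes).

import java.io.IOException;
import java.util.List;
import java.util.UUID;

public final class InMemoryXceiverServer {

  /** Simplified local mirror of the XceiverServerSpi contract. */
  interface Spi {
    void start() throws IOException;
    void stop();
    int getIPCPort();
    void submitRequest(String request, UUID pipelineId) throws IOException;
    boolean isExist(UUID pipelineId);
    List<UUID> getPipelineReport();
  }

  static Spi newStandaloneServer(UUID datanodeId, int port) {
    return new Spi() {
      private boolean started;

      @Override public void start() { started = true; }
      @Override public void stop() { started = false; }
      @Override public int getIPCPort() { return port; }

      @Override public void submitRequest(String request, UUID pipelineId) throws IOException {
        if (!started || !isExist(pipelineId)) {
          throw new IOException("Unknown pipeline or server not started");
        }
        // A real server would hand the request to its ContainerDispatcher here.
      }

      // In standalone mode the pipeline id is simply the datanode id,
      // matching XceiverServerGrpc.isExist() above.
      @Override public boolean isExist(UUID pipelineId) { return datanodeId.equals(pipelineId); }
      @Override public List<UUID> getPipelineReport() { return List.of(datanodeId); }
    };
  }

  public static void main(String[] args) throws IOException {
    UUID dn = UUID.randomUUID();
    Spi server = newStandaloneServer(dn, 9859);
    server.start();
    server.submitRequest("CloseContainer", dn);
    System.out.println("pipelines: " + server.getPipelineReport());
  }
}
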
- */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.ratis.protocol.RaftGroupId; - -/** - * This class is for maintaining Container State Machine statistics. - */ -@InterfaceAudience.Private -@Metrics(about="Container State Machine Metrics", context="dfs") -public class CSMMetrics { - public static final String SOURCE_NAME = - CSMMetrics.class.getSimpleName(); - - // ratis op metrics metrics - private @Metric MutableCounterLong numWriteStateMachineOps; - private @Metric MutableCounterLong numQueryStateMachineOps; - private @Metric MutableCounterLong numApplyTransactionOps; - private @Metric MutableCounterLong numReadStateMachineOps; - private @Metric MutableCounterLong numBytesWrittenCount; - private @Metric MutableCounterLong numBytesCommittedCount; - - private @Metric MutableRate transactionLatency; - private MutableRate[] opsLatency; - private MetricsRegistry registry = null; - - // Failure Metrics - private @Metric MutableCounterLong numWriteStateMachineFails; - private @Metric MutableCounterLong numWriteDataFails; - private @Metric MutableCounterLong numQueryStateMachineFails; - private @Metric MutableCounterLong numApplyTransactionFails; - private @Metric MutableCounterLong numReadStateMachineFails; - private @Metric MutableCounterLong numReadStateMachineMissCount; - private @Metric MutableCounterLong numStartTransactionVerifyFailures; - private @Metric MutableCounterLong numContainerNotOpenVerifyFailures; - - private @Metric MutableRate applyTransaction; - private @Metric MutableRate writeStateMachineData; - - public CSMMetrics() { - int numCmdTypes = ContainerProtos.Type.values().length; - this.opsLatency = new MutableRate[numCmdTypes]; - this.registry = new MetricsRegistry(CSMMetrics.class.getSimpleName()); - for (int i = 0; i < numCmdTypes; i++) { - opsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1).toString(), - ContainerProtos.Type.forNumber(i + 1) + " op"); - } - } - - public static CSMMetrics create(RaftGroupId gid) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME + gid.toString(), - "Container State Machine", - new CSMMetrics()); - } - - public void incNumWriteStateMachineOps() { - numWriteStateMachineOps.incr(); - } - - public void incNumQueryStateMachineOps() { - numQueryStateMachineOps.incr(); - } - - public void incNumReadStateMachineOps() { - numReadStateMachineOps.incr(); - } - - public void incNumApplyTransactionsOps() { - numApplyTransactionOps.incr(); - } - - public void incNumWriteStateMachineFails() { - numWriteStateMachineFails.incr(); - } - - public void incNumWriteDataFails() { - numWriteDataFails.incr(); - } - - public void incNumQueryStateMachineFails() { - numQueryStateMachineFails.incr(); - } - - public void incNumBytesWrittenCount(long value) { - numBytesWrittenCount.incr(value); - } - - public void incNumBytesCommittedCount(long value) { - numBytesCommittedCount.incr(value); - } - - 
public void incNumReadStateMachineFails() { - numReadStateMachineFails.incr(); - } - - public void incNumReadStateMachineMissCount() { - numReadStateMachineMissCount.incr(); - } - - public void incNumApplyTransactionsFails() { - numApplyTransactionFails.incr(); - } - - @VisibleForTesting - public long getNumWriteStateMachineOps() { - return numWriteStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumQueryStateMachineOps() { - return numQueryStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumApplyTransactionsOps() { - return numApplyTransactionOps.value(); - } - - @VisibleForTesting - public long getNumWriteStateMachineFails() { - return numWriteStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumWriteDataFails() { - return numWriteDataFails.value(); - } - - @VisibleForTesting - public long getNumQueryStateMachineFails() { - return numQueryStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumApplyTransactionsFails() { - return numApplyTransactionFails.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineFails() { - return numReadStateMachineFails.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineMissCount() { - return numReadStateMachineMissCount.value(); - } - - @VisibleForTesting - public long getNumReadStateMachineOps() { - return numReadStateMachineOps.value(); - } - - @VisibleForTesting - public long getNumBytesWrittenCount() { - return numBytesWrittenCount.value(); - } - - @VisibleForTesting - public long getNumBytesCommittedCount() { - return numBytesCommittedCount.value(); - } - - public MutableRate getApplyTransactionLatency() { - return applyTransaction; - } - - public void incPipelineLatency(ContainerProtos.Type type, long latencyNanos) { - opsLatency[type.ordinal()].add(latencyNanos); - transactionLatency.add(latencyNanos); - } - - public void incNumStartTransactionVerifyFailures() { - numStartTransactionVerifyFailures.incr(); - } - - public void incNumContainerNotOpenVerifyFailures() { - numContainerNotOpenVerifyFailures.incr(); - } - - public void recordApplyTransactionCompletion(long latencyNanos) { - applyTransaction.add(latencyNanos); - } - - public void recordWriteStateMachineCompletion(long latencyNanos) { - writeStateMachineData.add(latencyNanos); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java deleted file mode 100644 index b89ec730f7c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ /dev/null @@ -1,871 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
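
CSMMetrics above keeps one rate metric per ContainerProtos.Type so pipeline latency can be reported per command type alongside the aggregate transactionLatency. A dependency-free sketch of the same idea is shown below, using an EnumMap of running averages instead of the Hadoop metrics2 MutableRate; the enum values and metric handling are illustrative only.

import java.util.EnumMap;
import java.util.Map;

public final class PerOpLatencySketch {

  // Hypothetical subset of ContainerProtos.Type.
  enum OpType { WRITE_CHUNK, READ_CHUNK, PUT_BLOCK, GET_BLOCK }

  /** Tiny running average, standing in for a metrics2 MutableRate. */
  static final class Rate {
    private long count;
    private double sumNanos;
    void add(long latencyNanos) { count++; sumNanos += latencyNanos; }
    double meanMicros() { return count == 0 ? 0 : sumNanos / count / 1_000.0; }
  }

  private final Map<OpType, Rate> opsLatency = new EnumMap<>(OpType.class);
  private final Rate transactionLatency = new Rate();

  PerOpLatencySketch() {
    for (OpType t : OpType.values()) {
      opsLatency.put(t, new Rate());
    }
  }

  void incPipelineLatency(OpType type, long latencyNanos) {
    opsLatency.get(type).add(latencyNanos); // per-command-type rate
    transactionLatency.add(latencyNanos);   // aggregate rate
  }

  public static void main(String[] args) {
    PerOpLatencySketch metrics = new PerOpLatencySketch();
    metrics.incPipelineLatency(OpType.WRITE_CHUNK, 1_200_000);
    metrics.incPipelineLatency(OpType.READ_CHUNK, 300_000);
    System.out.printf("writeChunk mean = %.1f us%n",
        metrics.opsLatency.get(OpType.WRITE_CHUNK).meanMicros());
  }
}
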
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.util.Time; -import org.apache.ratis.proto.RaftProtos.RaftPeerRole; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.impl.RaftServerProxy; -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.raftlog.RaftLog; -import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. 
- Container2BCSIDMapProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkResponseProto; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.hdds.security.token.TokenVerifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; -import org.apache.ratis.server.storage.RaftStorage; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.LogEntryProto; -import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; -import org.apache.ratis.statemachine.StateMachineStorage; -import org.apache.ratis.statemachine.TransactionContext; -import org.apache.ratis.statemachine.impl.BaseStateMachine; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import java.util.concurrent.Executors; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.OutputStream; - -/** A {@link org.apache.ratis.statemachine.StateMachine} for containers. - * - * The stateMachine is responsible for handling different types of container - * requests. The container requests can be divided into readonly and write - * requests. - * - * Read only requests are classified in - * {@link org.apache.hadoop.hdds.HddsUtils#isReadOnly} - * and these readonly requests are replied from the {@link #query(Message)}. - * - * The write requests can be divided into requests with user data - * (WriteChunkRequest) and other request without user data. - * - * Inorder to optimize the write throughput, the writeChunk request is - * processed in 2 phases. The 2 phases are divided in - * {@link #startTransaction(RaftClientRequest)}, in the first phase the user - * data is written directly into the state machine via - * {@link #writeStateMachineData} and in the second phase the - * transaction is committed via {@link #applyTransaction(TransactionContext)} - * - * For the requests with no stateMachine data, the transaction is directly - * committed through - * {@link #applyTransaction(TransactionContext)} - * - * There are 2 ordering operation which are enforced right now in the code, - * 1) Write chunk operation are executed after the create container operation, - * the write chunk operation will fail otherwise as the container still hasn't - * been created. 
Hence the create container operation has been split in the - * {@link #startTransaction(RaftClientRequest)}, this will help in synchronizing - * the calls in {@link #writeStateMachineData} - * - * 2) Write chunk commit operation is executed after write chunk state machine - * operation. This will ensure that commit operation is sync'd with the state - * machine operation.For example, synchronization between writeChunk and - * createContainer in {@link ContainerStateMachine}. - **/ - -public class ContainerStateMachine extends BaseStateMachine { - static final Logger LOG = - LoggerFactory.getLogger(ContainerStateMachine.class); - private final SimpleStateMachineStorage storage = - new SimpleStateMachineStorage(); - private final RaftGroupId gid; - private final ContainerDispatcher dispatcher; - private final ContainerController containerController; - private ThreadPoolExecutor chunkExecutor; - private final XceiverServerRatis ratisServer; - private final ConcurrentHashMap> writeChunkFutureMap; - - // keeps track of the containers created per pipeline - private final Map container2BCSIDMap; - private ExecutorService[] executors; - private final Map applyTransactionCompletionMap; - private final Cache stateMachineDataCache; - private final boolean isBlockTokenEnabled; - private final TokenVerifier tokenVerifier; - private final AtomicBoolean stateMachineHealthy; - - private final Semaphore applyTransactionSemaphore; - /** - * CSM metrics. - */ - private final CSMMetrics metrics; - - @SuppressWarnings("parameternumber") - public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher, - ContainerController containerController, ThreadPoolExecutor chunkExecutor, - XceiverServerRatis ratisServer, long expiryInterval, - boolean isBlockTokenEnabled, TokenVerifier tokenVerifier, - Configuration conf) { - this.gid = gid; - this.dispatcher = dispatcher; - this.containerController = containerController; - this.chunkExecutor = chunkExecutor; - this.ratisServer = ratisServer; - metrics = CSMMetrics.create(gid); - this.writeChunkFutureMap = new ConcurrentHashMap<>(); - applyTransactionCompletionMap = new ConcurrentHashMap<>(); - stateMachineDataCache = CacheBuilder.newBuilder() - .expireAfterAccess(expiryInterval, TimeUnit.MILLISECONDS) - // set the limit on no of cached entries equal to no of max threads - // executing writeStateMachineData - .maximumSize(chunkExecutor.getCorePoolSize()).build(); - this.isBlockTokenEnabled = isBlockTokenEnabled; - this.tokenVerifier = tokenVerifier; - this.container2BCSIDMap = new ConcurrentHashMap<>(); - - final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); - int maxPendingApplyTransactions = conf.getInt( - ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, - ScmConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); - applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); - stateMachineHealthy = new AtomicBoolean(true); - this.executors = new ExecutorService[numContainerOpExecutors]; - for (int i = 0; i < numContainerOpExecutors; i++) { - final int index = i; - this.executors[index] = Executors.newSingleThreadExecutor(r -> { - Thread t = new Thread(r); - t.setName("RatisApplyTransactionExecutor " + index); - return t; - }); - } - } - - @Override - public StateMachineStorage getStateMachineStorage() { - return storage; - } - - public CSMMetrics getMetrics() { - return metrics; - } - - @Override - public void initialize( - RaftServer server, RaftGroupId id, RaftStorage raftStorage) - throws IOException { - super.initialize(server, id, raftStorage); - storage.init(raftStorage); - ratisServer.notifyGroupAdd(gid); - - loadSnapshot(storage.getLatestSnapshot()); - } - - private long loadSnapshot(SingleFileSnapshotInfo snapshot) - throws IOException { - if (snapshot == null) { - TermIndex empty = - TermIndex.newTermIndex(0, RaftLog.INVALID_LOG_INDEX); - LOG.info("{}: The snapshot info is null. Setting the last applied index" + - "to:{}", gid, empty); - setLastAppliedTermIndex(empty); - return empty.getIndex(); - } - - final File snapshotFile = snapshot.getFile().getPath().toFile(); - final TermIndex last = - SimpleStateMachineStorage.getTermIndexFromSnapshotFile(snapshotFile); - LOG.info("{}: Setting the last applied index to {}", gid, last); - setLastAppliedTermIndex(last); - - // initialize the dispatcher with snapshot so that it build the missing - // container list - try (FileInputStream fin = new FileInputStream(snapshotFile)) { - byte[] container2BCSIDData = IOUtils.toByteArray(fin); - ContainerProtos.Container2BCSIDMapProto proto = - ContainerProtos.Container2BCSIDMapProto - .parseFrom(container2BCSIDData); - // read the created containers list from the snapshot file and add it to - // the container2BCSIDMap here. - // container2BCSIDMap will further grow as and when containers get created - container2BCSIDMap.putAll(proto.getContainer2BCSIDMap()); - dispatcher.buildMissingContainerSetAndValidate(container2BCSIDMap); - } - return last.getIndex(); - } - - /** - * As a part of taking snapshot with Ratis StateMachine, it will persist - * the existing container set in the snapshotFile. - * @param out OutputStream mapped to the Ratis snapshot file - * @throws IOException - */ - public void persistContainerSet(OutputStream out) throws IOException { - Container2BCSIDMapProto.Builder builder = - Container2BCSIDMapProto.newBuilder(); - builder.putAllContainer2BCSID(container2BCSIDMap); - // TODO : while snapshot is being taken, deleteContainer call should not - // should not happen. Lock protection will be required if delete - // container happens outside of Ratis. - IOUtils.write(builder.build().toByteArray(), out); - } - - public boolean isStateMachineHealthy() { - return stateMachineHealthy.get(); - } - - @Override - public long takeSnapshot() throws IOException { - TermIndex ti = getLastAppliedTermIndex(); - long startTime = Time.monotonicNow(); - if (!isStateMachineHealthy()) { - String msg = - "Failed to take snapshot " + " for " + gid + " as the stateMachine" - + " is unhealthy. 
The last applied index is at " + ti; - StateMachineException sme = new StateMachineException(msg); - LOG.error(msg); - throw sme; - } - if (ti != null && ti.getIndex() != RaftLog.INVALID_LOG_INDEX) { - final File snapshotFile = - storage.getSnapshotFile(ti.getTerm(), ti.getIndex()); - LOG.info("{}: Taking a snapshot at:{} file {}", gid, ti, snapshotFile); - try (FileOutputStream fos = new FileOutputStream(snapshotFile)) { - persistContainerSet(fos); - fos.flush(); - // make sure the snapshot file is synced - fos.getFD().sync(); - } catch (IOException ioe) { - LOG.error("{}: Failed to write snapshot at:{} file {}", gid, ti, - snapshotFile); - throw ioe; - } - LOG.info("{}: Finished taking a snapshot at:{} file:{} time:{}", gid, ti, - snapshotFile, (Time.monotonicNow() - startTime)); - return ti.getIndex(); - } - return -1; - } - - @Override - public TransactionContext startTransaction(RaftClientRequest request) - throws IOException { - long startTime = Time.monotonicNowNanos(); - final ContainerCommandRequestProto proto = - message2ContainerCommandRequestProto(request.getMessage()); - Preconditions.checkArgument(request.getRaftGroupId().equals(gid)); - try { - dispatcher.validateContainerCommand(proto); - } catch (IOException ioe) { - if (ioe instanceof ContainerNotOpenException) { - metrics.incNumContainerNotOpenVerifyFailures(); - } else { - metrics.incNumStartTransactionVerifyFailures(); - LOG.error("startTransaction validation failed on leader", ioe); - } - TransactionContext ctxt = TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .build(); - ctxt.setException(ioe); - return ctxt; - } - if (proto.getCmdType() == Type.WriteChunk) { - final WriteChunkRequestProto write = proto.getWriteChunk(); - // create the log entry proto - final WriteChunkRequestProto commitWriteChunkProto = - WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) - .setChunkData(write.getChunkData()) - // skipping the data field as it is - // already set in statemachine data proto - .build(); - ContainerCommandRequestProto commitContainerCommandProto = - ContainerCommandRequestProto - .newBuilder(proto) - .setWriteChunk(commitWriteChunkProto) - .setTraceID(proto.getTraceID()) - .build(); - - return TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .setStateMachineContext(startTime) - .setStateMachineData(write.getData()) - .setLogData(commitContainerCommandProto.toByteString()) - .build(); - } else { - return TransactionContext.newBuilder() - .setClientRequest(request) - .setStateMachine(this) - .setServerRole(RaftPeerRole.LEADER) - .setStateMachineContext(startTime) - .setLogData(proto.toByteString()) - .build(); - } - - } - - private ByteString getStateMachineData(StateMachineLogEntryProto entryProto) { - return entryProto.getStateMachineEntry().getStateMachineData(); - } - - private ContainerCommandRequestProto getContainerCommandRequestProto( - ByteString request) throws InvalidProtocolBufferException { - // TODO: We can avoid creating new builder and set pipeline Id if - // the client is already sending the pipeline id, then we just have to - // validate the pipeline Id. 
- return ContainerCommandRequestProto.newBuilder( - ContainerCommandRequestProto.parseFrom(request)) - .setPipelineID(gid.getUuid().toString()).build(); - } - - private ContainerCommandRequestProto message2ContainerCommandRequestProto( - Message message) throws InvalidProtocolBufferException { - return ContainerCommandRequestMessage.toProto(message.getContent(), gid); - } - - private ContainerCommandResponseProto dispatchCommand( - ContainerCommandRequestProto requestProto, DispatcherContext context) { - if (LOG.isTraceEnabled()) { - LOG.trace("{}: dispatch {} containerID={} pipelineID={} traceID={}", gid, - requestProto.getCmdType(), requestProto.getContainerID(), - requestProto.getPipelineID(), requestProto.getTraceID()); - } - if (isBlockTokenEnabled) { - try { - // ServerInterceptors intercepts incoming request and creates ugi. - tokenVerifier - .verify(UserGroupInformation.getCurrentUser().getShortUserName(), - requestProto.getEncodedToken()); - } catch (IOException ioe) { - StorageContainerException sce = new StorageContainerException( - "Block token verification failed. " + ioe.getMessage(), ioe, - ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED); - return ContainerUtils.logAndReturnError(LOG, sce, requestProto); - } - } - ContainerCommandResponseProto response = - dispatcher.dispatch(requestProto, context); - if (LOG.isTraceEnabled()) { - LOG.trace("{}: response {}", gid, response); - } - return response; - } - - private ContainerCommandResponseProto runCommand( - ContainerCommandRequestProto requestProto, - DispatcherContext context) { - return dispatchCommand(requestProto, context); - } - - private ExecutorService getCommandExecutor( - ContainerCommandRequestProto requestProto) { - int executorId = (int)(requestProto.getContainerID() % executors.length); - return executors[executorId]; - } - - private CompletableFuture handleWriteChunk( - ContainerCommandRequestProto requestProto, long entryIndex, long term, - long startTime) { - final WriteChunkRequestProto write = requestProto.getWriteChunk(); - RaftServer server = ratisServer.getServer(); - Preconditions.checkState(server instanceof RaftServerProxy); - try { - if (((RaftServerProxy) server).getImpl(gid).isLeader()) { - stateMachineDataCache.put(entryIndex, write.getData()); - } - } catch (IOException ioe) { - return completeExceptionally(ioe); - } - DispatcherContext context = - new DispatcherContext.Builder() - .setTerm(term) - .setLogIndex(entryIndex) - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .setContainer2BCSIDMap(container2BCSIDMap) - .build(); - CompletableFuture raftFuture = new CompletableFuture<>(); - // ensure the write chunk happens asynchronously in writeChunkExecutor pool - // thread. - CompletableFuture writeChunkFuture = - CompletableFuture.supplyAsync(() -> { - try { - return runCommand(requestProto, context); - } catch (Exception e) { - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" - + write.getBlockID() + " logIndex " + entryIndex + " chunkName " - + write.getChunkData().getChunkName() + e); - raftFuture.completeExceptionally(e); - throw e; - } - }, chunkExecutor); - - writeChunkFutureMap.put(entryIndex, writeChunkFuture); - if (LOG.isDebugEnabled()) { - LOG.debug(gid + ": writeChunk writeStateMachineData : blockId " + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " - + write.getChunkData().getChunkName()); - } - // Remove the future once it finishes execution from the - // writeChunkFutureMap. 
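[Editor's note] The comment above describes the lifecycle of entries in writeChunkFutureMap: one future is registered per log index and dropped once the write completes, so that a later flush can combine everything still pending up to a given index. The following is a compact, JDK-only sketch of that bookkeeping; the names are hypothetical and this is not the deleted class itself.

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

class PendingWriteTracker {
  private final ConcurrentHashMap<Long, CompletableFuture<Void>> pending =
      new ConcurrentHashMap<>();

  // Register the write issued for a log index; drop it once it completes.
  void track(long index, CompletableFuture<Void> write) {
    pending.put(index, write);
    write.whenComplete((result, error) -> pending.remove(index));
  }

  // Combined future over every write still pending at or below the given
  // index, the same shape as flushStateMachineData further below.
  CompletableFuture<Void> flushUpTo(long index) {
    List<CompletableFuture<Void>> futures = pending.entrySet().stream()
        .filter(entry -> entry.getKey() <= index)
        .map(Map.Entry::getValue)
        .collect(Collectors.toList());
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
  }
}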
- writeChunkFuture.thenApply(r -> { - if (r.getResult() != ContainerProtos.Result.SUCCESS) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error(gid + ": writeChunk writeStateMachineData failed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName() + " Error message: " + - r.getMessage() + " Container Result: " + r.getResult()); - metrics.incNumWriteDataFails(); - raftFuture.completeExceptionally(sce); - } else { - metrics.incNumBytesWrittenCount( - requestProto.getWriteChunk().getChunkData().getLen()); - if (LOG.isDebugEnabled()) { - LOG.debug(gid + - ": writeChunk writeStateMachineData completed: blockId" + - write.getBlockID() + " logIndex " + entryIndex + " chunkName " + - write.getChunkData().getChunkName()); - } - raftFuture.complete(r::toByteString); - metrics.recordWriteStateMachineCompletion( - Time.monotonicNowNanos() - startTime); - } - - writeChunkFutureMap.remove(entryIndex); - return r; - }); - return raftFuture; - } - - /* - * writeStateMachineData calls are not synchronized with each other - * and also with applyTransaction. - */ - @Override - public CompletableFuture writeStateMachineData(LogEntryProto entry) { - try { - metrics.incNumWriteStateMachineOps(); - long writeStateMachineStartTime = Time.monotonicNowNanos(); - ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - entry.getStateMachineLogEntry().getLogData()); - WriteChunkRequestProto writeChunk = - WriteChunkRequestProto.newBuilder(requestProto.getWriteChunk()) - .setData(getStateMachineData(entry.getStateMachineLogEntry())) - .build(); - requestProto = ContainerCommandRequestProto.newBuilder(requestProto) - .setWriteChunk(writeChunk).build(); - Type cmdType = requestProto.getCmdType(); - - // For only writeChunk, there will be writeStateMachineData call. - // CreateContainer will happen as a part of writeChunk only. 
- switch (cmdType) { - case WriteChunk: - return handleWriteChunk(requestProto, entry.getIndex(), - entry.getTerm(), writeStateMachineStartTime); - default: - throw new IllegalStateException("Cmd Type:" + cmdType - + " should not have state machine data"); - } - } catch (IOException e) { - metrics.incNumWriteStateMachineFails(); - return completeExceptionally(e); - } - } - - @Override - public CompletableFuture query(Message request) { - try { - metrics.incNumQueryStateMachineOps(); - final ContainerCommandRequestProto requestProto = - message2ContainerCommandRequestProto(request); - return CompletableFuture - .completedFuture(runCommand(requestProto, null)::toByteString); - } catch (IOException e) { - metrics.incNumQueryStateMachineFails(); - return completeExceptionally(e); - } - } - - private ByteString readStateMachineData( - ContainerCommandRequestProto requestProto, long term, long index) - throws IOException { - // the stateMachine data is not present in the stateMachine cache, - // increment the stateMachine cache miss count - metrics.incNumReadStateMachineMissCount(); - WriteChunkRequestProto writeChunkRequestProto = - requestProto.getWriteChunk(); - ContainerProtos.ChunkInfo chunkInfo = writeChunkRequestProto.getChunkData(); - // prepare the chunk to be read - ReadChunkRequestProto.Builder readChunkRequestProto = - ReadChunkRequestProto.newBuilder() - .setBlockID(writeChunkRequestProto.getBlockID()) - .setChunkData(chunkInfo); - ContainerCommandRequestProto dataContainerCommandProto = - ContainerCommandRequestProto.newBuilder(requestProto) - .setCmdType(Type.ReadChunk).setReadChunk(readChunkRequestProto) - .build(); - DispatcherContext context = - new DispatcherContext.Builder().setTerm(term).setLogIndex(index) - .setReadFromTmpFile(true).build(); - // read the chunk - ContainerCommandResponseProto response = - dispatchCommand(dataContainerCommandProto, context); - if (response.getResult() != ContainerProtos.Result.SUCCESS) { - StorageContainerException sce = - new StorageContainerException(response.getMessage(), - response.getResult()); - LOG.error("gid {} : ReadStateMachine failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, response.getCmdType(), index, - response.getMessage(), response.getResult()); - throw sce; - } - - ReadChunkResponseProto responseProto = response.getReadChunk(); - - ByteString data = responseProto.getData(); - // assert that the response has data in it. - Preconditions - .checkNotNull(data, "read chunk data is null for chunk:" + chunkInfo); - Preconditions.checkState(data.size() == chunkInfo.getLen(), String.format( - "read chunk len=%d does not match chunk expected len=%d for chunk:%s", - data.size(), chunkInfo.getLen(), chunkInfo)); - return data; - } - - /** - * Reads the Entry from the Cache or loads it back by reading from disk. - */ - private ByteString getCachedStateMachineData(Long logIndex, long term, - ContainerCommandRequestProto requestProto) throws ExecutionException { - return stateMachineDataCache.get(logIndex, - () -> readStateMachineData(requestProto, term, logIndex)); - } - - /** - * Returns the combined future of all the writeChunks till the given log - * index. The Raft log worker will wait for the stateMachineData to complete - * flush as well. - * - * @param index log index till which the stateMachine data needs to be flushed - * @return Combined future of all writeChunks till the log index given. 
- */ - @Override - public CompletableFuture flushStateMachineData(long index) { - List> futureList = - writeChunkFutureMap.entrySet().stream().filter(x -> x.getKey() <= index) - .map(Map.Entry::getValue).collect(Collectors.toList()); - return CompletableFuture.allOf( - futureList.toArray(new CompletableFuture[futureList.size()])); - } - /* - * This api is used by the leader while appending logs to the follower - * This allows the leader to read the state machine data from the - * state machine implementation in case cached state machine data has been - * evicted. - */ - @Override - public CompletableFuture readStateMachineData( - LogEntryProto entry) { - StateMachineLogEntryProto smLogEntryProto = entry.getStateMachineLogEntry(); - metrics.incNumReadStateMachineOps(); - if (!getStateMachineData(smLogEntryProto).isEmpty()) { - return CompletableFuture.completedFuture(ByteString.EMPTY); - } - try { - final ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - entry.getStateMachineLogEntry().getLogData()); - // readStateMachineData should only be called for "write" to Ratis. - Preconditions.checkArgument(!HddsUtils.isReadOnly(requestProto)); - if (requestProto.getCmdType() == Type.WriteChunk) { - final CompletableFuture future = new CompletableFuture<>(); - CompletableFuture.supplyAsync(() -> { - try { - future.complete( - getCachedStateMachineData(entry.getIndex(), entry.getTerm(), - requestProto)); - } catch (ExecutionException e) { - metrics.incNumReadStateMachineFails(); - future.completeExceptionally(e); - } - return future; - }, chunkExecutor); - return future; - } else { - throw new IllegalStateException("Cmd type:" + requestProto.getCmdType() - + " cannot have state machine data"); - } - } catch (Exception e) { - metrics.incNumReadStateMachineFails(); - LOG.error("{} unable to read stateMachineData:", gid, e); - return completeExceptionally(e); - } - } - - private synchronized void updateLastApplied() { - Long appliedTerm = null; - long appliedIndex = -1; - for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) { - final Long removed = applyTransactionCompletionMap.remove(i); - if (removed == null) { - break; - } - appliedTerm = removed; - appliedIndex = i; - } - if (appliedTerm != null) { - updateLastAppliedTermIndex(appliedTerm, appliedIndex); - } - } - - /** - * Notifies the state machine about index updates because of entries - * which do not cause state machine update, i.e. conf entries, metadata - * entries - * @param term term of the log entry - * @param index index of the log entry - */ - @Override - public void notifyIndexUpdate(long term, long index) { - applyTransactionCompletionMap.put(index, term); - } - - /* - * ApplyTransaction calls in Ratis are sequential. 
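[Editor's note] The comment above notes that applyTransaction calls arrive sequentially from Ratis; the implementation that follows still executes them asynchronously, bounding in-flight work with a semaphore and routing each request to a per-container single-thread executor. A minimal, JDK-only sketch of that pattern follows; the names are hypothetical and this is not the Ozone code.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

class BoundedKeyedExecutor {
  private final Semaphore pending;           // caps in-flight transactions
  private final ExecutorService[] executors; // one single-thread executor per stripe

  BoundedKeyedExecutor(int maxPending, int stripes) {
    this.pending = new Semaphore(maxPending);
    this.executors = new ExecutorService[stripes];
    for (int i = 0; i < stripes; i++) {
      executors[i] = Executors.newSingleThreadExecutor();
    }
  }

  // Acquire a permit (back-pressure on the caller), run the task on the
  // executor owning this key, and release the permit when the task finishes.
  <T> CompletableFuture<T> submit(long key, Callable<T> task)
      throws InterruptedException {
    pending.acquire();
    ExecutorService executor = executors[(int) (key % executors.length)];
    CompletableFuture<T> future = CompletableFuture.supplyAsync(() -> {
      try {
        return task.call();
      } catch (Exception e) {
        throw new CompletionException(e);
      }
    }, executor);
    future.whenComplete((result, error) -> pending.release());
    return future;
  }
}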
- */ - @Override - public CompletableFuture applyTransaction(TransactionContext trx) { - long index = trx.getLogEntry().getIndex(); - DispatcherContext.Builder builder = - new DispatcherContext.Builder() - .setTerm(trx.getLogEntry().getTerm()) - .setLogIndex(index); - - long applyTxnStartTime = Time.monotonicNowNanos(); - try { - applyTransactionSemaphore.acquire(); - metrics.incNumApplyTransactionsOps(); - ContainerCommandRequestProto requestProto = - getContainerCommandRequestProto( - trx.getStateMachineLogEntry().getLogData()); - Type cmdType = requestProto.getCmdType(); - // Make sure that in write chunk, the user data is not set - if (cmdType == Type.WriteChunk) { - Preconditions - .checkArgument(requestProto.getWriteChunk().getData().isEmpty()); - builder - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA); - } - if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile - || cmdType == Type.PutBlock || cmdType == Type.CreateContainer) { - builder.setContainer2BCSIDMap(container2BCSIDMap); - } - CompletableFuture applyTransactionFuture = - new CompletableFuture<>(); - // Ensure the command gets executed in a separate thread than - // stateMachineUpdater thread which is calling applyTransaction here. - CompletableFuture future = - CompletableFuture.supplyAsync(() -> { - try { - return runCommand(requestProto, builder.build()); - } catch (Exception e) { - LOG.error("gid {} : ApplyTransaction failed. cmd {} logIndex " - + "{} exception {}", gid, requestProto.getCmdType(), - index, e); - applyTransactionFuture.completeExceptionally(e); - throw e; - } - }, getCommandExecutor(requestProto)); - future.thenApply(r -> { - if (trx.getServerRole() == RaftPeerRole.LEADER) { - long startTime = (long) trx.getStateMachineContext(); - metrics.incPipelineLatency(cmdType, - Time.monotonicNowNanos() - startTime); - } - // ignore close container exception while marking the stateMachine - // unhealthy - if (r.getResult() != ContainerProtos.Result.SUCCESS - && r.getResult() != ContainerProtos.Result.CONTAINER_NOT_OPEN - && r.getResult() != ContainerProtos.Result.CLOSED_CONTAINER_IO) { - StorageContainerException sce = - new StorageContainerException(r.getMessage(), r.getResult()); - LOG.error( - "gid {} : ApplyTransaction failed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); - metrics.incNumApplyTransactionsFails(); - // Since the applyTransaction now is completed exceptionally, - // before any further snapshot is taken , the exception will be - // caught in stateMachineUpdater in Ratis and ratis server will - // shutdown. - applyTransactionFuture.completeExceptionally(sce); - stateMachineHealthy.compareAndSet(true, false); - ratisServer.handleApplyTransactionFailure(gid, trx.getServerRole()); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug( - "gid {} : ApplyTransaction completed. cmd {} logIndex {} msg : " - + "{} Container Result: {}", gid, r.getCmdType(), index, - r.getMessage(), r.getResult()); - } - applyTransactionFuture.complete(r::toByteString); - if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile) { - metrics.incNumBytesCommittedCount( - requestProto.getWriteChunk().getChunkData().getLen()); - } - // add the entry to the applyTransactionCompletionMap only if the - // stateMachine is healthy i.e, there has been no applyTransaction - // failures before. 
- if (isStateMachineHealthy()) { - final Long previous = applyTransactionCompletionMap - .put(index, trx.getLogEntry().getTerm()); - Preconditions.checkState(previous == null); - updateLastApplied(); - } - } - return applyTransactionFuture; - }).whenComplete((r, t) -> { - applyTransactionSemaphore.release(); - metrics.recordApplyTransactionCompletion( - Time.monotonicNowNanos() - applyTxnStartTime); - }); - return applyTransactionFuture; - } catch (IOException | InterruptedException e) { - metrics.incNumApplyTransactionsFails(); - return completeExceptionally(e); - } - } - - private static CompletableFuture completeExceptionally(Exception e) { - final CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(e); - return future; - } - - @VisibleForTesting - public void evictStateMachineCache() { - stateMachineDataCache.invalidateAll(); - stateMachineDataCache.cleanUp(); - } - - @Override - public void notifySlowness(RoleInfoProto roleInfoProto) { - ratisServer.handleNodeSlowness(gid, roleInfoProto); - } - - @Override - public void notifyExtendedNoLeader(RoleInfoProto roleInfoProto) { - ratisServer.handleNoLeader(gid, roleInfoProto); - } - - @Override - public void notifyNotLeader(Collection pendingEntries) - throws IOException { - evictStateMachineCache(); - } - - @Override - public void notifyLogFailed(Throwable t, LogEntryProto failedEntry) { - ratisServer.handleNodeLogFailure(gid, t); - } - - @Override - public CompletableFuture notifyInstallSnapshotFromLeader( - RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - ratisServer.handleInstallSnapshotFromLeader(gid, roleInfoProto, - firstTermIndexInLog); - final CompletableFuture future = new CompletableFuture<>(); - future.complete(firstTermIndexInLog); - return future; - } - - @Override - public void notifyGroupRemove() { - ratisServer.notifyGroupRemove(gid); - // Make best effort to quasi-close all the containers on group removal. - // Containers already in terminal state like CLOSED or UNHEALTHY will not - // be affected. - for (Long cid : container2BCSIDMap.keySet()) { - try { - containerController.markContainerForClose(cid); - containerController.quasiCloseContainer(cid); - } catch (IOException e) { - } - } - } - - @Override - public void close() throws IOException { - evictStateMachineCache(); - for (ExecutorService executor : executors) { - executor.shutdown(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java deleted file mode 100644 index 7d46910164e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -import java.util.Map; - -/** - * DispatcherContext class holds transport protocol specific context info - * required for execution of container commands over the container dispatcher. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class DispatcherContext { - /** - * Determines which stage of writeChunk a write chunk request is for. - */ - public enum WriteChunkStage { - WRITE_DATA, COMMIT_DATA, COMBINED - } - - // whether the chunk data needs to be written or committed or both - private final WriteChunkStage stage; - // indicates whether the read from tmp chunk files is allowed - private final boolean readFromTmpFile; - // which term the request is being served in Ratis - private final long term; - // the log index in Ratis log to which the request belongs to - private final long logIndex; - - private final Map container2BCSIDMap; - - private DispatcherContext(long term, long index, WriteChunkStage stage, - boolean readFromTmpFile, Map container2BCSIDMap) { - this.term = term; - this.logIndex = index; - this.stage = stage; - this.readFromTmpFile = readFromTmpFile; - this.container2BCSIDMap = container2BCSIDMap; - } - - public long getLogIndex() { - return logIndex; - } - - public boolean isReadFromTmpFile() { - return readFromTmpFile; - } - - public long getTerm() { - return term; - } - - public WriteChunkStage getStage() { - return stage; - } - - public Map getContainer2BCSIDMap() { - return container2BCSIDMap; - } - - /** - * Builder class for building DispatcherContext. - */ - public static final class Builder { - private WriteChunkStage stage = WriteChunkStage.COMBINED; - private boolean readFromTmpFile = false; - private long term; - private long logIndex; - private Map container2BCSIDMap; - - /** - * Sets the WriteChunkStage. - * - * @param writeChunkStage WriteChunk Stage - * @return DispatcherContext.Builder - */ - public Builder setStage(WriteChunkStage writeChunkStage) { - this.stage = writeChunkStage; - return this; - } - - /** - * Sets the flag for reading from tmp chunk files. - * - * @param setReadFromTmpFile whether to read from tmp chunk file or not - * @return DispatcherContext.Builder - */ - public Builder setReadFromTmpFile(boolean setReadFromTmpFile) { - this.readFromTmpFile = setReadFromTmpFile; - return this; - } - - /** - * Sets the current term for the container request from Ratis. - * - * @param currentTerm current term - * @return DispatcherContext.Builder - */ - public Builder setTerm(long currentTerm) { - this.term = currentTerm; - return this; - } - - /** - * Sets the logIndex for the container request from Ratis. - * - * @param index log index - * @return DispatcherContext.Builder - */ - public Builder setLogIndex(long index) { - this.logIndex = index; - return this; - } - - /** - * Sets the container2BCSIDMap to contain all the containerIds per - * RaftGroup. 
- * @param map container2BCSIDMap - * @return Builder - */ - public Builder setContainer2BCSIDMap(Map map) { - this.container2BCSIDMap = map; - return this; - } - /** - * Builds and returns DispatcherContext instance. - * - * @return DispatcherContext - */ - public DispatcherContext build() { - return new DispatcherContext(term, logIndex, stage, readFromTmpFile, - container2BCSIDMap); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java deleted file mode 100644 index 7f112eacd81..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * Holds configuration items for Ratis/Raft server. - */ -@ConfigGroup(prefix = "hdds.ratis.server") -public class RatisServerConfiguration { - - private int numSnapshotsRetained; - - @Config(key = "num.snapshots.retained", - type = ConfigType.INT, - defaultValue = "5", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to specify number of old snapshots " + - "retained at the Ratis leader.") - public void setNumSnapshotsRetained(int numSnapshotsRetained) { - this.numSnapshotsRetained = numSnapshotsRetained; - } - - public int getNumSnapshotsRetained() { - return numSnapshotsRetained; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java deleted file mode 100644 index 80e91cdf55d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ /dev/null @@ -1,689 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; - -import io.opentracing.Scope; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.ratis.RaftConfigKeys; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.grpc.GrpcFactory; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.protocol.*; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.RaftServerConfigKeys; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.ReplicationLevel; -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.impl.RaftServerProxy; -import org.apache.ratis.util.SizeInBytes; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Collections; -import java.util.Set; -import java.util.UUID; -import java.util.ArrayList; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -/** - * Creates a ratis server endpoint that acts as the communication layer for - * Ozone containers. 
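[Editor's note] One design choice worth calling out before the class body: chunk writes are handed to a fixed-size pool with a bounded queue and CallerRunsPolicy, so overload turns into back-pressure on the submitting thread instead of rejected writes. A small, JDK-only sketch of that pool setup follows; the sizes are illustrative, not the exact defaults.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ChunkExecutorSketch {
  static ThreadPoolExecutor newChunkExecutor(int threads) {
    return new ThreadPoolExecutor(
        threads, threads,                   // fixed-size pool
        100, TimeUnit.SECONDS,              // keep-alive for idle threads
        new ArrayBlockingQueue<>(1024),     // bounded work queue
        new ThreadPoolExecutor.CallerRunsPolicy()); // overflow runs on the caller
  }
}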
- */ -public final class XceiverServerRatis extends XceiverServer { - private static final Logger LOG = LoggerFactory - .getLogger(XceiverServerRatis.class); - private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); - - private static long nextCallId() { - return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; - } - - private int port; - private final RaftServer server; - private ThreadPoolExecutor chunkExecutor; - private final ContainerDispatcher dispatcher; - private final ContainerController containerController; - private ClientId clientId = ClientId.randomId(); - private final StateContext context; - private final ReplicationLevel replicationLevel; - private long nodeFailureTimeoutMs; - private final long cacheEntryExpiryInteval; - private boolean isStarted = false; - private DatanodeDetails datanodeDetails; - private final OzoneConfiguration conf; - // TODO: Remove the gids set when Ratis supports an api to query active - // pipelines - private final Set raftGids = new HashSet<>(); - - @SuppressWarnings("parameternumber") - private XceiverServerRatis(DatanodeDetails dd, int port, - ContainerDispatcher dispatcher, ContainerController containerController, - StateContext context, GrpcTlsConfig tlsConfig, CertificateClient caClient, - OzoneConfiguration conf) - throws IOException { - super(conf, caClient); - this.conf = conf; - Objects.requireNonNull(dd, "id == null"); - datanodeDetails = dd; - this.port = port; - RaftProperties serverProperties = newRaftProperties(); - final int numWriteChunkThreads = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT); - chunkExecutor = - new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads, - 100, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1024), - new ThreadPoolExecutor.CallerRunsPolicy()); - this.context = context; - this.replicationLevel = - conf.getEnum(OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT); - cacheEntryExpiryInteval = conf.getTimeDuration(OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL, - OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - this.dispatcher = dispatcher; - this.containerController = containerController; - - RaftServer.Builder builder = - RaftServer.newBuilder().setServerId(RatisHelper.toRaftPeerId(dd)) - .setProperties(serverProperties) - .setStateMachineRegistry(this::getStateMachine); - if (tlsConfig != null) { - builder.setParameters(GrpcFactory.newRaftParameters(tlsConfig)); - } - this.server = builder.build(); - } - - private ContainerStateMachine getStateMachine(RaftGroupId gid) { - return new ContainerStateMachine(gid, dispatcher, containerController, - chunkExecutor, this, cacheEntryExpiryInteval, - getSecurityConfig().isBlockTokenEnabled(), getBlockTokenVerifier(), - conf); - } - - private RaftProperties newRaftProperties() { - final RaftProperties properties = new RaftProperties(); - - // Set rpc type - final RpcType rpc = setRpcType(properties); - - // set raft segment size - setRaftSegmentSize(properties); - - // set raft segment pre-allocated size - final int raftSegmentPreallocatedSize = - setRaftSegmentPreallocatedSize(properties); - - // Set max write buffer size, which is the scm chunk size - final int maxChunkSize = setMaxWriteBuffer(properties); - TimeUnit timeUnit; - long duration; - - // set the configs enable and set the stateMachineData sync timeout - RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); - timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, - OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT - .getDuration(), timeUnit); - final TimeDuration dataSyncTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Log.StateMachineData - .setSyncTimeout(properties, dataSyncTimeout); - - // Set the server Request timeout - setServerRequestTimeout(properties); - - // set timeout for a retry cache entry - setTimeoutForRetryCache(properties); - - // Set the ratis leader election timeout - setRatisLeaderElectionTimeout(properties); - - // Set the maximum cache segments - RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2); - - // set the node failure timeout - setNodeFailureTimeout(properties); - - // Set the ratis storage directory - String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); - RaftServerConfigKeys.setStorageDirs(properties, - Collections.singletonList(new File(storageDir))); - - // For grpc set the maximum message size - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(maxChunkSize + raftSegmentPreallocatedSize)); - - // Set the ratis port number - if (rpc == SupportedRpcType.GRPC) { - GrpcConfigKeys.Server.setPort(properties, port); - } else if (rpc == SupportedRpcType.NETTY) { - NettyConfigKeys.Server.setPort(properties, port); - } - - long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); - RaftServerConfigKeys.Snapshot. - setAutoTriggerEnabled(properties, true); - RaftServerConfigKeys.Snapshot. 
- setAutoTriggerThreshold(properties, snapshotThreshold); - int maxPendingRequets = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT - ); - RaftServerConfigKeys.Write.setElementLimit(properties, maxPendingRequets); - int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logQueueByteLimit = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setQueueElementLimit( - properties, logQueueNumElements); - RaftServerConfigKeys.Log.setQueueByteLimit(properties, logQueueByteLimit); - - int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, - OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES_DEFAULT); - RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, - numSyncRetries); - - // Enable the StateMachineCaching - RaftServerConfigKeys.Log.StateMachineData.setCachingEnabled( - properties, true); - - RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, - false); - - int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); - RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); - - //Set the number of Snapshots Retained. - RatisServerConfiguration ratisServerConfiguration = - conf.getObject(RatisServerConfiguration.class); - int numSnapshotsRetained = - ratisServerConfiguration.getNumSnapshotsRetained(); - RaftServerConfigKeys.Snapshot.setRetentionFileNum(properties, - numSnapshotsRetained); - return properties; - } - - private void setNodeFailureTimeout(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration nodeFailureTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, - nodeFailureTimeout); - RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, - nodeFailureTimeout); - nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS); - } - - private void setRatisLeaderElectionTimeout(RaftProperties properties) { - long duration; - TimeUnit leaderElectionMinTimeoutUnit = - OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - OzoneConfigKeys. 
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getDuration(), leaderElectionMinTimeoutUnit); - final TimeDuration leaderElectionMinTimeout = - TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); - RaftServerConfigKeys.Rpc - .setTimeoutMin(properties, leaderElectionMinTimeout); - long leaderElectionMaxTimeout = - leaderElectionMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; - RaftServerConfigKeys.Rpc.setTimeoutMax(properties, - TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS)); - } - - private void setTimeoutForRetryCache(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration retryCacheTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.RetryCache - .setExpiryTime(properties, retryCacheTimeout); - } - - private void setServerRequestTimeout(RaftProperties properties) { - TimeUnit timeUnit; - long duration; - timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT - .getUnit(); - duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT - .getDuration(), timeUnit); - final TimeDuration serverRequestTimeout = - TimeDuration.valueOf(duration, timeUnit); - RaftServerConfigKeys.Rpc - .setRequestTimeout(properties, serverRequestTimeout); - } - - private int setMaxWriteBuffer(RaftProperties properties) { - final int maxChunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE; - RaftServerConfigKeys.Log.setWriteBufferSize(properties, - SizeInBytes.valueOf(maxChunkSize)); - return maxChunkSize; - } - - private int setRaftSegmentPreallocatedSize(RaftProperties properties) { - final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); - int logAppenderQueueNumElements = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys - .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - OzoneConfigKeys - .DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.Appender - .setBufferElementLimit(properties, logAppenderQueueNumElements); - RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); - RaftServerConfigKeys.Log.setPreallocatedSize(properties, - SizeInBytes.valueOf(raftSegmentPreallocatedSize)); - return raftSegmentPreallocatedSize; - } - - private void setRaftSegmentSize(RaftProperties properties) { - final int raftSegmentSize = (int)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setSegmentSizeMax(properties, - SizeInBytes.valueOf(raftSegmentSize)); - } - - private RpcType setRpcType(RaftProperties properties) { - final String rpcType = conf.get( - 
OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); - RaftConfigKeys.Rpc.setType(properties, rpc); - return rpc; - } - - public static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails datanodeDetails, OzoneConfiguration ozoneConf, - ContainerDispatcher dispatcher, ContainerController containerController, - CertificateClient caClient, StateContext context) throws IOException { - int localPort = ozoneConf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); - - // Get an available port on current node and - // use that as the container port - if (ozoneConf.getBoolean(OzoneConfigKeys - .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT)) { - localPort = 0; - } - GrpcTlsConfig tlsConfig = RatisHelper.createTlsServerConfigForDN( - new SecurityConfig(ozoneConf), caClient); - - return new XceiverServerRatis(datanodeDetails, localPort, dispatcher, - containerController, context, tlsConfig, caClient, ozoneConf); - } - - @Override - public void start() throws IOException { - if (!isStarted) { - LOG.info("Starting {} {} at port {}", getClass().getSimpleName(), - server.getId(), getIPCPort()); - chunkExecutor.prestartAllCoreThreads(); - server.start(); - - int realPort = - ((RaftServerProxy) server).getServerRpc().getInetSocketAddress() - .getPort(); - - if (port == 0) { - LOG.info("{} {} is started using port {}", getClass().getSimpleName(), - server.getId(), realPort); - port = realPort; - } - - //register the real port to the datanode details. - datanodeDetails.setPort(DatanodeDetails - .newPort(DatanodeDetails.Port.Name.RATIS, - realPort)); - - isStarted = true; - } - } - - @Override - public void stop() { - if (isStarted) { - try { - // shutdown server before the executors as while shutting down, - // some of the tasks would be executed using the executors. - server.close(); - chunkExecutor.shutdown(); - isStarted = false; - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - @Override - public int getIPCPort() { - return port; - } - - /** - * Returns the Replication type supported by this end-point. - * - * @return enum -- {Stand_Alone, Ratis, Chained} - */ - @Override - public HddsProtos.ReplicationType getServerType() { - return HddsProtos.ReplicationType.RATIS; - } - - @VisibleForTesting - public RaftServer getServer() { - return server; - } - - private void processReply(RaftClientReply reply) throws IOException { - // NotLeader exception is thrown only when the raft server to which the - // request is submitted is not the leader. The request will be rejected - // and will eventually be executed once the request comes via the leader - // node. - NotLeaderException notLeaderException = reply.getNotLeaderException(); - if (notLeaderException != null) { - throw notLeaderException; - } - StateMachineException stateMachineException = - reply.getStateMachineException(); - if (stateMachineException != null) { - throw stateMachineException; - } - } - - @Override - public void submitRequest(ContainerCommandRequestProto request, - HddsProtos.PipelineID pipelineID) throws IOException { - super.submitRequest(request, pipelineID); - RaftClientReply reply; - try (Scope scope = TracingUtil - .importAndCreateScope( - "XceiverServerRatis." 
+ request.getCmdType().name(), - request.getTraceID())) { - - RaftClientRequest raftClientRequest = - createRaftClientRequest(request, pipelineID, - RaftClientRequest.writeRequestType()); - try { - reply = server.submitClientRequestAsync(raftClientRequest).get(); - } catch (Exception e) { - throw new IOException(e.getMessage(), e); - } - processReply(reply); - } - } - - private RaftClientRequest createRaftClientRequest( - ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID, - RaftClientRequest.Type type) { - return new RaftClientRequest(clientId, server.getId(), - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()), - nextCallId(), ContainerCommandRequestMessage.toMessage(request, null), - type, null); - } - - private GroupInfoRequest createGroupInfoRequest( - HddsProtos.PipelineID pipelineID) { - return new GroupInfoRequest(clientId, server.getId(), - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()), - nextCallId()); - } - - private void handlePipelineFailure(RaftGroupId groupId, - RoleInfoProto roleInfoProto) { - String msg; - UUID datanode = RatisHelper.toDatanodeId(roleInfoProto.getSelf()); - RaftPeerId id = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()); - switch (roleInfoProto.getRole()) { - case CANDIDATE: - msg = datanode + " is in candidate state for " + - roleInfoProto.getCandidateInfo().getLastLeaderElapsedTimeMs() + "ms"; - break; - case LEADER: - StringBuilder sb = new StringBuilder(); - sb.append(datanode).append(" has not seen follower/s"); - for (RaftProtos.ServerRpcProto follower : roleInfoProto.getLeaderInfo() - .getFollowerInfoList()) { - if (follower.getLastRpcElapsedTimeMs() > nodeFailureTimeoutMs) { - sb.append(" ").append(RatisHelper.toDatanodeId(follower.getId())) - .append(" for ").append(follower.getLastRpcElapsedTimeMs()) - .append("ms"); - } - } - msg = sb.toString(); - break; - default: - LOG.error("unknown state:" + roleInfoProto.getRole()); - throw new IllegalStateException("node" + id + " is in illegal role " - + roleInfoProto.getRole()); - } - - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_FAILED, false); - } - - private void triggerPipelineClose(RaftGroupId groupId, String detail, - ClosePipelineInfo.Reason reasonCode, boolean triggerHB) { - PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid()); - ClosePipelineInfo.Builder closePipelineInfo = - ClosePipelineInfo.newBuilder() - .setPipelineID(pipelineID.getProtobuf()) - .setReason(reasonCode) - .setDetailedReason(detail); - - PipelineAction action = PipelineAction.newBuilder() - .setClosePipeline(closePipelineInfo) - .setAction(PipelineAction.Action.CLOSE) - .build(); - context.addPipelineActionIfAbsent(action); - // wait for the next HB timeout or right away? 
- if (triggerHB) { - context.getParent().triggerHeartbeat(); - } - LOG.error( - "pipeline Action " + action.getAction() + " on pipeline " + pipelineID - + ".Reason : " + action.getClosePipeline().getDetailedReason()); - } - - @Override - public boolean isExist(HddsProtos.PipelineID pipelineId) { - return raftGids.contains( - RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineId).getId())); - } - - @Override - public List getPipelineReport() { - try { - Iterable gids = server.getGroupIds(); - List reports = new ArrayList<>(); - for (RaftGroupId groupId : gids) { - reports.add(PipelineReport.newBuilder() - .setPipelineID(PipelineID.valueOf(groupId.getUuid()).getProtobuf()) - .build()); - } - return reports; - } catch (Exception e) { - return null; - } - } - - @VisibleForTesting - public List getPipelineIds() { - Iterable gids = server.getGroupIds(); - List pipelineIDs = new ArrayList<>(); - for (RaftGroupId groupId : gids) { - pipelineIDs.add(PipelineID.valueOf(groupId.getUuid())); - LOG.info("pipeline id {}", PipelineID.valueOf(groupId.getUuid())); - } - return pipelineIDs; - } - - void handleNodeSlowness(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); - } - - void handleNoLeader(RaftGroupId groupId, RoleInfoProto roleInfoProto) { - handlePipelineFailure(groupId, roleInfoProto); - } - - void handleApplyTransactionFailure(RaftGroupId groupId, - RaftProtos.RaftPeerRole role) { - UUID dnId = RatisHelper.toDatanodeId(getServer().getId()); - String msg = - "Ratis Transaction failure in datanode " + dnId + " with role " + role - + " .Triggering pipeline close action."; - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.STATEMACHINE_TRANSACTION_FAILED, true); - } - /** - * The fact that the snapshot contents cannot be used to actually catch up - * the follower, it is the reason to initiate close pipeline and - * not install the snapshot. The follower will basically never be able to - * catch up. - * - * @param groupId raft group information - * @param roleInfoProto information about the current node role and - * rpc delay information. - * @param firstTermIndexInLog After the snapshot installation is complete, - * return the last included term index in the snapshot. - */ - void handleInstallSnapshotFromLeader(RaftGroupId groupId, - RoleInfoProto roleInfoProto, - TermIndex firstTermIndexInLog) { - LOG.warn("Install snapshot notification received from Leader with " + - "termIndex: {}, terminating pipeline: {}", - firstTermIndexInLog, groupId); - handlePipelineFailure(groupId, roleInfoProto); - } - - /** - * Notify the Datanode Ratis endpoint of Ratis log failure. - * Expected to be invoked from the Container StateMachine - * @param groupId the Ratis group/pipeline for which log has failed - * @param t exception encountered at the time of the failure - * - */ - @VisibleForTesting - public void handleNodeLogFailure(RaftGroupId groupId, Throwable t) { - String msg = (t == null) ? "Unspecified failure reported in Ratis log" - : t.getMessage(); - - triggerPipelineClose(groupId, msg, - ClosePipelineInfo.Reason.PIPELINE_LOG_FAILED, true); - } - - public long getMinReplicatedIndex(PipelineID pipelineID) throws IOException { - Long minIndex; - GroupInfoReply reply = getServer() - .getGroupInfo(createGroupInfoRequest(pipelineID.getProtobuf())); - minIndex = RatisHelper.getMinReplicatedIndex(reply.getCommitInfos()); - return minIndex == null ? 
-1 : minIndex.longValue(); - } - - void notifyGroupRemove(RaftGroupId gid) { - raftGids.remove(gid); - } - - void notifyGroupAdd(RaftGroupId gid) { - raftGids.add(gid); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java deleted file mode 100644 index 8debfe02837..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -/** - * This package contains classes for the server implementation - * using Apache Ratis - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java deleted file mode 100644 index 4ddb4e48792..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.collections.MapIterator; -import org.apache.commons.collections.map.LRUMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * container cache is a LRUMap that maintains the DB handles. - */ -public final class ContainerCache extends LRUMap { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerCache.class); - private final Lock lock = new ReentrantLock(); - private static ContainerCache cache; - private static final float LOAD_FACTOR = 0.75f; - /** - * Constructs a cache that holds DBHandle references. - */ - private ContainerCache(int maxSize, float loadFactor, boolean - scanUntilRemovable) { - super(maxSize, loadFactor, scanUntilRemovable); - } - - /** - * Return a singleton instance of {@link ContainerCache} - * that holds the DB handlers. - * - * @param conf - Configuration. - * @return A instance of {@link ContainerCache}. - */ - public synchronized static ContainerCache getInstance(Configuration conf) { - if (cache == null) { - int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, - OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT); - cache = new ContainerCache(cacheSize, LOAD_FACTOR, true); - } - return cache; - } - - /** - * Closes all the db instances and resets the cache. - */ - public void shutdownCache() { - lock.lock(); - try { - // iterate the cache and close each db - MapIterator iterator = cache.mapIterator(); - while (iterator.hasNext()) { - iterator.next(); - ReferenceCountedDB db = (ReferenceCountedDB) iterator.getValue(); - Preconditions.checkArgument(db.cleanup(), "refCount:", - db.getReferenceCount()); - } - // reset the cache - cache.clear(); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - */ - @Override - protected boolean removeLRU(LinkEntry entry) { - ReferenceCountedDB db = (ReferenceCountedDB) entry.getValue(); - lock.lock(); - try { - return db.cleanup(); - } finally { - lock.unlock(); - } - } - - /** - * Returns a DB handle if available, create the handler otherwise. - * - * @param containerID - ID of the container. - * @param containerDBType - DB type of the container. - * @param containerDBPath - DB path of the container. - * @param conf - Hadoop Configuration. - * @return ReferenceCountedDB. - */ - public ReferenceCountedDB getDB(long containerID, String containerDBType, - String containerDBPath, Configuration conf) - throws IOException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative."); - lock.lock(); - try { - ReferenceCountedDB db = (ReferenceCountedDB) this.get(containerDBPath); - - if (db == null) { - MetadataStore metadataStore = - MetadataStoreBuilder.newBuilder() - .setDbFile(new File(containerDBPath)) - .setCreateIfMissing(false) - .setConf(conf) - .setDBType(containerDBType) - .build(); - db = new ReferenceCountedDB(metadataStore, containerDBPath); - this.put(containerDBPath, db); - } - // increment the reference before returning the object - db.incrementReference(); - return db; - } catch (Exception e) { - LOG.error("Error opening DB. 
Container:{} ContainerPath:{}", - containerID, containerDBPath, e); - throw e; - } finally { - lock.unlock(); - } - } - - /** - * Remove a DB handler from cache. - * - * @param containerDBPath - path of the container db file. - */ - public void removeDB(String containerDBPath) { - lock.lock(); - try { - ReferenceCountedDB db = (ReferenceCountedDB)this.get(containerDBPath); - if (db != null) { - Preconditions.checkArgument(db.cleanup(), "refCount:", - db.getReferenceCount()); - } - this.remove(containerDBPath); - } finally { - lock.unlock(); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java deleted file mode 100644 index cb356dadeb2..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -/** - * A util class for {@link HddsVolume}. - */ -public final class HddsVolumeUtil { - - // Private constructor for Utility class. Unused. - private HddsVolumeUtil() { - } - - private static final String VERSION_FILE = "VERSION"; - private static final String STORAGE_ID_PREFIX = "DS-"; - - public static File getVersionFile(File rootDir) { - return new File(rootDir, VERSION_FILE); - } - - public static String generateUuid() { - return STORAGE_ID_PREFIX + UUID.randomUUID(); - } - - /** - * Get hddsRoot from volume root. If volumeRoot points to hddsRoot, it is - * returned as is. - * For a volumeRoot /data/disk1, the hddsRoot is /data/disk1/hdds. - * @param volumeRoot root of the volume. - * @return hddsRoot of the volume. - */ - public static String getHddsRoot(String volumeRoot) { - if (volumeRoot.endsWith(HddsVolume.HDDS_VOLUME_DIR)) { - return volumeRoot; - } else { - File hddsRoot = new File(volumeRoot, HddsVolume.HDDS_VOLUME_DIR); - return hddsRoot.getPath(); - } - } - - /** - * Returns storageID if it is valid. Throws an exception otherwise. 
- */ - @VisibleForTesting - public static String getStorageID(Properties props, File versionFile) - throws InconsistentStorageStateException { - return getProperty(props, OzoneConsts.STORAGE_ID, versionFile); - } - - /** - * Returns clusterID if it is valid. It should match the clusterID from the - * Datanode. Throws an exception otherwise. - */ - @VisibleForTesting - public static String getClusterID(Properties props, File versionFile, - String clusterID) throws InconsistentStorageStateException { - String cid = getProperty(props, OzoneConsts.CLUSTER_ID, versionFile); - - if (clusterID == null) { - return cid; - } - if (!clusterID.equals(cid)) { - throw new InconsistentStorageStateException("Mismatched " + - "ClusterIDs. Version File : " + versionFile + " has clusterID: " + - cid + " and Datanode has clusterID: " + clusterID); - } - return cid; - } - - /** - * Returns datanodeUuid if it is valid. It should match the UUID of the - * Datanode. Throws an exception otherwise. - */ - @VisibleForTesting - public static String getDatanodeUUID(Properties props, File versionFile, - String datanodeUuid) - throws InconsistentStorageStateException { - String datanodeID = getProperty(props, OzoneConsts.DATANODE_UUID, - versionFile); - - if (datanodeUuid != null && !datanodeUuid.equals(datanodeID)) { - throw new InconsistentStorageStateException("Mismatched " + - "DatanodeUUIDs. Version File : " + versionFile + " has datanodeUuid: " - + datanodeID + " and Datanode has datanodeUuid: " + datanodeUuid); - } - return datanodeID; - } - - /** - * Returns creationTime if it is valid. Throws an exception otherwise. - */ - @VisibleForTesting - public static long getCreationTime(Properties props, File versionFile) - throws InconsistentStorageStateException { - String cTimeStr = getProperty(props, OzoneConsts.CTIME, versionFile); - - long cTime = Long.parseLong(cTimeStr); - long currentTime = Time.now(); - if (cTime > currentTime || cTime < 0) { - throw new InconsistentStorageStateException("Invalid Creation time in " + - "Version File : " + versionFile + " - " + cTime + ". Current system" + - " time is " + currentTime); - } - return cTime; - } - - /** - * Returns layOutVersion if it is valid. Throws an exception otherwise. - */ - @VisibleForTesting - public static int getLayOutVersion(Properties props, File versionFile) throws - InconsistentStorageStateException { - String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile); - - int lv = Integer.parseInt(lvStr); - if(DataNodeLayoutVersion.getLatestVersion().getVersion() != lv) { - throw new InconsistentStorageStateException("Invalid layOutVersion. " + - "Version file has layOutVersion as " + lv + " and latest Datanode " + - "layOutVersion is " + - DataNodeLayoutVersion.getLatestVersion().getVersion()); - } - return lv; - } - - private static String getProperty(Properties props, String propName, File - versionFile) - throws InconsistentStorageStateException { - String value = props.getProperty(propName); - if (StringUtils.isBlank(value)) { - throw new InconsistentStorageStateException("Invalid " + propName + - ". Version File : " + versionFile + " has null or empty " + propName); - } - return value; - } - - /** - * Check Volume is in consistent state or not. - * @param hddsVolume - * @param scmId - * @param clusterId - * @param logger - * @return true - if volume is in consistent state, otherwise false. 
- */ - public static boolean checkVolume(HddsVolume hddsVolume, String scmId, String - clusterId, Logger logger) { - File hddsRoot = hddsVolume.getHddsRootDir(); - String volumeRoot = hddsRoot.getPath(); - File scmDir = new File(hddsRoot, scmId); - - try { - hddsVolume.format(clusterId); - } catch (IOException ex) { - logger.error("Error during formatting volume {}, exception is {}", - volumeRoot, ex); - return false; - } - - File[] hddsFiles = hddsRoot.listFiles(); - - if(hddsFiles == null) { - // This is the case for IOException, where listFiles returns null. - // So, we fail the volume. - return false; - } else if (hddsFiles.length == 1) { - // DN started for first time or this is a newly added volume. - // So we create scm directory. - if (!scmDir.mkdir()) { - logger.error("Unable to create scmDir {}", scmDir); - return false; - } - return true; - } else if(hddsFiles.length == 2) { - // The files should be Version and SCM directory - if (scmDir.exists()) { - return true; - } else { - logger.error("Volume {} is in Inconsistent state, expected scm " + - "directory {} does not exist", volumeRoot, scmDir - .getAbsolutePath()); - return false; - } - } else { - // The hdds root dir should always have 2 files. One is Version file - // and other is SCM directory. - return false; - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java deleted file mode 100644 index fb143a407f7..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ReferenceCountedDB.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.utils; - -import com.google.common.base.Preconditions; - -import org.apache.commons.lang.exception.ExceptionUtils; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Class to implement reference counting over instances handed by Container - * Cache. - * Enable DEBUG log below will enable us quickly locate the leaked reference - * from caller stack. When JDK9 StackWalker is available, we can switch to - * StackWalker instead of new Exception().printStackTrace(). 
- */ -public class ReferenceCountedDB implements Closeable { - private static final Logger LOG = - LoggerFactory.getLogger(ReferenceCountedDB.class); - private final AtomicInteger referenceCount; - private final MetadataStore store; - private final String containerDBPath; - - public ReferenceCountedDB(MetadataStore store, String containerDBPath) { - this.referenceCount = new AtomicInteger(0); - this.store = store; - this.containerDBPath = containerDBPath; - } - - public long getReferenceCount() { - return referenceCount.get(); - } - - public void incrementReference() { - this.referenceCount.incrementAndGet(); - if (LOG.isTraceEnabled()) { - LOG.trace("IncRef {} to refCnt {}, stackTrace: {}", containerDBPath, - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public void decrementReference() { - int refCount = this.referenceCount.decrementAndGet(); - Preconditions.checkArgument(refCount >= 0, "refCount:", refCount); - if (LOG.isTraceEnabled()) { - LOG.trace("DecRef {} to refCnt {}, stackTrace: {}", containerDBPath, - referenceCount.get(), ExceptionUtils.getStackTrace(new Throwable())); - } - } - - public boolean cleanup() { - if (referenceCount.get() == 0 && store != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Close {} refCnt {}", containerDBPath, - referenceCount.get()); - } - try { - store.close(); - return true; - } catch (Exception e) { - LOG.error("Error closing DB. Container: " + containerDBPath, e); - return false; - } - } else { - return false; - } - } - - public MetadataStore getStore() { - return store; - } - - public void close() { - decrementReference(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java deleted file mode 100644 index 08264f084a0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
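Editor's note, not part of the patch: the two classes above, ContainerCache and ReferenceCountedDB, implement a reference-counted LRU cache of per-container metadata DB handles. The sketch below shows the acquire/release pattern they were built for; the container id, DB type string and DB path are made-up placeholders, and the snippet assumes the pre-removal hadoop-hdds classes are still on the classpath.

// Illustrative only (not part of this patch). Placeholder id, DB type and path.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.MetadataStore;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;

import java.io.IOException;

public final class ContainerCacheUsageSketch {
  private ContainerCacheUsageSketch() { }

  public static byte[] readKey(long containerId, String dbPath, byte[] key)
      throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    ContainerCache cache = ContainerCache.getInstance(conf);
    // getDB() increments the reference count before handing the handle out;
    // try-with-resources calls close(), which only decrements it. The store
    // itself is closed later by the cache once the count drops to zero and
    // the entry is evicted or removed.
    try (ReferenceCountedDB db =
             cache.getDB(containerId, "RocksDB", dbPath, conf)) {
      MetadataStore store = db.getStore();
      return store.get(key);
    }
  }
}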
- */ -package org.apache.hadoop.ozone.container.common.utils; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java deleted file mode 100644 index c0c719bbc85..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ /dev/null @@ -1,1298 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater - .newUpdater; - -import javax.annotation.Nullable; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.locks.LockSupport; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * An abstract implementation of {@link ListenableFuture}, intended for - * advanced users only. More common ways to create a {@code ListenableFuture} - * include instantiating a {@link SettableFuture}, submitting a task to a - * {@link ListeningExecutorService}, and deriving a {@code Future} from an - * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and its overloaded versions. - *
- * <p>
This class implements all methods in {@code ListenableFuture}. - * Subclasses should provide a way to set the result of the computation - * through the protected methods {@link #set(Object)}, - * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}. - * Subclasses may also override {@link #interruptTask()}, which will be - * invoked automatically if a call to {@link #cancel(boolean) cancel(true)} - * succeeds in canceling the future. Subclasses should rarely override other - * methods. - */ - -@GwtCompatible(emulated = true) -public abstract class AbstractFuture implements ListenableFuture { - // NOTE: Whenever both tests are cheap and functional, it's faster to use &, - // | instead of &&, || - - private static final boolean GENERATE_CANCELLATION_CAUSES = - Boolean.parseBoolean( - System.getProperty("guava.concurrent.generate_cancellation_cause", - "false")); - - /** - * A less abstract subclass of AbstractFuture. This can be used to optimize - * setFuture by ensuring that {@link #get} calls exactly the implementation - * of {@link AbstractFuture#get}. - */ - abstract static class TrustedFuture extends AbstractFuture { - @Override - public final V get() throws InterruptedException, ExecutionException { - return super.get(); - } - - @Override - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return super.get(timeout, unit); - } - - @Override - public final boolean isDone() { - return super.isDone(); - } - - @Override - public final boolean isCancelled() { - return super.isCancelled(); - } - - @Override - public final void addListener(Runnable listener, Executor executor) { - super.addListener(listener, executor); - } - - @Override - public final boolean cancel(boolean mayInterruptIfRunning) { - return super.cancel(mayInterruptIfRunning); - } - } - - // Logger to log exceptions caught when running listeners. - private static final Logger LOG = Logger - .getLogger(AbstractFuture.class.getName()); - - // A heuristic for timed gets. If the remaining timeout is less than this, - // spin instead of - // blocking. This value is what AbstractQueuedSynchronizer uses. - private static final long SPIN_THRESHOLD_NANOS = 1000L; - - private static final AtomicHelper ATOMIC_HELPER; - - static { - AtomicHelper helper; - - try { - helper = new UnsafeAtomicHelper(); - } catch (Throwable unsafeFailure) { - // catch absolutely everything and fall through to our 'SafeAtomicHelper' - // The access control checks that ARFU does means the caller class has - // to be AbstractFuture - // instead of SafeAtomicHelper, so we annoyingly define these here - try { - helper = - new SafeAtomicHelper( - newUpdater(Waiter.class, Thread.class, "thread"), - newUpdater(Waiter.class, Waiter.class, "next"), - newUpdater(AbstractFuture.class, Waiter.class, "waiters"), - newUpdater(AbstractFuture.class, Listener.class, "listeners"), - newUpdater(AbstractFuture.class, Object.class, "value")); - } catch (Throwable atomicReferenceFieldUpdaterFailure) { - // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs - // that cause getDeclaredField to throw a NoSuchFieldException when - // the field is definitely there. - // For these users fallback to a suboptimal implementation, based on - // synchronized. This will be a definite performance hit to those users. 
- LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure); - LOG.log( - Level.SEVERE, "SafeAtomicHelper is broken!", - atomicReferenceFieldUpdaterFailure); - helper = new SynchronizedHelper(); - } - } - ATOMIC_HELPER = helper; - - // Prevent rare disastrous classloading in first call to LockSupport.park. - // See: https://bugs.openjdk.java.net/browse/JDK-8074773 - @SuppressWarnings("unused") - @SuppressFBWarnings - Class ensureLoaded = LockSupport.class; - } - - /** - * Waiter links form a Treiber stack, in the {@link #waiters} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Waiter { - static final Waiter TOMBSTONE = new Waiter(false /* ignored param */); - - @Nullable volatile Thread thread; - @Nullable volatile Waiter next; - - /** - * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this - * class is loaded before the ATOMIC_HELPER. Apparently this is possible - * on some android platforms. - */ - Waiter(boolean unused) { - } - - Waiter() { - // avoid volatile write, write is made visible by subsequent CAS on - // waiters field - ATOMIC_HELPER.putThread(this, Thread.currentThread()); - } - - // non-volatile write to the next field. Should be made visible by - // subsequent CAS on waiters field. - void setNext(Waiter next) { - ATOMIC_HELPER.putNext(this, next); - } - - void unpark() { - // This is racy with removeWaiter. The consequence of the race is that - // we may spuriously call unpark even though the thread has already - // removed itself from the list. But even if we did use a CAS, that - // race would still exist (it would just be ever so slightly smaller). - Thread w = thread; - if (w != null) { - thread = null; - LockSupport.unpark(w); - } - } - } - - /** - * Marks the given node as 'deleted' (null waiter) and then scans the list - * to unlink all deleted nodes. This is an O(n) operation in the common - * case (and O(n^2) in the worst), but we are saved by two things. - *
- * <ul>
- * <li>This is only called when a waiting thread times out or is
- * interrupted. Both of which should be rare.
- * <li>The waiters list should be very short.
- * </ul>
- */ - private void removeWaiter(Waiter node) { - node.thread = null; // mark as 'deleted' - restart: - while (true) { - Waiter pred = null; - Waiter curr = waiters; - if (curr == Waiter.TOMBSTONE) { - return; // give up if someone is calling complete - } - Waiter succ; - while (curr != null) { - succ = curr.next; - if (curr.thread != null) { // we aren't unlinking this node, update - // pred. - pred = curr; - } else if (pred != null) { // We are unlinking this node and it has a - // predecessor. - pred.next = succ; - if (pred.thread == null) { // We raced with another node that - // unlinked pred. Restart. - continue restart; - } - } else if (!ATOMIC_HELPER - .casWaiters(this, curr, succ)) { // We are unlinking head - continue restart; // We raced with an add or complete - } - curr = succ; - } - break; - } - } - - /** - * Listeners also form a stack through the {@link #listeners} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Listener { - static final Listener TOMBSTONE = new Listener(null, null); - final Runnable task; - final Executor executor; - - // writes to next are made visible by subsequent CAS's on the listeners - // field - @Nullable Listener next; - - Listener(Runnable task, Executor executor) { - this.task = task; - this.executor = executor; - } - } - - /** - * A special value to represent {@code null}. - */ - private static final Object NULL = new Object(); - - /** - * A special value to represent failure, when {@link #setException} is - * called successfully. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Failure { - static final Failure FALLBACK_INSTANCE = - new Failure( - new Throwable("Failure occurred while trying to finish a future" + - ".") { - @Override - public synchronized Throwable fillInStackTrace() { - return this; // no stack trace - } - }); - final Throwable exception; - - Failure(Throwable exception) { - this.exception = checkNotNull(exception); - } - } - - /** - * A special value to represent cancellation and the 'wasInterrupted' bit. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Cancellation { - final boolean wasInterrupted; - @Nullable final Throwable cause; - - Cancellation(boolean wasInterrupted, @Nullable Throwable cause) { - this.wasInterrupted = wasInterrupted; - this.cause = cause; - } - } - - /** - * A special value that encodes the 'setFuture' state. - */ - @SuppressWarnings("visibilitymodifier") - private static final class SetFuture implements Runnable { - final AbstractFuture owner; - final ListenableFuture future; - - SetFuture(AbstractFuture owner, ListenableFuture future) { - this.owner = owner; - this.future = future; - } - - @Override - public void run() { - if (owner.value != this) { - // nothing to do, we must have been cancelled, don't bother inspecting - // the future. - return; - } - Object valueToSet = getFutureValue(future); - if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) { - complete(owner); - } - } - } - - /** - * This field encodes the current state of the future. - *
- * <p>The valid values are:
- * <ul>
- * <li>{@code null} initial state, nothing has happened.
- * <li>{@link Cancellation} terminal state, {@code cancel} was called.
- * <li>{@link Failure} terminal state, {@code setException} was called.
- * <li>{@link SetFuture} intermediate state, {@code setFuture} was called.
- * <li>{@link #NULL} terminal state, {@code set(null)} was called.
- * <li>Any other non-null value, terminal state, {@code set} was called with
- * a non-null argument.
- * </ul>
- */ - private volatile Object value; - - /** - * All listeners. - */ - private volatile Listener listeners; - - /** - * All waiting threads. - */ - private volatile Waiter waiters; - - /** - * Constructor for use by subclasses. - */ - protected AbstractFuture() { - } - - // Gets and Timed Gets - // - // * Be responsive to interruption - // * Don't create Waiter nodes if you aren't going to park, this helps - // reduce contention on the waiters field. - // * Future completion is defined by when #value becomes non-null/non - // SetFuture - // * Future completion can be observed if the waiters field contains a - // TOMBSTONE - - // Timed Get - // There are a few design constraints to consider - // * We want to be responsive to small timeouts, unpark() has non trivial - // latency overheads (I have observed 12 micros on 64 bit linux systems to - // wake up a parked thread). So if the timeout is small we shouldn't park(). - // This needs to be traded off with the cpu overhead of spinning, so we use - // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for - // similar purposes. - // * We want to behave reasonably for timeouts of 0 - // * We are more responsive to completion than timeouts. This is because - // parkNanos depends on system scheduling and as such we could either miss - // our deadline, or unpark() could be delayed so that it looks like we - // timed out even though we didn't. For comparison FutureTask respects - // completion preferably and AQS is non-deterministic (depends on where in - // the queue the waiter is). If we wanted to be strict about it, we could - // store the unpark() time in the Waiter node and we could use that to make - // a decision about whether or not we timed out prior to being unparked. - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *
- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get(long timeout, TimeUnit unit) - throws InterruptedException, TimeoutException, ExecutionException { - // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into - // the while(true) loop at the bottom and throw a timeoutexception. - long remainingNanos = unit - .toNanos(timeout); // we rely on the implicit null check on unit. - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - // we delay calling nanoTime until we know we will need to either park or - // spin - final long endNanos = remainingNanos > 0 ? System - .nanoTime() + remainingNanos : 0; - long_wait_loop: - if (remainingNanos >= SPIN_THRESHOLD_NANOS) { - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - while (true) { - LockSupport.parkNanos(this, remainingNanos); - // Check interruption first, if we woke up due to interruption - // we need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - - // timed out? - remainingNanos = endNanos - System.nanoTime(); - if (remainingNanos < SPIN_THRESHOLD_NANOS) { - // Remove the waiter, one way or another we are done parking - // this thread. - removeWaiter(node); - break long_wait_loop; // jump down to the busy wait loop - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and - // there is no node on the waiters list - while (remainingNanos > 0) { - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } - remainingNanos = endNanos - System.nanoTime(); - } - throw new TimeoutException(); - } - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *
- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get() throws InterruptedException, ExecutionException { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - // we are on the stack, now wait for completion. - while (true) { - LockSupport.park(this); - // Check interruption first, if we woke up due to interruption we - // need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - - /** - * Unboxes {@code obj}. Assumes that obj is not {@code null} or a - * {@link SetFuture}. - */ - private V getDoneValue(Object obj) throws ExecutionException { - // While this seems like it might be too branch-y, simple benchmarking - // proves it to be unmeasurable (comparing done AbstractFutures with - // immediateFuture) - if (obj instanceof Cancellation) { - throw cancellationExceptionWithCause( - "Task was cancelled.", ((Cancellation) obj).cause); - } else if (obj instanceof Failure) { - throw new ExecutionException(((Failure) obj).exception); - } else if (obj == NULL) { - return null; - } else { - @SuppressWarnings("unchecked") // this is the only other option - V asV = (V) obj; - return asV; - } - } - - @Override - public boolean isDone() { - final Object localValue = value; - return localValue != null & !(localValue instanceof SetFuture); - } - - @Override - public boolean isCancelled() { - final Object localValue = value; - return localValue instanceof Cancellation; - } - - /** - * {@inheritDoc} - *
- * <p>
If a cancellation attempt succeeds on a {@code Future} that had - * previously been {@linkplain#setFuture set asynchronously}, then the - * cancellation will also be propagated to the delegate {@code Future} that - * was supplied in the {@code setFuture} call. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - Object localValue = value; - boolean rValue = false; - if (localValue == null | localValue instanceof SetFuture) { - // Try to delay allocating the exception. At this point we may still - // lose the CAS, but it is certainly less likely. - Throwable cause = - GENERATE_CANCELLATION_CAUSES - ? new CancellationException("Future.cancel() was called.") - : null; - Object valueToSet = new Cancellation(mayInterruptIfRunning, cause); - AbstractFuture abstractFuture = this; - while (true) { - if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) { - rValue = true; - // We call interuptTask before calling complete(), which is - // consistent with FutureTask - if (mayInterruptIfRunning) { - abstractFuture.interruptTask(); - } - complete(abstractFuture); - if (localValue instanceof SetFuture) { - // propagate cancellation to the future set in setfuture, this is - // racy, and we don't care if we are successful or not. - ListenableFuture futureToPropagateTo = ((SetFuture) localValue) - .future; - if (futureToPropagateTo instanceof TrustedFuture) { - // If the future is a TrustedFuture then we specifically avoid - // calling cancel() this has 2 benefits - // 1. for long chains of futures strung together with setFuture - // we consume less stack - // 2. we avoid allocating Cancellation objects at every level of - // the cancellation chain - // We can only do this for TrustedFuture, because - // TrustedFuture.cancel is final and does nothing but delegate - // to this method. - AbstractFuture trusted = (AbstractFuture) - futureToPropagateTo; - localValue = trusted.value; - if (localValue == null | localValue instanceof SetFuture) { - abstractFuture = trusted; - continue; // loop back up and try to complete the new future - } - } else { - // not a TrustedFuture, call cancel directly. - futureToPropagateTo.cancel(mayInterruptIfRunning); - } - } - break; - } - // obj changed, reread - localValue = abstractFuture.value; - if (!(localValue instanceof SetFuture)) { - // obj cannot be null at this point, because value can only change - // from null to non-null. So if value changed (and it did since we - // lost the CAS), then it cannot be null and since it isn't a - // SetFuture, then the future must be done and we should exit the loop - break; - } - } - } - return rValue; - } - - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a - * successful call to {@link #cancel(boolean) cancel(true)}. - *
- * <p>
The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() { - } - - /** - * Returns true if this future was cancelled with {@code - * mayInterruptIfRunning} set to {@code true}. - * - * @since 14.0 - */ - protected final boolean wasInterrupted() { - final Object localValue = value; - return (localValue instanceof Cancellation) && ((Cancellation) localValue) - .wasInterrupted; - } - - /** - * {@inheritDoc} - * - * @since 10.0 - */ - @Override - public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); - Listener oldHead = listeners; - if (oldHead != Listener.TOMBSTONE) { - Listener newNode = new Listener(listener, executor); - do { - newNode.next = oldHead; - if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) { - return; - } - oldHead = listeners; // re-read - } while (oldHead != Listener.TOMBSTONE); - } - // If we get here then the Listener TOMBSTONE was set, which means the - // future is done, call the listener. - executeListener(listener, executor); - } - - /** - * Sets the result of this {@code Future} unless this {@code Future} has - * already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the {@code - * Future} may have previously been set asynchronously, in which case its - * result may not be known yet. That result, though not yet known, cannot - * be overridden by a call to a {@code set*} method, only by a call to - * {@link #cancel}. - * - * @param value the value to be used as the result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean set(@Nullable V val) { - Object valueToSet = value == null ? NULL : val; - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the failed result of this {@code Future} unless this {@code Future} - * has already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the - * {@code Future} may have previously been set asynchronously, in which case - * its result may not be known yet. That result, though not yet known, - * cannot be overridden by a call to a {@code set*} method, only by a call - * to {@link #cancel}. - * - * @param throwable the exception to be used as the failed result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the result of this {@code Future} to match the supplied input - * {@code Future} once the supplied {@code Future} is done, unless this - * {@code Future} has already been cancelled or set (including "set - * asynchronously," defined below). - *
- * <p>
If the supplied future is {@linkplain #isDone done} when this method - * is called and the call is accepted, then this future is guaranteed to - * have been completed with the supplied future by the time this method - * returns. If the supplied future is not done and the call is accepted, then - * the future will be set asynchronously. Note that such a result, - * though not yet known, cannot be overridden by a call to a {@code set*} - * method, only by a call to {@link #cancel}. - *
- * <p>
If the call {@code setFuture(delegate)} is accepted and this {@code - * Future} is later cancelled, cancellation will be propagated to {@code - * delegate}. Additionally, any call to {@code setFuture} after any - * cancellation will propagate cancellation to the supplied {@code Future}. - * - * @param future the future to delegate to - * @return true if the attempt was accepted, indicating that the {@code - * Future} was not previously cancelled or set. - * @since 19.0 - */ - @Beta - @SuppressWarnings("deadstore") - protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); - Object localValue = value; - if (localValue == null) { - if (future.isDone()) { - Object val = getFutureValue(future); - if (ATOMIC_HELPER.casValue(this, null, val)) { - complete(this); - return true; - } - return false; - } - SetFuture valueToSet = new SetFuture(this, future); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - // the listener is responsible for calling completeWithFuture, - // directExecutor is appropriate since all we are doing is unpacking - // a completed future which should be fast. - try { - future.addListener(valueToSet, directExecutor()); - } catch (Throwable t) { - // addListener has thrown an exception! SetFuture.run can't throw - // any exceptions so this must have been caused by addListener - // itself. The most likely explanation is a misconfigured mock. Try - // to switch to Failure. - Failure failure; - try { - failure = new Failure(t); - } catch (Throwable oomMostLikely) { - failure = Failure.FALLBACK_INSTANCE; - } - // Note: The only way this CAS could fail is if cancel() has raced - // with us. That is ok. - boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure); - } - return true; - } - localValue = value; // we lost the cas, fall through and maybe cancel - } - // The future has already been set to something. If it is cancellation we - // should cancel the incoming future. - if (localValue instanceof Cancellation) { - // we don't care if it fails, this is best-effort. - future.cancel(((Cancellation) localValue).wasInterrupted); - } - return false; - } - - /** - * Returns a value, suitable for storing in the {@link #value} field. From - * the given future, which is assumed to be done. - *
- * <p>
This is approximately the inverse of {@link #getDoneValue(Object)} - */ - private static Object getFutureValue(ListenableFuture future) { - Object valueToSet; - if (future instanceof TrustedFuture) { - // Break encapsulation for TrustedFuture instances since we know that - // subclasses cannot override .get() (since it is final) and therefore - // this is equivalent to calling .get() and unpacking the exceptions - // like we do below (just much faster because it is a single field read - // instead of a read, several branches and possibly creating exceptions). - return ((AbstractFuture) future).value; - } else { - // Otherwise calculate valueToSet by calling .get() - try { - Object v = getDone(future); - valueToSet = v == null ? NULL : v; - } catch (ExecutionException exception) { - valueToSet = new Failure(exception.getCause()); - } catch (CancellationException cancellation) { - valueToSet = new Cancellation(false, cancellation); - } catch (Throwable t) { - valueToSet = new Failure(t); - } - } - return valueToSet; - } - - /** - * Unblocks all threads and runs all listeners. - */ - private static void complete(AbstractFuture future) { - Listener next = null; - outer: - while (true) { - future.releaseWaiters(); - // We call this before the listeners in order to avoid needing to manage - // a separate stack data structure for them. afterDone() should be - // generally fast and only used for cleanup work... but in theory can - // also be recursive and create StackOverflowErrors - future.afterDone(); - // push the current set of listeners onto next - next = future.clearListeners(next); - future = null; - while (next != null) { - Listener curr = next; - next = next.next; - Runnable task = curr.task; - if (task instanceof SetFuture) { - SetFuture setFuture = (SetFuture) task; - // We unwind setFuture specifically to avoid StackOverflowErrors in - // the case of long chains of SetFutures - // Handling this special case is important because there is no way - // to pass an executor to setFuture, so a user couldn't break the - // chain by doing this themselves. It is also potentially common - // if someone writes a recursive Futures.transformAsync transformer. - future = setFuture.owner; - if (future.value == setFuture) { - Object valueToSet = getFutureValue(setFuture.future); - if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) { - continue outer; - } - } - // other wise the future we were trying to set is already done. - } else { - executeListener(task, curr.executor); - } - } - break; - } - } - - public static V getDone(Future future) throws ExecutionException { - /* - * We throw IllegalStateException, since the call could succeed later. - * Perhaps we "should" throw IllegalArgumentException, since the call - * could succeed with a different argument. Those exceptions' docs - * suggest that either is acceptable. Google's Java Practices page - * recommends IllegalArgumentException here, in part to keep its - * recommendation simple: Static methods should throw - * IllegalStateException only when they use static state. - * - * - * Why do we deviate here? The answer: We want for fluentFuture.getDone() - * to throw the same exception as Futures.getDone(fluentFuture). - */ - Preconditions.checkState(future.isDone(), "Future was expected to be " + - "done:" + - " %s", future); - return Uninterruptibles.getUninterruptibly(future); - } - - /** - * Callback method that is called exactly once after the future is completed. - *
- * <p>
If {@link #interruptTask} is also run during completion, - * {@link #afterDone} runs after it. - *
- * <p>
The default implementation of this method in {@code AbstractFuture} - * does nothing. This is intended for very lightweight cleanup work, for - * example, timing statistics or clearing fields. - * If your task does anything heavier consider, just using a listener with - * an executor. - * - * @since 20.0 - */ - @Beta - protected void afterDone() { - } - - /** - * If this future has been cancelled (and possibly interrupted), cancels - * (and possibly interrupts) the given future (if available). - *
- * <p>
This method should be used only when this future is completed. It is - * designed to be called from {@code done}. - */ - final void maybePropagateCancellation(@Nullable Future related) { - if (related != null & isCancelled()) { - related.cancel(wasInterrupted()); - } - } - - /** - * Releases all threads in the {@link #waiters} list, and clears the list. - */ - private void releaseWaiters() { - Waiter head; - do { - head = waiters; - } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE)); - for (Waiter currentWaiter = head; - currentWaiter != null; currentWaiter = currentWaiter.next) { - currentWaiter.unpark(); - } - } - - /** - * Clears the {@link #listeners} list and prepends its contents to {@code - * onto}, least recently added first. - */ - private Listener clearListeners(Listener onto) { - // We need to - // 1. atomically swap the listeners with TOMBSTONE, this is because - // addListener uses that to to synchronize with us - // 2. reverse the linked list, because despite our rather clear contract, - // people depend on us executing listeners in the order they were added - // 3. push all the items onto 'onto' and return the new head of the stack - Listener head; - do { - head = listeners; - } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE)); - Listener reversedList = onto; - while (head != null) { - Listener tmp = head; - head = head.next; - tmp.next = reversedList; - reversedList = tmp; - } - return reversedList; - } - - /** - * Submits the given runnable to the given {@link Executor} catching and - * logging all {@linkplain RuntimeException runtime exceptions} thrown by - * the executor. - */ - private static void executeListener(Runnable runnable, Executor executor) { - try { - executor.execute(runnable); - } catch (RuntimeException e) { - // Log it and keep going -- bad runnable and/or executor. Don't punish - // the other runnables if we're given a bad one. We only catch - // RuntimeException because we want Errors to propagate up. - LOG.log( - Level.SEVERE, - "RuntimeException while executing runnable " + runnable + " with " + - "executor " + executor, - e); - } - } - - private abstract static class AtomicHelper { - /** - * Non volatile write of the thread to the {@link Waiter#thread} field. - */ - abstract void putThread(Waiter waiter, Thread newValue); - - /** - * Non volatile write of the waiter to the {@link Waiter#next} field. - */ - abstract void putNext(Waiter waiter, Waiter newValue); - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - abstract boolean casWaiters( - AbstractFuture future, Waiter expect, - Waiter update); - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - abstract boolean casListeners( - AbstractFuture future, Listener expect, - Listener update); - - /** - * Performs a CAS operation on the {@link #value} field. - */ - abstract boolean casValue( - AbstractFuture future, Object expect, Object update); - } - - /** - * {@link AtomicHelper} based on {@link sun.misc.Unsafe}. - *
- * <p>
Static initialization of this class will fail if the - * {@link sun.misc.Unsafe} object cannot be accessed. - */ - private static final class UnsafeAtomicHelper extends AtomicHelper { - static final sun.misc.Unsafe UNSAFE; - static final long LISTENERS_OFFSET; - static final long WAITERS_OFFSET; - static final long VALUE_OFFSET; - static final long WAITER_THREAD_OFFSET; - static final long WAITER_NEXT_OFFSET; - - static { - sun.misc.Unsafe unsafe = null; - try { - unsafe = sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - try { - unsafe = - AccessController.doPrivileged( - new PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (PrivilegedActionException e) { - throw new RuntimeException( - "Could not initialize intrinsics", e.getCause()); - } - } - try { - Class abstractFuture = AbstractFuture.class; - WAITERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("waiters")); - LISTENERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("listeners")); - VALUE_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("value")); - WAITER_THREAD_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("thread")); - WAITER_NEXT_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("next")); - UNSAFE = unsafe; - } catch (Exception e) { - throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue); - } - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return UNSAFE - .compareAndSwapObject(future, WAITERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return UNSAFE - .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #value} field. - */ - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}. 
- */ - @SuppressWarnings("visibilitymodifier") - private static final class SafeAtomicHelper extends AtomicHelper { - final AtomicReferenceFieldUpdater waiterThreadUpdater; - final AtomicReferenceFieldUpdater waiterNextUpdater; - final AtomicReferenceFieldUpdater waitersUpdater; - final AtomicReferenceFieldUpdater - listenersUpdater; - final AtomicReferenceFieldUpdater valueUpdater; - - SafeAtomicHelper( - AtomicReferenceFieldUpdater waiterThreadUpdater, - AtomicReferenceFieldUpdater waiterNextUpdater, - AtomicReferenceFieldUpdater waitersUpdater, - AtomicReferenceFieldUpdater listenersUpdater, - AtomicReferenceFieldUpdater valueUpdater) { - this.waiterThreadUpdater = waiterThreadUpdater; - this.waiterNextUpdater = waiterNextUpdater; - this.waitersUpdater = waitersUpdater; - this.listenersUpdater = listenersUpdater; - this.valueUpdater = valueUpdater; - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - waiterThreadUpdater.lazySet(waiter, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiterNextUpdater.lazySet(waiter, newValue); - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return waitersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return listenersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return valueUpdater.compareAndSet(future, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@code synchronized} and volatile writes. - *
- * <p>
This is an implementation of last resort for when certain basic VM - * features are broken (like AtomicReferenceFieldUpdater). - */ - private static final class SynchronizedHelper extends AtomicHelper { - @Override - void putThread(Waiter waiter, Thread newValue) { - waiter.thread = newValue; - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiter.next = newValue; - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - synchronized (future) { - if (future.waiters == expect) { - future.waiters = update; - return true; - } - return false; - } - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - synchronized (future) { - if (future.listeners == expect) { - future.listeners = update; - return true; - } - return false; - } - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - synchronized (future) { - if (future.value == expect) { - future.value = update; - return true; - } - return false; - } - } - } - - private static CancellationException cancellationExceptionWithCause( - @Nullable String message, @Nullable Throwable cause) { - CancellationException exception = new CancellationException(message); - exception.initCause(cause); - return exception; - } - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}. - *
- * <p>This instance is equivalent to: <pre>   {@code
- *   final class DirectExecutor implements Executor {
- *     public void execute(Runnable r) {
- *       r.run();
- *     }
- *   }}</pre>
- */ - public static Executor directExecutor() { - return DirectExecutor.INSTANCE; - } - - /** - * See {@link #directExecutor} for behavioral notes. - */ - private enum DirectExecutor implements Executor { - INSTANCE; - - @Override - public void execute(Runnable command) { - command.run(); - } - - @Override - public String toString() { - return "MoreExecutors.directExecutor()"; - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java deleted file mode 100644 index f7391e3cca0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AsyncChecker.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
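Editor's note, not part of the patch: AbstractFuture (removed above) is generic in its result type, AbstractFuture<V>, although the angle brackets were lost when this diff text was extracted. The minimal sketch below illustrates the completion protocol its javadoc describes: a subclass exposes results through the protected set() and setException() methods, and listeners registered via addListener() run once the future reaches a terminal state. Class and method names in the sketch are illustrative only.

// Illustrative only (not part of this patch).
import org.apache.hadoop.ozone.container.common.volume.AbstractFuture;

public final class SettableResultFuture extends AbstractFuture<String> {

  // Expose the protected completion methods to callers.
  public boolean complete(String value) {
    return set(value);
  }

  public boolean fail(Throwable t) {
    return setException(t);
  }

  public static void main(String[] args) {
    SettableResultFuture future = new SettableResultFuture();
    // The listener runs in the completing thread via directExecutor().
    future.addListener(
        () -> System.out.println("future is done: " + future.isDone()),
        AbstractFuture.directExecutor());
    future.complete("volume check finished");
  }
}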
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.ListenableFuture; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; - -import java.util.Optional; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; - -/** - * A class that can be used to schedule an asynchronous check on a given - * {@link Checkable}. If the check is successfully scheduled then a - * {@link ListenableFuture} is returned. - * - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public interface AsyncChecker { - - /** - * Schedule an asynchronous check for the given object. - * - * @param target object to be checked. - * - * @param context the interpretation of the context depends on the - * target. - * - * @return returns a {@link Optional of ListenableFuture} that can be used to - * retrieve the result of the asynchronous check. - */ - Optional> schedule(Checkable target, K context); - - /** - * Cancel all executing checks and wait for them to complete. - * First attempts a graceful cancellation, then cancels forcefully. - * Waits for the supplied timeout after both attempts. - * - * See {@link ExecutorService#awaitTermination} for a description of - * the parameters. - * - * @throws InterruptedException - */ - void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java deleted file mode 100644 index 3e89f903138..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ /dev/null @@ -1,455 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
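Editor's note, not part of the patch: AsyncChecker (removed above) is likewise generic, AsyncChecker<K, V>, with schedule() returning Optional<ListenableFuture<V>>; the type parameters were stripped from the extracted text. The sketch below shows how a caller would schedule a check on an HddsVolume, which implements Checkable<Boolean, VolumeCheckResult> as the following file shows, and then shut the checker down. The concrete AsyncChecker implementation is assumed to be supplied by the caller; names here are illustrative.

// Illustrative only (not part of this patch).
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
import org.apache.hadoop.ozone.container.common.volume.AsyncChecker;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

import java.util.Optional;
import java.util.concurrent.TimeUnit;

public final class VolumeCheckSketch {
  private VolumeCheckSketch() { }

  public static void checkAndShutdown(
      AsyncChecker<Boolean, VolumeCheckResult> checker, HddsVolume volume)
      throws InterruptedException {
    // schedule() returns an empty Optional if the check could not be queued.
    Optional<ListenableFuture<VolumeCheckResult>> result =
        checker.schedule(volume, null);
    result.ifPresent(future -> future.addListener(
        () -> System.out.println("volume check completed for "
            + volume.getHddsRootDir()),
        Runnable::run));
    // Cancel outstanding checks and wait for them to finish.
    checker.shutdownAndWait(1, TimeUnit.MINUTES);
  }
}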
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import javax.annotation.Nullable; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; - -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.Time; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * HddsVolume represents volume in a datanode. {@link VolumeSet} maintains a - * list of HddsVolumes, one for each volume in the Datanode. - * {@link VolumeInfo} in encompassed by this class. - *

- * The disk layout per volume is as follows:
- *
- * ../hdds/VERSION
- *
- * {@literal ../hdds/<>/current/<>/<>/metadata}
- *
- * {@literal ../hdds/<>/current/<>/<>/<>}
- *
- * Each hdds volume has its own VERSION file. The hdds volume will have one - * scmUuid directory for each SCM it is a part of (currently only one SCM is - * supported). - * - * During DN startup, if the VERSION file exists, we verify that the - * clusterID in the version file matches the clusterID from SCM. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -@SuppressWarnings("finalclass") -public class HddsVolume - implements Checkable { - - private static final Logger LOG = LoggerFactory.getLogger(HddsVolume.class); - - public static final String HDDS_VOLUME_DIR = "hdds"; - - private final File hddsRootDir; - private final VolumeInfo volumeInfo; - private VolumeState state; - private final VolumeIOStats volumeIOStats; - - // VERSION file properties - private String storageID; // id of the file system - private String clusterID; // id of the cluster - private String datanodeUuid; // id of the DataNode - private long cTime; // creation time of the file system state - private int layoutVersion; // layout version of the storage data - private final AtomicLong committedBytes; // till Open containers become full - - /** - * Run a check on the current volume to determine if it is healthy. - * @param unused context for the check, ignored. - * @return result of checking the volume. - * @throws Exception if an exception was encountered while running - * the volume check. - */ - @Override - public VolumeCheckResult check(@Nullable Boolean unused) throws Exception { - DiskChecker.checkDir(hddsRootDir); - return VolumeCheckResult.HEALTHY; - } - - /** - * Builder for HddsVolume. - */ - public static class Builder { - private final String volumeRootStr; - private Configuration conf; - private StorageType storageType; - private long configuredCapacity; - - private String datanodeUuid; - private String clusterID; - private boolean failedVolume = false; - - public Builder(String rootDirStr) { - this.volumeRootStr = rootDirStr; - } - - public Builder conf(Configuration config) { - this.conf = config; - return this; - } - - public Builder storageType(StorageType st) { - this.storageType = st; - return this; - } - - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - - public Builder datanodeUuid(String datanodeUUID) { - this.datanodeUuid = datanodeUUID; - return this; - } - - public Builder clusterID(String cid) { - this.clusterID = cid; - return this; - } - - // This is added just to create failed volume objects, which will be used - // to create failed HddsVolume objects in the case of any exceptions caused - // during creating HddsVolume object. 
- public Builder failedVolume(boolean failed) { - this.failedVolume = failed; - return this; - } - - public HddsVolume build() throws IOException { - return new HddsVolume(this); - } - } - - private HddsVolume(Builder b) throws IOException { - if (!b.failedVolume) { - StorageLocation location = StorageLocation.parse(b.volumeRootStr); - hddsRootDir = new File(location.getUri().getPath(), HDDS_VOLUME_DIR); - this.state = VolumeState.NOT_INITIALIZED; - this.clusterID = b.clusterID; - this.datanodeUuid = b.datanodeUuid; - this.volumeIOStats = new VolumeIOStats(); - - VolumeInfo.Builder volumeBuilder = - new VolumeInfo.Builder(b.volumeRootStr, b.conf) - .storageType(b.storageType) - .configuredCapacity(b.configuredCapacity); - this.volumeInfo = volumeBuilder.build(); - this.committedBytes = new AtomicLong(0); - - LOG.info("Creating Volume: " + this.hddsRootDir + " of storage type : " + - b.storageType + " and capacity : " + volumeInfo.getCapacity()); - - initialize(); - } else { - // Builder is called with failedVolume set, so create a failed volume - // HddsVolumeObject. - hddsRootDir = new File(b.volumeRootStr); - volumeIOStats = null; - volumeInfo = null; - storageID = UUID.randomUUID().toString(); - state = VolumeState.FAILED; - committedBytes = null; - } - } - - public VolumeInfo getVolumeInfo() { - return volumeInfo; - } - - /** - * Initializes the volume. - * Creates the Version file if not present, - * otherwise returns with IOException. - * @throws IOException - */ - private void initialize() throws IOException { - VolumeState intialVolumeState = analyzeVolumeState(); - switch (intialVolumeState) { - case NON_EXISTENT: - // Root directory does not exist. Create it. - if (!hddsRootDir.mkdirs()) { - throw new IOException("Cannot create directory " + hddsRootDir); - } - setState(VolumeState.NOT_FORMATTED); - createVersionFile(); - break; - case NOT_FORMATTED: - // Version File does not exist. Create it. - createVersionFile(); - break; - case NOT_INITIALIZED: - // Version File exists. Verify its correctness and update property fields. - readVersionFile(); - setState(VolumeState.NORMAL); - break; - case INCONSISTENT: - // Volume Root is in an inconsistent state. Skip loading this volume. - throw new IOException("Volume is in an " + VolumeState.INCONSISTENT + - " state. Skipped loading volume: " + hddsRootDir.getPath()); - default: - throw new IOException("Unrecognized initial state : " + - intialVolumeState + "of volume : " + hddsRootDir); - } - } - - private VolumeState analyzeVolumeState() { - if (!hddsRootDir.exists()) { - // Volume Root does not exist. - return VolumeState.NON_EXISTENT; - } - if (!hddsRootDir.isDirectory()) { - // Volume Root exists but is not a directory. - return VolumeState.INCONSISTENT; - } - File[] files = hddsRootDir.listFiles(); - if (files == null || files.length == 0) { - // Volume Root exists and is empty. - return VolumeState.NOT_FORMATTED; - } - if (!getVersionFile().exists()) { - // Volume Root is non empty but VERSION file does not exist. - return VolumeState.INCONSISTENT; - } - // Volume Root and VERSION file exist. - return VolumeState.NOT_INITIALIZED; - } - - public void format(String cid) throws IOException { - Preconditions.checkNotNull(cid, "clusterID cannot be null while " + - "formatting Volume"); - this.clusterID = cid; - initialize(); - } - - /** - * Create Version File and write property fields into it. 
- * @throws IOException - */ - private void createVersionFile() throws IOException { - this.storageID = HddsVolumeUtil.generateUuid(); - this.cTime = Time.now(); - this.layoutVersion = ChunkLayOutVersion.getLatestVersion().getVersion(); - - if (this.clusterID == null || datanodeUuid == null) { - // HddsDatanodeService does not have the cluster information yet. Wait - // for registration with SCM. - LOG.debug("ClusterID not available. Cannot format the volume {}", - this.hddsRootDir.getPath()); - setState(VolumeState.NOT_FORMATTED); - } else { - // Write the version file to disk. - writeVersionFile(); - setState(VolumeState.NORMAL); - } - } - - private void writeVersionFile() throws IOException { - Preconditions.checkNotNull(this.storageID, - "StorageID cannot be null in Version File"); - Preconditions.checkNotNull(this.clusterID, - "ClusterID cannot be null in Version File"); - Preconditions.checkNotNull(this.datanodeUuid, - "DatanodeUUID cannot be null in Version File"); - Preconditions.checkArgument(this.cTime > 0, - "Creation Time should be positive"); - Preconditions.checkArgument(this.layoutVersion == - DataNodeLayoutVersion.getLatestVersion().getVersion(), - "Version File should have the latest LayOutVersion"); - - File versionFile = getVersionFile(); - LOG.debug("Writing Version file to disk, {}", versionFile); - - DatanodeVersionFile dnVersionFile = new DatanodeVersionFile(this.storageID, - this.clusterID, this.datanodeUuid, this.cTime, this.layoutVersion); - dnVersionFile.createVersionFile(versionFile); - } - - /** - * Read Version File and update property fields. - * Get common storage fields. - * Should be overloaded if additional fields need to be read. - * - * @throws IOException on error - */ - private void readVersionFile() throws IOException { - File versionFile = getVersionFile(); - Properties props = DatanodeVersionFile.readFrom(versionFile); - if (props.isEmpty()) { - throw new InconsistentStorageStateException( - "Version file " + versionFile + " is missing"); - } - - LOG.debug("Reading Version file from disk, {}", versionFile); - this.storageID = HddsVolumeUtil.getStorageID(props, versionFile); - this.clusterID = HddsVolumeUtil.getClusterID(props, versionFile, - this.clusterID); - this.datanodeUuid = HddsVolumeUtil.getDatanodeUUID(props, versionFile, - this.datanodeUuid); - this.cTime = HddsVolumeUtil.getCreationTime(props, versionFile); - this.layoutVersion = HddsVolumeUtil.getLayOutVersion(props, versionFile); - } - - private File getVersionFile() { - return HddsVolumeUtil.getVersionFile(hddsRootDir); - } - - public File getHddsRootDir() { - return hddsRootDir; - } - - public StorageType getStorageType() { - if(volumeInfo != null) { - return volumeInfo.getStorageType(); - } - return StorageType.DEFAULT; - } - - public String getStorageID() { - return storageID; - } - - public String getClusterID() { - return clusterID; - } - - public String getDatanodeUuid() { - return datanodeUuid; - } - - public long getCTime() { - return cTime; - } - - public int getLayoutVersion() { - return layoutVersion; - } - - public VolumeState getStorageState() { - return state; - } - - public long getCapacity() throws IOException { - if(volumeInfo != null) { - return volumeInfo.getCapacity(); - } - return 0; - } - - public long getAvailable() throws IOException { - if(volumeInfo != null) { - return volumeInfo.getAvailable(); - } - return 0; - } - - public void setState(VolumeState state) { - this.state = state; - } - - public boolean isFailed() { - return (state == VolumeState.FAILED); - } 
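Taken together, the Builder and the VERSION-file handling above amount to the following usage pattern. A hedged sketch only: the path, UUID and cluster id are placeholders and exception handling is elided.

    Configuration conf = new OzoneConfiguration();
    HddsVolume volume = new HddsVolume.Builder("/data/disk1")
        .conf(conf)
        .datanodeUuid(UUID.randomUUID().toString())
        .clusterID("CID-example")        // normally learned from SCM registration
        .storageType(StorageType.DISK)
        .build();
    // build() creates /data/disk1/hdds and, once clusterID and datanodeUuid are
    // known, writes a VERSION file holding storageID, clusterID, datanodeUuid,
    // cTime and layoutVersion; the volume then moves to VolumeState.NORMAL.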
- - public VolumeIOStats getVolumeIOStats() { - return volumeIOStats; - } - - public void failVolume() { - setState(VolumeState.FAILED); - if (volumeInfo != null) { - volumeInfo.shutdownUsageThread(); - } - } - - public void shutdown() { - this.state = VolumeState.NON_EXISTENT; - if (volumeInfo != null) { - volumeInfo.shutdownUsageThread(); - } - } - - /** - * VolumeState represents the different states a HddsVolume can be in. - * NORMAL => Volume can be used for storage - * FAILED => Volume has failed due and can no longer be used for - * storing containers. - * NON_EXISTENT => Volume Root dir does not exist - * INCONSISTENT => Volume Root dir is not empty but VERSION file is - * missing or Volume Root dir is not a directory - * NOT_FORMATTED => Volume Root exists but not formatted(no VERSION file) - * NOT_INITIALIZED => VERSION file exists but has not been verified for - * correctness. - */ - public enum VolumeState { - NORMAL, - FAILED, - NON_EXISTENT, - INCONSISTENT, - NOT_FORMATTED, - NOT_INITIALIZED - } - - /** - * add "delta" bytes to committed space in the volume. - * @param delta bytes to add to committed space counter - * @return bytes of committed space - */ - public long incCommittedBytes(long delta) { - return committedBytes.addAndGet(delta); - } - - /** - * return the committed space in the volume. - * @return bytes of committed space - */ - public long getCommittedBytes() { - return committedBytes.get(); - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - if (volumeInfo != null) { - volumeInfo.setScmUsageForTesting(scmUsageForTest); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java deleted file mode 100644 index 800789f6e0e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java +++ /dev/null @@ -1,424 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.Timer; - -import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.Optional; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY; - - -/** - * A class that encapsulates running disk checks against each HDDS volume and - * allows retrieving a list of failed volumes. - */ -public class HddsVolumeChecker { - - public static final Logger LOG = - LoggerFactory.getLogger(HddsVolumeChecker.class); - - private AsyncChecker delegateChecker; - - private final AtomicLong numVolumeChecks = new AtomicLong(0); - private final AtomicLong numAllVolumeChecks = new AtomicLong(0); - private final AtomicLong numSkippedChecks = new AtomicLong(0); - - /** - * Max allowed time for a disk check in milliseconds. If the check - * doesn't complete within this time we declare the disk as dead. - */ - private final long maxAllowedTimeForCheckMs; - - /** - * Minimum time between two successive disk checks of a volume. - */ - private final long minDiskCheckGapMs; - - /** - * Timestamp of the last check of all volumes. - */ - private long lastAllVolumesCheck; - - private final Timer timer; - - private final ExecutorService checkVolumeResultHandlerExecutorService; - - /** - * @param conf Configuration object. - * @param timer {@link Timer} object used for throttling checks. 
- */ - public HddsVolumeChecker(Configuration conf, Timer timer) - throws DiskErrorException { - maxAllowedTimeForCheckMs = conf.getTimeDuration( - DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, - DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - if (maxAllowedTimeForCheckMs <= 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " - + maxAllowedTimeForCheckMs + " (should be > 0)"); - } - - this.timer = timer; - - /** - * Maximum number of volume failures that can be tolerated without - * declaring a fatal error. - */ - int maxVolumeFailuresTolerated = conf.getInt( - DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, - DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT); - - minDiskCheckGapMs = conf.getTimeDuration( - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY, - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT, - TimeUnit.MILLISECONDS); - - if (minDiskCheckGapMs < 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - " - + minDiskCheckGapMs + " (should be >= 0)"); - } - - long diskCheckTimeout = conf.getTimeDuration( - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, - DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - if (diskCheckTimeout < 0) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - " - + diskCheckTimeout + " (should be >= 0)"); - } - - lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs; - - if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - throw new DiskErrorException("Invalid value configured for " - + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " - + maxVolumeFailuresTolerated + " " - + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG); - } - - delegateChecker = new ThrottledAsyncChecker<>( - timer, minDiskCheckGapMs, diskCheckTimeout, - Executors.newCachedThreadPool( - new ThreadFactoryBuilder() - .setNameFormat("DataNode DiskChecker thread %d") - .setDaemon(true) - .build())); - - checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool( - new ThreadFactoryBuilder() - .setNameFormat("VolumeCheck ResultHandler thread %d") - .setDaemon(true) - .build()); - } - - /** - * Run checks against all HDDS volumes. - * - * This check may be performed at service startup and subsequently at - * regular intervals to detect and handle failed volumes. - * - * @param volumes - Set of volumes to be checked. This set must be immutable - * for the duration of the check else the results will be - * unexpected. - * - * @return set of failed volumes. 
- */ - public Set checkAllVolumes(Collection volumes) - throws InterruptedException { - final long gap = timer.monotonicNow() - lastAllVolumesCheck; - if (gap < minDiskCheckGapMs) { - numSkippedChecks.incrementAndGet(); - if (LOG.isTraceEnabled()) { - LOG.trace( - "Skipped checking all volumes, time since last check {} is less " + - "than the minimum gap between checks ({} ms).", - gap, minDiskCheckGapMs); - } - return Collections.emptySet(); - } - - lastAllVolumesCheck = timer.monotonicNow(); - final Set healthyVolumes = new HashSet<>(); - final Set failedVolumes = new HashSet<>(); - final Set allVolumes = new HashSet<>(); - - final AtomicLong numVolumes = new AtomicLong(volumes.size()); - final CountDownLatch latch = new CountDownLatch(1); - - for (HddsVolume v : volumes) { - Optional> olf = - delegateChecker.schedule(v, null); - LOG.info("Scheduled health check for volume {}", v); - if (olf.isPresent()) { - allVolumes.add(v); - Futures.addCallback(olf.get(), - new ResultHandler(v, healthyVolumes, failedVolumes, - numVolumes, (ignored1, ignored2) -> latch.countDown())); - } else { - if (numVolumes.decrementAndGet() == 0) { - latch.countDown(); - } - } - } - - // Wait until our timeout elapses, after which we give up on - // the remaining volumes. - if (!latch.await(maxAllowedTimeForCheckMs, TimeUnit.MILLISECONDS)) { - LOG.warn("checkAllVolumes timed out after {} ms" + - maxAllowedTimeForCheckMs); - } - - numAllVolumeChecks.incrementAndGet(); - synchronized (this) { - // All volumes that have not been detected as healthy should be - // considered failed. This is a superset of 'failedVolumes'. - // - // Make a copy under the mutex as Sets.difference() returns a view - // of a potentially changing set. - return new HashSet<>(Sets.difference(allVolumes, healthyVolumes)); - } - } - - /** - * A callback interface that is supplied the result of running an - * async disk check on multiple volumes. - */ - public interface Callback { - /** - * @param healthyVolumes set of volumes that passed disk checks. - * @param failedVolumes set of volumes that failed disk checks. - */ - void call(Set healthyVolumes, - Set failedVolumes); - } - - /** - * Check a single volume asynchronously, returning a {@link ListenableFuture} - * that can be used to retrieve the final result. - * - * If the volume cannot be referenced then it is already closed and - * cannot be checked. No error is propagated to the callback. - * - * @param volume the volume that is to be checked. - * @param callback callback to be invoked when the volume check completes. - * @return true if the check was scheduled and the callback will be invoked. - * false otherwise. - */ - public boolean checkVolume(final HddsVolume volume, Callback callback) { - if (volume == null) { - LOG.debug("Cannot schedule check on null volume"); - return false; - } - - Optional> olf = - delegateChecker.schedule(volume, null); - if (olf.isPresent()) { - numVolumeChecks.incrementAndGet(); - Futures.addCallback(olf.get(), - new ResultHandler(volume, new HashSet<>(), new HashSet<>(), - new AtomicLong(1), callback), - checkVolumeResultHandlerExecutorService - ); - return true; - } - return false; - } - - /** - * A callback to process the results of checking a volume. 
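The two entry points above are used roughly as follows. This is an illustrative sketch, assuming a VolumeSet named volumeSet and an slf4j LOG are in scope and ignoring InterruptedException handling.

    HddsVolumeChecker checker = new HddsVolumeChecker(conf, new Timer());

    // Synchronous sweep over every volume; volumes not reported healthy in
    // time are returned as failed.
    Set<HddsVolume> failed = checker.checkAllVolumes(volumeSet.getVolumesList());

    // Asynchronous check of a single volume; the callback fires once this
    // check completes.
    boolean scheduled = checker.checkVolume(volume,
        (healthyVolumes, failedVolumes) ->
            failedVolumes.forEach(v -> LOG.warn("Volume failed: {}", v)));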
- */ - private class ResultHandler - implements FutureCallback { - private final HddsVolume volume; - private final Set failedVolumes; - private final Set healthyVolumes; - private final AtomicLong volumeCounter; - - @Nullable - private final Callback callback; - - /** - * - * @param healthyVolumes set of healthy volumes. If the disk check is - * successful, add the volume here. - * @param failedVolumes set of failed volumes. If the disk check fails, - * add the volume here. - * @param volumeCounter volumeCounter used to trigger callback invocation. - * @param callback invoked when the volumeCounter reaches 0. - */ - ResultHandler(HddsVolume volume, - Set healthyVolumes, - Set failedVolumes, - AtomicLong volumeCounter, - @Nullable Callback callback) { - this.volume = volume; - this.healthyVolumes = healthyVolumes; - this.failedVolumes = failedVolumes; - this.volumeCounter = volumeCounter; - this.callback = callback; - } - - @Override - public void onSuccess(@Nonnull VolumeCheckResult result) { - switch (result) { - case HEALTHY: - case DEGRADED: - if (LOG.isDebugEnabled()) { - LOG.debug("Volume {} is {}.", volume, result); - } - markHealthy(); - break; - case FAILED: - LOG.warn("Volume {} detected as being unhealthy", volume); - markFailed(); - break; - default: - LOG.error("Unexpected health check result {} for volume {}", - result, volume); - markHealthy(); - break; - } - cleanup(); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - Throwable exception = (t instanceof ExecutionException) ? - t.getCause() : t; - LOG.warn("Exception running disk checks against volume " + - volume, exception); - markFailed(); - cleanup(); - } - - private void markHealthy() { - synchronized (HddsVolumeChecker.this) { - healthyVolumes.add(volume); - } - } - - private void markFailed() { - synchronized (HddsVolumeChecker.this) { - failedVolumes.add(volume); - } - } - - private void cleanup() { - invokeCallback(); - } - - private void invokeCallback() { - try { - final long remaining = volumeCounter.decrementAndGet(); - if (callback != null && remaining == 0) { - callback.call(healthyVolumes, failedVolumes); - } - } catch(Exception e) { - // Propagating this exception is unlikely to be helpful. - LOG.warn("Unexpected exception", e); - } - } - } - - /** - * Shutdown the checker and its associated ExecutorService. - * - * See {@link ExecutorService#awaitTermination} for the interpretation - * of the parameters. - */ - void shutdownAndWait(int gracePeriod, TimeUnit timeUnit) { - try { - delegateChecker.shutdownAndWait(gracePeriod, timeUnit); - } catch (InterruptedException e) { - LOG.warn("{} interrupted during shutdown.", - this.getClass().getSimpleName()); - Thread.currentThread().interrupt(); - } - } - - /** - * This method is for testing only. - * - * @param testDelegate - */ - @VisibleForTesting - void setDelegateChecker( - AsyncChecker testDelegate) { - delegateChecker = testDelegate; - } - - /** - * Return the number of {@link #checkVolume} invocations. - */ - public long getNumVolumeChecks() { - return numVolumeChecks.get(); - } - - /** - * Return the number of {@link #checkAllVolumes} invocations. - */ - public long getNumAllVolumeChecks() { - return numAllVolumeChecks.get(); - } - - /** - * Return the number of checks skipped because the minimum gap since the - * last check had not elapsed. 
- */ - public long getNumSkippedChecks() { - return numSkippedChecks.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java deleted file mode 100644 index f503149aca4..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Choose volumes in round-robin order. - * The caller should synchronize access to the list of volumes. - */ -public class RoundRobinVolumeChoosingPolicy implements VolumeChoosingPolicy { - - public static final Log LOG = LogFactory.getLog( - RoundRobinVolumeChoosingPolicy.class); - - // Stores the index of the next volume to be returned. - private AtomicInteger nextVolumeIndex = new AtomicInteger(0); - - @Override - public HddsVolume chooseVolume(List volumes, - long maxContainerSize) throws IOException { - - // No volumes available to choose from - if (volumes.size() < 1) { - throw new DiskOutOfSpaceException("No more available volumes"); - } - - // since volumes could've been removed because of the failure - // make sure we are not out of bounds - int nextIndex = nextVolumeIndex.get(); - int currentVolumeIndex = nextIndex < volumes.size() ? 
nextIndex : 0; - - int startVolumeIndex = currentVolumeIndex; - long maxAvailable = 0; - - while (true) { - final HddsVolume volume = volumes.get(currentVolumeIndex); - // adjust for remaining capacity in Open containers - long availableVolumeSize = volume.getAvailable() - - volume.getCommittedBytes(); - - currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size(); - - if (availableVolumeSize > maxContainerSize) { - nextVolumeIndex.compareAndSet(nextIndex, currentVolumeIndex); - return volume; - } - - if (availableVolumeSize > maxAvailable) { - maxAvailable = availableVolumeSize; - } - - if (currentVolumeIndex == startVolumeIndex) { - throw new DiskOutOfSpaceException("Out of space: " - + "The volume with the most available space (=" + maxAvailable - + " B) is less than the container size (=" + maxContainerSize - + " B)."); - } - - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java deleted file mode 100644 index 836fdf3e395..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ /dev/null @@ -1,248 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
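In use, the round-robin policy above is handed the current volume list and the space a new container may need; it skips any volume whose free space, after subtracting the bytes already committed to open containers, is too small. A brief sketch, assuming volumeSet is available (the 5 GB figure is an arbitrary example):

    VolumeChoosingPolicy policy = new RoundRobinVolumeChoosingPolicy();
    long maxContainerSize = 5L * 1024 * 1024 * 1024;   // room for a 5 GB container
    HddsVolume target = policy.chooseVolume(volumeSet.getVolumesList(),
        maxContainerSize);
    // Throws DiskOutOfSpaceException if no volume satisfies
    // getAvailable() - getCommittedBytes() > maxContainerSize.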

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.MoreExecutors; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.util.Timer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.WeakHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -/** - * An implementation of {@link AsyncChecker} that skips checking recently - * checked objects. It will enforce at least minMsBetweenChecks - * milliseconds between two successive checks of any one object. - * - * It is assumed that the total number of Checkable objects in the system - * is small, (not more than a few dozen) since the checker uses O(Checkables) - * storage and also potentially O(Checkables) threads. - * - * minMsBetweenChecks should be configured reasonably - * by the caller to avoid spinning up too many threads frequently. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class ThrottledAsyncChecker implements AsyncChecker { - public static final Logger LOG = - LoggerFactory.getLogger(ThrottledAsyncChecker.class); - - private final Timer timer; - - /** - * The ExecutorService used to schedule asynchronous checks. - */ - private final ListeningExecutorService executorService; - private final ScheduledExecutorService scheduledExecutorService; - - /** - * The minimum gap in milliseconds between two successive checks - * of the same object. This is the throttle. - */ - private final long minMsBetweenChecks; - private final long diskCheckTimeout; - - /** - * Map of checks that are currently in progress. Protected by the object - * lock. - */ - private final Map> checksInProgress; - - /** - * Maps Checkable objects to a future that can be used to retrieve - * the results of the operation. - * Protected by the object lock. 
- */ - private final Map> - completedChecks; - - public ThrottledAsyncChecker(final Timer timer, - final long minMsBetweenChecks, - final long diskCheckTimeout, - final ExecutorService executorService) { - this.timer = timer; - this.minMsBetweenChecks = minMsBetweenChecks; - this.diskCheckTimeout = diskCheckTimeout; - this.executorService = MoreExecutors.listeningDecorator(executorService); - this.checksInProgress = new HashMap<>(); - this.completedChecks = new WeakHashMap<>(); - - if (this.diskCheckTimeout > 0) { - ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new - ScheduledThreadPoolExecutor(1); - this.scheduledExecutorService = MoreExecutors - .getExitingScheduledExecutorService(scheduledThreadPoolExecutor); - } else { - this.scheduledExecutorService = null; - } - } - - /** - * See {@link AsyncChecker#schedule} - * - * If the object has been checked recently then the check will - * be skipped. Multiple concurrent checks for the same object - * will receive the same Future. - */ - @Override - public Optional> schedule( - Checkable target, K context) { - if (checksInProgress.containsKey(target)) { - return Optional.empty(); - } - - if (completedChecks.containsKey(target)) { - final ThrottledAsyncChecker.LastCheckResult result = - completedChecks.get(target); - final long msSinceLastCheck = timer.monotonicNow() - result.completedAt; - if (msSinceLastCheck < minMsBetweenChecks) { - if (LOG.isDebugEnabled()) { - LOG.debug("Skipped checking {}. Time since last check {}ms " + - "is less than the min gap {}ms.", - target, msSinceLastCheck, minMsBetweenChecks); - } - return Optional.empty(); - } - } - - LOG.info("Scheduling a check for {}", target); - final ListenableFuture lfWithoutTimeout = executorService.submit( - () -> target.check(context)); - final ListenableFuture lf; - - if (diskCheckTimeout > 0) { - lf = TimeoutFuture - .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, - scheduledExecutorService); - } else { - lf = lfWithoutTimeout; - } - - checksInProgress.put(target, lf); - addResultCachingCallback(target, lf); - return Optional.of(lf); - } - - /** - * Register a callback to cache the result of a check. - * @param target - * @param lf - */ - private void addResultCachingCallback( - Checkable target, ListenableFuture lf) { - Futures.addCallback(lf, new FutureCallback() { - @Override - public void onSuccess(@Nullable V result) { - synchronized (ThrottledAsyncChecker.this) { - checksInProgress.remove(target); - completedChecks.put(target, new LastCheckResult<>( - result, timer.monotonicNow())); - } - } - - @Override - public void onFailure(@Nonnull Throwable t) { - synchronized (ThrottledAsyncChecker.this) { - checksInProgress.remove(target); - completedChecks.put(target, new LastCheckResult<>( - t, timer.monotonicNow())); - } - } - }); - } - - /** - * {@inheritDoc}. - * - * The results of in-progress checks are not useful during shutdown, - * so we optimize for faster shutdown by interrupt all actively - * executing checks. - */ - @Override - public void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException { - if (scheduledExecutorService != null) { - scheduledExecutorService.shutdownNow(); - scheduledExecutorService.awaitTermination(timeout, timeUnit); - } - - executorService.shutdownNow(); - executorService.awaitTermination(timeout, timeUnit); - } - - /** - * Status of running a check. It can either be a result or an - * exception, depending on whether the check completed or threw. 
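Wiring the throttled checker together, as HddsVolumeChecker does, looks roughly like the sketch below. The gap and timeout values are placeholders, and the generic parameters follow how HddsVolume implements check(Boolean) returning a VolumeCheckResult.

    ThrottledAsyncChecker<Boolean, VolumeCheckResult> checker =
        new ThrottledAsyncChecker<>(
            new Timer(),
            TimeUnit.MINUTES.toMillis(15),  // min gap between checks of one volume
            TimeUnit.MINUTES.toMillis(10),  // per-check timeout (non-positive disables it)
            Executors.newCachedThreadPool());

    Optional<ListenableFuture<VolumeCheckResult>> future =
        checker.schedule(volume, null);
    // Empty when a check for this volume is already in flight or completed
    // less than minMsBetweenChecks ago; otherwise a future for the new check.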
- */ - private static final class LastCheckResult { - /** - * Timestamp at which the check completed. - */ - private final long completedAt; - - /** - * Result of running the check if it completed. null if it threw. - */ - @Nullable - private final V result; - - /** - * Exception thrown by the check. null if it returned a result. - */ - private final Throwable exception; // null on success. - - /** - * Initialize with a result. - * @param result - */ - private LastCheckResult(V result, long completedAt) { - this.result = result; - this.exception = null; - this.completedAt = completedAt; - } - - /** - * Initialize with an exception. - * @param completedAt - * @param t - */ - private LastCheckResult(Throwable t, long completedAt) { - this.result = null; - this.exception = t; - this.completedAt = completedAt; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java deleted file mode 100644 index 626814e96c1..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nullable; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Implementation of {@code Futures#withTimeout}. - *

- *

Future that delegates to another but will finish early (via a - * {@link TimeoutException} wrapped in an {@link ExecutionException}) if the - * specified duration expires. The delegate future is interrupted and - * cancelled if it times out. - */ -final class TimeoutFuture extends AbstractFuture.TrustedFuture { - public static final Logger LOG = LoggerFactory.getLogger( - TimeoutFuture.class); - - static ListenableFuture create( - ListenableFuture delegate, - long time, - TimeUnit unit, - ScheduledExecutorService scheduledExecutor) { - TimeoutFuture result = new TimeoutFuture(delegate); - TimeoutFuture.Fire fire = new TimeoutFuture.Fire(result); - result.timer = scheduledExecutor.schedule(fire, time, unit); - delegate.addListener(fire, directExecutor()); - return result; - } - - /* - * Memory visibility of these fields. There are two cases to consider. - * - * 1. visibility of the writes to these fields to Fire.run: - * - * The initial write to delegateRef is made definitely visible via the - * semantics of addListener/SES.schedule. The later racy write in cancel() - * is not guaranteed to be observed, however that is fine since the - * correctness is based on the atomic state in our base class. The initial - * write to timer is never definitely visible to Fire.run since it is - * assigned after SES.schedule is called. Therefore Fire.run has to check - * for null. However, it should be visible if Fire.run is called by - * delegate.addListener since addListener is called after the assignment - * to timer, and importantly this is the main situation in which we need to - * be able to see the write. - * - * 2. visibility of the writes to an afterDone() call triggered by cancel(): - * - * Since these fields are non-final that means that TimeoutFuture is not - * being 'safely published', thus a motivated caller may be able to expose - * the reference to another thread that would then call cancel() and be - * unable to cancel the delegate. There are a number of ways to solve this, - * none of which are very pretty, and it is currently believed to be a - * purely theoretical problem (since the other actions should supply - * sufficient write-barriers). - */ - - @Nullable private ListenableFuture delegateRef; - @Nullable private Future timer; - - private TimeoutFuture(ListenableFuture delegate) { - this.delegateRef = Preconditions.checkNotNull(delegate); - } - - /** - * A runnable that is called when the delegate or the timer completes. - */ - private static final class Fire implements Runnable { - @Nullable - private TimeoutFuture timeoutFutureRef; - - Fire( - TimeoutFuture timeoutFuture) { - this.timeoutFutureRef = timeoutFuture; - } - - @Override - public void run() { - // If either of these reads return null then we must be after a - // successful cancel or another call to this method. - TimeoutFuture timeoutFuture = timeoutFutureRef; - if (timeoutFuture == null) { - return; - } - ListenableFuture delegate = timeoutFuture.delegateRef; - if (delegate == null) { - return; - } - - /* - * If we're about to complete the TimeoutFuture, we want to release our - * reference to it. Otherwise, we'll pin it (and its result) in memory - * until the timeout task is GCed. (The need to clear our reference to - * the TimeoutFuture is the reason we use a *static* nested class with - * a manual reference back to the "containing" class.) - * - * This has the nice-ish side effect of limiting reentrancy: run() calls - * timeoutFuture.setException() calls run(). 
That reentrancy would - * already be harmless, since timeoutFuture can be set (and delegate - * cancelled) only once. (And "set only once" is important for other - * reasons: run() can still be invoked concurrently in different threads, - * even with the above null checks.) - */ - timeoutFutureRef = null; - if (delegate.isDone()) { - timeoutFuture.setFuture(delegate); - } else { - try { - timeoutFuture.setException( - new TimeoutException("Future timed out: " + delegate)); - } finally { - delegate.cancel(true); - } - } - } - } - - @Override - protected void afterDone() { - maybePropagateCancellation(delegateRef); - - Future localTimer = timer; - // Try to cancel the timer as an optimization. - // timer may be null if this call to run was by the timer task since there - // is no happens-before edge between the assignment to timer and an - // execution of the timer task. - if (localTimer != null) { - localTimer.cancel(false); - } - - delegateRef = null; - timer = null; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java deleted file mode 100644 index 9e2eb221e81..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeIOStats.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * This class is used to track Volume IO stats for each HDDS Volume. - */ -public class VolumeIOStats { - - private final AtomicLong readBytes; - private final AtomicLong readOpCount; - private final AtomicLong writeBytes; - private final AtomicLong writeOpCount; - private final AtomicLong readTime; - private final AtomicLong writeTime; - - public VolumeIOStats() { - readBytes = new AtomicLong(0); - readOpCount = new AtomicLong(0); - writeBytes = new AtomicLong(0); - writeOpCount = new AtomicLong(0); - readTime = new AtomicLong(0); - writeTime = new AtomicLong(0); - } - - /** - * Increment number of bytes read from the volume. - * @param bytesRead - */ - public void incReadBytes(long bytesRead) { - readBytes.addAndGet(bytesRead); - } - - /** - * Increment the read operations performed on the volume. - */ - public void incReadOpCount() { - readOpCount.incrementAndGet(); - } - - /** - * Increment number of bytes written on to the volume. - * @param bytesWritten - */ - public void incWriteBytes(long bytesWritten) { - writeBytes.addAndGet(bytesWritten); - } - - /** - * Increment the write operations performed on the volume. 
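The VolumeIOStats counters are bumped by the datanode read/write paths. Recording one write, for instance, looks roughly like this sketch, where len and elapsedMs are placeholder values measured by the caller:

    VolumeIOStats stats = volume.getVolumeIOStats();
    stats.incWriteBytes(len);       // bytes written by this operation
    stats.incWriteOpCount();        // one more write op on this volume
    stats.incWriteTime(elapsedMs);  // time the write took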
- */ - public void incWriteOpCount() { - writeOpCount.incrementAndGet(); - } - - /** - * Increment the time taken by read operation on the volume. - * @param time - */ - public void incReadTime(long time) { - readTime.addAndGet(time); - } - - /** - * Increment the time taken by write operation on the volume. - * @param time - */ - public void incWriteTime(long time) { - writeTime.addAndGet(time); - } - - /** - * Returns total number of bytes read from the volume. - * @return long - */ - public long getReadBytes() { - return readBytes.get(); - } - - /** - * Returns total number of bytes written to the volume. - * @return long - */ - public long getWriteBytes() { - return writeBytes.get(); - } - - /** - * Returns total number of read operations performed on the volume. - * @return long - */ - public long getReadOpCount() { - return readOpCount.get(); - } - - /** - * Returns total number of write operations performed on the volume. - * @return long - */ - public long getWriteOpCount() { - return writeOpCount.get(); - } - - /** - * Returns total read operations time on the volume. - * @return long - */ - public long getReadTime() { - return readTime.get(); - } - - /** - * Returns total write operations time on the volume. - * @return long - */ - public long getWriteTime() { - return writeTime.get(); - } - - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java deleted file mode 100644 index 31f83ec8dab..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; - -/** - * Stores information about a disk/volume. - */ -public final class VolumeInfo { - - private static final Logger LOG = LoggerFactory.getLogger(VolumeInfo.class); - - private final String rootDir; - private final StorageType storageType; - - // Space usage calculator - private final VolumeUsage usage; - - // Capacity configured. This is useful when we want to - // limit the visible capacity for tests. If negative, then we just - // query from the filesystem. - private long configuredCapacity; - - /** - * Builder for VolumeInfo. 
- */ - public static class Builder { - private final Configuration conf; - private final String rootDir; - private StorageType storageType; - private long configuredCapacity; - - public Builder(String root, Configuration config) { - this.rootDir = root; - this.conf = config; - } - - public Builder storageType(StorageType st) { - this.storageType = st; - return this; - } - - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - - public VolumeInfo build() throws IOException { - return new VolumeInfo(this); - } - } - - private VolumeInfo(Builder b) throws IOException { - - this.rootDir = b.rootDir; - File root = new File(this.rootDir); - - Boolean succeeded = root.isDirectory() || root.mkdirs(); - - if (!succeeded) { - LOG.error("Unable to create the volume root dir at : {}", root); - throw new IOException("Unable to create the volume root dir at " + root); - } - - this.storageType = (b.storageType != null ? - b.storageType : StorageType.DEFAULT); - - this.configuredCapacity = (b.configuredCapacity != 0 ? - b.configuredCapacity : -1); - - this.usage = new VolumeUsage(root, b.conf); - } - - public long getCapacity() throws IOException { - if (configuredCapacity < 0) { - return usage.getCapacity(); - } - return configuredCapacity; - } - - public long getAvailable() throws IOException { - return usage.getAvailable(); - } - - public long getScmUsed() throws IOException { - return usage.getScmUsed(); - } - - protected void shutdownUsageThread() { - usage.shutdown(); - } - - public String getRootDir() { - return this.rootDir; - } - - public StorageType getStorageType() { - return this.storageType; - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - usage.setScmUsageForTesting(scmUsageForTest); - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - public VolumeUsage getUsageForTesting() { - return usage; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java deleted file mode 100644 index 875e96a0a96..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java +++ /dev/null @@ -1,519 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
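VolumeInfo is normally built internally by HddsVolume, but its Builder shown above can also be used directly, for example to cap the visible capacity in a test. A hedged sketch; the path and size are placeholders and exception handling is elided.

    VolumeInfo info = new VolumeInfo.Builder("/data/disk1", conf)
        .storageType(StorageType.DISK)
        .configuredCapacity(10L * 1024 * 1024 * 1024)  // report 10 GB instead of
        .build();                                      // querying the filesystem
    long capacity = info.getCapacity();  // 10 GB here; df-based when unset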
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.EnumMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.util.Timer; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * VolumeSet to manage HDDS volumes in a DataNode. - */ -public class VolumeSet { - - private static final Logger LOG = LoggerFactory.getLogger(VolumeSet.class); - - private Configuration conf; - - /** - * {@link VolumeSet#volumeMap} maintains a map of all active volumes in the - * DataNode. Each volume has one-to-one mapping with a volumeInfo object. - */ - private Map volumeMap; - /** - * {@link VolumeSet#failedVolumeMap} maintains a map of volumes which have - * failed. The keys in this map and {@link VolumeSet#volumeMap} are - * mutually exclusive. - */ - private Map failedVolumeMap; - - /** - * {@link VolumeSet#volumeStateMap} maintains a list of active volumes per - * StorageType. - */ - private EnumMap> volumeStateMap; - - /** - * An executor for periodic disk checks. - */ - private final ScheduledExecutorService diskCheckerservice; - private final ScheduledFuture periodicDiskChecker; - - private static final long DISK_CHECK_INTERVAL_MINUTES = 15; - - /** - * A Reentrant Read Write Lock to synchronize volume operations in VolumeSet. - * Any update to {@link VolumeSet#volumeMap}, - * {@link VolumeSet#failedVolumeMap}, or {@link VolumeSet#volumeStateMap} - * should be done after acquiring the write lock. 
- */ - private final ReentrantReadWriteLock volumeSetRWLock; - - private final String datanodeUuid; - private String clusterID; - - private Runnable shutdownHook; - private final HddsVolumeChecker volumeChecker; - - public VolumeSet(String dnUuid, Configuration conf) - throws IOException { - this(dnUuid, null, conf); - } - - public VolumeSet(String dnUuid, String clusterID, Configuration conf) - throws IOException { - this.datanodeUuid = dnUuid; - this.clusterID = clusterID; - this.conf = conf; - this.volumeSetRWLock = new ReentrantReadWriteLock(); - this.volumeChecker = getVolumeChecker(conf); - this.diskCheckerservice = Executors.newScheduledThreadPool( - 1, r -> new Thread(r, "Periodic HDDS volume checker")); - this.periodicDiskChecker = - diskCheckerservice.scheduleWithFixedDelay(() -> { - try { - checkAllVolumes(); - } catch (IOException e) { - LOG.warn("Exception while checking disks", e); - } - }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES, - TimeUnit.MINUTES); - initializeVolumeSet(); - } - - @VisibleForTesting - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskChecker.DiskErrorException { - return new HddsVolumeChecker(configuration, new Timer()); - } - - /** - * Add DN volumes configured through ConfigKeys to volumeMap. - */ - private void initializeVolumeSet() throws IOException { - volumeMap = new ConcurrentHashMap<>(); - failedVolumeMap = new ConcurrentHashMap<>(); - volumeStateMap = new EnumMap<>(StorageType.class); - - Collection rawLocations = conf.getTrimmedStringCollection( - HDDS_DATANODE_DIR_KEY); - if (rawLocations.isEmpty()) { - rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY); - } - if (rawLocations.isEmpty()) { - throw new IllegalArgumentException("No location configured in either " - + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY); - } - - for (StorageType storageType : StorageType.values()) { - volumeStateMap.put(storageType, new ArrayList<>()); - } - - for (String locationString : rawLocations) { - try { - StorageLocation location = StorageLocation.parse(locationString); - - HddsVolume hddsVolume = createVolume(location.getUri().getPath(), - location.getStorageType()); - - checkAndSetClusterID(hddsVolume.getClusterID()); - - LOG.info("Added Volume : {} to VolumeSet", - hddsVolume.getHddsRootDir().getPath()); - - if (!hddsVolume.getHddsRootDir().mkdirs() && - !hddsVolume.getHddsRootDir().exists()) { - throw new IOException("Failed to create HDDS storage dir " + - hddsVolume.getHddsRootDir()); - } - volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume); - volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume); - } catch (IOException e) { - HddsVolume volume = new HddsVolume.Builder(locationString) - .failedVolume(true).build(); - failedVolumeMap.put(locationString, volume); - LOG.error("Failed to parse the storage location: " + locationString, e); - } - } - - // First checking if we have any volumes, if all volumes are failed the - // volumeMap size will be zero, and we throw Exception. - if (volumeMap.size() == 0) { - throw new DiskOutOfSpaceException("No storage locations configured"); - } - - checkAllVolumes(); - - // Ensure volume threads are stopped and scm df is saved during shutdown. - shutdownHook = () -> { - saveVolumeSetUsed(); - }; - ShutdownHookManager.get().addShutdownHook(shutdownHook, - SHUTDOWN_HOOK_PRIORITY); - } - - /** - * Run a synchronous parallel check of all HDDS volumes, removing - * failed volumes. 
- */ - private void checkAllVolumes() throws IOException { - List allVolumes = getVolumesList(); - Set failedVolumes; - try { - failedVolumes = volumeChecker.checkAllVolumes(allVolumes); - } catch (InterruptedException e) { - throw new IOException("Interrupted while running disk check", e); - } - - if (failedVolumes.size() > 0) { - LOG.warn("checkAllVolumes got {} failed volumes - {}", - failedVolumes.size(), failedVolumes); - handleVolumeFailures(failedVolumes); - } else { - LOG.debug("checkAllVolumes encountered no failures"); - } - } - - /** - * Handle one or more failed volumes. - * @param failedVolumes - */ - private void handleVolumeFailures(Set failedVolumes) { - for (HddsVolume v: failedVolumes) { - this.writeLock(); - try { - // Immediately mark the volume as failed so it is unavailable - // for new containers. - volumeMap.remove(v.getHddsRootDir().getPath()); - failedVolumeMap.putIfAbsent(v.getHddsRootDir().getPath(), v); - } finally { - this.writeUnlock(); - } - - // TODO: - // 1. Mark all closed containers on the volume as unhealthy. - // 2. Consider stopping IO on open containers and tearing down - // active pipelines. - // 3. Handle Ratis log disk failure. - } - } - - /** - * If Version file exists and the {@link VolumeSet#clusterID} is not set yet, - * assign it the value from Version file. Otherwise, check that the given - * id matches with the id from version file. - * @param idFromVersionFile value of the property from Version file - * @throws InconsistentStorageStateException - */ - private void checkAndSetClusterID(String idFromVersionFile) - throws InconsistentStorageStateException { - // If the clusterID is null (not set), assign it the value - // from version file. - if (this.clusterID == null) { - this.clusterID = idFromVersionFile; - return; - } - - // If the clusterID is already set, it should match with the value from the - // version file. - if (!idFromVersionFile.equals(this.clusterID)) { - throw new InconsistentStorageStateException( - "Mismatched ClusterIDs. VolumeSet has: " + this.clusterID + - ", and version file has: " + idFromVersionFile); - } - } - - /** - * Acquire Volume Set Read lock. - */ - public void readLock() { - volumeSetRWLock.readLock().lock(); - } - - /** - * Release Volume Set Read lock. - */ - public void readUnlock() { - volumeSetRWLock.readLock().unlock(); - } - - /** - * Acquire Volume Set Write lock. - */ - public void writeLock() { - volumeSetRWLock.writeLock().lock(); - } - - /** - * Release Volume Set Write lock. 
- */ - public void writeUnlock() { - volumeSetRWLock.writeLock().unlock(); - } - - - private HddsVolume createVolume(String locationString, - StorageType storageType) throws IOException { - HddsVolume.Builder volumeBuilder = new HddsVolume.Builder(locationString) - .conf(conf) - .datanodeUuid(datanodeUuid) - .clusterID(clusterID) - .storageType(storageType); - return volumeBuilder.build(); - } - - - // Add a volume to VolumeSet - boolean addVolume(String dataDir) { - return addVolume(dataDir, StorageType.DEFAULT); - } - - // Add a volume to VolumeSet - private boolean addVolume(String volumeRoot, StorageType storageType) { - String hddsRoot = HddsVolumeUtil.getHddsRoot(volumeRoot); - boolean success; - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - LOG.warn("Volume : {} already exists in VolumeMap", hddsRoot); - success = false; - } else { - if (failedVolumeMap.containsKey(hddsRoot)) { - failedVolumeMap.remove(hddsRoot); - } - - HddsVolume hddsVolume = createVolume(volumeRoot, storageType); - volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume); - volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume); - - LOG.info("Added Volume : {} to VolumeSet", - hddsVolume.getHddsRootDir().getPath()); - success = true; - } - } catch (IOException ex) { - LOG.error("Failed to add volume " + volumeRoot + " to VolumeSet", ex); - success = false; - } finally { - this.writeUnlock(); - } - return success; - } - - // Mark a volume as failed - public void failVolume(String dataDir) { - String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir); - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = volumeMap.get(hddsRoot); - hddsVolume.failVolume(); - - volumeMap.remove(hddsRoot); - volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume); - failedVolumeMap.put(hddsRoot, hddsVolume); - - LOG.info("Moving Volume : {} to failed Volumes", hddsRoot); - } else if (failedVolumeMap.containsKey(hddsRoot)) { - LOG.info("Volume : {} is not active", hddsRoot); - } else { - LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot); - } - } finally { - this.writeUnlock(); - } - } - - // Remove a volume from the VolumeSet completely. - public void removeVolume(String dataDir) throws IOException { - String hddsRoot = HddsVolumeUtil.getHddsRoot(dataDir); - - this.writeLock(); - try { - if (volumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = volumeMap.get(hddsRoot); - hddsVolume.shutdown(); - - volumeMap.remove(hddsRoot); - volumeStateMap.get(hddsVolume.getStorageType()).remove(hddsVolume); - - LOG.info("Removed Volume : {} from VolumeSet", hddsRoot); - } else if (failedVolumeMap.containsKey(hddsRoot)) { - HddsVolume hddsVolume = failedVolumeMap.get(hddsRoot); - hddsVolume.setState(VolumeState.NON_EXISTENT); - - failedVolumeMap.remove(hddsRoot); - LOG.info("Removed Volume : {} from failed VolumeSet", hddsRoot); - } else { - LOG.warn("Volume : {} does not exist in VolumeSet", hddsRoot); - } - } finally { - this.writeUnlock(); - } - } - - /** - * This method, call shutdown on each volume to shutdown volume usage - * thread and write scmUsed on each volume. - */ - private void saveVolumeSetUsed() { - for (HddsVolume hddsVolume : volumeMap.values()) { - try { - hddsVolume.shutdown(); - } catch (Exception ex) { - LOG.error("Failed to shutdown volume : " + hddsVolume.getHddsRootDir(), - ex); - } - } - } - - /** - * Shutdown the volumeset. 
- */ - public void shutdown() { - saveVolumeSetUsed(); - stopDiskChecker(); - if (shutdownHook != null) { - ShutdownHookManager.get().removeShutdownHook(shutdownHook); - } - } - - private void stopDiskChecker() { - periodicDiskChecker.cancel(true); - volumeChecker.shutdownAndWait(0, TimeUnit.SECONDS); - diskCheckerservice.shutdownNow(); - } - - @VisibleForTesting - public List getVolumesList() { - return ImmutableList.copyOf(volumeMap.values()); - } - - @VisibleForTesting - public List getFailedVolumesList() { - return ImmutableList.copyOf(failedVolumeMap.values()); - } - - @VisibleForTesting - public Map getVolumeMap() { - return ImmutableMap.copyOf(volumeMap); - } - - @VisibleForTesting - public Map> getVolumeStateMap() { - return ImmutableMap.copyOf(volumeStateMap); - } - - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - boolean failed; - this.readLock(); - try { - StorageLocationReport[] reports = new StorageLocationReport[volumeMap - .size() + failedVolumeMap.size()]; - int counter = 0; - HddsVolume hddsVolume; - for (Map.Entry entry : volumeMap.entrySet()) { - hddsVolume = entry.getValue(); - VolumeInfo volumeInfo = hddsVolume.getVolumeInfo(); - long scmUsed; - long remaining; - long capacity; - failed = false; - try { - scmUsed = volumeInfo.getScmUsed(); - remaining = volumeInfo.getAvailable(); - capacity = volumeInfo.getCapacity(); - } catch (IOException ex) { - LOG.warn("Failed to get scmUsed and remaining for container " + - "storage location {}", volumeInfo.getRootDir(), ex); - // reset scmUsed and remaining if df/du failed. - scmUsed = 0; - remaining = 0; - capacity = 0; - failed = true; - } - - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageLocation(volumeInfo.getRootDir()) - .setId(hddsVolume.getStorageID()) - .setFailed(failed) - .setCapacity(capacity) - .setRemaining(remaining) - .setScmUsed(scmUsed) - .setStorageType(hddsVolume.getStorageType()); - StorageLocationReport r = builder.build(); - reports[counter++] = r; - } - for (Map.Entry entry : failedVolumeMap.entrySet()) { - hddsVolume = entry.getValue(); - StorageLocationReport.Builder builder = StorageLocationReport - .newBuilder(); - builder.setStorageLocation(hddsVolume.getHddsRootDir() - .getAbsolutePath()).setId(hddsVolume.getStorageID()).setFailed(true) - .setCapacity(0).setRemaining(0).setScmUsed(0).setStorageType( - hddsVolume.getStorageType()); - StorageLocationReport r = builder.build(); - reports[counter++] = r; - } - NodeReportProto.Builder nrb = NodeReportProto.newBuilder(); - for (int i = 0; i < reports.length; i++) { - nrb.addStorageReport(reports[i].getProtoBufMessage()); - } - return nrb.build(); - } finally { - this.readUnlock(); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java deleted file mode 100644 index 693bcb50cc5..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.VisibleForTesting; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CachingGetSpaceUsed; -import org.apache.hadoop.fs.DF; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.nio.charset.StandardCharsets; -import java.util.Scanner; -import java.util.concurrent.atomic.AtomicReference; - -/** - * Class that wraps the space df of the Datanode Volumes used by SCM - * containers. - */ -public class VolumeUsage { - private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class); - - private final File rootDir; - private final DF df; - private final File scmUsedFile; - private AtomicReference scmUsage; - private boolean shutdownComplete; - - private static final String DU_CACHE_FILE = "scmUsed"; - private volatile boolean scmUsedSaved = false; - - VolumeUsage(File dataLoc, Configuration conf) - throws IOException { - this.rootDir = dataLoc; - - // SCM used cache file - scmUsedFile = new File(rootDir, DU_CACHE_FILE); - // get overall disk df - this.df = new DF(rootDir, conf); - - startScmUsageThread(conf); - } - - void startScmUsageThread(Configuration conf) throws IOException { - // get SCM specific df - scmUsage = new AtomicReference<>( - new CachingGetSpaceUsed.Builder().setPath(rootDir) - .setConf(conf) - .setInitialUsed(loadScmUsed()) - .build()); - } - - long getCapacity() { - long capacity = df.getCapacity(); - return (capacity > 0) ? capacity : 0; - } - - /* - * Calculate the available space in the volume. - */ - long getAvailable() throws IOException { - long remaining = getCapacity() - getScmUsed(); - long available = df.getAvailable(); - if (remaining > available) { - remaining = available; - } - return (remaining > 0) ? remaining : 0; - } - - long getScmUsed() throws IOException{ - return scmUsage.get().getUsed(); - } - - public synchronized void shutdown() { - if (!shutdownComplete) { - saveScmUsed(); - - if (scmUsage.get() instanceof CachingGetSpaceUsed) { - IOUtils.cleanupWithLogger( - null, ((CachingGetSpaceUsed) scmUsage.get())); - } - shutdownComplete = true; - } - } - - /** - * Read in the cached DU value and return it if it is less than 600 seconds - * old (DU update interval). Slight imprecision of scmUsed is not critical - * and skipping DU can significantly shorten the startup time. - * If the cached value is not available or too old, -1 is returned. 
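// Editor's note: the available-space calculation from getAvailable() above,
// restated as a tiny standalone helper (illustrative names). The usable space
// is the smaller of "capacity minus SCM-used bytes" and what the filesystem
// reports as free, and it is never negative.
final class SpaceMath {
  private SpaceMath() { }

  static long available(long capacity, long scmUsed, long fsAvailable) {
    long remaining = Math.min(capacity - scmUsed, fsAvailable);
    return Math.max(remaining, 0L);
  }
}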
- */ - long loadScmUsed() { - long cachedScmUsed; - long mtime; - Scanner sc; - - try { - sc = new Scanner(scmUsedFile, "UTF-8"); - } catch (FileNotFoundException fnfe) { - return -1; - } - - try { - // Get the recorded scmUsed from the file. - if (sc.hasNextLong()) { - cachedScmUsed = sc.nextLong(); - } else { - return -1; - } - // Get the recorded mtime from the file. - if (sc.hasNextLong()) { - mtime = sc.nextLong(); - } else { - return -1; - } - - // Return the cached value if mtime is okay. - if (mtime > 0 && (Time.now() - mtime < 600000L)) { - LOG.info("Cached ScmUsed found for {} : {} ", rootDir, - cachedScmUsed); - return cachedScmUsed; - } - return -1; - } finally { - sc.close(); - } - } - - /** - * Write the current scmUsed to the cache file. - */ - void saveScmUsed() { - if (scmUsedFile.exists() && !scmUsedFile.delete()) { - LOG.warn("Failed to delete old scmUsed file in {}.", rootDir); - } - OutputStreamWriter out = null; - try { - long used = getScmUsed(); - if (used > 0) { - out = new OutputStreamWriter(new FileOutputStream(scmUsedFile), - StandardCharsets.UTF_8); - // mtime is written last, so that truncated writes won't be valid. - out.write(Long.toString(used) + " " + Long.toString(Time.now())); - out.flush(); - out.close(); - out = null; - } - } catch (IOException ioe) { - // If write failed, the volume might be bad. Since the cache file is - // not critical, log the error and continue. - LOG.warn("Failed to write scmUsed to " + scmUsedFile, ioe); - } finally { - IOUtils.cleanupWithLogger(null, out); - } - } - - /** - * Only for testing. Do not use otherwise. - */ - @VisibleForTesting - @SuppressFBWarnings( - value = "IS2_INCONSISTENT_SYNC", - justification = "scmUsage is an AtomicReference. No additional " + - "synchronization is needed.") - public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) { - scmUsage.set(scmUsageForTest); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java deleted file mode 100644 index 86093c6015c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
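// Editor's note: a standalone sketch of the scmUsed cache-file handling shown
// in loadScmUsed()/saveScmUsed() above: one line holding the used-byte count
// followed by the write timestamp, treated as stale after ten minutes. Names
// and the java.nio style are illustrative; this is not the deleted code.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

final class UsageCache {
  private static final long MAX_AGE_MS = 600_000L;

  static void save(Path cacheFile, long usedBytes) throws IOException {
    // The timestamp is written after the value, so a truncated write fails to
    // parse as two numbers and is ignored on the next load.
    String line = usedBytes + " " + System.currentTimeMillis();
    Files.write(cacheFile, line.getBytes(StandardCharsets.UTF_8));
  }

  /** Returns the cached value, or -1 if it is missing, malformed, or stale. */
  static long load(Path cacheFile) {
    try {
      String[] parts = new String(Files.readAllBytes(cacheFile),
          StandardCharsets.UTF_8).trim().split("\\s+");
      if (parts.length < 2) {
        return -1;
      }
      long used = Long.parseLong(parts[0]);
      long mtime = Long.parseLong(parts[1]);
      boolean fresh = mtime > 0
          && System.currentTimeMillis() - mtime < MAX_AGE_MS;
      return fresh ? used : -1;
    } catch (IOException | NumberFormatException e) {
      return -1;
    }
  }
}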

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; -/** - This package contains volume/ disk related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java deleted file mode 100644 index ad68c4dc96c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.hdds.utils.MetaStoreIterator; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.hdds.utils.MetadataStore.KeyValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.util.NoSuchElementException; - - -/** - * Block Iterator for KeyValue Container. This block iterator returns blocks - * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. 
If no - * filter is specified, then default filter used is - * {@link MetadataKeyFilters#getNormalKeyFilter()} - */ -@InterfaceAudience.Public -public class KeyValueBlockIterator implements BlockIterator, - Closeable { - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueBlockIterator.class); - - private MetaStoreIterator blockIterator; - private final ReferenceCountedDB db; - private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters - .getNormalKeyFilter(); - private KeyPrefixFilter blockFilter; - private BlockData nextBlock; - private long containerId; - - /** - * KeyValueBlockIterator to iterate blocks in a container. - * @param id - container id - * @param path - container base path - * @throws IOException - */ - - public KeyValueBlockIterator(long id, File path) - throws IOException { - this(id, path, defaultBlockFilter); - } - - /** - * KeyValueBlockIterator to iterate blocks in a container. - * @param id - container id - * @param path - container base path - * @param filter - Block filter, filter to be applied for blocks - * @throws IOException - */ - public KeyValueBlockIterator(long id, File path, KeyPrefixFilter filter) - throws IOException { - containerId = id; - File metdataPath = new File(path, OzoneConsts.METADATA); - File containerFile = ContainerUtils.getContainerFile(metdataPath - .getParentFile()); - ContainerData containerData = ContainerDataYaml.readContainerFile( - containerFile); - KeyValueContainerData keyValueContainerData = (KeyValueContainerData) - containerData; - keyValueContainerData.setDbFile(KeyValueContainerLocationUtil - .getContainerDBFile(metdataPath, containerId)); - db = BlockUtils.getDB(keyValueContainerData, new - OzoneConfiguration()); - blockIterator = db.getStore().iterator(); - blockFilter = filter; - } - - /** - * This method returns blocks matching with the filter. 
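// Editor's note: the hasNext()/nextBlock() look-ahead pattern of the iterator
// above, reduced to a generic standalone sketch (illustrative names).
// hasNext() buffers the next element that passes the filter; next() hands it
// out and clears the buffer.
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.function.Predicate;

class FilteringIterator<T> {
  private final Iterator<T> source;
  private final Predicate<T> filter;
  private T buffered; // look-ahead element, null when nothing is buffered

  FilteringIterator(Iterator<T> source, Predicate<T> filter) {
    this.source = source;
    this.filter = filter;
  }

  boolean hasNext() {
    if (buffered != null) {
      return true;
    }
    while (source.hasNext()) {
      T candidate = source.next();
      if (filter.test(candidate)) {
        buffered = candidate; // keep it for the following next() call
        return true;
      }
    }
    return false;
  }

  T next() {
    if (!hasNext()) {
      throw new NoSuchElementException("iterator exhausted");
    }
    T result = buffered;
    buffered = null;
    return result;
  }
}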
- * @return next block or null if no more blocks - * @throws IOException - */ - @Override - public BlockData nextBlock() throws IOException, NoSuchElementException { - if (nextBlock != null) { - BlockData currentBlock = nextBlock; - nextBlock = null; - return currentBlock; - } - if(hasNext()) { - return nextBlock(); - } - throw new NoSuchElementException("Block Iterator reached end for " + - "ContainerID " + containerId); - } - - @Override - public boolean hasNext() throws IOException { - if (nextBlock != null) { - return true; - } - if (blockIterator.hasNext()) { - KeyValue block = blockIterator.next(); - if (blockFilter.filterKey(null, block.getKey(), null)) { - nextBlock = BlockUtils.getBlockData(block.getValue()); - if (LOG.isTraceEnabled()) { - LOG.trace("Block matching with filter found: blockID is : {} for " + - "containerID {}", nextBlock.getLocalID(), containerId); - } - return true; - } - hasNext(); - } - return false; - } - - @Override - public void seekToFirst() { - nextBlock = null; - blockIterator.seekToFirst(); - } - - @Override - public void seekToLast() { - nextBlock = null; - blockIterator.seekToLast(); - } - - public void close() { - db.close(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java deleted file mode 100644 index a6e914b90b8..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ /dev/null @@ -1,730 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.StandardCopyOption; -import java.util.Map; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.io.nativeio.NativeIO; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers - .KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_ALREADY_EXISTS; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_FILES_CREATE_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.DISK_OUT_OF_SPACE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_COMPACT_DB; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_DB_SYNC; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.INVALID_CONTAINER_STATE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNSUPPORTED_REQUEST; - -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class to perform KeyValue Container operations. Any modifications to - * KeyValueContainer object should ideally be done via api exposed in - * KeyValueHandler class. 
- */ -public class KeyValueContainer implements Container { - - private static final Logger LOG = LoggerFactory.getLogger(Container.class); - - // Use a non-fair RW lock for better throughput, we may revisit this decision - // if this causes fairness issues. - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); - - private final KeyValueContainerData containerData; - private Configuration config; - - public KeyValueContainer(KeyValueContainerData containerData, Configuration - ozoneConfig) { - Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot " + - "be null"); - Preconditions.checkNotNull(ozoneConfig, "Ozone configuration cannot " + - "be null"); - this.config = ozoneConfig; - this.containerData = containerData; - } - - @Override - public void create(VolumeSet volumeSet, VolumeChoosingPolicy - volumeChoosingPolicy, String scmId) throws StorageContainerException { - Preconditions.checkNotNull(volumeChoosingPolicy, "VolumeChoosingPolicy " + - "cannot be null"); - Preconditions.checkNotNull(volumeSet, "VolumeSet cannot be null"); - Preconditions.checkNotNull(scmId, "scmId cannot be null"); - - File containerMetaDataPath = null; - //acquiring volumeset read lock - long maxSize = containerData.getMaxSize(); - volumeSet.readLock(); - try { - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), maxSize); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - - long containerID = containerData.getContainerID(); - - containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, scmId, containerID); - containerData.setMetadataPath(containerMetaDataPath.getPath()); - - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, scmId, containerID); - - // Check if it is new Container. - ContainerUtils.verifyIsNewContainer(containerMetaDataPath); - - //Create Metadata path chunks path and metadata db - File dbFile = getContainerDBFile(); - KeyValueContainerUtil.createContainerMetaData(containerMetaDataPath, - chunksPath, dbFile, config); - - String impl = config.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT); - - //Set containerData for the KeyValueContainer. 
- containerData.setChunksPath(chunksPath.getPath()); - containerData.setContainerDBType(impl); - containerData.setDbFile(dbFile); - containerData.setVolume(containerVolume); - - // Create .container file - File containerFile = getContainerFile(); - createContainerFile(containerFile); - - } catch (StorageContainerException ex) { - if (containerMetaDataPath != null && containerMetaDataPath.getParentFile() - .exists()) { - FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); - } - throw ex; - } catch (DiskOutOfSpaceException ex) { - throw new StorageContainerException("Container creation failed, due to " + - "disk out of space", ex, DISK_OUT_OF_SPACE); - } catch (FileAlreadyExistsException ex) { - throw new StorageContainerException("Container creation failed because " + - "ContainerFile already exists", ex, CONTAINER_ALREADY_EXISTS); - } catch (IOException ex) { - if (containerMetaDataPath != null && containerMetaDataPath.getParentFile() - .exists()) { - FileUtil.fullyDelete(containerMetaDataPath.getParentFile()); - } - throw new StorageContainerException("Container creation failed.", ex, - CONTAINER_INTERNAL_ERROR); - } finally { - volumeSet.readUnlock(); - } - } - - /** - * Set all of the path realted container data fields based on the name - * conventions. - * - * @param scmId - * @param containerVolume - * @param hddsVolumeDir - */ - public void populatePathFields(String scmId, - HddsVolume containerVolume, String hddsVolumeDir) { - - long containerId = containerData.getContainerID(); - - File containerMetaDataPath = KeyValueContainerLocationUtil - .getContainerMetaDataPath(hddsVolumeDir, scmId, containerId); - - File chunksPath = KeyValueContainerLocationUtil.getChunksLocationPath( - hddsVolumeDir, scmId, containerId); - File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - containerMetaDataPath, containerId); - - //Set containerData for the KeyValueContainer. - containerData.setMetadataPath(containerMetaDataPath.getPath()); - containerData.setChunksPath(chunksPath.getPath()); - containerData.setDbFile(dbFile); - containerData.setVolume(containerVolume); - } - - /** - * Writes to .container file. - * - * @param containerFile container file name - * @param isCreate True if creating a new file. False is updating an - * existing container file. - * @throws StorageContainerException - */ - private void writeToContainerFile(File containerFile, boolean isCreate) - throws StorageContainerException { - File tempContainerFile = null; - long containerId = containerData.getContainerID(); - try { - tempContainerFile = createTempFile(containerFile); - ContainerDataYaml.createContainerFile( - ContainerType.KeyValueContainer, containerData, tempContainerFile); - - // NativeIO.renameTo is an atomic function. But it might fail if the - // container file already exists. Hence, we handle the two cases - // separately. - if (isCreate) { - NativeIO.renameTo(tempContainerFile, containerFile); - } else { - Files.move(tempContainerFile.toPath(), containerFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - } - - } catch (IOException ex) { - throw new StorageContainerException("Error while creating/ updating " + - ".container file. 
ContainerID: " + containerId, ex, - CONTAINER_FILES_CREATE_ERROR); - } finally { - if (tempContainerFile != null && tempContainerFile.exists()) { - if (!tempContainerFile.delete()) { - LOG.warn("Unable to delete container temporary file: {}.", - tempContainerFile.getAbsolutePath()); - } - } - } - } - - private void createContainerFile(File containerFile) - throws StorageContainerException { - writeToContainerFile(containerFile, true); - } - - private void updateContainerFile(File containerFile) - throws StorageContainerException { - writeToContainerFile(containerFile, false); - } - - - @Override - public void delete() throws StorageContainerException { - long containerId = containerData.getContainerID(); - try { - KeyValueContainerUtil.removeContainer(containerData, config); - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - // TODO : An I/O error during delete can leave partial artifacts on the - // disk. We will need the cleaner thread to cleanup this information. - String errMsg = String.format("Failed to cleanup container. ID: %d", - containerId); - LOG.error(errMsg, ex); - throw new StorageContainerException(errMsg, ex, CONTAINER_INTERNAL_ERROR); - } - } - - @Override - public void markContainerForClose() throws StorageContainerException { - writeLock(); - try { - if (getContainerState() != ContainerDataProto.State.OPEN) { - throw new StorageContainerException( - "Attempting to close a " + getContainerState() + " container.", - CONTAINER_NOT_OPEN); - } - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.CLOSING)); - } finally { - writeUnlock(); - } - } - - @Override - public void markContainerUnhealthy() throws StorageContainerException { - writeLock(); - try { - updateContainerData(() -> - containerData.setState(ContainerDataProto.State.UNHEALTHY)); - } finally { - writeUnlock(); - } - } - - @Override - public void quasiClose() throws StorageContainerException { - // The DB must be synced during close operation - flushAndSyncDB(); - - writeLock(); - try { - // Second sync should be a very light operation as sync has already - // been done outside the lock. - flushAndSyncDB(); - updateContainerData(containerData::quasiCloseContainer); - } finally { - writeUnlock(); - } - } - - @Override - public void close() throws StorageContainerException { - // The DB must be synced during close operation - flushAndSyncDB(); - - writeLock(); - try { - // Second sync should be a very light operation as sync has already - // been done outside the lock. - flushAndSyncDB(); - updateContainerData(containerData::closeContainer); - } finally { - writeUnlock(); - } - LOG.info("Container {} is closed with bcsId {}.", - containerData.getContainerID(), - containerData.getBlockCommitSequenceId()); - } - - /** - * - * Must be invoked with the writeLock held. - * - * @param update - * @throws StorageContainerException - */ - private void updateContainerData(Runnable update) - throws StorageContainerException { - Preconditions.checkState(hasWriteLock()); - ContainerDataProto.State oldState = null; - try { - oldState = containerData.getState(); - update.run(); - File containerFile = getContainerFile(); - // update the new container data to .container File - updateContainerFile(containerFile); - - } catch (StorageContainerException ex) { - if (oldState != null - && containerData.getState() != ContainerDataProto.State.UNHEALTHY) { - // Failed to update .container file. Reset the state to old state only - // if the current state is not unhealthy. 
- containerData.setState(oldState); - } - throw ex; - } - } - - private void compactDB() throws StorageContainerException { - try { - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - db.getStore().compactDB(); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Error in DB compaction while closing container", ex); - throw new StorageContainerException(ex, ERROR_IN_COMPACT_DB); - } - } - - private void flushAndSyncDB() throws StorageContainerException { - try { - try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - db.getStore().flushDB(true); - LOG.info("Container {} is synced with bcsId {}.", - containerData.getContainerID(), - containerData.getBlockCommitSequenceId()); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Error in DB sync while closing container", ex); - throw new StorageContainerException(ex, ERROR_IN_DB_SYNC); - } - } - - @Override - public KeyValueContainerData getContainerData() { - return containerData; - } - - @Override - public ContainerProtos.ContainerDataProto.State getContainerState() { - return containerData.getState(); - } - - @Override - public ContainerType getContainerType() { - return ContainerType.KeyValueContainer; - } - - @Override - public void update( - Map metadata, boolean forceUpdate) - throws StorageContainerException { - - // TODO: Now, when writing the updated data to .container file, we are - // holding lock and writing data to disk. We can have async implementation - // to flush the update container data to disk. - long containerId = containerData.getContainerID(); - if(!containerData.isValid()) { - LOG.debug("Invalid container data. ContainerID: {}", containerId); - throw new StorageContainerException("Invalid container data. " + - "ContainerID: " + containerId, INVALID_CONTAINER_STATE); - } - if (!forceUpdate && !containerData.isOpen()) { - throw new StorageContainerException( - "Updating a closed container without force option is not allowed. " + - "ContainerID: " + containerId, UNSUPPORTED_REQUEST); - } - - Map oldMetadata = containerData.getMetadata(); - try { - writeLock(); - for (Map.Entry entry : metadata.entrySet()) { - containerData.addMetadata(entry.getKey(), entry.getValue()); - } - - File containerFile = getContainerFile(); - // update the new container data to .container File - updateContainerFile(containerFile); - } catch (StorageContainerException ex) { - containerData.setMetadata(oldMetadata); - throw ex; - } finally { - writeUnlock(); - } - } - - @Override - public void updateDeleteTransactionId(long deleteTransactionId) { - containerData.updateDeleteTransactionId(deleteTransactionId); - } - - @Override - public KeyValueBlockIterator blockIterator() throws IOException{ - return new KeyValueBlockIterator(containerData.getContainerID(), new File( - containerData.getContainerPath())); - } - - @Override - public void importContainerData(InputStream input, - ContainerPacker packer) throws IOException { - writeLock(); - try { - if (getContainerFile().exists()) { - String errorMessage = String.format( - "Can't import container (cid=%d) data to a specific location" - + " as the container descriptor (%s) has already been exist.", - getContainerData().getContainerID(), - getContainerFile().getAbsolutePath()); - throw new IOException(errorMessage); - } - //copy the values from the input stream to the final destination - // directory. 
- byte[] descriptorContent = packer.unpackContainerData(this, input); - - Preconditions.checkNotNull(descriptorContent, - "Container descriptor is missing from the container archive: " - + getContainerData().getContainerID()); - - //now, we have extracted the container descriptor from the previous - //datanode. We can load it and upload it with the current data - // (original metadata + current filepath fields) - KeyValueContainerData originalContainerData = - (KeyValueContainerData) ContainerDataYaml - .readContainer(descriptorContent); - - - containerData.setState(originalContainerData.getState()); - containerData - .setContainerDBType(originalContainerData.getContainerDBType()); - containerData.setBytesUsed(originalContainerData.getBytesUsed()); - - //rewriting the yaml file with new checksum calculation. - update(originalContainerData.getMetadata(), true); - - //fill in memory stat counter (keycount, byte usage) - KeyValueContainerUtil.parseKVContainerData(containerData, config); - - } catch (Exception ex) { - //delete all the temporary data in case of any exception. - try { - FileUtils.deleteDirectory(new File(containerData.getMetadataPath())); - FileUtils.deleteDirectory(new File(containerData.getChunksPath())); - FileUtils.deleteDirectory(getContainerFile()); - } catch (Exception deleteex) { - LOG.error( - "Can not cleanup destination directories after a container import" - + " error (cid" + - containerData.getContainerID() + ")", deleteex); - } - throw ex; - } finally { - writeUnlock(); - } - } - - @Override - public void exportContainerData(OutputStream destination, - ContainerPacker packer) throws IOException { - if (getContainerData().getState() != - ContainerProtos.ContainerDataProto.State.CLOSED) { - throw new IllegalStateException( - "Only closed containers could be exported: ContainerId=" - + getContainerData().getContainerID()); - } - compactDB(); - packer.pack(this, destination); - } - - /** - * Acquire read lock. - */ - public void readLock() { - this.lock.readLock().lock(); - - } - - /** - * Release read lock. - */ - public void readUnlock() { - this.lock.readLock().unlock(); - } - - /** - * Check if the current thread holds read lock. - */ - public boolean hasReadLock() { - return this.lock.readLock().tryLock(); - } - - /** - * Acquire write lock. - */ - public void writeLock() { - // TODO: The lock for KeyValueContainer object should not be exposed - // publicly. - this.lock.writeLock().lock(); - } - - /** - * Release write lock. - */ - public void writeUnlock() { - this.lock.writeLock().unlock(); - - } - - /** - * Check if the current thread holds write lock. - */ - public boolean hasWriteLock() { - return this.lock.writeLock().isHeldByCurrentThread(); - } - - /** - * Acquire read lock, unless interrupted while waiting. - * @throws InterruptedException - */ - @Override - public void readLockInterruptibly() throws InterruptedException { - this.lock.readLock().lockInterruptibly(); - } - - /** - * Acquire write lock, unless interrupted while waiting. - * @throws InterruptedException - */ - @Override - public void writeLockInterruptibly() throws InterruptedException { - this.lock.writeLock().lockInterruptibly(); - - } - - /** - * Returns containerFile. 
- * @return .container File name - */ - @Override - public File getContainerFile() { - return getContainerFile(containerData.getMetadataPath(), - containerData.getContainerID()); - } - - static File getContainerFile(String metadataPath, long containerId) { - return new File(metadataPath, - containerId + OzoneConsts.CONTAINER_EXTENSION); - } - - @Override - public void updateBlockCommitSequenceId(long blockCommitSequenceId) { - containerData.updateBlockCommitSequenceId(blockCommitSequenceId); - } - - @Override - public long getBlockCommitSequenceId() { - return containerData.getBlockCommitSequenceId(); - } - - - /** - * Returns KeyValueContainerReport for the KeyValueContainer. - */ - @Override - public ContainerReplicaProto getContainerReport() - throws StorageContainerException { - ContainerReplicaProto.Builder ciBuilder = - ContainerReplicaProto.newBuilder(); - ciBuilder.setContainerID(containerData.getContainerID()) - .setReadCount(containerData.getReadCount()) - .setWriteCount(containerData.getWriteCount()) - .setReadBytes(containerData.getReadBytes()) - .setWriteBytes(containerData.getWriteBytes()) - .setKeyCount(containerData.getKeyCount()) - .setUsed(containerData.getBytesUsed()) - .setState(getHddsState()) - .setDeleteTransactionId(containerData.getDeleteTransactionId()) - .setBlockCommitSequenceId(containerData.getBlockCommitSequenceId()) - .setOriginNodeId(containerData.getOriginNodeId()); - return ciBuilder.build(); - } - - /** - * Returns LifeCycle State of the container. - * @return LifeCycle State of the container in HddsProtos format - * @throws StorageContainerException - */ - private ContainerReplicaProto.State getHddsState() - throws StorageContainerException { - ContainerReplicaProto.State state; - switch (containerData.getState()) { - case OPEN: - state = ContainerReplicaProto.State.OPEN; - break; - case CLOSING: - state = ContainerReplicaProto.State.CLOSING; - break; - case QUASI_CLOSED: - state = ContainerReplicaProto.State.QUASI_CLOSED; - break; - case CLOSED: - state = ContainerReplicaProto.State.CLOSED; - break; - case UNHEALTHY: - state = ContainerReplicaProto.State.UNHEALTHY; - break; - default: - throw new StorageContainerException("Invalid Container state found: " + - containerData.getContainerID(), INVALID_CONTAINER_STATE); - } - return state; - } - - /** - * Returns container DB file. 
- * @return - */ - public File getContainerDBFile() { - return new File(containerData.getMetadataPath(), containerData - .getContainerID() + OzoneConsts.DN_CONTAINER_DB); - } - - public boolean scanMetaData() { - long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId); - return checker.fastCheck(); - } - - @Override - public boolean shouldScanData() { - return containerData.getState() == ContainerDataProto.State.CLOSED - || containerData.getState() == ContainerDataProto.State.QUASI_CLOSED; - } - - public boolean scanData(DataTransferThrottler throttler, Canceler canceler) { - if (!shouldScanData()) { - throw new IllegalStateException("The checksum verification can not be" + - " done for container in state " - + containerData.getState()); - } - - long containerId = containerData.getContainerID(); - KeyValueContainerCheck checker = - new KeyValueContainerCheck(containerData.getMetadataPath(), config, - containerId); - - return checker.fullCheck(throttler, canceler); - } - - private enum ContainerCheckLevel { - NO_CHECK, FAST_CHECK, FULL_CHECK - } - - /** - * Creates a temporary file. - * @param file - * @return - * @throws IOException - */ - private File createTempFile(File file) throws IOException{ - return File.createTempFile("tmp_" + System.currentTimeMillis() + "_", - file.getName(), file.getParentFile()); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java deleted file mode 100644 index a4bd3762311..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.Arrays; - -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - -/** - * Class to run integrity checks on Datanode Containers. - * Provide infra for Data Scrubbing - */ - -public class KeyValueContainerCheck { - - private static final Logger LOG = LoggerFactory.getLogger(Container.class); - - private long containerID; - private KeyValueContainerData onDiskContainerData; //loaded from fs/disk - private Configuration checkConfig; - - private String metadataPath; - - public KeyValueContainerCheck(String metadataPath, Configuration conf, - long containerID) { - Preconditions.checkArgument(metadataPath != null); - - this.checkConfig = conf; - this.containerID = containerID; - this.onDiskContainerData = null; - this.metadataPath = metadataPath; - } - - /** - * Run basic integrity checks on container metadata. - * These checks do not look inside the metadata files. - * Applicable for OPEN containers. - * - * @return true : integrity checks pass, false : otherwise. - */ - public boolean fastCheck() { - LOG.info("Running basic checks for container {};", containerID); - boolean valid = false; - try { - loadContainerData(); - checkLayout(); - checkContainerFile(); - valid = true; - - } catch (IOException e) { - handleCorruption(e); - } - - return valid; - } - - /** - * full checks comprise scanning all metadata inside the container. - * Including the KV database. These checks are intrusive, consume more - * resources compared to fast checks and should only be done on Closed - * or Quasi-closed Containers. Concurrency being limited to delete - * workflows. - *

- * fullCheck is a superset of fastCheck - * - * @return true : integrity checks pass, false : otherwise. - */ - public boolean fullCheck(DataTransferThrottler throttler, Canceler canceler) { - boolean valid; - - try { - valid = fastCheck(); - if (valid) { - scanData(throttler, canceler); - } - } catch (IOException e) { - handleCorruption(e); - valid = false; - } - - return valid; - } - - /** - * Check the integrity of the directory structure of the container. - */ - private void checkLayout() throws IOException { - - // is metadataPath accessible as a directory? - checkDirPath(metadataPath); - - // is chunksPath accessible as a directory? - String chunksPath = onDiskContainerData.getChunksPath(); - checkDirPath(chunksPath); - } - - private void checkDirPath(String path) throws IOException { - - File dirPath = new File(path); - String errStr; - - try { - if (!dirPath.isDirectory()) { - errStr = "Not a directory [" + path + "]"; - throw new IOException(errStr); - } - } catch (SecurityException se) { - throw new IOException("Security exception checking dir [" - + path + "]", se); - } - - String[] ls = dirPath.list(); - if (ls == null) { - // null result implies operation failed - errStr = "null listing for directory [" + path + "]"; - throw new IOException(errStr); - } - } - - private void checkContainerFile() throws IOException { - /* - * compare the values in the container file loaded from disk, - * with the values we are expecting - */ - String dbType; - Preconditions - .checkState(onDiskContainerData != null, "Container File not loaded"); - - ContainerUtils.verifyChecksum(onDiskContainerData); - - if (onDiskContainerData.getContainerType() - != ContainerProtos.ContainerType.KeyValueContainer) { - String errStr = "Bad Container type in Containerdata for " + containerID; - throw new IOException(errStr); - } - - if (onDiskContainerData.getContainerID() != containerID) { - String errStr = - "Bad ContainerID field in Containerdata for " + containerID; - throw new IOException(errStr); - } - - dbType = onDiskContainerData.getContainerDBType(); - if (!dbType.equals(OZONE_METADATA_STORE_IMPL_ROCKSDB) && - !dbType.equals(OZONE_METADATA_STORE_IMPL_LEVELDB)) { - String errStr = "Unknown DBType [" + dbType - + "] in Container File for [" + containerID + "]"; - throw new IOException(errStr); - } - - KeyValueContainerData kvData = onDiskContainerData; - if (!metadataPath.equals(kvData.getMetadataPath())) { - String errStr = - "Bad metadata path in Containerdata for " + containerID + "Expected [" - + metadataPath + "] Got [" + kvData.getMetadataPath() - + "]"; - throw new IOException(errStr); - } - } - - private void scanData(DataTransferThrottler throttler, Canceler canceler) - throws IOException { - /* - * Check the integrity of the DB inside each container. - * 1. iterate over each key (Block) and locate the chunks for the block - * 2. garbage detection (TBD): chunks which exist in the filesystem, - * but not in the DB. This function will be implemented in HDDS-1202 - * 3. chunk checksum verification. 
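// Editor's note: a reduced sketch of step 3 above (chunk checksum
// verification). It reads the chunk file in bytesPerChecksum-sized slices,
// recomputes a checksum per slice, and compares it with the stored value.
// CRC32 and the helper names are illustrative; the deleted code uses Ozone's
// own Checksum/ChecksumData classes and throttles the reads.
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.zip.CRC32;

final class ChunkVerifier {
  private ChunkVerifier() { }

  static void verify(Path chunkFile, int bytesPerChecksum,
      List<Long> expectedChecksums) throws IOException {
    byte[] buffer = new byte[bytesPerChecksum];
    try (InputStream in = Files.newInputStream(chunkFile)) {
      for (int i = 0; i < expectedChecksums.size(); i++) {
        int read = in.read(buffer);
        if (read == -1) {
          break; // file shorter than expected; length checks happen separately
        }
        CRC32 crc = new CRC32();
        crc.update(buffer, 0, read);
        if (crc.getValue() != expectedChecksums.get(i).longValue()) {
          throw new IOException("Checksum mismatch in slice " + i
              + " of " + chunkFile);
        }
      }
    }
  }
}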
- */ - Preconditions.checkState(onDiskContainerData != null, - "invoke loadContainerData prior to calling this function"); - File dbFile; - File metaDir = new File(metadataPath); - - dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(metaDir, containerID); - - if (!dbFile.exists() || !dbFile.canRead()) { - String dbFileErrorMsg = "Unable to access DB File [" + dbFile.toString() - + "] for Container [" + containerID + "] metadata path [" - + metadataPath + "]"; - throw new IOException(dbFileErrorMsg); - } - - onDiskContainerData.setDbFile(dbFile); - try(ReferenceCountedDB db = - BlockUtils.getDB(onDiskContainerData, checkConfig); - KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, - new File(onDiskContainerData.getContainerPath()))) { - - while(kvIter.hasNext()) { - BlockData block = kvIter.nextBlock(); - for(ContainerProtos.ChunkInfo chunk : block.getChunks()) { - File chunkFile = ChunkUtils.getChunkFile(onDiskContainerData, - ChunkInfo.getFromProtoBuf(chunk)); - if (!chunkFile.exists()) { - // concurrent mutation in Block DB? lookup the block again. - byte[] bdata = db.getStore().get( - Longs.toByteArray(block.getBlockID().getLocalID())); - if (bdata != null) { - throw new IOException("Missing chunk file " - + chunkFile.getAbsolutePath()); - } - } else if (chunk.getChecksumData().getType() - != ContainerProtos.ChecksumType.NONE){ - int length = chunk.getChecksumData().getChecksumsList().size(); - ChecksumData cData = new ChecksumData( - chunk.getChecksumData().getType(), - chunk.getChecksumData().getBytesPerChecksum(), - chunk.getChecksumData().getChecksumsList()); - Checksum cal = new Checksum(cData.getChecksumType(), - cData.getBytesPerChecksum()); - long bytesRead = 0; - byte[] buffer = new byte[cData.getBytesPerChecksum()]; - try (InputStream fs = new FileInputStream(chunkFile)) { - for (int i = 0; i < length; i++) { - int v = fs.read(buffer); - if (v == -1) { - break; - } - bytesRead += v; - throttler.throttle(v, canceler); - ByteString expected = cData.getChecksums().get(i); - ByteString actual = cal.computeChecksum(buffer, 0, v) - .getChecksums().get(0); - if (!Arrays.equals(expected.toByteArray(), - actual.toByteArray())) { - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s len=%d expected" + - " checksum %s actual checksum %s for block %s", - chunk.getChunkName(), chunk.getLen(), - Arrays.toString(expected.toByteArray()), - Arrays.toString(actual.toByteArray()), - block.getBlockID())); - } - - } - if (bytesRead != chunk.getLen()) { - throw new OzoneChecksumException(String - .format("Inconsistent read for chunk=%s expected length=%d" - + " actual length=%d for block %s", - chunk.getChunkName(), - chunk.getLen(), bytesRead, block.getBlockID())); - } - } - } - } - } - } - } - - private void loadContainerData() throws IOException { - File containerFile = KeyValueContainer - .getContainerFile(metadataPath, containerID); - - onDiskContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - } - - private void handleCorruption(IOException e) { - String errStr = - "Corruption detected in container: [" + containerID + "] "; - String logMessage = errStr + "Exception: [" + e.getMessage() + "]"; - LOG.error(logMessage); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java 
deleted file mode 100644 index 2a9eedc6d1e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.collect.Lists; -import java.util.Collections; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.yaml.snakeyaml.nodes.Tag; - - -import java.io.File; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.lang.Math.max; -import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_TYPE; -import static org.apache.hadoop.ozone.OzoneConsts.METADATA_PATH; - -/** - * This class represents the KeyValueContainer metadata, which is the - * in-memory representation of container metadata and is represented on disk - * by the .container file. - */ -public class KeyValueContainerData extends ContainerData { - - // Yaml Tag used for KeyValueContainerData. - public static final Tag KEYVALUE_YAML_TAG = new Tag("KeyValueContainerData"); - - // Fields need to be stored in .container file. - private static final List KV_YAML_FIELDS; - - // Path to Container metadata Level DB/RocksDB Store and .container file. - private String metadataPath; - - // Path to Physical file system where chunks are stored. - private String chunksPath; - - //Type of DB used to store key to chunks mapping - private String containerDBType; - - private File dbFile = null; - - /** - * Number of pending deletion blocks in KeyValueContainer. - */ - private final AtomicInteger numPendingDeletionBlocks; - - private long deleteTransactionId; - - private long blockCommitSequenceId; - - static { - // Initialize YAML fields - KV_YAML_FIELDS = Lists.newArrayList(); - KV_YAML_FIELDS.addAll(YAML_FIELDS); - KV_YAML_FIELDS.add(METADATA_PATH); - KV_YAML_FIELDS.add(CHUNKS_PATH); - KV_YAML_FIELDS.add(CONTAINER_DB_TYPE); - } - - /** - * Constructs KeyValueContainerData object. - * @param id - ContainerId - * @param size - maximum size of the container in bytes - */ - public KeyValueContainerData(long id, long size, - String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, size, - originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); - this.deleteTransactionId = 0; - } - - /** - * Constructs KeyValueContainerData object. 
- * @param id - ContainerId - * @param layOutVersion - * @param size - maximum size of the container in bytes - */ - public KeyValueContainerData(long id, int layOutVersion, long size, - String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion, - size, originPipelineId, originNodeId); - this.numPendingDeletionBlocks = new AtomicInteger(0); - this.deleteTransactionId = 0; - } - - - /** - * Sets Container dbFile. This should be called only during creation of - * KeyValue container. - * @param containerDbFile - */ - public void setDbFile(File containerDbFile) { - dbFile = containerDbFile; - } - - /** - * Returns container DB file. - * @return dbFile - */ - public File getDbFile() { - return dbFile; - } - - /** - * Returns container metadata path. - * @return - Physical path where container file and checksum is stored. - */ - public String getMetadataPath() { - return metadataPath; - } - - /** - * Sets container metadata path. - * - * @param path - String. - */ - public void setMetadataPath(String path) { - this.metadataPath = path; - } - - /** - * Returns the path to base dir of the container. - * @return Path to base dir - */ - public String getContainerPath() { - if (metadataPath == null) { - return null; - } - return new File(metadataPath).getParent(); - } - - /** - * Returns the blockCommitSequenceId. - */ - public long getBlockCommitSequenceId() { - return blockCommitSequenceId; - } - - /** - * updates the blockCommitSequenceId. - */ - public void updateBlockCommitSequenceId(long id) { - this.blockCommitSequenceId = id; - } - - /** - * Get chunks path. - * @return - Path where chunks are stored - */ - public String getChunksPath() { - return chunksPath; - } - - /** - * Set chunks Path. - * @param chunkPath - File path. - */ - public void setChunksPath(String chunkPath) { - this.chunksPath = chunkPath; - } - - /** - * Returns the DBType used for the container. - * @return containerDBType - */ - public String getContainerDBType() { - return containerDBType; - } - - /** - * Sets the DBType used for the container. - * @param containerDBType - */ - public void setContainerDBType(String containerDBType) { - this.containerDBType = containerDBType; - } - - /** - * Increase the count of pending deletion blocks. - * - * @param numBlocks increment number - */ - public void incrPendingDeletionBlocks(int numBlocks) { - this.numPendingDeletionBlocks.addAndGet(numBlocks); - } - - /** - * Decrease the count of pending deletion blocks. - * - * @param numBlocks decrement number - */ - public void decrPendingDeletionBlocks(int numBlocks) { - this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks); - } - - /** - * Get the number of pending deletion blocks. - */ - public int getNumPendingDeletionBlocks() { - return this.numPendingDeletionBlocks.get(); - } - - /** - * Sets deleteTransactionId to latest delete transactionId for the container. - * - * @param transactionId latest transactionId of the container. - */ - public void updateDeleteTransactionId(long transactionId) { - deleteTransactionId = max(transactionId, deleteTransactionId); - } - - /** - * Return the latest deleteTransactionId of the container. - */ - public long getDeleteTransactionId() { - return deleteTransactionId; - } - - /** - * Returns a ProtoBuf Message from ContainerData. 
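Two bookkeeping idioms from the accessors above are worth calling out: the pending-deletion block count is an AtomicInteger so concurrent block-deleting services can adjust it safely, and updateDeleteTransactionId() only ever moves forward via max(), so a replayed transaction cannot roll the id back. A stripped-down sketch of both follows; the class and field names are illustrative, not the removed class itself.

import java.util.concurrent.atomic.AtomicInteger;

// Thread-safe pending-deletion counter plus a monotonically increasing
// delete transaction id, mirroring the removed KeyValueContainerData fields.
final class ContainerBookkeepingSketch {
  private final AtomicInteger pendingDeletionBlocks = new AtomicInteger(0);
  private long deleteTransactionId = 0;

  void incrPendingDeletionBlocks(int numBlocks) {
    pendingDeletionBlocks.addAndGet(numBlocks);
  }

  void decrPendingDeletionBlocks(int numBlocks) {
    pendingDeletionBlocks.addAndGet(-numBlocks);
  }

  synchronized void updateDeleteTransactionId(long transactionId) {
    deleteTransactionId = Math.max(deleteTransactionId, transactionId);
  }

  synchronized long getDeleteTransactionId() {
    return deleteTransactionId;
  }
}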
- * - * @return Protocol Buffer Message - */ - public ContainerDataProto getProtoBufMessage() { - ContainerDataProto.Builder builder = ContainerDataProto.newBuilder(); - builder.setContainerID(this.getContainerID()); - builder.setContainerPath(this.getMetadataPath()); - builder.setState(this.getState()); - - for (Map.Entry entry : getMetadata().entrySet()) { - ContainerProtos.KeyValue.Builder keyValBuilder = - ContainerProtos.KeyValue.newBuilder(); - builder.addMetadata(keyValBuilder.setKey(entry.getKey()) - .setValue(entry.getValue()).build()); - } - - if (this.getBytesUsed() >= 0) { - builder.setBytesUsed(this.getBytesUsed()); - } - - if(this.getContainerType() != null) { - builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer); - } - - return builder.build(); - } - - public static List getYamlFields() { - return Collections.unmodifiableList(KV_YAML_FIELDS); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java deleted file mode 100644 index bc418839f28..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ /dev/null @@ -1,1043 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Function; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .PutSmallFileRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.scm.ByteStringConversion; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis - .DispatcherContext.WriteChunkStage; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; -import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.util.AutoCloseableLock; -import org.apache.hadoop.util.ReflectionUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import static 
org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_DATANODE_VOLUME_CHOOSING_POLICY; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - Result.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handler for KeyValue Container type. - */ -public class KeyValueHandler extends Handler { - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueHandler.class); - - private final ContainerType containerType; - private final BlockManager blockManager; - private final ChunkManager chunkManager; - private final VolumeChoosingPolicy volumeChoosingPolicy; - private final long maxContainerSize; - private final Function byteBufferToByteString; - - // A lock that is held during container creation. - private final AutoCloseableLock containerCreationLock; - private final boolean doSyncWrite; - - public KeyValueHandler(Configuration config, StateContext context, - ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics) { - super(config, context, contSet, volSet, metrics); - containerType = ContainerType.KeyValueContainer; - blockManager = new BlockManagerImpl(config); - doSyncWrite = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); - chunkManager = ChunkManagerFactory.getChunkManager(config, doSyncWrite); - volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass( - HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy - .class, VolumeChoosingPolicy.class), conf); - maxContainerSize = (long)config.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - // this handler lock is used for synchronizing createContainer Requests, - // so using a fair lock here. 
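The comment above explains why the container-creation lock is fair, and the AutoCloseableLock constructed on the next line lets the handler release it with try-with-resources. A minimal stand-alone version of that idiom is sketched here; it is an illustration, not Hadoop's AutoCloseableLock class.

import java.util.concurrent.locks.ReentrantLock;

// A fair ReentrantLock whose acquire() returns an AutoCloseable, so callers
// release it automatically with try-with-resources.
final class CloseableLockSketch implements AutoCloseable {
  private final ReentrantLock lock = new ReentrantLock(true);  // fair ordering

  CloseableLockSketch acquire() {
    lock.lock();
    return this;
  }

  @Override
  public void close() {
    lock.unlock();
  }
}

// usage:
//   try (CloseableLockSketch ignored = creationLock.acquire()) {
//     // create the container only if it is not already present
//   }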
- containerCreationLock = new AutoCloseableLock(new ReentrantLock(true)); - byteBufferToByteString = - ByteStringConversion.createByteBufferConversion(conf); - } - - @VisibleForTesting - public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() { - return volumeChoosingPolicy; - } - - @Override - public void stop() { - } - - @Override - public ContainerCommandResponseProto handle( - ContainerCommandRequestProto request, Container container, - DispatcherContext dispatcherContext) { - - Type cmdType = request.getCmdType(); - KeyValueContainer kvContainer = (KeyValueContainer) container; - switch(cmdType) { - case CreateContainer: - return handleCreateContainer(request, kvContainer); - case ReadContainer: - return handleReadContainer(request, kvContainer); - case UpdateContainer: - return handleUpdateContainer(request, kvContainer); - case DeleteContainer: - return handleDeleteContainer(request, kvContainer); - case ListContainer: - return handleUnsupportedOp(request); - case CloseContainer: - return handleCloseContainer(request, kvContainer); - case PutBlock: - return handlePutBlock(request, kvContainer, dispatcherContext); - case GetBlock: - return handleGetBlock(request, kvContainer); - case DeleteBlock: - return handleDeleteBlock(request, kvContainer); - case ListBlock: - return handleUnsupportedOp(request); - case ReadChunk: - return handleReadChunk(request, kvContainer, dispatcherContext); - case DeleteChunk: - return handleDeleteChunk(request, kvContainer); - case WriteChunk: - return handleWriteChunk(request, kvContainer, dispatcherContext); - case ListChunk: - return handleUnsupportedOp(request); - case CompactChunk: - return handleUnsupportedOp(request); - case PutSmallFile: - return handlePutSmallFile(request, kvContainer, dispatcherContext); - case GetSmallFile: - return handleGetSmallFile(request, kvContainer); - case GetCommittedBlockLength: - return handleGetCommittedBlockLength(request, kvContainer); - default: - return null; - } - } - - @VisibleForTesting - public ChunkManager getChunkManager() { - return this.chunkManager; - } - - @VisibleForTesting - public BlockManager getBlockManager() { - return this.blockManager; - } - - /** - * Handles Create Container Request. If successful, adds the container to - * ContainerSet and sends an ICR to the SCM. - */ - ContainerCommandResponseProto handleCreateContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasCreateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Create Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - // Create Container request should be passed a null container as the - // container would be created here. - Preconditions.checkArgument(kvContainer == null); - - long containerID = request.getContainerID(); - - KeyValueContainerData newContainerData = new KeyValueContainerData( - containerID, maxContainerSize, request.getPipelineID(), - getDatanodeDetails().getUuidString()); - // TODO: Add support to add metadataList to ContainerData. Add metadata - // to container during creation. 
- KeyValueContainer newContainer = new KeyValueContainer( - newContainerData, conf); - - boolean created = false; - try (AutoCloseableLock l = containerCreationLock.acquire()) { - if (containerSet.getContainer(containerID) == null) { - newContainer.create(volumeSet, volumeChoosingPolicy, scmID); - created = containerSet.addContainer(newContainer); - } else { - // The create container request for an already existing container can - // arrive in case the ContainerStateMachine reapplies the transaction - // on datanode restart. Just log a warning msg here. - LOG.debug("Container already exists." + - "container Id " + containerID); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - - if (created) { - try { - sendICR(newContainer); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - } - return ContainerUtils.getSuccessResponse(request); - } - - public void populateContainerPathFields(KeyValueContainer container, - long maxSize) throws IOException { - volumeSet.readLock(); - try { - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), maxSize); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - container.populatePathFields(scmID, containerVolume, hddsVolumeDir); - } finally { - volumeSet.readUnlock(); - } - } - - /** - * Handles Read Container Request. Returns the ContainerData as response. - */ - ContainerCommandResponseProto handleReadContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasReadContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - KeyValueContainerData containerData = kvContainer.getContainerData(); - return KeyValueContainerUtil.getReadContainerResponse( - request, containerData); - } - - - /** - * Handles Update Container Request. If successful, the container metadata - * is updated. - */ - ContainerCommandResponseProto handleUpdateContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasUpdateContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - boolean forceUpdate = request.getUpdateContainer().getForceUpdate(); - List keyValueList = - request.getUpdateContainer().getMetadataList(); - Map metadata = new HashMap<>(); - for (KeyValue keyValue : keyValueList) { - metadata.put(keyValue.getKey(), keyValue.getValue()); - } - - try { - if (!metadata.isEmpty()) { - kvContainer.update(metadata, forceUpdate); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handles Delete Container Request. - * Open containers cannot be deleted. - * Holds writeLock on ContainerSet till the container is removed from - * containerMap. On disk deletion of container files will happen - * asynchronously without the lock. 
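The deletion contract described in the comment above (mutate the in-memory container set under the write lock, push the slow on-disk cleanup outside it) can be sketched as follows; the HashMap and Runnable are stand-ins for ContainerSet and the real file deletion, and the class name is illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Remove the container from the in-memory map under the write lock, then
// perform disk I/O after the lock is released so it cannot block other ops.
final class ContainerDeleteSketch {
  private final Map<Long, Object> containers = new HashMap<>();
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void delete(long containerId, Runnable deleteFilesOnDisk) {
    lock.writeLock().lock();
    try {
      containers.remove(containerId);    // state change under the lock
    } finally {
      lock.writeLock().unlock();
    }
    deleteFilesOnDisk.run();             // disk cleanup outside the lock
  }
}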
- */ - ContainerCommandResponseProto handleDeleteContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - boolean forceDelete = request.getDeleteContainer().getForceDelete(); - try { - deleteInternal(kvContainer, forceDelete); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handles Close Container Request. An open container is closed. - * Close Container call is idempotent. - */ - ContainerCommandResponseProto handleCloseContainer( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasCloseContainer()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Update Container request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - try { - markContainerForClose(kvContainer); - closeContainer(kvContainer); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Close Container failed", ex, - IO_EXCEPTION), request); - } - - return ContainerUtils.getSuccessResponse(request); - } - - /** - * Handle Put Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handlePutBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - long blockLength; - if (!request.hasPutBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - BlockData blockData; - try { - checkContainerOpen(kvContainer); - - blockData = BlockData.getFromProtoBuf( - request.getPutBlock().getBlockData()); - Preconditions.checkNotNull(blockData); - long bcsId = - dispatcherContext == null ? 0 : dispatcherContext.getLogIndex(); - blockData.setBlockCommitSequenceId(bcsId); - long numBytes = blockData.getProtoBufMessage().toByteArray().length; - blockManager.putBlock(kvContainer, blockData); - metrics.incContainerBytesStats(Type.PutBlock, numBytes); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Put Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.putBlockResponseSuccess(request, blockData); - } - - /** - * Handle Get Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleGetBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasGetBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. 
- try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - BlockData responseData; - try { - BlockID blockID = BlockID.getFromProtobuf( - request.getGetBlock().getBlockID()); - responseData = blockManager.getBlock(kvContainer, blockID); - long numBytes = responseData.getProtoBufMessage().toByteArray().length; - metrics.incContainerBytesStats(Type.GetBlock, numBytes); - - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Get Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.getBlockDataResponse(request, responseData); - } - - /** - * Handles GetCommittedBlockLength operation. - * Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleGetCommittedBlockLength( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - if (!request.hasGetCommittedBlockLength()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - long blockLength; - try { - BlockID blockID = BlockID - .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); - blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("GetCommittedBlockLength failed", ex, - IO_EXCEPTION), request); - } - - return BlockUtils.getBlockLengthResponse(request, blockLength); - } - - /** - * Handle Delete Block operation. Calls BlockManager to process the request. - */ - ContainerCommandResponseProto handleDeleteBlock( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteBlock()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete Key request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getDeleteBlock().getBlockID()); - - blockManager.deleteBlock(kvContainer, blockID); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Delete Key failed", ex, IO_EXCEPTION), - request); - } - - return BlockUtils.getBlockResponseSuccess(request); - } - - /** - * Handle Read Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleReadChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasReadChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Read Chunk request. 
trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout if that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - ChunkInfo chunkInfo; - ByteBuffer data; - try { - BlockID blockID = BlockID.getFromProtobuf( - request.getReadChunk().getBlockID()); - chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk() - .getChunkData()); - Preconditions.checkNotNull(chunkInfo); - - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - - data = chunkManager - .readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); - metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen()); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION), - request); - } - - Preconditions.checkNotNull(data, "Chunk data is null"); - - ContainerProtos.ReadChunkResponseProto.Builder response = - ContainerProtos.ReadChunkResponseProto.newBuilder(); - response.setChunkData(chunkInfo.getProtoBufMessage()); - response.setData(byteBufferToByteString.apply(data)); - response.setBlockID(request.getReadChunk().getBlockID()); - - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(request); - builder.setReadChunk(response); - return builder.build(); - } - - /** - * Throw an exception if the container is unhealthy. - * - * @throws StorageContainerException if the container is unhealthy. - * @param kvContainer - */ - @VisibleForTesting - void checkContainerIsHealthy(KeyValueContainer kvContainer) - throws StorageContainerException { - kvContainer.readLock(); - try { - if (kvContainer.getContainerData().getState() == State.UNHEALTHY) { - throw new StorageContainerException( - "The container replica is unhealthy.", - CONTAINER_UNHEALTHY); - } - } finally { - kvContainer.readUnlock(); - } - } - - /** - * Handle Delete Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleDeleteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasDeleteChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Delete Chunk request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. 
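checkContainerIsHealthy(), shown above, inspects the replica state under the container's read lock and fails fast when the replica is UNHEALTHY. A self-contained sketch of that guard follows; the State enum and plain IOException are stand-ins for the real container state and StorageContainerException.

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Read-lock-guarded health check: reject the request early if the replica
// has been marked unhealthy.
final class HealthCheckSketch {
  enum State { OPEN, CLOSED, UNHEALTHY }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private State state = State.OPEN;

  void checkContainerIsHealthy() throws IOException {
    lock.readLock().lock();
    try {
      if (state == State.UNHEALTHY) {
        throw new IOException("The container replica is unhealthy.");
      }
    } finally {
      lock.readLock().unlock();
    }
  }
}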
- try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getDeleteChunk().getBlockID()); - ContainerProtos.ChunkInfo chunkInfoProto = request.getDeleteChunk() - .getChunkData(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); - Preconditions.checkNotNull(chunkInfo); - - chunkManager.deleteChunk(kvContainer, blockID, chunkInfo); - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Delete Chunk failed", ex, - IO_EXCEPTION), request); - } - - return ChunkUtils.getChunkResponseSuccess(request); - } - - /** - * Handle Write Chunk operation. Calls ChunkManager to process the request. - */ - ContainerCommandResponseProto handleWriteChunk( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasWriteChunk()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Write Chunk request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf( - request.getWriteChunk().getBlockID()); - ContainerProtos.ChunkInfo chunkInfoProto = - request.getWriteChunk().getChunkData(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto); - Preconditions.checkNotNull(chunkInfo); - - ByteBuffer data = null; - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - WriteChunkStage stage = dispatcherContext.getStage(); - if (stage == WriteChunkStage.WRITE_DATA || - stage == WriteChunkStage.COMBINED) { - data = request.getWriteChunk().getData().asReadOnlyByteBuffer(); - } - - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - - // We should increment stats after writeChunk - if (stage == WriteChunkStage.WRITE_DATA|| - stage == WriteChunkStage.COMBINED) { - metrics.incContainerBytesStats(Type.WriteChunk, request.getWriteChunk() - .getChunkData().getLen()); - } - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Write Chunk failed", ex, IO_EXCEPTION), - request); - } - - return ChunkUtils.getChunkResponseSuccess(request); - } - - /** - * Handle Put Small File operation. Writes the chunk and associated key - * using a single RPC. Calls BlockManager and ChunkManager to process the - * request. - */ - ContainerCommandResponseProto handlePutSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer, - DispatcherContext dispatcherContext) { - - if (!request.hasPutSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Put Small File request. 
trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - PutSmallFileRequestProto putSmallFileReq = - request.getPutSmallFile(); - BlockData blockData; - - try { - checkContainerOpen(kvContainer); - - BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getBlock() - .getBlockData().getBlockID()); - blockData = BlockData.getFromProtoBuf( - putSmallFileReq.getBlock().getBlockData()); - Preconditions.checkNotNull(blockData); - - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf( - putSmallFileReq.getChunkInfo()); - Preconditions.checkNotNull(chunkInfo); - ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer(); - if (dispatcherContext == null) { - dispatcherContext = new DispatcherContext.Builder().build(); - } - - // chunks will be committed as a part of handling putSmallFile - // here. There is no need to maintain this info in openContainerBlockMap. - chunkManager - .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - - List chunks = new LinkedList<>(); - chunks.add(chunkInfo.getProtoBufMessage()); - blockData.setChunks(chunks); - blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); - - blockManager.putBlock(kvContainer, blockData); - metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity()); - - } catch (StorageContainerException ex) { - return ContainerUtils.logAndReturnError(LOG, ex, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Read Chunk failed", ex, - PUT_SMALL_FILE_ERROR), request); - } - - return SmallFileUtils.getPutFileResponseSuccess(request, blockData); - } - - /** - * Handle Get Small File operation. Gets a data stream using a key. This - * helps in reducing the RPC overhead for small files. Calls BlockManager and - * ChunkManager to process the request. - */ - ContainerCommandResponseProto handleGetSmallFile( - ContainerCommandRequestProto request, KeyValueContainer kvContainer) { - - if (!request.hasGetSmallFile()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Malformed Get Small File request. trace ID: {}", - request.getTraceID()); - } - return ContainerUtils.malformedRequest(request); - } - - // The container can become unhealthy after the lock is released. - // The operation will likely fail/timeout in that happens. - try { - checkContainerIsHealthy(kvContainer); - } catch (StorageContainerException sce) { - return ContainerUtils.logAndReturnError(LOG, sce, request); - } - - GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile(); - - try { - BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock() - .getBlockID()); - BlockData responseData = blockManager.getBlock(kvContainer, blockID); - - ContainerProtos.ChunkInfo chunkInfo = null; - ByteString dataBuf = ByteString.EMPTY; - DispatcherContext dispatcherContext = - new DispatcherContext.Builder().build(); - for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) { - // if the block is committed, all chunks must have been committed. - // Tmp chunk files won't exist here. 
- ByteBuffer data = chunkManager.readChunk(kvContainer, blockID, - ChunkInfo.getFromProtoBuf(chunk), dispatcherContext); - ByteString current = byteBufferToByteString.apply(data); - dataBuf = dataBuf.concat(current); - chunkInfo = chunk; - } - metrics.incContainerBytesStats(Type.GetSmallFile, dataBuf.size()); - return SmallFileUtils.getGetSmallFileResponseSuccess(request, dataBuf - .toByteArray(), ChunkInfo.getFromProtoBuf(chunkInfo)); - } catch (StorageContainerException e) { - return ContainerUtils.logAndReturnError(LOG, e, request); - } catch (IOException ex) { - return ContainerUtils.logAndReturnError(LOG, - new StorageContainerException("Write Chunk failed", ex, - GET_SMALL_FILE_ERROR), request); - } - } - - /** - * Handle unsupported operation. - */ - ContainerCommandResponseProto handleUnsupportedOp( - ContainerCommandRequestProto request) { - // TODO : remove all unsupported operations or handle them. - return ContainerUtils.unsupportedRequest(request); - } - - /** - * Check if container is open. Throw exception otherwise. - * @param kvContainer - * @throws StorageContainerException - */ - private void checkContainerOpen(KeyValueContainer kvContainer) - throws StorageContainerException { - - final State containerState = kvContainer.getContainerState(); - - /* - * In a closing state, follower will receive transactions from leader. - * Once the leader is put to closing state, it will reject further requests - * from clients. Only the transactions which happened before the container - * in the leader goes to closing state, will arrive here even the container - * might already be in closing state here. - */ - if (containerState == State.OPEN || containerState == State.CLOSING) { - return; - } - - final ContainerProtos.Result result; - switch (containerState) { - case QUASI_CLOSED: - result = CLOSED_CONTAINER_IO; - break; - case CLOSED: - result = CLOSED_CONTAINER_IO; - break; - case UNHEALTHY: - result = CONTAINER_UNHEALTHY; - break; - case INVALID: - result = INVALID_CONTAINER_STATE; - break; - default: - result = CONTAINER_INTERNAL_ERROR; - } - String msg = "Requested operation not allowed as ContainerState is " + - containerState; - throw new StorageContainerException(msg, result); - } - - @Override - public Container importContainer(final long containerID, - final long maxSize, final String originPipelineId, - final String originNodeId, final InputStream rawContainerStream, - final TarContainerPacker packer) - throws IOException { - - // TODO: Add layout version! 
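checkContainerOpen(), shown above, only admits write-path operations while the container is OPEN or CLOSING and maps every other state to a specific error result. The same gate in a stripped-down form is sketched here; the State values and messages are stand-ins for the proto-defined states and result codes.

import java.io.IOException;

// Accept writes only for OPEN or CLOSING containers; translate other states
// into a descriptive error.
final class OpenStateGateSketch {
  enum State { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY, INVALID }

  static void checkOpen(State state) throws IOException {
    if (state == State.OPEN || state == State.CLOSING) {
      return;                            // transactions may still be applied
    }
    final String reason;
    switch (state) {
    case QUASI_CLOSED:
    case CLOSED:
      reason = "container is closed";
      break;
    case UNHEALTHY:
      reason = "container is unhealthy";
      break;
    default:
      reason = "invalid container state";
    }
    throw new IOException(
        "Requested operation not allowed as ContainerState is " + state
            + " (" + reason + ")");
  }
}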
- KeyValueContainerData containerData = - new KeyValueContainerData(containerID, - maxSize, originPipelineId, originNodeId); - - KeyValueContainer container = new KeyValueContainer(containerData, - conf); - - populateContainerPathFields(container, maxSize); - container.importContainerData(rawContainerStream, packer); - sendICR(container); - return container; - - } - - @Override - public void exportContainer(final Container container, - final OutputStream outputStream, - final TarContainerPacker packer) - throws IOException{ - container.readLock(); - try { - final KeyValueContainer kvc = (KeyValueContainer) container; - kvc.exportContainerData(outputStream, packer); - } finally { - container.readUnlock(); - } - } - - @Override - public void markContainerForClose(Container container) - throws IOException { - container.writeLock(); - try { - // Move the container to CLOSING state only if it's OPEN - if (container.getContainerState() == State.OPEN) { - container.markContainerForClose(); - sendICR(container); - } - } finally { - container.writeUnlock(); - } - } - - @Override - public void markContainerUnhealthy(Container container) - throws IOException { - container.writeLock(); - try { - if (container.getContainerState() != State.UNHEALTHY) { - try { - container.markContainerUnhealthy(); - } catch (IOException ex) { - // explicitly catch IOException here since the this operation - // will fail if the Rocksdb metadata is corrupted. - long id = container.getContainerData().getContainerID(); - LOG.warn("Unexpected error while marking container " + id - + " as unhealthy", ex); - } finally { - sendICR(container); - } - } - } finally { - container.writeUnlock(); - } - } - - @Override - public void quasiCloseContainer(Container container) - throws IOException { - container.writeLock(); - try { - final State state = container.getContainerState(); - // Quasi close call is idempotent. - if (state == State.QUASI_CLOSED) { - return; - } - // The container has to be in CLOSING state. - if (state != State.CLOSING) { - ContainerProtos.Result error = - state == State.INVALID ? INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; - throw new StorageContainerException( - "Cannot quasi close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); - } - container.quasiClose(); - sendICR(container); - } finally { - container.writeUnlock(); - } - } - - @Override - public void closeContainer(Container container) - throws IOException { - container.writeLock(); - try { - final State state = container.getContainerState(); - // Close call is idempotent. - if (state == State.CLOSED) { - return; - } - if (state == State.UNHEALTHY) { - throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", - ContainerProtos.Result.CONTAINER_UNHEALTHY); - } - // The container has to be either in CLOSING or in QUASI_CLOSED state. - if (state != State.CLOSING && state != State.QUASI_CLOSED) { - ContainerProtos.Result error = - state == State.INVALID ? 
INVALID_CONTAINER_STATE : - CONTAINER_INTERNAL_ERROR; - throw new StorageContainerException( - "Cannot close container #" + container.getContainerData() - .getContainerID() + " while in " + state + " state.", error); - } - container.close(); - sendICR(container); - } finally { - container.writeUnlock(); - } - } - - @Override - public void deleteContainer(Container container, boolean force) - throws IOException { - deleteInternal(container, force); - } - - private void deleteInternal(Container container, boolean force) - throws StorageContainerException { - container.writeLock(); - try { - // If force is false, we check container state. - if (!force) { - // Check if container is open - if (container.getContainerData().isOpen()) { - throw new StorageContainerException( - "Deletion of Open Container is not allowed.", - DELETE_ON_OPEN_CONTAINER); - } - } - long containerId = container.getContainerData().getContainerID(); - containerSet.removeContainer(containerId); - } finally { - container.writeUnlock(); - } - // Avoid holding write locks for disk operations - container.delete(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java deleted file mode 100644 index 13689a705ce..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.BufferedOutputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.stream.Collectors; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; - -import com.google.common.base.Preconditions; -import org.apache.commons.compress.archivers.ArchiveEntry; -import org.apache.commons.compress.archivers.ArchiveOutputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.compress.compressors.CompressorException; -import org.apache.commons.compress.compressors.CompressorInputStream; -import org.apache.commons.compress.compressors.CompressorOutputStream; -import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.commons.io.IOUtils; - -/** - * Compress/uncompress KeyValueContainer data to a tar.gz archive. - */ -public class TarContainerPacker - implements ContainerPacker { - - private static final String CHUNKS_DIR_NAME = OzoneConsts.STORAGE_DIR_CHUNKS; - - private static final String DB_DIR_NAME = "db"; - - private static final String CONTAINER_FILE_NAME = "container.yaml"; - - - - /** - * Given an input stream (tar file) extract the data to the specified - * directories. - * - * @param container container which defines the destination structure. - * @param inputStream the input stream. - * @throws IOException - */ - @Override - public byte[] unpackContainerData(Container container, - InputStream inputStream) - throws IOException { - byte[] descriptorFileContent = null; - try { - KeyValueContainerData containerData = container.getContainerData(); - CompressorInputStream compressorInputStream = - new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, - inputStream); - - TarArchiveInputStream tarInput = - new TarArchiveInputStream(compressorInputStream); - - TarArchiveEntry entry = tarInput.getNextTarEntry(); - while (entry != null) { - String name = entry.getName(); - if (name.startsWith(DB_DIR_NAME + "/")) { - Path destinationPath = containerData.getDbFile().toPath() - .resolve(name.substring(DB_DIR_NAME.length() + 1)); - extractEntry(tarInput, entry.getSize(), destinationPath); - } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) { - Path destinationPath = Paths.get(containerData.getChunksPath()) - .resolve(name.substring(CHUNKS_DIR_NAME.length() + 1)); - extractEntry(tarInput, entry.getSize(), destinationPath); - } else if (name.equals(CONTAINER_FILE_NAME)) { - //Don't do anything. Container file should be unpacked in a - //separated step by unpackContainerDescriptor call. 
- descriptorFileContent = readEntry(tarInput, entry); - } else { - throw new IllegalArgumentException( - "Unknown entry in the tar file: " + "" + name); - } - entry = tarInput.getNextTarEntry(); - } - return descriptorFileContent; - - } catch (CompressorException e) { - throw new IOException( - "Can't uncompress the given container: " + container - .getContainerData().getContainerID(), - e); - } - } - - private void extractEntry(TarArchiveInputStream tarInput, long size, - Path path) throws IOException { - Preconditions.checkNotNull(path, "Path element should not be null"); - Path parent = Preconditions.checkNotNull(path.getParent(), - "Path element should have a parent directory"); - Files.createDirectories(parent); - try (BufferedOutputStream bos = new BufferedOutputStream( - new FileOutputStream(path.toAbsolutePath().toString()))) { - int bufferSize = 1024; - byte[] buffer = new byte[bufferSize + 1]; - long remaining = size; - while (remaining > 0) { - int read = - tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize)); - if (read >= 0) { - remaining -= read; - bos.write(buffer, 0, read); - } else { - remaining = 0; - } - } - } - - } - - /** - * Given a containerData include all the required container data/metadata - * in a tar file. - * - * @param container Container to archive (data + metadata). - * @param destination Destination tar file/stream. - * @throws IOException - */ - @Override - public void pack(Container container, - OutputStream destination) - throws IOException { - - KeyValueContainerData containerData = container.getContainerData(); - - try (CompressorOutputStream gzippedOut = new CompressorStreamFactory() - .createCompressorOutputStream(CompressorStreamFactory.GZIP, - destination)) { - - try (ArchiveOutputStream archiveOutputStream = new TarArchiveOutputStream( - gzippedOut)) { - - includePath(containerData.getDbFile().toString(), DB_DIR_NAME, - archiveOutputStream); - - includePath(containerData.getChunksPath(), CHUNKS_DIR_NAME, - archiveOutputStream); - - includeFile(container.getContainerFile(), - CONTAINER_FILE_NAME, - archiveOutputStream); - } - } catch (CompressorException e) { - throw new IOException( - "Can't compress the container: " + containerData.getContainerID(), - e); - } - - } - - @Override - public byte[] unpackContainerDescriptor(InputStream inputStream) - throws IOException { - try { - CompressorInputStream compressorInputStream = - new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, - inputStream); - - TarArchiveInputStream tarInput = - new TarArchiveInputStream(compressorInputStream); - - TarArchiveEntry entry = tarInput.getNextTarEntry(); - while (entry != null) { - String name = entry.getName(); - if (name.equals(CONTAINER_FILE_NAME)) { - return readEntry(tarInput, entry); - } - entry = tarInput.getNextTarEntry(); - } - - } catch (CompressorException e) { - throw new IOException( - "Can't read the container descriptor from the container archive", - e); - } - throw new IOException( - "Container descriptor is missing from the container archive."); - } - - private byte[] readEntry(TarArchiveInputStream tarInput, - TarArchiveEntry entry) throws IOException { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int bufferSize = 1024; - byte[] buffer = new byte[bufferSize + 1]; - long remaining = entry.getSize(); - while (remaining > 0) { - int read = - tarInput.read(buffer, 0, (int) Math.min(remaining, bufferSize)); - remaining -= read; - bos.write(buffer, 0, read); - } - return bos.toByteArray(); 
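The packer above layers a tar stream over a gzip stream and routes entries by their name prefix ("db/", "chunks/", container.yaml). A compact, self-contained sketch of that commons-compress round trip is shown below; the entry names and class name are illustrative, and the sketch packs and lists a single file rather than a whole container.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
import org.apache.commons.io.IOUtils;

// Wrap the destination in gzip + tar streams to write, and unwrap them in
// the same order to walk the entries back out.
final class TarGzSketch {

  static void pack(File file, String entryName, OutputStream destination)
      throws IOException {
    try (GzipCompressorOutputStream gz = new GzipCompressorOutputStream(destination);
         TarArchiveOutputStream tar = new TarArchiveOutputStream(gz)) {
      TarArchiveEntry entry = new TarArchiveEntry(file, entryName);
      tar.putArchiveEntry(entry);
      try (FileInputStream fis = new FileInputStream(file)) {
        IOUtils.copy(fis, tar);          // stream the file body into the entry
      }
      tar.closeArchiveEntry();
    }
  }

  static void listEntries(InputStream source) throws IOException {
    try (GzipCompressorInputStream gz = new GzipCompressorInputStream(source);
         TarArchiveInputStream tar = new TarArchiveInputStream(gz)) {
      TarArchiveEntry entry;
      while ((entry = tar.getNextTarEntry()) != null) {
        System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
      }
    }
  }
}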
- } - - private void includePath(String containerPath, String subdir, - ArchiveOutputStream archiveOutputStream) throws IOException { - - for (Path path : Files.list(Paths.get(containerPath)) - .collect(Collectors.toList())) { - - includeFile(path.toFile(), subdir + "/" + path.getFileName(), - archiveOutputStream); - } - } - - private void includeFile(File file, String entryName, - ArchiveOutputStream archiveOutputStream) throws IOException { - ArchiveEntry archiveEntry = - archiveOutputStream.createArchiveEntry(file, entryName); - archiveOutputStream.putArchiveEntry(archiveEntry); - try (FileInputStream fis = new FileInputStream(file)) { - IOUtils.copy(fis, archiveOutputStream); - } - archiveOutputStream.closeArchiveEntry(); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java deleted file mode 100644 index da7c8579d88..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .GetBlockResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - GetCommittedBlockLengthResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - PutBlockResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import java.io.IOException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.NO_SUCH_BLOCK; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNABLE_TO_READ_METADATA_DB; - -/** - * Utils functions to help block functions. 
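The BlockUtils helpers that follow hand out container DB handles through ContainerCache as reference-counted objects (ReferenceCountedDB), so a handle obtained in try-with-resources only closes the underlying store when the last user releases it. A minimal sketch of that reference-counting idiom is given below; the generic AutoCloseable "store" stands in for the real metadata DB and the class name is illustrative.

import java.util.concurrent.atomic.AtomicInteger;

// Each user retains the handle before use and closes it afterwards; the
// wrapped store is only closed once the last reference is released.
final class RefCountedHandleSketch implements AutoCloseable {
  private final AutoCloseable store;
  private final AtomicInteger refCount = new AtomicInteger(0);

  RefCountedHandleSketch(AutoCloseable store) {
    this.store = store;
  }

  RefCountedHandleSketch retain() {
    refCount.incrementAndGet();
    return this;
  }

  @Override
  public void close() throws Exception {
    if (refCount.decrementAndGet() == 0) {
      store.close();                     // last user closes the underlying store
    }
  }
}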
- */ -public final class BlockUtils { - - /** Never constructed. **/ - private BlockUtils() { - - } - /** - * Get a DB handler for a given container. - * If the handler doesn't exist in cache yet, first create one and - * add into cache. This function is called with containerManager - * ReadLock held. - * - * @param containerData containerData. - * @param conf configuration. - * @return MetadataStore handle. - * @throws StorageContainerException - */ - public static ReferenceCountedDB getDB(KeyValueContainerData containerData, - Configuration conf) throws - StorageContainerException { - Preconditions.checkNotNull(containerData); - ContainerCache cache = ContainerCache.getInstance(conf); - Preconditions.checkNotNull(cache); - Preconditions.checkNotNull(containerData.getDbFile()); - try { - return cache.getDB(containerData.getContainerID(), containerData - .getContainerDBType(), containerData.getDbFile().getAbsolutePath(), - conf); - } catch (IOException ex) { - String message = String.format("Error opening DB. Container:%s " + - "ContainerPath:%s", containerData.getContainerID(), containerData - .getDbFile().getPath()); - throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB); - } - } - /** - * Remove a DB handler from cache. - * - * @param container - Container data. - * @param conf - Configuration. - */ - public static void removeDB(KeyValueContainerData container, Configuration - conf) { - Preconditions.checkNotNull(container); - ContainerCache cache = ContainerCache.getInstance(conf); - Preconditions.checkNotNull(cache); - cache.removeDB(container.getDbFile().getAbsolutePath()); - } - - /** - * Shutdown all DB Handles. - * - * @param cache - Cache for DB Handles. - */ - @SuppressWarnings("unchecked") - public static void shutdownCache(ContainerCache cache) { - cache.shutdownCache(); - } - - /** - * Parses the {@link BlockData} from a bytes array. - * - * @param bytes Block data in bytes. - * @return Block data. - * @throws IOException if the bytes array is malformed or invalid. - */ - public static BlockData getBlockData(byte[] bytes) throws IOException { - try { - ContainerProtos.BlockData blockData = ContainerProtos.BlockData.parseFrom( - bytes); - BlockData data = BlockData.getFromProtoBuf(blockData); - return data; - } catch (IOException e) { - throw new StorageContainerException("Failed to parse block data from " + - "the bytes array.", NO_SUCH_BLOCK); - } - } - - /** - * Returns putBlock response success. - * @param msg - Request. - * @return Response. - */ - public static ContainerCommandResponseProto putBlockResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { - ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage(); - GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = - getCommittedBlockLengthResponseBuilder(blockData.getSize(), - blockDataProto.getBlockID()); - PutBlockResponseProto.Builder putKeyResponse = - PutBlockResponseProto.newBuilder(); - putKeyResponse - .setCommittedBlockLength(committedBlockLengthResponseBuilder); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setPutBlock(putKeyResponse); - return builder.build(); - } - /** - * Returns successful blockResponse. - * @param msg - Request. - * @return Response. 
- */ - public static ContainerCommandResponseProto getBlockResponseSuccess( - ContainerCommandRequestProto msg) { - return ContainerUtils.getSuccessResponse(msg); - } - - - public static ContainerCommandResponseProto getBlockDataResponse( - ContainerCommandRequestProto msg, BlockData data) { - GetBlockResponseProto.Builder getBlock = ContainerProtos - .GetBlockResponseProto - .newBuilder(); - getBlock.setBlockData(data.getProtoBufMessage()); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setGetBlock(getBlock); - return builder.build(); - } - - /** - * Returns successful getCommittedBlockLength Response. - * @param msg - Request. - * @return Response. - */ - public static ContainerCommandResponseProto getBlockLengthResponse( - ContainerCommandRequestProto msg, long blockLength) { - GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = - getCommittedBlockLengthResponseBuilder(blockLength, - msg.getGetCommittedBlockLength().getBlockID()); - ContainerProtos.ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder); - return builder.build(); - } - - public static GetCommittedBlockLengthResponseProto.Builder - getCommittedBlockLengthResponseBuilder(long blockLength, - ContainerProtos.DatanodeBlockID blockID) { - ContainerProtos.GetCommittedBlockLengthResponseProto.Builder - getCommittedBlockLengthResponseBuilder = ContainerProtos. - GetCommittedBlockLengthResponseProto.newBuilder(); - getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength); - getCommittedBlockLengthResponseBuilder.setBlockID(blockID); - return getCommittedBlockLengthResponseBuilder; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java deleted file mode 100644 index 8ca59b59146..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.CheckedSupplier; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.channels.FileLock; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.security.NoSuchAlgorithmException; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*; - -/** - * Utility methods for chunk operations for KeyValue container. - */ -public final class ChunkUtils { - - private static final Set LOCKS = ConcurrentHashMap.newKeySet(); - - /** Never constructed. **/ - private ChunkUtils() { - - } - - /** - * Writes the data in chunk Info to the specified location in the chunkfile. - * - * @param chunkFile - File to write data to. - * @param chunkInfo - Data stream to write. - * @param data - The data buffer. - * @param volumeIOStats statistics collector - * @param sync whether to do fsync or not - */ - public static void writeData(File chunkFile, ChunkInfo chunkInfo, - ByteBuffer data, VolumeIOStats volumeIOStats, boolean sync) - throws StorageContainerException, ExecutionException, - InterruptedException, NoSuchAlgorithmException { - final int bufferSize = data.remaining(); - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - if (bufferSize != chunkInfo.getLen()) { - String err = String.format("data array does not match the length " + - "specified. DataLen: %d Byte Array: %d", - chunkInfo.getLen(), bufferSize); - log.error(err); - throw new StorageContainerException(err, INVALID_WRITE_SIZE); - } - - Path path = chunkFile.toPath(); - long startTime = Time.monotonicNow(); - processFileExclusively(path, () -> { - FileChannel file = null; - try { - // skip SYNC and DSYNC to reduce contention on file.lock - file = FileChannel.open(path, - StandardOpenOption.CREATE, - StandardOpenOption.WRITE, - StandardOpenOption.SPARSE); - - int size; - try (FileLock ignored = file.lock()) { - size = file.write(data, chunkInfo.getOffset()); - } - - // Increment volumeIO stats here. - volumeIOStats.incWriteTime(Time.monotonicNow() - startTime); - volumeIOStats.incWriteOpCount(); - volumeIOStats.incWriteBytes(size); - if (size != bufferSize) { - log.error("Invalid write size found. Size:{} Expected: {} ", size, - bufferSize); - throw new StorageContainerException("Invalid write size found. 
" + - "Size: " + size + " Expected: " + bufferSize, INVALID_WRITE_SIZE); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (IOException e) { - throw new StorageContainerException(e, IO_EXCEPTION); - } finally { - closeFile(file, sync); - } - - return null; - }); - - if (log.isDebugEnabled()) { - log.debug("Write Chunk completed for chunkFile: {}, size {}", chunkFile, - bufferSize); - } - } - - /** - * Reads data from an existing chunk file. - * - * @param chunkFile - file where data lives. - * @param data - chunk definition. - * @param volumeIOStats statistics collector - * @return ByteBuffer - */ - public static ByteBuffer readData(File chunkFile, ChunkInfo data, - VolumeIOStats volumeIOStats) throws StorageContainerException { - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - if (!chunkFile.exists()) { - log.error("Unable to find the chunk file. chunk info : {}", - data.toString()); - throw new StorageContainerException("Unable to find the chunk file. " + - "chunk info " + - data.toString(), UNABLE_TO_FIND_CHUNK); - } - - long offset = data.getOffset(); - long len = data.getLen(); - ByteBuffer buf = ByteBuffer.allocate((int) len); - - Path path = chunkFile.toPath(); - long startTime = Time.monotonicNow(); - return processFileExclusively(path, () -> { - FileChannel file = null; - - try { - file = FileChannel.open(path, StandardOpenOption.READ); - - try (FileLock ignored = file.lock(offset, len, true)) { - file.read(buf, offset); - buf.flip(); - } - - // Increment volumeIO stats here. - volumeIOStats.incReadTime(Time.monotonicNow() - startTime); - volumeIOStats.incReadOpCount(); - volumeIOStats.incReadBytes(len); - - return buf; - } catch (IOException e) { - throw new StorageContainerException(e, IO_EXCEPTION); - } finally { - if (file != null) { - IOUtils.closeStream(file); - } - } - }); - } - - /** - * Validates chunk data and returns a file object to Chunk File that we are - * expected to write data to. - * - * @param chunkFile - chunkFile to write data into. - * @param info - chunk info. - * @return true if the chunkFile exists and chunkOffset < chunkFile length, - * false otherwise. - */ - public static boolean validateChunkForOverwrite(File chunkFile, - ChunkInfo info) { - - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - if (isOverWriteRequested(chunkFile, info)) { - if (!isOverWritePermitted(info)) { - log.warn("Duplicate write chunk request. Chunk overwrite " + - "without explicit request. {}", info.toString()); - } - return true; - } - return false; - } - - /** - * Validates that Path to chunk file exists. - * - * @param containerData - Container Data - * @param info - Chunk info - * @return - File. 
- * @throws StorageContainerException - */ - public static File getChunkFile(KeyValueContainerData containerData, - ChunkInfo info) throws - StorageContainerException { - - Preconditions.checkNotNull(containerData, "Container data can't be null"); - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - String chunksPath = containerData.getChunksPath(); - if (chunksPath == null) { - log.error("Chunks path is null in the container data"); - throw new StorageContainerException("Unable to get Chunks directory.", - UNABLE_TO_FIND_DATA_DIR); - } - File chunksLoc = new File(chunksPath); - if (!chunksLoc.exists()) { - log.error("Chunks path does not exist"); - throw new StorageContainerException("Unable to get Chunks directory.", - UNABLE_TO_FIND_DATA_DIR); - } - - return chunksLoc.toPath().resolve(info.getChunkName()).toFile(); - } - - /** - * Checks if we are getting a request to overwrite an existing range of - * chunk. - * - * @param chunkFile - File - * @param chunkInfo - Buffer to write - * @return bool - */ - public static boolean isOverWriteRequested(File chunkFile, ChunkInfo - chunkInfo) { - - if (!chunkFile.exists()) { - return false; - } - - long offset = chunkInfo.getOffset(); - return offset < chunkFile.length(); - } - - /** - * Overwrite is permitted if an only if the user explicitly asks for it. We - * permit this iff the key/value pair contains a flag called - * [OverWriteRequested, true]. - * - * @param chunkInfo - Chunk info - * @return true if the user asks for it. - */ - public static boolean isOverWritePermitted(ChunkInfo chunkInfo) { - String overWrite = chunkInfo.getMetadata().get(OzoneConsts.CHUNK_OVERWRITE); - return (overWrite != null) && - (!overWrite.isEmpty()) && - (Boolean.valueOf(overWrite)); - } - - /** - * Returns a CreateContainer Response. This call is used by create and delete - * containers which have null success responses. - * - * @param msg Request - * @return Response. - */ - public static ContainerCommandResponseProto getChunkResponseSuccess( - ContainerCommandRequestProto msg) { - return ContainerUtils.getSuccessResponse(msg); - } - - @VisibleForTesting - static T processFileExclusively( - Path path, CheckedSupplier op - ) throws E { - for (;;) { - if (LOCKS.add(path)) { - break; - } - } - - try { - return op.get(); - } finally { - LOCKS.remove(path); - } - } - - private static void closeFile(FileChannel file, boolean sync) - throws StorageContainerException { - if (file != null) { - try { - if (sync) { - // ensure data and metadata is persisted - file.force(true); - } - file.close(); - } catch (IOException e) { - throw new StorageContainerException("Error closing chunk file", - e, CONTAINER_INTERNAL_ERROR); - } - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java deleted file mode 100644 index 0c7a04e51da..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; - -import java.io.File; - -/** - * Class which provides utility methods for container locations. - */ -public final class KeyValueContainerLocationUtil { - - /* Never constructed. */ - private KeyValueContainerLocationUtil() { - - } - /** - * Returns Container Metadata Location. - * @param hddsVolumeDir base dir of the hdds volume where scm directories - * are stored - * @param scmId - * @param containerId - * @return containerMetadata Path to container metadata location where - * .container file will be stored. - */ - public static File getContainerMetaDataPath(String hddsVolumeDir, - String scmId, - long containerId) { - String containerMetaDataPath = - getBaseContainerLocation(hddsVolumeDir, scmId, - containerId); - containerMetaDataPath = containerMetaDataPath + File.separator + - OzoneConsts.CONTAINER_META_PATH; - return new File(containerMetaDataPath); - } - - - /** - * Returns Container Chunks Location. - * @param baseDir - * @param scmId - * @param containerId - * @return chunksPath - */ - public static File getChunksLocationPath(String baseDir, String scmId, - long containerId) { - String chunksPath = getBaseContainerLocation(baseDir, scmId, containerId) - + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS; - return new File(chunksPath); - } - - /** - * Returns base directory for specified container. - * @param hddsVolumeDir - * @param scmId - * @param containerId - * @return base directory for container. - */ - private static String getBaseContainerLocation(String hddsVolumeDir, - String scmId, - long containerId) { - Preconditions.checkNotNull(hddsVolumeDir, "Base Directory cannot be null"); - Preconditions.checkNotNull(scmId, "scmUuid cannot be null"); - Preconditions.checkState(containerId >= 0, - "Container Id cannot be negative."); - - String containerSubDirectory = getContainerSubDirectory(containerId); - - String containerMetaDataPath = hddsVolumeDir + File.separator + scmId + - File.separator + Storage.STORAGE_DIR_CURRENT + File.separator + - containerSubDirectory + File.separator + containerId; - - return containerMetaDataPath; - } - - /** - * Returns subdirectory, where this container needs to be placed. - * @param containerId - * @return container sub directory - */ - private static String getContainerSubDirectory(long containerId){ - int directory = (int) ((containerId >> 9) & 0xFF); - return Storage.CONTAINER_DIR + directory; - } - - /** - * Return containerDB File. 
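The location helpers above derive every on-disk path from the volume directory, the SCM id and the container id, sharding containers into sub-directories keyed by bits 9-16 of the id. A small sketch reproducing the same layout arithmetic; the literal strings stand in for the Storage/OzoneConsts constants and the ids are example values.

    import java.io.File;

    public final class ContainerLayoutDemo {
      public static void main(String[] args) {
        String hddsVolumeDir = "/data/hdds";          // example volume root
        String scmId = "scm-uuid";                    // example SCM id
        long containerId = 1234L;                     // example container id

        // Same sharding rule as getContainerSubDirectory(): (id >> 9) & 0xFF
        int shard = (int) ((containerId >> 9) & 0xFF);
        String base = hddsVolumeDir + File.separator + scmId
            + File.separator + "current"              // Storage.STORAGE_DIR_CURRENT
            + File.separator + "containerDir" + shard // Storage.CONTAINER_DIR + shard
            + File.separator + containerId;

        File metadata = new File(base + File.separator + "metadata"); // CONTAINER_META_PATH
        File chunks = new File(base + File.separator + "chunks");     // STORAGE_DIR_CHUNKS
        System.out.println(metadata + "\n" + chunks);
      }
    }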
- */ - public static File getContainerDBFile(File containerMetaDataPath, - long containerID) { - return new File(containerMetaDataPath, containerID + OzoneConsts - .DN_CONTAINER_DB); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java deleted file mode 100644 index 3733b06b735..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.Map; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; - -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class which defines utility methods for KeyValueContainer. - */ - -public final class KeyValueContainerUtil { - - /* Never constructed. */ - private KeyValueContainerUtil() { - - } - - private static final Logger LOG = LoggerFactory.getLogger( - KeyValueContainerUtil.class); - - /** - * creates metadata path, chunks path and metadata DB for the specified - * container. - * - * @param containerMetaDataPath - * @throws IOException - */ - public static void createContainerMetaData(File containerMetaDataPath, File - chunksPath, File dbFile, Configuration conf) throws IOException { - Preconditions.checkNotNull(containerMetaDataPath); - Preconditions.checkNotNull(conf); - - if (!containerMetaDataPath.mkdirs()) { - LOG.error("Unable to create directory for metadata storage. 
Path: {}", - containerMetaDataPath); - throw new IOException("Unable to create directory for metadata storage." + - " Path: " + containerMetaDataPath); - } - MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbFile).build(); - - // we close since the SCM pre-creates containers. - // we will open and put Db handle into a cache when keys are being created - // in a container. - - store.close(); - - if (!chunksPath.mkdirs()) { - LOG.error("Unable to create chunks directory Container {}", - chunksPath); - //clean up container metadata path and metadata db - FileUtils.deleteDirectory(containerMetaDataPath); - FileUtils.deleteDirectory(containerMetaDataPath.getParentFile()); - throw new IOException("Unable to create directory for data storage." + - " Path: " + chunksPath); - } - } - - /** - * remove Container if it is empty. - *
<p/>
- * There are three things we need to delete. - *
<p/>
- * 1. Container file and metadata file. 2. The Level DB file 3. The path that - * we created on the data location. - * - * @param containerData - Data of the container to remove. - * @param conf - configuration of the cluster. - * @throws IOException - */ - public static void removeContainer(KeyValueContainerData containerData, - Configuration conf) - throws IOException { - Preconditions.checkNotNull(containerData); - File containerMetaDataPath = new File(containerData - .getMetadataPath()); - File chunksPath = new File(containerData.getChunksPath()); - - // Close the DB connection and remove the DB handler from cache - BlockUtils.removeDB(containerData, conf); - - // Delete the Container MetaData path. - FileUtils.deleteDirectory(containerMetaDataPath); - - //Delete the Container Chunks Path. - FileUtils.deleteDirectory(chunksPath); - - //Delete Container directory - FileUtils.deleteDirectory(containerMetaDataPath.getParentFile()); - } - - /** - * Returns a ReadContainer Response. - * - * @param request Request - * @param containerData - data - * @return Response. - */ - public static ContainerCommandResponseProto getReadContainerResponse( - ContainerCommandRequestProto request, - KeyValueContainerData containerData) { - Preconditions.checkNotNull(containerData); - - ContainerProtos.ReadContainerResponseProto.Builder response = - ContainerProtos.ReadContainerResponseProto.newBuilder(); - response.setContainerData(containerData.getProtoBufMessage()); - - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(request); - builder.setReadContainer(response); - return builder.build(); - } - - /** - * Parse KeyValueContainerData and verify checksum. - * @param kvContainerData - * @param config - * @throws IOException - */ - public static void parseKVContainerData(KeyValueContainerData kvContainerData, - Configuration config) throws IOException { - - long containerID = kvContainerData.getContainerID(); - File metadataPath = new File(kvContainerData.getMetadataPath()); - - // Verify Checksum - ContainerUtils.verifyChecksum(kvContainerData); - - File dbFile = KeyValueContainerLocationUtil.getContainerDBFile( - metadataPath, containerID); - if (!dbFile.exists()) { - LOG.error("Container DB file is missing for ContainerID {}. " + - "Skipping loading of this container.", containerID); - // Don't further process this container, as it is missing db file. - return; - } - kvContainerData.setDbFile(dbFile); - - try(ReferenceCountedDB metadata = - BlockUtils.getDB(kvContainerData, config)) { - long bytesUsed = 0; - List> liveKeys = metadata.getStore() - .getRangeKVs(null, Integer.MAX_VALUE, - MetadataKeyFilters.getNormalKeyFilter()); - - bytesUsed = liveKeys.parallelStream().mapToLong(e-> { - BlockData blockData; - try { - blockData = BlockUtils.getBlockData(e.getValue()); - return blockData.getSize(); - } catch (IOException ex) { - return 0L; - } - }).sum(); - kvContainerData.setBytesUsed(bytesUsed); - kvContainerData.setKeyCount(liveKeys.size()); - byte[] bcsId = metadata.getStore().get(DFSUtil.string2Bytes( - OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); - if (bcsId != null) { - kvContainerData.updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); - } - } - } - - /** - * Returns the path where data or chunks live for a given container. 
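parseKVContainerData() above rebuilds a container's usage statistics at load time by scanning the live block records in the metadata store, summing their sizes (treating unparsable entries as zero) and counting the keys. The same aggregation sketched over plain collections; the sizes are example values standing in for decoded BlockData protobufs.

    import java.util.Arrays;
    import java.util.List;

    public final class UsageScanDemo {
      public static void main(String[] args) {
        // Stand-ins for the sizes of the live block records; the removed code
        // parses each value as a BlockData protobuf and maps parse failures to 0L.
        List<Long> liveKeySizes = Arrays.asList(4096L, 8192L, 0L);

        // Same shape as the parallelStream().mapToLong(...).sum() in parseKVContainerData().
        long bytesUsed = liveKeySizes.parallelStream()
            .mapToLong(Long::longValue)
            .sum();
        long keyCount = liveKeySizes.size();

        System.out.println("bytesUsed=" + bytesUsed + ", keyCount=" + keyCount);
      }
    }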
- * - * @param kvContainerData - KeyValueContainerData - * @return - Path to the chunks directory - */ - public static Path getDataDirectory(KeyValueContainerData kvContainerData) { - - String chunksPath = kvContainerData.getChunksPath(); - Preconditions.checkNotNull(chunksPath); - - return Paths.get(chunksPath); - } - - /** - * Container metadata directory -- here is where the level DB and - * .container file lives. - * - * @param kvContainerData - KeyValueContainerData - * @return Path to the metadata directory - */ - public static Path getMetadataDirectory( - KeyValueContainerData kvContainerData) { - - String metadataPath = kvContainerData.getMetadataPath(); - Preconditions.checkNotNull(metadataPath); - - return Paths.get(metadataPath); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java deleted file mode 100644 index ba2b02c88b6..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; - -/** - * File Utils are helper routines used by putSmallFile and getSmallFile - * RPCs. - */ -public final class SmallFileUtils { - /** - * Never Constructed. - */ - private SmallFileUtils() { - } - - /** - * Gets a response for the putSmallFile RPC. 
- * @param msg - ContainerCommandRequestProto - * @return - ContainerCommandResponseProto - */ - public static ContainerCommandResponseProto getPutFileResponseSuccess( - ContainerCommandRequestProto msg, BlockData blockData) { - ContainerProtos.PutSmallFileResponseProto.Builder getResponse = - ContainerProtos.PutSmallFileResponseProto.newBuilder(); - ContainerProtos.BlockData blockDataProto = blockData.getProtoBufMessage(); - ContainerProtos.GetCommittedBlockLengthResponseProto.Builder - committedBlockLengthResponseBuilder = BlockUtils - .getCommittedBlockLengthResponseBuilder(blockDataProto.getSize(), - blockDataProto.getBlockID()); - getResponse.setCommittedBlockLength(committedBlockLengthResponseBuilder); - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setCmdType(ContainerProtos.Type.PutSmallFile); - builder.setPutSmallFile(getResponse); - return builder.build(); - } - - /** - * Gets a response to the read small file call. - * @param msg - Msg - * @param data - Data - * @param info - Info - * @return Response. - */ - public static ContainerCommandResponseProto getGetSmallFileResponseSuccess( - ContainerCommandRequestProto msg, byte[] data, ChunkInfo info) { - Preconditions.checkNotNull(msg); - - ContainerProtos.ReadChunkResponseProto.Builder readChunkresponse = - ContainerProtos.ReadChunkResponseProto.newBuilder(); - readChunkresponse.setChunkData(info.getProtoBufMessage()); - readChunkresponse.setData(ByteString.copyFrom(data)); - readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID()); - - ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile = - ContainerProtos.GetSmallFileResponseProto.newBuilder(); - getSmallFile.setData(readChunkresponse.build()); - ContainerCommandResponseProto.Builder builder = - ContainerUtils.getSuccessResponseBuilder(msg); - builder.setCmdType(ContainerProtos.Type.GetSmallFile); - builder.setGetSmallFile(getSmallFile); - return builder.build(); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java deleted file mode 100644 index 041f485deae..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; -/** - This package contains utility classes for KeyValue container type. 
- **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java deleted file mode 100644 index 4272861c57e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; - -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH; -/** - * This class is for performing block related operations on the KeyValue - * Container. - */ -public class BlockManagerImpl implements BlockManager { - - static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); - private static byte[] blockCommitSequenceIdKey = - DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX); - - private Configuration config; - - /** - * Constructs a Block Manager. - * - * @param conf - Ozone configuration - */ - public BlockManagerImpl(Configuration conf) { - Preconditions.checkNotNull(conf, "Config cannot be null"); - this.config = conf; - } - - /** - * Puts or overwrites a block. 
- * - * @param container - Container for which block need to be added. - * @param data - BlockData. - * @return length of the block. - * @throws IOException - */ - public long putBlock(Container container, BlockData data) throws IOException { - Preconditions.checkNotNull(data, "BlockData cannot be null for put " + - "operation."); - Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + - "cannot be negative"); - // We are not locking the key manager since LevelDb serializes all actions - // against a single DB. We rely on DB level locking to avoid conflicts. - try(ReferenceCountedDB db = BlockUtils. - getDB((KeyValueContainerData) container.getContainerData(), config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - - long bcsId = data.getBlockCommitSequenceId(); - long containerBCSId = ((KeyValueContainerData) container. - getContainerData()).getBlockCommitSequenceId(); - - // default blockCommitSequenceId for any block is 0. It the putBlock - // request is not coming via Ratis(for test scenarios), it will be 0. - // In such cases, we should overwrite the block as well - if (bcsId != 0) { - if (bcsId <= containerBCSId) { - // Since the blockCommitSequenceId stored in the db is greater than - // equal to blockCommitSequenceId to be updated, it means the putBlock - // transaction is reapplied in the ContainerStateMachine on restart. - // It also implies that the given block must already exist in the db. - // just log and return - LOG.warn("blockCommitSequenceId " + containerBCSId - + " in the Container Db is greater than" + " the supplied value " - + bcsId + " .Ignoring it"); - return data.getSize(); - } - } - // update the blockData as well as BlockCommitSequenceId here - BatchOperation batch = new BatchOperation(); - batch.put(Longs.toByteArray(data.getLocalID()), - data.getProtoBufMessage().toByteArray()); - batch.put(blockCommitSequenceIdKey, - Longs.toByteArray(bcsId)); - db.getStore().writeBatch(batch); - container.updateBlockCommitSequenceId(bcsId); - // Increment keycount here - container.getContainerData().incrKeyCount(); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Block " + data.getBlockID() + " successfully committed with bcsId " - + bcsId + " chunk size " + data.getChunks().size()); - } - return data.getSize(); - } - } - - /** - * Gets an existing block. - * - * @param container - Container from which block need to be fetched. - * @param blockID - BlockID of the block. - * @return Key Data. - * @throws IOException - */ - @Override - public BlockData getBlock(Container container, BlockID blockID) - throws IOException { - long bcsId = blockID.getBlockCommitSequenceId(); - Preconditions.checkNotNull(blockID, - "BlockID cannot be null in GetBlock request"); - Preconditions.checkNotNull(container, - "Container cannot be null"); - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. 
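putBlock() above guards against re-applied Ratis transactions: when the incoming blockCommitSequenceId is non-zero and not greater than the value already recorded for the container, the request is treated as a replay, logged and skipped. That guard in isolation, with illustrative values:

    public final class BcsIdGuardDemo {
      /** Returns true when the put should be applied, false for a replayed transaction. */
      static boolean shouldApply(long incomingBcsId, long containerBcsId) {
        // bcsId == 0 means the request did not come through Ratis (test paths): always apply.
        return incomingBcsId == 0 || incomingBcsId > containerBcsId;
      }

      public static void main(String[] args) {
        System.out.println(shouldApply(0, 5));  // true  - non-Ratis request, apply
        System.out.println(shouldApply(4, 5));  // false - replayed transaction, skip
        System.out.println(shouldApply(6, 5));  // true  - new commit, apply and advance bcsId
      }
    }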
- Preconditions.checkNotNull(db, "DB cannot be null here"); - - long containerBCSId = containerData.getBlockCommitSequenceId(); - if (containerBCSId < bcsId) { - throw new StorageContainerException( - "Unable to find the block with bcsID " + bcsId + " .Container " - + container.getContainerData().getContainerID() + " bcsId is " - + containerBCSId + ".", UNKNOWN_BCSID); - } - byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block." + - blockID, NO_SUCH_BLOCK); - } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - long id = blockData.getBlockID().getBlockCommitSequenceId(); - if (id < bcsId) { - throw new StorageContainerException( - "bcsId " + bcsId + " mismatches with existing block Id " - + id + " for block " + blockID + ".", BCSID_MISMATCH); - } - return BlockData.getFromProtoBuf(blockData); - } - } - - /** - * Returns the length of the committed block. - * - * @param container - Container from which block need to be fetched. - * @param blockID - BlockID of the block. - * @return length of the block. - * @throws IOException in case, the block key does not exist in db. - */ - @Override - public long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - byte[] kData = db.getStore().get(Longs.toByteArray(blockID.getLocalID())); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); - } - ContainerProtos.BlockData blockData = - ContainerProtos.BlockData.parseFrom(kData); - return blockData.getSize(); - } - } - - /** - * Deletes an existing block. - * - * @param container - Container from which block need to be deleted. - * @param blockID - ID of the block. - * @throws StorageContainerException - */ - public void deleteBlock(Container container, BlockID blockID) throws - IOException { - Preconditions.checkNotNull(blockID, "block ID cannot be null."); - Preconditions.checkState(blockID.getContainerID() >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(blockID.getLocalID() >= 0, - "Local ID cannot be negative."); - - KeyValueContainerData cData = (KeyValueContainerData) container - .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { - // This is a post condition that acts as a hint to the user. - // Should never fail. - Preconditions.checkNotNull(db, "DB cannot be null here"); - // Note : There is a race condition here, since get and delete - // are not atomic. Leaving it here since the impact is refusing - // to delete a Block which might have just gotten inserted after - // the get check. - byte[] kKey = Longs.toByteArray(blockID.getLocalID()); - - byte[] kData = db.getStore().get(kKey); - if (kData == null) { - throw new StorageContainerException("Unable to find the block.", - NO_SUCH_BLOCK); - } - db.getStore().delete(kKey); - // Decrement blockcount here - container.getContainerData().decrKeyCount(); - } - } - - /** - * List blocks in a container. - * - * @param container - Container from which blocks need to be listed. - * @param startLocalID - Key to start from, 0 to begin. 
- * @param count - Number of blocks to return. - * @return List of Blocks that match the criteria. - */ - @Override - public List listBlock(Container container, long startLocalID, int - count) throws IOException { - Preconditions.checkNotNull(container, "container cannot be null"); - Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " + - "negative"); - Preconditions.checkArgument(count > 0, - "Count must be a positive number."); - container.readLock(); - try { - List result = null; - KeyValueContainerData cData = - (KeyValueContainerData) container.getContainerData(); - try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { - result = new ArrayList<>(); - byte[] startKeyInBytes = Longs.toByteArray(startLocalID); - List> range = db.getStore() - .getSequentialRangeKVs(startKeyInBytes, count, - MetadataKeyFilters.getNormalKeyFilter()); - for (Map.Entry entry : range) { - BlockData value = BlockUtils.getBlockData(entry.getValue()); - BlockData data = new BlockData(value.getBlockID()); - result.add(data); - } - return result; - } - } finally { - container.readUnlock(); - } - } - - /** - * Shutdown KeyValueContainerManager. - */ - public void shutdown() { - BlockUtils.shutdownCache(ContainerCache.getInstance(config)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java deleted file mode 100644 index fa9e205786e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*; - -/** - * Implementation of ChunkManager built for running performance tests. - * Chunks are not written to disk, Reads are returned with zero-filled buffers - */ -public class ChunkManagerDummyImpl extends ChunkManagerImpl { - static final Logger LOG = LoggerFactory.getLogger( - ChunkManagerDummyImpl.class); - - public ChunkManagerDummyImpl(boolean sync) { - super(sync); - } - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - ChunkInfo - * @param data - data of the chunk - * @param dispatcherContext - dispatcherContextInfo - * @throws StorageContainerException - */ - @Override - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { - long writeTimeStart = Time.monotonicNow(); - - Preconditions.checkNotNull(dispatcherContext); - DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage(); - - Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - - try { - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - int bufferSize; - - switch (stage) { - case WRITE_DATA: - bufferSize = data.capacity(); - if (bufferSize != info.getLen()) { - String err = String.format("data array does not match the length " - + "specified. DataLen: %d Byte Array: %d", - info.getLen(), bufferSize); - log.error(err); - throw new StorageContainerException(err, INVALID_WRITE_SIZE); - } - - // Increment volumeIO stats here. - volumeIOStats.incWriteTime(Time.monotonicNow() - writeTimeStart); - volumeIOStats.incWriteOpCount(); - volumeIOStats.incWriteBytes(info.getLen()); - break; - case COMMIT_DATA: - updateContainerWriteStats(container, info, false); - break; - case COMBINED: - updateContainerWriteStats(container, info, false); - break; - default: - throw new IOException("Can not identify write operation."); - } - } catch (IOException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - CONTAINER_INTERNAL_ERROR); - } - } - - /** - * return a zero-filled buffer. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext dispatcher context info. - * @return byte array - * TODO: Right now we do not support partial reads and writes of chunks. 
- * TODO: Explore if we need to do that for ozone. - */ - @Override - public ByteBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) { - - long readStartTime = Time.monotonicNow(); - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - ByteBuffer data; - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - data = ByteBuffer.allocate((int) info.getLen()); - - // Increment volumeIO stats here. - volumeIOStats.incReadTime(Time.monotonicNow() - readStartTime); - volumeIOStats.incReadOpCount(); - volumeIOStats.incReadBytes(info.getLen()); - - return data; - } - - /** - * Delete a given chunk - Do nothing except stats. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - Chunk Info - */ - @Override - public void deleteChunk(Container container, BlockID blockID, - ChunkInfo info) { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - - if (info.getOffset() == 0) { - containerData.decrBytesUsed(info.getLen()); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java deleted file mode 100644 index 85495783cc8..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT; - -/** - * Select an appropriate ChunkManager implementation as per config setting. 
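ChunkManagerDummyImpl above is the no-persistence variant used for performance testing: writeChunk() only validates lengths and updates I/O statistics, and readChunk() hands back a zero-filled buffer of the requested length instead of touching disk. The read side reduces to a single allocation; the length is an example value.

    import java.nio.ByteBuffer;

    public final class ZeroReadDemo {
      public static void main(String[] args) {
        long chunkLen = 4096;                          // info.getLen() in the removed code
        // ByteBuffer.allocate() returns a zero-filled buffer, so no disk read is needed.
        ByteBuffer data = ByteBuffer.allocate((int) chunkLen);
        System.out.println("remaining=" + data.remaining()); // 4096 bytes of zeros
      }
    }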
- * Ozone ChunkManager is a Singleton - */ -public final class ChunkManagerFactory { - static final Logger LOG = LoggerFactory.getLogger(ChunkManagerFactory.class); - - private static volatile ChunkManager instance = null; - private static boolean syncChunks = false; - - private ChunkManagerFactory() { - } - - public static ChunkManager getChunkManager(Configuration config, - boolean sync) { - if (instance == null) { - synchronized (ChunkManagerFactory.class) { - if (instance == null) { - instance = createChunkManager(config, sync); - syncChunks = sync; - } - } - } - - Preconditions.checkArgument((syncChunks == sync), - "value of sync conflicts with previous invocation"); - return instance; - } - - private static ChunkManager createChunkManager(Configuration config, - boolean sync) { - ChunkManager manager = null; - boolean persist = config.getBoolean(HDDS_CONTAINER_PERSISTDATA, - HDDS_CONTAINER_PERSISTDATA_DEFAULT); - - if (!persist) { - boolean scrubber = config.getBoolean( - HDDS_CONTAINER_SCRUB_ENABLED, - HDDS_CONTAINER_SCRUB_ENABLED_DEFAULT); - if (scrubber) { - // Data Scrubber needs to be disabled for non-persistent chunks. - LOG.warn("Failed to set " + HDDS_CONTAINER_PERSISTDATA + " to false." - + " Please set " + HDDS_CONTAINER_SCRUB_ENABLED - + " also to false to enable non-persistent containers."); - persist = true; - } - } - - if (persist) { - manager = new ChunkManagerImpl(sync); - } else { - LOG.warn(HDDS_CONTAINER_PERSISTDATA - + " is set to false. This should be used only for testing." - + " All user data will be discarded."); - manager = new ChunkManagerDummyImpl(sync); - } - - return manager; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java deleted file mode 100644 index e22841eec8a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerImpl.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
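ChunkManagerFactory above creates the single ChunkManager lazily with double-checked locking on a volatile field and then requires every later caller to pass the same sync flag. The locking idiom on its own, generalized with a Supplier; the names are illustrative.

    import java.util.function.Supplier;

    public final class LazySingleton<T> {
      private volatile T instance;                    // volatile is what makes the idiom safe
      private final Supplier<T> factory;

      public LazySingleton(Supplier<T> factory) { this.factory = factory; }

      public T get() {
        if (instance == null) {                       // first check, no lock taken
          synchronized (this) {
            if (instance == null) {                   // second check, under the lock
              instance = factory.get();
            }
          }
        }
        return instance;
      }
    }

Every call to get() returns the same instance; the removed factory additionally remembers the first sync argument and rejects conflicting values with Preconditions.checkArgument.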
- */ - -package org.apache.hadoop.ozone.container.keyvalue.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.StandardCopyOption; -import java.security.NoSuchAlgorithmException; -import java.util.concurrent.ExecutionException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.NO_SUCH_ALGORITHM; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; - -/** - * This class is for performing chunk related operations. - */ -public class ChunkManagerImpl implements ChunkManager { - static final Logger LOG = LoggerFactory.getLogger(ChunkManagerImpl.class); - private final boolean doSyncWrite; - - public ChunkManagerImpl(boolean sync) { - doSyncWrite = sync; - } - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - ChunkInfo - * @param data - data of the chunk - * @param dispatcherContext - dispatcherContextInfo - * @throws StorageContainerException - */ - public void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException { - Preconditions.checkNotNull(dispatcherContext); - DispatcherContext.WriteChunkStage stage = dispatcherContext.getStage(); - try { - - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - boolean isOverwrite = ChunkUtils.validateChunkForOverwrite( - chunkFile, info); - File tmpChunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - if (LOG.isDebugEnabled()) { - LOG.debug( - "writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file:{}", - info.getChunkName(), stage, chunkFile, tmpChunkFile); - } - - switch (stage) { - case WRITE_DATA: - if (isOverwrite) { - // if the actual chunk file already exists here while writing the temp - // chunk file, then it means the same ozone client request has - // generated two raft log entries. This can happen either because - // retryCache expired in Ratis (or log index mismatch/corruption in - // Ratis). This can be solved by two approaches as of now: - // 1. 
Read the complete data in the actual chunk file , - // verify the data integrity and in case it mismatches , either - // 2. Delete the chunk File and write the chunk again. For now, - // let's rewrite the chunk file - // TODO: once the checksum support for write chunks gets plugged in, - // the checksum needs to be verified for the actual chunk file and - // the data to be written here which should be efficient and - // it matches we can safely return without rewriting. - LOG.warn("ChunkFile already exists" + chunkFile + ".Deleting it."); - FileUtil.fullyDelete(chunkFile); - } - if (tmpChunkFile.exists()) { - // If the tmp chunk file already exists it means the raft log got - // appended, but later on the log entry got truncated in Ratis leaving - // behind garbage. - // TODO: once the checksum support for data chunks gets plugged in, - // instead of rewriting the chunk here, let's compare the checkSums - LOG.warn( - "tmpChunkFile already exists" + tmpChunkFile + "Overwriting it."); - } - // Initially writes to temporary chunk file. - ChunkUtils - .writeData(tmpChunkFile, info, data, volumeIOStats, doSyncWrite); - // No need to increment container stats here, as still data is not - // committed here. - break; - case COMMIT_DATA: - // commit the data, means move chunk data from temporary chunk file - // to actual chunk file. - if (isOverwrite) { - // if the actual chunk file already exists , it implies the write - // chunk transaction in the containerStateMachine is getting - // reapplied. This can happen when a node restarts. - // TODO: verify the checkSums for the existing chunkFile and the - // chunkInfo to be committed here - LOG.warn("ChunkFile already exists" + chunkFile); - return; - } - // While committing a chunk , just rename the tmp chunk file which has - // the same term and log index appended as the current transaction - commitChunk(tmpChunkFile, chunkFile); - // Increment container stats here, as we commit the data. - updateContainerWriteStats(container, info, isOverwrite); - break; - case COMBINED: - // directly write to the chunk file - ChunkUtils.writeData(chunkFile, info, data, volumeIOStats, doSyncWrite); - updateContainerWriteStats(container, info, isOverwrite); - break; - default: - throw new IOException("Can not identify write operation."); - } - } catch (StorageContainerException ex) { - throw ex; - } catch (NoSuchAlgorithmException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - NO_SUCH_ALGORITHM); - } catch (ExecutionException | IOException ex) { - LOG.error("write data failed. error: {}", ex); - throw new StorageContainerException("Internal error: ", ex, - CONTAINER_INTERNAL_ERROR); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.error("write data failed. error: {}", e); - throw new StorageContainerException("Internal error: ", e, - CONTAINER_INTERNAL_ERROR); - } - } - - protected void updateContainerWriteStats(Container container, ChunkInfo info, - boolean isOverwrite) { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - - if (!isOverwrite) { - containerData.incrBytesUsed(info.getLen()); - } - containerData.incrWriteCount(); - containerData.incrWriteBytes(info.getLen()); - } - - /** - * reads the data defined by a chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext dispatcher context info. 
- * @return byte array - * @throws StorageContainerException - * TODO: Right now we do not support partial reads and writes of chunks. - * TODO: Explore if we need to do that for ozone. - */ - public ByteBuffer readChunk(Container container, BlockID blockID, - ChunkInfo info, DispatcherContext dispatcherContext) - throws StorageContainerException { - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - ByteBuffer data; - HddsVolume volume = containerData.getVolume(); - VolumeIOStats volumeIOStats = volume.getVolumeIOStats(); - - // Check which layout version the container uses and read the chunk - // file in that format. - // In version1, we verify the checksum if it is available and return - // the data of the chunk file. - if (containerData.getLayOutVersion() == ChunkLayOutVersion - .getLatestVersion().getVersion()) { - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - // In case the chunk file does not exist but the tmp chunk file exists, - // read from the tmp chunk file if readFromTmpFile is set to true. - if (!chunkFile.exists() && dispatcherContext != null - && dispatcherContext.isReadFromTmpFile()) { - chunkFile = getTmpChunkFile(chunkFile, dispatcherContext); - } - data = ChunkUtils.readData(chunkFile, info, volumeIOStats); - containerData.incrReadCount(); - long length = chunkFile.length(); - containerData.incrReadBytes(length); - return data; - } - return null; - } - - /** - * Deletes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block - * @param info - Chunk Info - * @throws StorageContainerException - */ - public void deleteChunk(Container container, BlockID blockID, ChunkInfo info) - throws StorageContainerException { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - KeyValueContainerData containerData = (KeyValueContainerData) container - .getContainerData(); - // Check which layout version the container uses and perform the - // delete chunk operation accordingly. - // In version1, we have only the chunk file. - if (containerData.getLayOutVersion() == ChunkLayOutVersion - .getLatestVersion().getVersion()) { - File chunkFile = ChunkUtils.getChunkFile(containerData, info); - - // if the chunk file does not exist, it might have already been deleted. - // The call might be because of reapply of transactions on datanode - // restart. - if (!chunkFile.exists()) { - LOG.warn("Chunk file does not exist. chunk info: " + info.toString()); - return; - } - if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) { - // Capture the length before deleting the file, otherwise the - // used-bytes counter would be decremented by zero. - long chunkLength = chunkFile.length(); - FileUtil.fullyDelete(chunkFile); - containerData.decrBytesUsed(chunkLength); - } else { - LOG.error("Unsupported operation: trying to delete a " + - "chunk that is in a shared file. chunk info: " + info.toString()); - throw new StorageContainerException("Unsupported operation: " + - "trying to delete a chunk that is in a shared file. chunk info: " - + info.toString(), UNSUPPORTED_REQUEST); - } - } - } - - /** - * Shutdown the chunkManager. - * - * In the chunkManager we haven't acquired any resources, so nothing to do - * here. - */ - - public void shutdown() { - //TODO: need to revisit this during integration of container IO. - } - - /** - * Returns the temporary chunkFile path.
- * @param chunkFile chunkFileName - * @param dispatcherContext dispatcher context info - * @return temporary chunkFile path - * @throws StorageContainerException - */ - private File getTmpChunkFile(File chunkFile, - DispatcherContext dispatcherContext) { - return new File(chunkFile.getParent(), - chunkFile.getName() + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - dispatcherContext.getTerm() + - OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + - dispatcherContext.getLogIndex()); - } - - /** - * Commit the chunk by renaming the temporary chunk file to chunk file. - * @param tmpChunkFile - * @param chunkFile - * @throws IOException - */ - private void commitChunk(File tmpChunkFile, File chunkFile) throws - IOException { - Files.move(tmpChunkFile.toPath(), chunkFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java deleted file mode 100644 index 564b50e8a4d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.impl; -/** - * Chunk manager and block manager implementations for keyvalue container type. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java deleted file mode 100644 index 6812b0d8ff8..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.interfaces; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; - -import java.io.IOException; -import java.util.List; - -/** - * BlockManager is for performing key related operations on the container. - */ -public interface BlockManager { - - /** - * Puts or overwrites a block. - * - * @param container - Container for which block need to be added. - * @param data - Block Data. - * @return length of the Block. - * @throws IOException - */ - long putBlock(Container container, BlockData data) throws IOException; - - /** - * Gets an existing block. - * - * @param container - Container from which block need to be get. - * @param blockID - BlockID of the Block. - * @return Block Data. - * @throws IOException - */ - BlockData getBlock(Container container, BlockID blockID) - throws IOException; - - /** - * Deletes an existing block. - * - * @param container - Container from which block need to be deleted. - * @param blockID - ID of the block. - * @throws StorageContainerException - */ - void deleteBlock(Container container, BlockID blockID) throws IOException; - - /** - * List blocks in a container. - * - * @param container - Container from which blocks need to be listed. - * @param startLocalID - Block to start from, 0 to begin. - * @param count - Number of blocks to return. - * @return List of Blocks that match the criteria. - */ - List listBlock(Container container, long startLocalID, int count) - throws IOException; - - /** - * Returns the last committed block length for the block. - * @param blockID blockId - */ - long getCommittedBlockLength(Container container, BlockID blockID) - throws IOException; - - /** - * Shutdown ContainerManager. - */ - void shutdown(); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java deleted file mode 100644 index 5adb6415ec1..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ /dev/null @@ -1,83 +0,0 @@ -package org.apache.hadoop.ozone.container.keyvalue.interfaces; - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; - -import java.nio.ByteBuffer; - -/** - * Chunk Manager allows read, write, delete and listing of chunks in - * a container. - */ - -public interface ChunkManager { - - /** - * writes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext - dispatcher context info. - * @throws StorageContainerException - */ - void writeChunk(Container container, BlockID blockID, ChunkInfo info, - ByteBuffer data, DispatcherContext dispatcherContext) - throws StorageContainerException; - - /** - * reads the data defined by a chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - ChunkInfo. - * @param dispatcherContext - dispatcher context info. - * @return byte array - * @throws StorageContainerException - * - * TODO: Right now we do not support partial reads and writes of chunks. - * TODO: Explore if we need to do that for ozone. - */ - ByteBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, - DispatcherContext dispatcherContext) throws StorageContainerException; - - /** - * Deletes a given chunk. - * - * @param container - Container for the chunk - * @param blockID - ID of the block. - * @param info - Chunk Info - * @throws StorageContainerException - */ - void deleteChunk(Container container, BlockID blockID, ChunkInfo info) throws - StorageContainerException; - - // TODO : Support list operations. - - /** - * Shutdown the chunkManager. - */ - void shutdown(); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java deleted file mode 100644 index 512909451f0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.interfaces; -/** - * Chunk manager and block manager interfaces for keyvalue container type. 
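ChunkManagerImpl never writes client data straight into the final chunk file: during WRITE_DATA the bytes are staged in a temporary file whose name carries the Ratis term and log index, and COMMIT_DATA promotes that file with a single atomic rename. A minimal, self-contained sketch of the same stage-then-rename pattern using only JDK file APIs follows; the file name and the term/log-index values are made up for illustration and this is not the Ozone wiring.

  import java.io.IOException;
  import java.nio.file.Files;
  import java.nio.file.Path;
  import java.nio.file.StandardCopyOption;
  import java.nio.file.StandardOpenOption;

  public final class StageAndCommitDemo {

    // Stage the chunk bytes in a sibling ".tmp.<term>.<logIndex>" file.
    static Path stage(Path chunkFile, byte[] data, long term, long logIndex)
        throws IOException {
      Path tmp = chunkFile.resolveSibling(
          chunkFile.getFileName() + ".tmp." + term + "." + logIndex);
      Files.write(tmp, data,
          StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
      return tmp;
    }

    // Commit by renaming the staged file over the final chunk file;
    // REPLACE_EXISTING keeps the operation safe to re-apply.
    static void commit(Path tmp, Path chunkFile) throws IOException {
      Files.move(tmp, chunkFile, StandardCopyOption.REPLACE_EXISTING);
    }

    public static void main(String[] args) throws IOException {
      Path dir = Files.createTempDirectory("chunk-demo");
      Path chunk = dir.resolve("block_1_chunk_0");          // hypothetical name
      Path tmp = stage(chunk, "hello".getBytes(), 3, 42);   // invented term/index
      commit(tmp, chunk);
      System.out.println(Files.readString(chunk));          // prints "hello"
    }
  }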
- */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java deleted file mode 100644 index 53c9f1e0f97..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue; -/** - This package contains classes for KeyValue container type. - **/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java deleted file mode 100644 index bc3f51a54ef..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ /dev/null @@ -1,332 +0,0 @@ - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; - -import com.google.common.collect.Lists; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; - -/** - * A per-datanode container block deleting service takes in charge - * of deleting staled ozone blocks. 
- */ -// TODO: Fix BlockDeletingService to work with new StorageLayer -public class BlockDeletingService extends BackgroundService { - - private static final Logger LOG = - LoggerFactory.getLogger(BlockDeletingService.class); - - private OzoneContainer ozoneContainer; - private ContainerDeletionChoosingPolicy containerDeletionPolicy; - private final Configuration conf; - - // Throttle number of blocks to delete per task, - // set to 1 for testing - private final int blockLimitPerTask; - - // Throttle the number of containers to process concurrently at a time, - private final int containerLimitPerInterval; - - // Task priority is useful when a to-delete block has weight. - private final static int TASK_PRIORITY_DEFAULT = 1; - // Core pool size for container tasks - private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 10; - - public BlockDeletingService(OzoneContainer ozoneContainer, - long serviceInterval, long serviceTimeout, TimeUnit timeUnit, - Configuration conf) { - super("BlockDeletingService", serviceInterval, timeUnit, - BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); - this.ozoneContainer = ozoneContainer; - containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - TopNOrderedContainerDeletionChoosingPolicy.class, - ContainerDeletionChoosingPolicy.class), conf); - this.conf = conf; - this.blockLimitPerTask = - conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, - OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT); - this.containerLimitPerInterval = - conf.getInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT); - } - - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - List containers = Lists.newArrayList(); - try { - // We at most list a number of containers a time, - // in case there are too many containers and start too many workers. - // We must ensure there is no empty container in this result. - // The chosen result depends on what container deletion policy is - // configured. - containers = chooseContainerForBlockDeletion(containerLimitPerInterval, - containerDeletionPolicy); - if (containers.size() > 0) { - LOG.info("Plan to choose {} containers for block deletion, " - + "actually returns {} valid containers.", - containerLimitPerInterval, containers.size()); - } - - for(ContainerData container : containers) { - BlockDeletingTask containerTask = - new BlockDeletingTask(container, TASK_PRIORITY_DEFAULT); - queue.add(containerTask); - } - } catch (StorageContainerException e) { - LOG.warn("Failed to initiate block deleting tasks, " - + "caused by unable to get containers info. " - + "Retry in next interval. ", e); - } catch (Exception e) { - // In case listContainer call throws any uncaught RuntimeException. 
- if (LOG.isDebugEnabled()) { - LOG.debug("Unexpected error occurs during deleting blocks.", e); - } - } - return queue; - } - - public List chooseContainerForBlockDeletion(int count, - ContainerDeletionChoosingPolicy deletionPolicy) - throws StorageContainerException { - Map containerDataMap = - ozoneContainer.getContainerSet().getContainerMap().entrySet().stream() - .filter(e -> isDeletionAllowed(e.getValue().getContainerData(), - deletionPolicy)).collect(Collectors - .toMap(Map.Entry::getKey, e -> e.getValue().getContainerData())); - return deletionPolicy - .chooseContainerForBlockDeletion(count, containerDataMap); - } - - private boolean isDeletionAllowed(ContainerData containerData, - ContainerDeletionChoosingPolicy deletionPolicy) { - if (!deletionPolicy - .isValidContainerType(containerData.getContainerType())) { - return false; - } else if (!containerData.isClosed()) { - return false; - } else { - if (ozoneContainer.getWriteChannel() instanceof XceiverServerRatis) { - XceiverServerRatis ratisServer = - (XceiverServerRatis) ozoneContainer.getWriteChannel(); - PipelineID pipelineID = PipelineID - .valueOf(UUID.fromString(containerData.getOriginPipelineId())); - // in case te ratis group does not exist, just mark it for deletion. - if (!ratisServer.isExist(pipelineID.getProtobuf())) { - return true; - } - try { - long minReplicatedIndex = - ratisServer.getMinReplicatedIndex(pipelineID); - long containerBCSID = containerData.getBlockCommitSequenceId(); - if (minReplicatedIndex >= 0 && minReplicatedIndex < containerBCSID) { - LOG.warn("Close Container log Index {} is not replicated across all" - + "the servers in the pipeline {} as the min replicated " - + "index is {}. Deletion is not allowed in this container " - + "yet.", containerBCSID, - containerData.getOriginPipelineId(), minReplicatedIndex); - return false; - } else { - return true; - } - } catch (IOException ioe) { - // in case of any exception check again whether the pipeline exist - // and in case the pipeline got destroyed, just mark it for deletion - if (!ratisServer.isExist(pipelineID.getProtobuf())) { - return true; - } else { - LOG.info(ioe.getMessage()); - return false; - } - } - } - return true; - } - } - - private static class ContainerBackgroundTaskResult - implements BackgroundTaskResult { - private List deletedBlockIds; - - ContainerBackgroundTaskResult() { - deletedBlockIds = new LinkedList<>(); - } - - public void addBlockId(String blockId) { - deletedBlockIds.add(blockId); - } - - public void addAll(List blockIds) { - deletedBlockIds.addAll(blockIds); - } - - public List getDeletedBlocks() { - return deletedBlockIds; - } - - @Override - public int getSize() { - return deletedBlockIds.size(); - } - } - - private class BlockDeletingTask - implements BackgroundTask { - - private final int priority; - private final KeyValueContainerData containerData; - - BlockDeletingTask(ContainerData containerName, int priority) { - this.priority = priority; - this.containerData = (KeyValueContainerData) containerName; - } - - @Override - public BackgroundTaskResult call() throws Exception { - ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult(); - final Container container = ozoneContainer.getContainerSet() - .getContainer(containerData.getContainerID()); - container.writeLock(); - long startTime = Time.monotonicNow(); - // Scan container's db and get list of under deletion blocks - try (ReferenceCountedDB meta = BlockUtils.getDB(containerData, conf)) { - // # of blocks to delete is throttled - KeyPrefixFilter 
filter = - new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); - List> toDeleteBlocks = - meta.getStore().getSequentialRangeKVs(null, blockLimitPerTask, - filter); - if (toDeleteBlocks.isEmpty()) { - LOG.debug("No under deletion block found in container : {}", - containerData.getContainerID()); - } - - List succeedBlocks = new LinkedList<>(); - LOG.debug("Container : {}, To-Delete blocks : {}", - containerData.getContainerID(), toDeleteBlocks.size()); - File dataDir = new File(containerData.getChunksPath()); - if (!dataDir.exists() || !dataDir.isDirectory()) { - LOG.error("Invalid container data dir {} : " - + "does not exist or not a directory", dataDir.getAbsolutePath()); - return crr; - } - - toDeleteBlocks.forEach(entry -> { - String blockName = DFSUtil.bytes2String(entry.getKey()); - LOG.debug("Deleting block {}", blockName); - try { - ContainerProtos.BlockData data = - ContainerProtos.BlockData.parseFrom(entry.getValue()); - for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) { - File chunkFile = dataDir.toPath() - .resolve(chunkInfo.getChunkName()).toFile(); - if (FileUtils.deleteQuietly(chunkFile)) { - if (LOG.isDebugEnabled()) { - LOG.debug("block {} chunk {} deleted", blockName, - chunkFile.getAbsolutePath()); - } - } - } - succeedBlocks.add(blockName); - } catch (InvalidProtocolBufferException e) { - LOG.error("Failed to parse block info for block {}", blockName, e); - } - }); - - // Once files are deleted... replace deleting entries with deleted - // entries - BatchOperation batch = new BatchOperation(); - succeedBlocks.forEach(entry -> { - String blockId = - entry.substring(OzoneConsts.DELETING_KEY_PREFIX.length()); - String deletedEntry = OzoneConsts.DELETED_KEY_PREFIX + blockId; - batch.put(DFSUtil.string2Bytes(deletedEntry), - DFSUtil.string2Bytes(blockId)); - batch.delete(DFSUtil.string2Bytes(entry)); - }); - meta.getStore().writeBatch(batch); - // update count of pending deletion blocks in in-memory container status - containerData.decrPendingDeletionBlocks(succeedBlocks.size()); - - if (!succeedBlocks.isEmpty()) { - LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms", - containerData.getContainerID(), succeedBlocks.size(), - Time.monotonicNow() - startTime); - } - crr.addAll(succeedBlocks); - return crr; - } finally { - container.writeUnlock(); - } - } - - @Override - public int getPriority() { - return priority; - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java deleted file mode 100644 index 69d80425ab7..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
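BlockDeletingTask removes a block's chunk files first and only afterwards flips the block's metadata key from the "deleting" prefix to the "deleted" prefix in one batch, so a block whose files were removed but whose key was not yet flipped is simply picked up again on the next run. A rough, self-contained sketch of that key-flip step is shown below, with a plain HashMap standing in for the container metadata store and placeholder prefix literals standing in for OzoneConsts.DELETING_KEY_PREFIX / DELETED_KEY_PREFIX.

  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import java.util.stream.Collectors;

  public final class DeletingKeyDemo {

    // Placeholder stand-ins for the OzoneConsts prefix constants.
    static final String DELETING = "#deleting#";
    static final String DELETED = "#deleted#";

    // Mimics the batch at the end of BlockDeletingTask.call(): for every
    // block whose chunk files were removed, drop the "deleting" entry and
    // record a "deleted" entry instead.
    static void markDeleted(Map<String, String> store, List<String> doneKeys) {
      for (String key : doneKeys) {
        String blockId = key.substring(DELETING.length());
        store.remove(key);                      // batch.delete(entry)
        store.put(DELETED + blockId, blockId);  // batch.put(deletedEntry, id)
      }
    }

    public static void main(String[] args) {
      Map<String, String> store = new HashMap<>();
      store.put(DELETING + "101", "...block data...");
      store.put(DELETING + "102", "...block data...");

      List<String> underDeletion = store.keySet().stream()
          .filter(k -> k.startsWith(DELETING))  // KeyPrefixFilter equivalent
          .collect(Collectors.toList());

      markDeleted(store, underDeletion);
      System.out.println(store.keySet());       // e.g. [#deleted#101, #deleted#102]
    }
  }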
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java deleted file mode 100644 index 8bbdec96695..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto.State; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Iterator; -import java.util.Map; - -/** - * Control plane for container management in datanode. - */ -public class ContainerController { - - private final ContainerSet containerSet; - private final Map handlers; - - public ContainerController(final ContainerSet containerSet, - final Map handlers) { - this.containerSet = containerSet; - this.handlers = handlers; - } - - /** - * Returns the Container given a container id. - * - * @param containerId ID of the container - * @return Container - */ - public Container getContainer(final long containerId) { - return containerSet.getContainer(containerId); - } - - /** - * Marks the container for closing. Moves the container to CLOSING state. 
- * - * @param containerId Id of the container to update - * @throws IOException in case of exception - */ - public void markContainerForClose(final long containerId) - throws IOException { - Container container = containerSet.getContainer(containerId); - - if (container.getContainerState() == State.OPEN) { - getHandler(container).markContainerForClose(container); - } - } - - /** - * Marks the container as UNHEALTHY. - * - * @param containerId Id of the container to update - * @throws IOException in case of exception - */ - public void markContainerUnhealthy(final long containerId) - throws IOException { - Container container = containerSet.getContainer(containerId); - getHandler(container).markContainerUnhealthy(container); - } - - /** - * Returns the container report. - * - * @return ContainerReportsProto - * @throws IOException in case of exception - */ - public ContainerReportsProto getContainerReport() - throws IOException { - return containerSet.getContainerReport(); - } - - /** - * Quasi closes a container given its id. - * - * @param containerId Id of the container to quasi close - * @throws IOException in case of exception - */ - public void quasiCloseContainer(final long containerId) throws IOException { - final Container container = containerSet.getContainer(containerId); - getHandler(container).quasiCloseContainer(container); - } - - /** - * Closes a container given its Id. - * - * @param containerId Id of the container to close - * @throws IOException in case of exception - */ - public void closeContainer(final long containerId) throws IOException { - final Container container = containerSet.getContainer(containerId); - getHandler(container).closeContainer(container); - } - - public Container importContainer(final ContainerType type, - final long containerId, final long maxSize, final String originPipelineId, - final String originNodeId, final InputStream rawContainerStream, - final TarContainerPacker packer) - throws IOException { - return handlers.get(type).importContainer(containerId, maxSize, - originPipelineId, originNodeId, rawContainerStream, packer); - } - - public void exportContainer(final ContainerType type, - final long containerId, final OutputStream outputStream, - final TarContainerPacker packer) throws IOException { - handlers.get(type).exportContainer( - containerSet.getContainer(containerId), outputStream, packer); - } - - /** - * Deletes a container given its Id. - * @param containerId Id of the container to be deleted - * @param force if this is set to true, we delete container without checking - * state of the container. - */ - public void deleteContainer(final long containerId, boolean force) - throws IOException { - final Container container = containerSet.getContainer(containerId); - if (container != null) { - getHandler(container).deleteContainer(container, force); - } - } - - /** - * Given a container, returns its handler instance. - * - * @param container Container - * @return handler of the container - */ - private Handler getHandler(final Container container) { - return handlers.get(container.getContainerType()); - } - - public Iterator> getContainers() { - return containerSet.getContainerIterator(); - } - - /** - * Return an iterator of containers which are associated with the specified - * volume. 
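ContainerController itself holds no container-type-specific logic; every operation looks up the Handler registered for the container's type and delegates to it. A toy, self-contained sketch of that lookup-map dispatch follows; the enum, interface, and names are simplified stand-ins, not the real Ozone types.

  import java.util.EnumMap;
  import java.util.Map;

  public final class HandlerDispatchDemo {

    // Toy stand-ins for ContainerProtos.ContainerType and the Handler class.
    enum ContainerType { KEY_VALUE }

    interface Handler {
      void closeContainer(long containerId);
    }

    // The controller only knows the map; all type-specific behaviour lives
    // in the handler registered for that container type.
    static final class Controller {
      private final Map<ContainerType, Handler> handlers;

      Controller(Map<ContainerType, Handler> handlers) {
        this.handlers = handlers;
      }

      void closeContainer(ContainerType type, long containerId) {
        handlers.get(type).closeContainer(containerId);
      }
    }

    public static void main(String[] args) {
      Map<ContainerType, Handler> handlers = new EnumMap<>(ContainerType.class);
      handlers.put(ContainerType.KEY_VALUE,
          id -> System.out.println("closing key-value container " + id));
      new Controller(handlers).closeContainer(ContainerType.KEY_VALUE, 7L);
    }
  }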
- * - * @param volume the HDDS volume which should be used to filter containers - * @return {@literal Iterator} - */ - public Iterator> getContainers(HddsVolume volume) { - return containerSet.getContainerIterator(volume); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java deleted file mode 100644 index 1141951dcc0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java +++ /dev/null @@ -1,178 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import java.io.IOException; -import java.util.Iterator; -import java.util.concurrent.TimeUnit; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * VolumeScanner scans a single volume. Each VolumeScanner has its own thread. - *

They are all managed by the DataNode's BlockScanner. - */ -public class ContainerDataScanner extends Thread { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerDataScanner.class); - - /** - * The volume that we're scanning. - */ - private final HddsVolume volume; - private final ContainerController controller; - private final DataTransferThrottler throttler; - private final Canceler canceler; - private final ContainerDataScrubberMetrics metrics; - private final long dataScanInterval; - - /** - * True if the thread is stopping.

- * Protected by this object's lock. - */ - private volatile boolean stopping = false; - - - public ContainerDataScanner(ContainerScrubberConfiguration conf, - ContainerController controller, - HddsVolume volume) { - this.controller = controller; - this.volume = volume; - dataScanInterval = conf.getDataScanInterval(); - throttler = new HddsDataTransferThrottler(conf.getBandwidthPerVolume()); - canceler = new Canceler(); - metrics = ContainerDataScrubberMetrics.create(volume.toString()); - setName("ContainerDataScanner(" + volume + ")"); - setDaemon(true); - } - - @Override - public void run() { - if (LOG.isTraceEnabled()) { - LOG.trace("{}: thread starting.", this); - } - try { - while (!stopping) { - runIteration(); - metrics.resetNumContainersScanned(); - metrics.resetNumUnhealthyContainers(); - } - LOG.info("{} exiting.", this); - } catch (Throwable e) { - LOG.error("{} exiting because of exception ", this, e); - } finally { - if (metrics != null) { - metrics.unregister(); - } - } - } - - @VisibleForTesting - public void runIteration() { - long startTime = System.nanoTime(); - Iterator> itr = controller.getContainers(volume); - while (!stopping && itr.hasNext()) { - Container c = itr.next(); - if (c.shouldScanData()) { - try { - if (!c.scanData(throttler, canceler)) { - metrics.incNumUnHealthyContainers(); - controller.markContainerUnhealthy( - c.getContainerData().getContainerID()); - } - } catch (IOException ex) { - long containerId = c.getContainerData().getContainerID(); - LOG.warn("Unexpected exception while scanning container " - + containerId, ex); - } finally { - metrics.incNumContainersScanned(); - } - } - } - long totalDuration = System.nanoTime() - startTime; - if (!stopping) { - if (metrics.getNumContainersScanned() > 0) { - metrics.incNumScanIterations(); - LOG.info("Completed an iteration of container data scrubber in" + - " {} minutes." 
+ - " Number of iterations (since the data-node restart) : {}" + - ", Number of containers scanned in this iteration : {}" + - ", Number of unhealthy containers found in this iteration : {}", - TimeUnit.NANOSECONDS.toMinutes(totalDuration), - metrics.getNumScanIterations(), - metrics.getNumContainersScanned(), - metrics.getNumUnHealthyContainers()); - } - long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(totalDuration); - long remainingSleep = dataScanInterval - elapsedMillis; - if (remainingSleep > 0) { - try { - Thread.sleep(remainingSleep); - } catch (InterruptedException ignored) { - } - } - } - } - - public synchronized void shutdown() { - this.stopping = true; - this.canceler.cancel("ContainerDataScanner("+volume+") is shutting down"); - this.interrupt(); - try { - this.join(); - } catch (InterruptedException ex) { - LOG.warn("Unexpected exception while stopping data scanner for volume " - + volume, ex); - } - } - - @VisibleForTesting - public ContainerDataScrubberMetrics getMetrics() { - return metrics; - } - - @Override - public String toString() { - return "ContainerDataScanner(" + volume + - ", " + volume.getStorageID() + ")"; - } - - private class HddsDataTransferThrottler extends DataTransferThrottler { - HddsDataTransferThrottler(long bandwidthPerSec) { - super(bandwidthPerSec); - } - - @Override - public synchronized void throttle(long numOfBytes) { - ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes); - super.throttle(numOfBytes); - } - - @Override - public synchronized void throttle(long numOfBytes, Canceler c) { - ContainerDataScanner.this.metrics.incNumBytesScanned(numOfBytes); - super.throttle(numOfBytes, c); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java deleted file mode 100644 index 3cf4f588322..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterInt; -import org.apache.hadoop.metrics2.lib.MutableGaugeInt; -import org.apache.hadoop.metrics2.lib.MutableRate; - -import java.util.concurrent.ThreadLocalRandom; - -/** - * This class captures the container data scrubber metrics on the data-node. - **/ -@InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") -public final class ContainerDataScrubberMetrics { - private final String name; - private final MetricsSystem ms; - @Metric("number of containers scanned in the current iteration") - private MutableGaugeInt numContainersScanned; - @Metric("number of unhealthy containers found in the current iteration") - private MutableGaugeInt numUnHealthyContainers; - @Metric("number of iterations of scanner completed since the restart") - private MutableCounterInt numScanIterations; - @Metric("disk bandwidth used by the container data scrubber per volume") - private MutableRate numBytesScanned; - - public int getNumContainersScanned() { - return numContainersScanned.value(); - } - - public void incNumContainersScanned() { - numContainersScanned.incr(); - } - - public void resetNumContainersScanned() { - numContainersScanned.decr(getNumContainersScanned()); - } - - public int getNumUnHealthyContainers() { - return numUnHealthyContainers.value(); - } - - public void incNumUnHealthyContainers() { - numUnHealthyContainers.incr(); - } - - public void resetNumUnhealthyContainers() { - numUnHealthyContainers.decr(getNumUnHealthyContainers()); - } - - public int getNumScanIterations() { - return numScanIterations.value(); - } - - public void incNumScanIterations() { - numScanIterations.incr(); - } - - public double getNumBytesScannedMean() { - return numBytesScanned.lastStat().mean(); - } - - public long getNumBytesScannedSampleCount() { - return numBytesScanned.lastStat().numSamples(); - } - - public double getNumBytesScannedStdDev() { - return numBytesScanned.lastStat().stddev(); - } - - public void incNumBytesScanned(long bytes) { - numBytesScanned.add(bytes); - } - - public void unregister() { - ms.unregisterSource(name); - } - - private ContainerDataScrubberMetrics(String name, MetricsSystem ms) { - this.name = name; - this.ms = ms; - } - - public static ContainerDataScrubberMetrics create(final String volumeName) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty() - ? 
"UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt() - : volumeName.replace(':', '-')); - - return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms)); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java deleted file mode 100644 index 46aaf73a12d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Iterator; -import java.util.concurrent.TimeUnit; - -/** - * This class is responsible to perform metadata verification of the - * containers. - */ -public class ContainerMetadataScanner extends Thread { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerMetadataScanner.class); - - private final ContainerController controller; - private final long metadataScanInterval; - private final ContainerMetadataScrubberMetrics metrics; - /** - * True if the thread is stopping.

- * Protected by this object's lock. - */ - private boolean stopping = false; - - public ContainerMetadataScanner(ContainerScrubberConfiguration conf, - ContainerController controller) { - this.controller = controller; - this.metadataScanInterval = conf.getMetadataScanInterval(); - this.metrics = ContainerMetadataScrubberMetrics.create(); - setName("ContainerMetadataScanner"); - setDaemon(true); - } - - @Override - public void run() { - /* - * the outer daemon loop exits on shutdown() - */ - LOG.info("Background ContainerMetadataScanner starting up"); - while (!stopping) { - runIteration(); - if(!stopping) { - metrics.resetNumUnhealthyContainers(); - metrics.resetNumContainersScanned(); - } - } - } - - @VisibleForTesting - void runIteration() { - long start = System.nanoTime(); - Iterator> containerIt = controller.getContainers(); - while (!stopping && containerIt.hasNext()) { - Container container = containerIt.next(); - try { - scrub(container); - } catch (IOException e) { - LOG.info("Unexpected error while scrubbing container {}", - container.getContainerData().getContainerID()); - } finally { - metrics.incNumContainersScanned(); - } - } - long interval = System.nanoTime()-start; - if (!stopping) { - metrics.incNumScanIterations(); - LOG.info("Completed an iteration of container metadata scrubber in" + - " {} minutes." + - " Number of iterations (since the data-node restart) : {}" + - ", Number of containers scanned in this iteration : {}" + - ", Number of unhealthy containers found in this iteration : {}", - TimeUnit.NANOSECONDS.toMinutes(interval), - metrics.getNumScanIterations(), - metrics.getNumContainersScanned(), - metrics.getNumUnHealthyContainers()); - // ensure to delay next metadata scan with respect to user config. - if (interval < metadataScanInterval) { - try { - Thread.sleep(metadataScanInterval - interval); - } catch (InterruptedException e) { - LOG.info("Background ContainerMetadataScanner interrupted." + - " Going to exit"); - } - } - } - } - - @VisibleForTesting - public void scrub(Container container) throws IOException { - if (!container.scanMetaData()) { - metrics.incNumUnHealthyContainers(); - controller.markContainerUnhealthy( - container.getContainerData().getContainerID()); - } - } - - @VisibleForTesting - public ContainerMetadataScrubberMetrics getMetrics() { - return metrics; - } - - public synchronized void shutdown() { - this.stopping = true; - this.interrupt(); - try { - this.join(); - } catch (InterruptedException ex) { - LOG.warn("Unexpected exception while stopping metadata scanner.", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java deleted file mode 100644 index 3effc351b00..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterInt; -import org.apache.hadoop.metrics2.lib.MutableGaugeInt; - -/** - * This class captures the container meta-data scrubber metrics on the - * data-node. - **/ -@InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") -public final class ContainerMetadataScrubberMetrics { - private final String name; - private final MetricsSystem ms; - @Metric("number of containers scanned in the current iteration") - private MutableGaugeInt numContainersScanned; - @Metric("number of unhealthy containers found in the current iteration") - private MutableGaugeInt numUnHealthyContainers; - @Metric("number of iterations of scanner completed since the restart") - private MutableCounterInt numScanIterations; - - public int getNumContainersScanned() { - return numContainersScanned.value(); - } - - public void incNumContainersScanned() { - numContainersScanned.incr(); - } - - public void resetNumContainersScanned() { - numContainersScanned.decr(getNumContainersScanned()); - } - - public int getNumUnHealthyContainers() { - return numUnHealthyContainers.value(); - } - - public void incNumUnHealthyContainers() { - numUnHealthyContainers.incr(); - } - - public void resetNumUnhealthyContainers() { - numUnHealthyContainers.decr(getNumUnHealthyContainers()); - } - - public int getNumScanIterations() { - return numScanIterations.value(); - } - - public void incNumScanIterations() { - numScanIterations.incr(); - } - - public void unregister() { - ms.unregisterSource(name); - } - - private ContainerMetadataScrubberMetrics(String name, MetricsSystem ms) { - this.name = name; - this.ms = ms; - } - - public static ContainerMetadataScrubberMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerMetadataScrubberMetrics"; - return ms.register(name, null, - new ContainerMetadataScrubberMetrics(name, ms)); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java deleted file mode 100644 index 621da70735d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ /dev/null @@ -1,261 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.util.List; - -/** - * Class used to read .container files from Volume and build container map. - * - * Layout of the container directory on disk is as follows: - * - *

../hdds/VERSION - *

{@literal ../hdds/<>/current/<>/</metadata/<>.container} - *

{@literal ../hdds/<>/current/<>/</<>} - *

- * Some ContainerTypes will have extra metadata other than the .container - * file. For example, KeyValueContainer will have a .db file. This .db file - * will also be stored in the metadata folder along with the .container file. - *

- * {@literal ../hdds/<>/current/<>/</metadata/<>.db} - *

- * Note that the {@literal <>} is dependent on the ContainerType. - * For KeyValueContainers, the data is stored in a "chunks" folder. As such, - * the {@literal <>} layout for KeyValueContainers is: - *

{@literal ../hdds/<>/current/<>/</chunks/<>} - * - */ -public class ContainerReader implements Runnable { - - private static final Logger LOG = LoggerFactory.getLogger( - ContainerReader.class); - private HddsVolume hddsVolume; - private final ContainerSet containerSet; - private final OzoneConfiguration config; - private final File hddsVolumeDir; - private final VolumeSet volumeSet; - - ContainerReader(VolumeSet volSet, HddsVolume volume, ContainerSet cset, - OzoneConfiguration conf) { - Preconditions.checkNotNull(volume); - this.hddsVolume = volume; - this.hddsVolumeDir = hddsVolume.getHddsRootDir(); - this.containerSet = cset; - this.config = conf; - this.volumeSet = volSet; - } - - @Override - public void run() { - try { - readVolume(hddsVolumeDir); - } catch (RuntimeException ex) { - LOG.error("Caught a Run time exception during reading container files" + - " from Volume {} {}", hddsVolumeDir, ex); - } - } - - public void readVolume(File hddsVolumeRootDir) { - Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir" + - "cannot be null"); - - //filtering scm directory - File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() { - @Override - public boolean accept(File pathname) { - return pathname.isDirectory(); - } - }); - - if (scmDir == null) { - LOG.error("IO error for the volume {}, skipped loading", - hddsVolumeRootDir); - volumeSet.failVolume(hddsVolumeRootDir.getPath()); - return; - } - - if (scmDir.length > 1) { - LOG.error("Volume {} is in Inconsistent state", hddsVolumeRootDir); - volumeSet.failVolume(hddsVolumeRootDir.getPath()); - return; - } - - for (File scmLoc : scmDir) { - File currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT); - File[] containerTopDirs = currentDir.listFiles(); - if (containerTopDirs != null) { - for (File containerTopDir : containerTopDirs) { - if (containerTopDir.isDirectory()) { - File[] containerDirs = containerTopDir.listFiles(); - if (containerDirs != null) { - for (File containerDir : containerDirs) { - File containerFile = ContainerUtils.getContainerFile( - containerDir); - long containerID = ContainerUtils.getContainerID(containerDir); - if (containerFile.exists()) { - verifyContainerFile(containerID, containerFile); - } else { - LOG.error("Missing .container file for ContainerID: {}", - containerDir.getName()); - } - } - } - } - } - } - } - } - - private void verifyContainerFile(long containerID, File containerFile) { - try { - ContainerData containerData = ContainerDataYaml.readContainerFile( - containerFile); - if (containerID != containerData.getContainerID()) { - LOG.error("Invalid ContainerID in file {}. " + - "Skipping loading of this container.", containerFile); - return; - } - verifyAndFixupContainerData(containerData); - } catch (IOException ex) { - LOG.error("Failed to parse ContainerFile for ContainerID: {}", - containerID, ex); - } - } - - /** - * verify ContainerData loaded from disk and fix-up stale members. 
- * Specifically blockCommitSequenceId, delete related metadata - * and bytesUsed - * @param containerData - * @throws IOException - */ - public void verifyAndFixupContainerData(ContainerData containerData) - throws IOException { - switch (containerData.getContainerType()) { - case KeyValueContainer: - if (containerData instanceof KeyValueContainerData) { - KeyValueContainerData kvContainerData = (KeyValueContainerData) - containerData; - containerData.setVolume(hddsVolume); - - KeyValueContainerUtil.parseKVContainerData(kvContainerData, config); - KeyValueContainer kvContainer = new KeyValueContainer( - kvContainerData, config); - try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData, - config)) { - MetadataKeyFilters.KeyPrefixFilter filter = - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX); - int numPendingDeletionBlocks = - containerDB.getStore().getSequentialRangeKVs(null, - Integer.MAX_VALUE, filter) - .size(); - kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks); - byte[] delTxnId = containerDB.getStore().get( - DFSUtil.string2Bytes(OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX)); - if (delTxnId != null) { - kvContainerData - .updateDeleteTransactionId(Longs.fromByteArray(delTxnId)); - } - // sets the BlockCommitSequenceId. - byte[] bcsId = containerDB.getStore().get(DFSUtil.string2Bytes( - OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX)); - if (bcsId != null) { - kvContainerData - .updateBlockCommitSequenceId(Longs.fromByteArray(bcsId)); - } - if (kvContainer.getContainerState() - == ContainerProtos.ContainerDataProto.State.OPEN) { - // commitSpace for Open Containers relies on usedBytes - initializeUsedBytes(kvContainer); - } - containerSet.addContainer(kvContainer); - } - } else { - throw new StorageContainerException("Container File is corrupted. " + - "ContainerType is KeyValueContainer but cast to " + - "KeyValueContainerData failed. ", - ContainerProtos.Result.CONTAINER_METADATA_ERROR); - } - break; - default: - throw new StorageContainerException("Unrecognized ContainerType " + - containerData.getContainerType(), - ContainerProtos.Result.UNKNOWN_CONTAINER_TYPE); - } - } - - private void initializeUsedBytes(KeyValueContainer container) - throws IOException { - try (KeyValueBlockIterator blockIter = new KeyValueBlockIterator( - container.getContainerData().getContainerID(), - new File(container.getContainerData().getContainerPath()))) { - long usedBytes = 0; - - while (blockIter.hasNext()) { - BlockData block = blockIter.nextBlock(); - long blockLen = 0; - - List chunkInfoList = block.getChunks(); - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); - } - - usedBytes += blockLen; - } - - container.getContainerData().setBytesUsed(usedBytes); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java deleted file mode 100644 index 454ce84310a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; - -/** - * This class defines configuration parameters for container scrubber. - **/ -@ConfigGroup(prefix = "hdds.containerscrub") -public class ContainerScrubberConfiguration { - private boolean enabled; - private long metadataScanInterval; - private long dataScanInterval; - private long bandwidthPerVolume; - - @Config(key = "enabled", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to enable container scrubber.") - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public boolean isEnabled() { - return enabled; - } - - @Config(key = "metadata.scan.interval", - type = ConfigType.TIME, - defaultValue = "3h", - tags = {ConfigTag.STORAGE}, - description = "Config parameter define time interval in milliseconds" + - " between two metadata scans by container scrubber.") - public void setMetadataScanInterval(long metadataScanInterval) { - this.metadataScanInterval = metadataScanInterval; - } - - public long getMetadataScanInterval() { - return metadataScanInterval; - } - - @Config(key = "data.scan.interval", - type = ConfigType.TIME, - defaultValue = "1m", - tags = { ConfigTag.STORAGE }, - description = "Minimum time interval between two iterations of container" - + " data scanning. If an iteration takes less time than this, the" - + " scanner will wait before starting the next iteration." - ) - public void setDataScanInterval(long dataScanInterval) { - this.dataScanInterval = dataScanInterval; - } - - public long getDataScanInterval() { - return dataScanInterval; - } - - @Config(key = "volume.bytes.per.second", - type = ConfigType.LONG, - defaultValue = "1048576", - tags = {ConfigTag.STORAGE}, - description = "Config parameter to throttle I/O bandwidth used" - + " by scrubber per volume.") - public void setBandwidthPerVolume(long bandwidthPerVolume) { - this.bandwidthPerVolume = bandwidthPerVolume; - } - - public long getBandwidthPerVolume() { - return bandwidthPerVolume; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java deleted file mode 100644 index a026f0e8757..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; - -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; -import org.apache.hadoop.ozone.container.replication.GrpcReplicationService; -import org.apache.hadoop.ozone.container.replication - .OnDemandContainerReplicationSource; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.*; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.*; - -/** - * Ozone main class sets up the network servers and initializes the container - * layer. - */ -public class OzoneContainer { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneContainer.class); - - private final HddsDispatcher hddsDispatcher; - private final Map handlers; - private final OzoneConfiguration config; - private final VolumeSet volumeSet; - private final ContainerSet containerSet; - private final XceiverServerSpi writeChannel; - private final XceiverServerSpi readChannel; - private final ContainerController controller; - private ContainerMetadataScanner metadataScanner; - private List dataScanners; - private final BlockDeletingService blockDeletingService; - - /** - * Construct OzoneContainer object. 
- * @param datanodeDetails - * @param conf - * @param certClient - * @throws DiskOutOfSpaceException - * @throws IOException - */ - public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration - conf, StateContext context, CertificateClient certClient) - throws IOException { - this.config = conf; - this.volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - this.containerSet = new ContainerSet(); - this.metadataScanner = null; - - buildContainerSet(); - final ContainerMetrics metrics = ContainerMetrics.create(conf); - this.handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType( - containerType, conf, context, containerSet, volumeSet, metrics)); - } - this.hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet, - handlers, context, metrics); - - /* - * ContainerController is the control plane - * XceiverServerRatis is the write channel - * XceiverServerGrpc is the read channel - */ - this.controller = new ContainerController(containerSet, handlers); - this.writeChannel = XceiverServerRatis.newXceiverServerRatis( - datanodeDetails, config, hddsDispatcher, controller, certClient, - context); - this.readChannel = new XceiverServerGrpc( - datanodeDetails, config, hddsDispatcher, certClient, - createReplicationService()); - long svcInterval = config - .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = config - .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - this.blockDeletingService = - new BlockDeletingService(this, svcInterval, serviceTimeout, - TimeUnit.MILLISECONDS, config); - } - - private GrpcReplicationService createReplicationService() { - return new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller)); - } - - /** - * Build's container map. - */ - private void buildContainerSet() { - Iterator volumeSetIterator = volumeSet.getVolumesList() - .iterator(); - ArrayList volumeThreads = new ArrayList(); - - //TODO: diskchecker should be run before this, to see how disks are. - // And also handle disk failure tolerance need to be added - while (volumeSetIterator.hasNext()) { - HddsVolume volume = volumeSetIterator.next(); - Thread thread = new Thread(new ContainerReader(volumeSet, volume, - containerSet, config)); - thread.start(); - volumeThreads.add(thread); - } - - try { - for (int i = 0; i < volumeThreads.size(); i++) { - volumeThreads.get(i).join(); - } - } catch (InterruptedException ex) { - LOG.info("Volume Threads Interrupted exception", ex); - } - - } - - - /** - * Start background daemon thread for performing container integrity checks. - */ - private void startContainerScrub() { - ContainerScrubberConfiguration c = config.getObject( - ContainerScrubberConfiguration.class); - boolean enabled = c.isEnabled(); - - if (!enabled) { - LOG.info("Background container scanner has been disabled."); - } else { - if (this.metadataScanner == null) { - this.metadataScanner = new ContainerMetadataScanner(c, controller); - } - this.metadataScanner.start(); - - dataScanners = new ArrayList<>(); - for (HddsVolume v : volumeSet.getVolumesList()) { - ContainerDataScanner s = new ContainerDataScanner(c, controller, v); - s.start(); - dataScanners.add(s); - } - } - } - - /** - * Stop the scanner thread and wait for thread to die. 
- */ - private void stopContainerScrub() { - if (metadataScanner == null) { - return; - } - metadataScanner.shutdown(); - metadataScanner = null; - for (ContainerDataScanner s : dataScanners) { - s.shutdown(); - } - } - - /** - * Starts serving requests to ozone container. - * - * @throws IOException - */ - public void start(String scmId) throws IOException { - LOG.info("Attempting to start container services."); - startContainerScrub(); - writeChannel.start(); - readChannel.start(); - hddsDispatcher.init(); - hddsDispatcher.setScmId(scmId); - blockDeletingService.start(); - } - - /** - * Stop Container Service on the datanode. - */ - public void stop() { - //TODO: at end of container IO integration work. - LOG.info("Attempting to stop container services."); - stopContainerScrub(); - writeChannel.stop(); - readChannel.stop(); - this.handlers.values().forEach(Handler::stop); - hddsDispatcher.shutdown(); - volumeSet.shutdown(); - blockDeletingService.shutdown(); - ContainerMetrics.remove(); - } - - - @VisibleForTesting - public ContainerSet getContainerSet() { - return containerSet; - } - /** - * Returns container report. - * @return - container report. - */ - - public PipelineReportsProto getPipelineReport() { - PipelineReportsProto.Builder pipelineReportsProto = - PipelineReportsProto.newBuilder(); - pipelineReportsProto.addAllPipelineReport(writeChannel.getPipelineReport()); - return pipelineReportsProto.build(); - } - - public XceiverServerSpi getWriteChannel() { - return writeChannel; - } - - public XceiverServerSpi getReadChannel() { - return readChannel; - } - - public ContainerController getController() { - return controller; - } - - /** - * Returns node report of container storage usage. - */ - public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport() - throws IOException { - return volumeSet.getNodeReport(); - } - - @VisibleForTesting - public ContainerDispatcher getDispatcher() { - return this.hddsDispatcher; - } - - public VolumeSet getVolumeSet() { - return volumeSet; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java deleted file mode 100644 index c99c038b244..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.ozoneimpl; -/** - Ozone main that calls into the container layer -**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java deleted file mode 100644 index 9511241fb5f..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerDownloader.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.Closeable; -import java.nio.file.Path; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -/** - * Service to download container data from other datanodes. - *

- * The implementation of this interface should copy the raw container data in - * compressed form to the working directory. - *

- * A smart implementation would use multiple sources to do parallel download. - */ -public interface ContainerDownloader extends Closeable { - - CompletableFuture<Path> getContainerDataFromReplicas(long containerId, - List<DatanodeDetails> sources); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java deleted file mode 100644 index 69582f799f8..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicationSource.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * Contract to prepare and provide the container in binary form. - *

- * Prepare will be called when the container is closed. An implementation could - * precache any binary representation of a container and store the pre-packed - * images. - */ -public interface ContainerReplicationSource { - - /** - * Prepare for the replication. - * - * @param containerId The ID of the container to package. - */ - void prepare(long containerId); - - /** - * Copy the container data to an output stream. - * - * @param containerId Container to replicate - * @param destination The destination stream to copy all the container data. - * @throws IOException - */ - void copyData(long containerId, OutputStream destination) - throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java deleted file mode 100644 index 827b9d69e8b..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -/** - * Service to do the real replication task. - * - * An implementation should download the container and import it to the container set. - */ -public interface ContainerReplicator { - void replicate(ReplicationTask task); -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java deleted file mode 100644 index f7fd8a4957d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerStreamingOutput.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.StreamingOutput; -import java.io.IOException; -import java.io.OutputStream; - -/** - * JAX-RS streaming output to return the binary container data. - */ -public class ContainerStreamingOutput implements StreamingOutput { - - private long containerId; - - private ContainerReplicationSource containerReplicationSource; - - public ContainerStreamingOutput(long containerId, - ContainerReplicationSource containerReplicationSource) { - this.containerId = containerId; - this.containerReplicationSource = containerReplicationSource; - } - - @Override - public void write(OutputStream outputStream) - throws IOException, WebApplicationException { - containerReplicationSource.copyData(containerId, outputStream); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java deleted file mode 100644 index eef01a13f2f..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.FileInputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.concurrent.CompletableFuture; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Default replication implementation. - *

- * This class does the real job. Executes the download and import the container - * to the container set. - */ -public class DownloadAndImportReplicator implements ContainerReplicator { - - private static final Logger LOG = - LoggerFactory.getLogger(DownloadAndImportReplicator.class); - - private final ContainerSet containerSet; - - private final ContainerController controller; - - private final ContainerDownloader downloader; - - private final TarContainerPacker packer; - - public DownloadAndImportReplicator( - ContainerSet containerSet, - ContainerController controller, - ContainerDownloader downloader, - TarContainerPacker packer) { - this.containerSet = containerSet; - this.controller = controller; - this.downloader = downloader; - this.packer = packer; - } - - public void importContainer(long containerID, Path tarFilePath) { - try { - ContainerData originalContainerData; - try (FileInputStream tempContainerTarStream = new FileInputStream( - tarFilePath.toFile())) { - byte[] containerDescriptorYaml = - packer.unpackContainerDescriptor(tempContainerTarStream); - originalContainerData = ContainerDataYaml.readContainer( - containerDescriptorYaml); - } - - try (FileInputStream tempContainerTarStream = new FileInputStream( - tarFilePath.toFile())) { - - Container container = controller.importContainer( - originalContainerData.getContainerType(), - containerID, - originalContainerData.getMaxSize(), - originalContainerData.getOriginPipelineId(), - originalContainerData.getOriginNodeId(), - tempContainerTarStream, - packer); - - containerSet.addContainer(container); - } - - } catch (Exception e) { - LOG.error( - "Can't import the downloaded container data id=" + containerID, - e); - } finally { - try { - Files.delete(tarFilePath); - } catch (Exception ex) { - LOG.error("Got exception while deleting downloaded container file: " - + tarFilePath.toAbsolutePath().toString(), ex); - } - } - } - - @Override - public void replicate(ReplicationTask task) { - long containerID = task.getContainerId(); - - List sourceDatanodes = task.getSources(); - - LOG.info("Starting replication of container {} from {}", containerID, - sourceDatanodes); - - CompletableFuture tempTarFile = downloader - .getContainerDataFromReplicas(containerID, - sourceDatanodes); - - try { - //wait for the download. This thread pool is limiting the paralell - //downloads, so it's ok to block here and wait for the full download. - Path path = tempTarFile.get(); - LOG.info("Container {} is downloaded, starting to import.", - containerID); - importContainer(containerID, path); - LOG.info("Container {} is replicated successfully", containerID); - task.setStatus(Status.DONE); - } catch (Exception e) { - LOG.error("Container replication was unsuccessful .", e); - task.setStatus(Status.FAILED); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java deleted file mode 100644 index 8494a152744..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.io.BufferedOutputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; -import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Client to read container data from Grpc. - */ -public class GrpcReplicationClient { - - private static final Logger LOG = - LoggerFactory.getLogger(GrpcReplicationClient.class); - - private final ManagedChannel channel; - - private final IntraDatanodeProtocolServiceStub client; - - private final Path workingDirectory; - - public GrpcReplicationClient(String host, - int port, Path workingDir) { - - channel = NettyChannelBuilder.forAddress(host, port) - .usePlaintext() - .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .build(); - client = IntraDatanodeProtocolServiceGrpc.newStub(channel); - this.workingDirectory = workingDir; - - } - - public CompletableFuture download(long containerId) { - CopyContainerRequestProto request = - CopyContainerRequestProto.newBuilder() - .setContainerID(containerId) - .setLen(-1) - .setReadOffset(0) - .build(); - - CompletableFuture response = new CompletableFuture<>(); - - Path destinationPath = - getWorkingDirectory().resolve("container-" + containerId + ".tar.gz"); - - client.download(request, - new StreamDownloader(containerId, response, destinationPath)); - return response; - } - - private Path getWorkingDirectory() { - return workingDirectory; - } - - public void shutdown() { - channel.shutdown(); - try { - channel.awaitTermination(5, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("failed to shutdown replication channel", e); - } - } - - /** - * Grpc stream observer to ComletableFuture adapter. 
- */ - public static class StreamDownloader - implements StreamObserver { - - private final CompletableFuture response; - - private final long containerId; - - private BufferedOutputStream stream; - - private Path outputPath; - - public StreamDownloader(long containerId, CompletableFuture response, - Path outputPath) { - this.response = response; - this.containerId = containerId; - this.outputPath = outputPath; - try { - Preconditions.checkNotNull(outputPath, "Output path cannot be null"); - Path parentPath = Preconditions.checkNotNull(outputPath.getParent()); - Files.createDirectories(parentPath); - stream = - new BufferedOutputStream(new FileOutputStream(outputPath.toFile())); - } catch (IOException e) { - throw new RuntimeException("OutputPath can't be used: " + outputPath, - e); - } - - } - - @Override - public void onNext(CopyContainerResponseProto chunk) { - try { - stream.write(chunk.getData().toByteArray()); - } catch (IOException e) { - response.completeExceptionally(e); - } - } - - @Override - public void onError(Throwable throwable) { - try { - stream.close(); - LOG.error("Container download was unsuccessfull", throwable); - try { - Files.delete(outputPath); - } catch (IOException ex) { - LOG.error( - "Error happened during the download but can't delete the " - + "temporary destination.", ex); - } - response.completeExceptionally(throwable); - } catch (IOException e) { - response.completeExceptionally(e); - } - } - - @Override - public void onCompleted() { - try { - stream.close(); - LOG.info("Container is downloaded to {}", outputPath); - response.complete(outputPath); - } catch (IOException e) { - response.completeExceptionally(e); - } - - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java deleted file mode 100644 index 7919e549531..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.io.ByteArrayOutputStream; -import java.io.Closeable; -import java.io.IOException; -import java.io.OutputStream; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .CopyContainerResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto - .IntraDatanodeProtocolServiceGrpc; - -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Service to make containers available for replication. - */ -public class GrpcReplicationService extends - IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceImplBase { - - private static final Logger LOG = - LoggerFactory.getLogger(GrpcReplicationService.class); - - private final ContainerReplicationSource containerReplicationSource; - - public GrpcReplicationService( - ContainerReplicationSource containerReplicationSource) { - this.containerReplicationSource = containerReplicationSource; - } - - @Override - public void download(CopyContainerRequestProto request, - StreamObserver responseObserver) { - LOG.info("Streaming container data ({}) to other datanode", - request.getContainerID()); - try { - GrpcOutputStream outputStream = - new GrpcOutputStream(responseObserver, request.getContainerID()); - containerReplicationSource - .copyData(request.getContainerID(), outputStream); - } catch (IOException e) { - LOG.error("Can't stream the container data", e); - responseObserver.onError(e); - } - } - - private static class GrpcOutputStream extends OutputStream - implements Closeable { - - private static final int BUFFER_SIZE_IN_BYTES = 1024 * 1024; - - private final StreamObserver responseObserver; - - private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - - private long containerId; - - private int readOffset = 0; - - private int writtenBytes; - - GrpcOutputStream( - StreamObserver responseObserver, - long containerId) { - this.responseObserver = responseObserver; - this.containerId = containerId; - } - - @Override - public void write(int b) throws IOException { - try { - buffer.write(b); - if (buffer.size() > BUFFER_SIZE_IN_BYTES) { - flushBuffer(false); - } - } catch (Exception ex) { - responseObserver.onError(ex); - } - } - - private void flushBuffer(boolean eof) { - if (buffer.size() > 0) { - CopyContainerResponseProto response = - CopyContainerResponseProto.newBuilder() - .setContainerID(containerId) - .setData(ByteString.copyFrom(buffer.toByteArray())) - .setEof(eof) - .setReadOffset(readOffset) - .setLen(buffer.size()) - .build(); - responseObserver.onNext(response); - readOffset += buffer.size(); - writtenBytes += buffer.size(); - buffer.reset(); - } - } - - @Override - public void close() throws IOException { - flushBuffer(true); - LOG.info("{} bytes written to the rpc stream from container {}", - writtenBytes, containerId); - responseObserver.onCompleted(); - } - } -} diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java deleted file mode 100644 index d318ffa257f..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/OnDemandContainerReplicationSource.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.io.IOException; -import java.io.OutputStream; - -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A naive implementation of the replication source which creates a tar file - * on-demand without pre-create the compressed archives. - */ -public class OnDemandContainerReplicationSource - implements ContainerReplicationSource { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerReplicationSource.class); - - private ContainerController controller; - - private TarContainerPacker packer = new TarContainerPacker(); - - public OnDemandContainerReplicationSource( - ContainerController controller) { - this.controller = controller; - } - - @Override - public void prepare(long containerId) { - - } - - @Override - public void copyData(long containerId, OutputStream destination) - throws IOException { - - Container container = controller.getContainer(containerId); - - Preconditions.checkNotNull( - container, "Container is not found " + containerId); - - controller.exportContainer( - container.getContainerType(), containerId, destination, packer); - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java deleted file mode 100644 index 7a07c4df71e..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentHashMap.KeySetView; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Single point to schedule the downloading tasks based on priorities. - */ -public class ReplicationSupervisor { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationSupervisor.class); - - private final ContainerSet containerSet; - private final ContainerReplicator replicator; - private final ThreadPoolExecutor executor; - private final AtomicLong replicationCounter; - - /** - * A set of container IDs that are currently being downloaded - * or queued for download. Tracked so we don't schedule > 1 - * concurrent download for the same container. - */ - private final KeySetView containersInFlight; - - public ReplicationSupervisor( - ContainerSet containerSet, - ContainerReplicator replicator, int poolSize) { - this.containerSet = containerSet; - this.replicator = replicator; - this.containersInFlight = ConcurrentHashMap.newKeySet(); - replicationCounter = new AtomicLong(); - this.executor = new ThreadPoolExecutor( - 0, poolSize, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("ContainerReplicationThread-%d") - .build()); - } - - /** - * Queue an asynchronous download of the given container. - */ - public void addTask(ReplicationTask task) { - if (containersInFlight.add(task.getContainerId())) { - executor.submit(new TaskRunner(task)); - } - } - - public void stop() { - try { - executor.shutdown(); - if (!executor.awaitTermination(3, TimeUnit.SECONDS)) { - executor.shutdownNow(); - } - } catch (InterruptedException ie) { - // Ignore, we don't really care about the failure. - Thread.currentThread().interrupt(); - } - } - - /** - * Get the number of containers currently being downloaded - * or scheduled for download. - * @return Count of in-flight replications. 
- */ - @VisibleForTesting - public int getInFlightReplications() { - return containersInFlight.size(); - } - - private final class TaskRunner implements Runnable { - private final ReplicationTask task; - - private TaskRunner(ReplicationTask task) { - this.task = task; - } - - @Override - public void run() { - try { - if (containerSet.getContainer(task.getContainerId()) != null) { - LOG.debug("Container {} has already been downloaded.", - task.getContainerId()); - return; - } - - task.setStatus(Status.DOWNLOADING); - replicator.replicate(task); - - if (task.getStatus() == Status.FAILED) { - LOG.error( - "Container {} can't be downloaded from any of the datanodes.", - task.getContainerId()); - } else if (task.getStatus() == Status.DONE) { - LOG.info("Container {} is replicated.", task.getContainerId()); - } - } finally { - containersInFlight.remove(task.getContainerId()); - replicationCounter.incrementAndGet(); - } - } - } - - public long getReplicationCounter() { - return replicationCounter.get(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java deleted file mode 100644 index 90198110b59..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; - -import java.time.Instant; -import java.util.List; -import java.util.Objects; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -/** - * The task to download a container from the sources. - */ -public class ReplicationTask { - - private volatile Status status = Status.QUEUED; - - private final long containerId; - - private List sources; - - private final Instant queued = Instant.now(); - - public ReplicationTask(long containerId, - List sources) { - this.containerId = containerId; - this.sources = sources; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationTask that = (ReplicationTask) o; - return containerId == that.containerId; - } - - @Override - public int hashCode() { - return Objects.hash(containerId); - } - - public long getContainerId() { - return containerId; - } - - public List getSources() { - return sources; - } - - public Status getStatus() { - return status; - } - - public void setStatus( - Status status) { - this.status = status; - } - - @Override - public String toString() { - return "ReplicationTask{" + - "status=" + status + - ", containerId=" + containerId + - ", sources=" + sources + - ", queued=" + queued + - '}'; - } - - public Instant getQueued() { - return queued; - } - - /** - * Status of the replication. - */ - public enum Status { - QUEUED, - DOWNLOADING, - FAILED, - DONE - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java deleted file mode 100644 index 37a44acf74c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.replication; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.function.Function; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; -import org.apache.hadoop.ozone.OzoneConfigKeys; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Simple ContainerDownloaderImplementation to download the missing container - * from the first available datanode. - *

- * This is not the most effective implementation as it uses only one source - * for he container download. - */ -public class SimpleContainerDownloader implements ContainerDownloader { - - private static final Logger LOG = - LoggerFactory.getLogger(SimpleContainerDownloader.class); - - private final Path workingDirectory; - - public SimpleContainerDownloader(Configuration conf) { - - String workDirString = - conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR); - - if (workDirString == null) { - workingDirectory = Paths.get(System.getProperty("java.io.tmpdir")) - .resolve("container-copy"); - } else { - workingDirectory = Paths.get(workDirString); - } - } - - @Override - public CompletableFuture getContainerDataFromReplicas(long containerId, - List sourceDatanodes) { - - CompletableFuture result = null; - for (DatanodeDetails datanode : sourceDatanodes) { - try { - - if (result == null) { - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - result = grpcReplicationClient.download(containerId); - } else { - result = result.thenApply(CompletableFuture::completedFuture) - .exceptionally(t -> { - LOG.error("Error on replicating container: " + containerId, t); - GrpcReplicationClient grpcReplicationClient = - new GrpcReplicationClient(datanode.getIpAddress(), - datanode.getPort(Name.STANDALONE).getValue(), - workingDirectory); - return grpcReplicationClient.download(containerId); - }).thenCompose(Function.identity()); - } - } catch (Exception ex) { - LOG.error(String.format( - "Container %s download from datanode %s was unsuccessful. " - + "Trying the next datanode", containerId, datanode), ex); - } - - } - return result; - - } - - @Override - public void close() { - // noop - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 38a853c72a0..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.replication; -/** - Classes to replicate container data between datanodes. 
-**/ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 1a510128398..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -/** - * Generic ozone specific classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java deleted file mode 100644 index 61bdb27f4cd..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; - -import java.io.IOException; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.security.KerberosInfo; - -/** - * The protocol spoken between datanodes and SCM. For specifics please the - * Protoc file that defines this protocol. - */ -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public interface StorageContainerDatanodeProtocol { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Returns SCM version. - * @return Version info. - */ - SCMVersionResponseProto getVersion(SCMVersionRequestProto versionRequest) - throws IOException; - - /** - * Used by data node to send a Heartbeat. - * @param heartbeat Heartbeat - * @return - SCMHeartbeatResponseProto - * @throws IOException - */ - SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat) - throws IOException; - - /** - * Register Datanode. - * @param datanodeDetails - Datanode Details. - * @param nodeReport - Node Report. - * @param containerReportsRequestProto - Container Reports. - * @return SCM Command. - */ - SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetails, - NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReports) throws IOException; - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java deleted file mode 100644 index b5d75ef01cb..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
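Together these three calls make up the datanode-to-SCM handshake: version negotiation, registration with the initial reports, then periodic heartbeats. A rough client-side sequence, assuming empty reports and that the heartbeat request carries the sender's DatanodeDetailsProto:

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;

final class DatanodeHandshakeSketch {

  static SCMHeartbeatResponseProto handshake(StorageContainerDatanodeProtocol scm,
      DatanodeDetailsProto dn) throws IOException {
    // 1. Version negotiation.
    scm.getVersion(SCMVersionRequestProto.newBuilder().build());

    // 2. Registration with (here empty) node, container and pipeline reports.
    scm.register(dn,
        NodeReportProto.newBuilder().build(),
        ContainerReportsProto.newBuilder().build(),
        PipelineReportsProto.newBuilder().build());

    // 3. Periodic heartbeat; the response carries SCM commands for this datanode.
    return scm.sendHeartbeat(
        SCMHeartbeatRequestProto.newBuilder().setDatanodeDetails(dn).build());
  }
}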
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.util.List; - -/** - * The protocol spoken between datanodes and SCM. - * - * Please note that the full protocol spoken between a datanode and SCM is - * separated into 2 interfaces. One interface that deals with node state and - * another interface that deals with containers. - * - * This interface has functions that deals with the state of datanode. - */ -@InterfaceAudience.Private -public interface StorageContainerNodeProtocol { - /** - * Gets the version info from SCM. - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed - * by datanode. - */ - VersionResponse getVersion(SCMVersionRequestProto versionRequest); - - /** - * Register the node if the node finds that it is not registered with any SCM. - * @param datanodeDetails DatanodeDetails - * @param nodeReport NodeReportProto - * @param pipelineReport PipelineReportsProto - * @return SCMHeartbeatResponseProto - */ - RegisteredCommand register(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport, - PipelineReportsProto pipelineReport); - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * @param datanodeDetails - Datanode ID. - * @return SCMheartbeat response list - */ - List processHeartbeat(DatanodeDetails datanodeDetails); - - /** - * Check if node is registered or not. - * Return true if Node is registered and false otherwise. - * @param datanodeDetails - Datanode ID. - * @return true if Node is registered, false otherwise - */ - Boolean isNodeRegistered(DatanodeDetails datanodeDetails); - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java deleted file mode 100644 index 4d328d3d1e6..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java +++ /dev/null @@ -1,154 +0,0 @@ - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocol; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Version response class. - */ -public class VersionResponse { - private final int version; - private final Map values; - - /** - * Creates a version response class. - * @param version - * @param values - */ - public VersionResponse(int version, Map values) { - this.version = version; - this.values = values; - } - - /** - * Creates a version Response class. - * @param version - */ - public VersionResponse(int version) { - this.version = version; - this.values = new HashMap<>(); - } - - /** - * Returns a new Builder. - * @return - Builder. - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Returns this class from protobuf message. - * @param response - SCMVersionResponseProto - * @return VersionResponse - */ - public static VersionResponse getFromProtobuf(SCMVersionResponseProto - response) { - return new VersionResponse(response.getSoftwareVersion(), - response.getKeysList().stream() - .collect(Collectors.toMap(KeyValue::getKey, - KeyValue::getValue))); - } - - /** - * Adds a value to version Response. - * @param key - String - * @param value - String - */ - public void put(String key, String value) { - if (this.values.containsKey(key)) { - throw new IllegalArgumentException("Duplicate key in version response"); - } - values.put(key, value); - } - - /** - * Return a protobuf message. - * @return SCMVersionResponseProto. - */ - public SCMVersionResponseProto getProtobufMessage() { - - List list = new LinkedList<>(); - for (Map.Entry entry : values.entrySet()) { - list.add(KeyValue.newBuilder().setKey(entry.getKey()). - setValue(entry.getValue()).build()); - } - return - SCMVersionResponseProto.newBuilder() - .setSoftwareVersion(this.version) - .addAllKeys(list).build(); - } - - public String getValue(String key) { - return this.values.get(key); - } - - /** - * Builder class. - */ - public static class Builder { - private int version; - private Map values; - - Builder() { - values = new HashMap<>(); - } - - /** - * Sets the version. - * @param ver - version - * @return Builder - */ - public Builder setVersion(int ver) { - this.version = ver; - return this; - } - - /** - * Adds a value to version Response. - * @param key - String - * @param value - String - */ - public Builder addValue(String key, String value) { - if (this.values.containsKey(key)) { - throw new IllegalArgumentException("Duplicate key in version response"); - } - values.put(key, value); - return this; - } - - /** - * Builds the version response. - * @return VersionResponse. 
- */ - public VersionResponse build() { - return new VersionResponse(this.version, this.values); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java deleted file mode 100644 index ded0464ef4b..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
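Usage of the builder and the protobuf conversion above is straightforward; the key/value pair below is only an illustrative example:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import org.apache.hadoop.ozone.protocol.VersionResponse;

final class VersionResponseUsage {
  static SCMVersionResponseProto example() {
    VersionResponse response = VersionResponse.newBuilder()
        .setVersion(1)
        .addValue("clusterID", "CID-example")  // illustrative key/value; duplicate keys throw
        .build();

    // Individual values stay readable on the SCM side...
    System.out.println(response.getValue("clusterID"));

    // ...and the whole response is shipped to the datanode as protobuf.
    return response.getProtobufMessage();
  }
}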

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -/** - * Asks datanode to close a container. - */ -public class CloseContainerCommand - extends SCMCommand { - - private final PipelineID pipelineID; - private boolean force; - - public CloseContainerCommand(final long containerID, - final PipelineID pipelineID) { - this(containerID, pipelineID, false); - } - - public CloseContainerCommand(final long containerID, - final PipelineID pipelineID, boolean force) { - super(containerID); - this.pipelineID = pipelineID; - this.force = force; - } - - /** - * Returns the type of this command. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.closeContainerCommand; - } - - @Override - public CloseContainerCommandProto getProto() { - return CloseContainerCommandProto.newBuilder() - .setContainerID(getId()) - .setCmdId(getId()) - .setPipelineID(pipelineID.getProtobuf()) - .setForce(force) - .build(); - } - - public static CloseContainerCommand getFromProtobuf( - CloseContainerCommandProto closeContainerProto) { - Preconditions.checkNotNull(closeContainerProto); - return new CloseContainerCommand(closeContainerProto.getCmdId(), - PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()), - closeContainerProto.getForce()); - } - - public long getContainerID() { - return getId(); - } - - public PipelineID getPipelineID() { - return pipelineID; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java deleted file mode 100644 index 66bf6230936..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandForDatanode.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
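Since each command knows its own protobuf shape, an SCM-side round trip needs nothing beyond the command class itself (the PipelineID is supplied by the caller here):

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;

final class CloseContainerCommandUsage {
  static CloseContainerCommand roundTrip(long containerId, PipelineID pipelineId) {
    // The third argument is the force flag carried in the proto above.
    CloseContainerCommand command =
        new CloseContainerCommand(containerId, pipelineId, true);

    CloseContainerCommandProto wire = command.getProto();
    return CloseContainerCommand.getFromProtobuf(wire);
  }
}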

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import java.util.UUID; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; - -/** - * Command for the datanode with the destination address. - */ -public class CommandForDatanode implements - IdentifiableEventPayload { - - private final UUID datanodeId; - - private final SCMCommand command; - - // TODO: Command for datanode should take DatanodeDetails as parameter. - public CommandForDatanode(UUID datanodeId, SCMCommand command) { - this.datanodeId = datanodeId; - this.command = command; - } - - public UUID getDatanodeId() { - return datanodeId; - } - - public SCMCommand getCommand() { - return command; - } - - public long getId() { - return command.getId(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java deleted file mode 100644 index 4b3ce840dce..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CommandStatus.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
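SCM addresses a command to a specific datanode by pairing it with the datanode UUID; the wrapper is what gets fired as an event payload. For example, with the close-container command shown earlier:

import java.util.UUID;

import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;

final class CommandRoutingUsage {
  static CommandForDatanode addressTo(UUID datanodeUuid, long containerId,
      PipelineID pipelineId) {
    CloseContainerCommand close = new CloseContainerCommand(containerId, pipelineId);
    // The wrapper keeps the command id, so it can later be correlated with CommandStatus reports.
    return new CommandForDatanode(datanodeUuid, close);
  }
}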

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; - -/** - * A class that is used to communicate status of datanode commands. - */ -public class CommandStatus { - - private SCMCommandProto.Type type; - private Long cmdId; - private Status status; - private String msg; - - CommandStatus(Type type, Long cmdId, Status status, String msg) { - this.type = type; - this.cmdId = cmdId; - this.status = status; - this.msg = msg; - } - - public Type getType() { - return type; - } - - public Long getCmdId() { - return cmdId; - } - - public Status getStatus() { - return status; - } - - public String getMsg() { - return msg; - } - - /** - * To allow change of status once commandStatus is initialized. - * - * @param status - */ - public void setStatus(Status status) { - this.status = status; - } - - public void setStatus(boolean cmdExecuted) { - setStatus(cmdExecuted ? Status.EXECUTED : Status.FAILED); - } - - /** - * Returns a CommandStatus from the protocol buffers. - * - * @param cmdStatusProto - protoBuf Message - * @return CommandStatus - */ - public CommandStatus getFromProtoBuf( - StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) { - return CommandStatusBuilder.newBuilder() - .setCmdId(cmdStatusProto.getCmdId()) - .setStatus(cmdStatusProto.getStatus()) - .setType(cmdStatusProto.getType()) - .setMsg(cmdStatusProto.getMsg()) - .build(); - } - /** - * Returns a CommandStatus from the protocol buffers. - * - * @return StorageContainerDatanodeProtocolProtos.CommandStatus - */ - public StorageContainerDatanodeProtocolProtos.CommandStatus - getProtoBufMessage() { - StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder = - StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder() - .setCmdId(this.getCmdId()) - .setStatus(this.getStatus()) - .setType(this.getType()); - if (this.getMsg() != null) { - builder.setMsg(this.getMsg()); - } - return builder.build(); - } - - /** - * Builder class for CommandStatus. 
- */ - public static class CommandStatusBuilder { - - private SCMCommandProto.Type type; - private Long cmdId; - private StorageContainerDatanodeProtocolProtos.CommandStatus.Status status; - private String msg; - - CommandStatusBuilder() { - } - - public static CommandStatusBuilder newBuilder() { - return new CommandStatusBuilder(); - } - - public Type getType() { - return type; - } - - public Long getCmdId() { - return cmdId; - } - - public Status getStatus() { - return status; - } - - public String getMsg() { - return msg; - } - - public CommandStatusBuilder setType(Type commandType) { - this.type = commandType; - return this; - } - - public CommandStatusBuilder setCmdId(Long commandId) { - this.cmdId = commandId; - return this; - } - - public CommandStatusBuilder setStatus(Status commandStatus) { - this.status = commandStatus; - return this; - } - - public CommandStatusBuilder setMsg(String message) { - this.msg = message; - return this; - } - - public CommandStatus build() { - return new CommandStatus(type, cmdId, status, msg); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java deleted file mode 100644 index e9ccb08a141..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlockCommandStatus.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
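On the datanode side, a finished command is reported back by assembling a CommandStatus with the builder and converting it to protobuf; the command type below is just an example:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
import org.apache.hadoop.ozone.protocol.commands.CommandStatus.CommandStatusBuilder;

final class CommandStatusUsage {
  static StorageContainerDatanodeProtocolProtos.CommandStatus report(long cmdId,
      boolean succeeded) {
    CommandStatus status = CommandStatusBuilder.newBuilder()
        .setCmdId(cmdId)
        .setType(Type.closeContainerCommand)  // example command type
        .setStatus(Status.EXECUTED)           // placeholder; adjusted just below
        .build();

    // setStatus(boolean) maps true/false to EXECUTED/FAILED.
    status.setStatus(succeeded);
    return status.getProtoBufMessage();
  }
}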

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; - -/** - * Command status to report about block deletion. - */ -public class DeleteBlockCommandStatus extends CommandStatus { - - private ContainerBlocksDeletionACKProto blocksDeletionAck = null; - - public DeleteBlockCommandStatus(Type type, Long cmdId, - StorageContainerDatanodeProtocolProtos.CommandStatus.Status status, - String msg, - ContainerBlocksDeletionACKProto blocksDeletionAck) { - super(type, cmdId, status, msg); - this.blocksDeletionAck = blocksDeletionAck; - } - - public void setBlocksDeletionAck( - ContainerBlocksDeletionACKProto deletionAck) { - blocksDeletionAck = deletionAck; - } - - @Override - public CommandStatus getFromProtoBuf( - StorageContainerDatanodeProtocolProtos.CommandStatus cmdStatusProto) { - return DeleteBlockCommandStatusBuilder.newBuilder() - .setBlockDeletionAck(cmdStatusProto.getBlockDeletionAck()) - .setCmdId(cmdStatusProto.getCmdId()) - .setStatus(cmdStatusProto.getStatus()) - .setType(cmdStatusProto.getType()) - .setMsg(cmdStatusProto.getMsg()) - .build(); - } - - @Override - public StorageContainerDatanodeProtocolProtos.CommandStatus - getProtoBufMessage() { - StorageContainerDatanodeProtocolProtos.CommandStatus.Builder builder = - StorageContainerDatanodeProtocolProtos.CommandStatus.newBuilder() - .setCmdId(this.getCmdId()) - .setStatus(this.getStatus()) - .setType(this.getType()); - if (blocksDeletionAck != null) { - builder.setBlockDeletionAck(blocksDeletionAck); - } - if (this.getMsg() != null) { - builder.setMsg(this.getMsg()); - } - return builder.build(); - } - - /** - * Builder for DeleteBlockCommandStatus. - */ - public static final class DeleteBlockCommandStatusBuilder - extends CommandStatusBuilder { - private ContainerBlocksDeletionACKProto blocksDeletionAck = null; - - public static DeleteBlockCommandStatusBuilder newBuilder() { - return new DeleteBlockCommandStatusBuilder(); - } - - public DeleteBlockCommandStatusBuilder setBlockDeletionAck( - ContainerBlocksDeletionACKProto deletionAck) { - this.blocksDeletionAck = deletionAck; - return this; - } - - @Override - public CommandStatus build() { - return new DeleteBlockCommandStatus(getType(), getCmdId(), getStatus(), - getMsg(), blocksDeletionAck); - } - - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java deleted file mode 100644 index 03a876cee34..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto; - -import java.util.List; - -/** - * A SCM command asks a datanode to delete a number of blocks. - */ -public class DeleteBlocksCommand extends - SCMCommand { - - private List blocksTobeDeleted; - - - public DeleteBlocksCommand(List blocks) { - super(); - this.blocksTobeDeleted = blocks; - } - - // Should be called only for protobuf conversion - private DeleteBlocksCommand(List blocks, - long id) { - super(id); - this.blocksTobeDeleted = blocks; - } - - public List blocksTobeDeleted() { - return this.blocksTobeDeleted; - } - - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.deleteBlocksCommand; - } - - public static DeleteBlocksCommand getFromProtobuf( - DeleteBlocksCommandProto deleteBlocksProto) { - return new DeleteBlocksCommand(deleteBlocksProto - .getDeletedBlocksTransactionsList(), deleteBlocksProto.getCmdId()); - } - - @Override - public DeleteBlocksCommandProto getProto() { - return DeleteBlocksCommandProto.newBuilder() - .setCmdId(getId()) - .addAllDeletedBlocksTransactions(blocksTobeDeleted).build(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java deleted file mode 100644 index 48aa83bcc8c..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteContainerCommand.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeleteContainerCommandProto; - -/** - * SCM command which tells the datanode to delete a container. - */ -public class DeleteContainerCommand extends - SCMCommand { - - private final long containerId; - private final boolean force; - - /** - * DeleteContainerCommand, to send a command for datanode to delete a - * container. - * @param containerId - */ - public DeleteContainerCommand(long containerId) { - this(containerId, false); - } - - /** - * DeleteContainerCommand, to send a command for datanode to delete a - * container. - * @param containerId - * @param forceFlag if this is set to true, we delete container without - * checking state of the container. - */ - - public DeleteContainerCommand(long containerId, boolean forceFlag) { - this.containerId = containerId; - this.force = forceFlag; - } - - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.deleteContainerCommand; - } - - @Override - public DeleteContainerCommandProto getProto() { - DeleteContainerCommandProto.Builder builder = - DeleteContainerCommandProto.newBuilder(); - builder.setCmdId(getId()) - .setContainerID(getContainerID()).setForce(force); - return builder.build(); - } - - public long getContainerID() { - return containerId; - } - - public boolean isForce() { - return force; - } - - public static DeleteContainerCommand getFromProtobuf( - DeleteContainerCommandProto protoMessage) { - Preconditions.checkNotNull(protoMessage); - return new DeleteContainerCommand(protoMessage.getContainerID(), - protoMessage.getForce()); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java deleted file mode 100644 index 42778cb6e49..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.common.base.Strings; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto - .ErrorCode; - -/** - * Response to Datanode Register call. 
- */ -public class RegisteredCommand { - private String clusterID; - private ErrorCode error; - private DatanodeDetails datanode; - - public RegisteredCommand(final ErrorCode error, final DatanodeDetails node, - final String clusterID) { - this.datanode = node; - this.clusterID = clusterID; - this.error = error; - } - - /** - * Returns a new builder. - * - * @return - Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Returns datanode. - * - * @return - Datanode. - */ - public DatanodeDetails getDatanode() { - return datanode; - } - - /** - * Returns cluster ID. - * - * @return -- ClusterID - */ - public String getClusterID() { - return clusterID; - } - - /** - * Returns ErrorCode. - * - * @return - ErrorCode - */ - public ErrorCode getError() { - return error; - } - - /** - * Gets the protobuf message of this object. - * - * @return A protobuf message. - */ - public SCMRegisteredResponseProto getProtoBufMessage() { - SCMRegisteredResponseProto.Builder builder = - SCMRegisteredResponseProto.newBuilder() - // TODO : Fix this later when we have multiple SCM support. - // .setAddressList(addressList) - .setClusterID(this.clusterID) - .setDatanodeUUID(this.datanode.getUuidString()) - .setErrorCode(this.error); - if (!Strings.isNullOrEmpty(datanode.getHostName())) { - builder.setHostname(datanode.getHostName()); - } - if (!Strings.isNullOrEmpty(datanode.getIpAddress())) { - builder.setIpAddress(datanode.getIpAddress()); - } - if (!Strings.isNullOrEmpty(datanode.getNetworkName())) { - builder.setNetworkName(datanode.getNetworkName()); - } - if (!Strings.isNullOrEmpty(datanode.getNetworkLocation())) { - builder.setNetworkLocation(datanode.getNetworkLocation()); - } - - return builder.build(); - } - - /** - * A builder class to verify all values are sane. - */ - public static class Builder { - private DatanodeDetails datanode; - private String clusterID; - private ErrorCode error; - - /** - * sets datanode details. - * - * @param node - datanode details - * @return Builder - */ - public Builder setDatanode(DatanodeDetails node) { - this.datanode = node; - return this; - } - - /** - * Sets cluster ID. - * - * @param cluster - clusterID - * @return Builder - */ - public Builder setClusterID(String cluster) { - this.clusterID = cluster; - return this; - } - - /** - * Sets Error code. - * - * @param errorCode - error code - * @return Builder - */ - public Builder setErrorCode(ErrorCode errorCode) { - this.error = errorCode; - return this; - } - - /** - * Build the command object. 
- * - * @return RegisteredCommand - */ - public RegisteredCommand build() { - if ((this.error == ErrorCode.success) && (this.datanode == null - || Strings.isNullOrEmpty(this.datanode.getUuidString()) - || Strings.isNullOrEmpty(this.clusterID))) { - throw new IllegalArgumentException("On success, RegisteredCommand " - + "needs datanodeUUID and ClusterID."); - } - return new RegisteredCommand(this.error, this.datanode, this.clusterID); - } - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java deleted file mode 100644 index e663bed794f..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReplicateContainerCommand.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto - .Builder; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; - -import com.google.common.base.Preconditions; - -/** - * SCM command to request replication of a container. 
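A successful registration response is then put together on the SCM side like this; build() enforces that a datanode UUID and cluster ID are present whenever the error code is success (the DatanodeDetails instance is supplied by the caller):

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;

final class RegistrationResponseUsage {
  static SCMRegisteredResponseProto accept(DatanodeDetails datanode, String clusterId) {
    RegisteredCommand registered = RegisteredCommand.newBuilder()
        .setErrorCode(ErrorCode.success)
        .setDatanode(datanode)      // must carry a UUID, or build() throws
        .setClusterID(clusterId)
        .build();
    return registered.getProtoBufMessage();
  }
}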
- */ -public class ReplicateContainerCommand - extends SCMCommand { - - private final long containerID; - private final List sourceDatanodes; - - public ReplicateContainerCommand(long containerID, - List sourceDatanodes) { - super(); - this.containerID = containerID; - this.sourceDatanodes = sourceDatanodes; - } - - // Should be called only for protobuf conversion - public ReplicateContainerCommand(long containerID, - List sourceDatanodes, long id) { - super(id); - this.containerID = containerID; - this.sourceDatanodes = sourceDatanodes; - } - - @Override - public Type getType() { - return SCMCommandProto.Type.replicateContainerCommand; - } - - @Override - public ReplicateContainerCommandProto getProto() { - Builder builder = ReplicateContainerCommandProto.newBuilder() - .setCmdId(getId()) - .setContainerID(containerID); - for (DatanodeDetails dd : sourceDatanodes) { - builder.addSources(dd.getProtoBufMessage()); - } - return builder.build(); - } - - public static ReplicateContainerCommand getFromProtobuf( - ReplicateContainerCommandProto protoMessage) { - Preconditions.checkNotNull(protoMessage); - - List datanodeDetails = - protoMessage.getSourcesList() - .stream() - .map(DatanodeDetails::getFromProtoBuf) - .collect(Collectors.toList()); - - return new ReplicateContainerCommand(protoMessage.getContainerID(), - datanodeDetails, protoMessage.getCmdId()); - - } - - public long getContainerID() { - return containerID; - } - - public List getSourceDatanodes() { - return sourceDatanodes; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java deleted file mode 100644 index e3ea4aeeaff..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; - -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto; - -/** - * Informs a datanode to register itself with SCM again. - */ -public class ReregisterCommand extends - SCMCommand{ - - /** - * Returns the type of this command. - * - * @return Type - */ - @Override - public SCMCommandProto.Type getType() { - return SCMCommandProto.Type.reregisterCommand; - } - - /** - * Not implemented for ReregisterCommand. - * - * @return cmdId. 
- */ - @Override - public long getId() { - return 0; - } - - @Override - public ReregisterCommandProto getProto() { - return ReregisterCommandProto - .newBuilder() - .build(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java deleted file mode 100644 index 3c4e05b424a..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; - -/** - * A class that acts as the base class to convert between Java and SCM - * commands in protobuf format. - * @param - */ -public abstract class SCMCommand implements - IdentifiableEventPayload { - private long id; - - SCMCommand() { - this.id = HddsIdFactory.getLongId(); - } - - SCMCommand(long id) { - this.id = id; - } - /** - * Returns the type of this command. - * @return Type - */ - public abstract SCMCommandProto.Type getType(); - - /** - * Gets the protobuf message of this object. - * @return A protobuf message. - */ - public abstract T getProto(); - - /** - * Gets the commandId of this object. - * @return uuid. - */ - public long getId() { - return id; - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java deleted file mode 100644 index 7083c1b154d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
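Every concrete command follows the same contract as this base class: a new instance draws its id from HddsIdFactory, getProto() serializes that id as cmdId, and getFromProtobuf() rebuilds the command. With the delete-container command shown earlier:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeleteContainerCommandProto;
import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;

final class ScmCommandContractUsage {
  static void roundTrip(long containerId) {
    DeleteContainerCommand command = new DeleteContainerCommand(containerId, false);

    DeleteContainerCommandProto wire = command.getProto();
    DeleteContainerCommand restored = DeleteContainerCommand.getFromProtobuf(wire);

    // The restored command keeps the container id and the force flag...
    assert restored.getContainerID() == containerId;
    assert !restored.isForce();
    // ...but note that getFromProtobuf() above rebuilds the command without the original
    // cmdId, so restored.getId() is a freshly generated id rather than wire.getCmdId().
  }
}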
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; -/** - Set of classes that help in protoc conversions. - **/ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java deleted file mode 100644 index a718fa7476f..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocol; - -/** - * This package contains classes for HDDS protocol definitions. - */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java deleted file mode 100644 index 9b446666e5d..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest.Builder; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; - -import java.io.Closeable; -import java.io.IOException; -import java.util.function.Consumer; - -/** - * This class is the client-side translator to translate the requests made on - * the {@link StorageContainerDatanodeProtocol} interface to the RPC server - * implementing {@link StorageContainerDatanodeProtocolPB}. - */ -public class StorageContainerDatanodeProtocolClientSideTranslatorPB - implements StorageContainerDatanodeProtocol, ProtocolTranslator, Closeable { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - private final StorageContainerDatanodeProtocolPB rpcProxy; - - /** - * Constructs a Client side interface that calls into SCM datanode protocol. - * - * @param rpcProxy - Proxy for RPC. - */ - public StorageContainerDatanodeProtocolClientSideTranslatorPB( - StorageContainerDatanodeProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *

- *

As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - RPC.stopProxy(rpcProxy); - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - /** - * Helper method to wrap the request and send the message. - */ - private SCMDatanodeResponse submitRequest(Type type, - Consumer builderConsumer) throws IOException { - final SCMDatanodeResponse response; - try { - Builder builder = SCMDatanodeRequest.newBuilder() - .setCmdType(type); - builderConsumer.accept(builder); - SCMDatanodeRequest wrapper = builder.build(); - - response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); - } catch (ServiceException ex) { - throw ProtobufHelper.getRemoteException(ex); - } - return response; - } - - /** - * Returns SCM version. - * - * @param unused - set to null and unused. - * @return Version info. - */ - @Override - public SCMVersionResponseProto getVersion(SCMVersionRequestProto - request) throws IOException { - return submitRequest(Type.GetVersion, - (builder) -> builder - .setGetVersionRequest(SCMVersionRequestProto.newBuilder().build())) - .getGetVersionResponse(); - } - - /** - * Send by datanode to SCM. - * - * @param heartbeat node heartbeat - * @throws IOException - */ - - @Override - public SCMHeartbeatResponseProto sendHeartbeat( - SCMHeartbeatRequestProto heartbeat) throws IOException { - return submitRequest(Type.SendHeartbeat, - (builder) -> builder.setSendHeartbeatRequest(heartbeat)) - .getSendHeartbeatResponse(); - } - - /** - * Register Datanode. - * - * @param datanodeDetailsProto - Datanode Details - * @param nodeReport - Node Report. - * @param containerReportsRequestProto - Container Reports. - * @return SCM Command. - */ - @Override - public SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - SCMRegisterRequestProto.Builder req = - SCMRegisterRequestProto.newBuilder(); - req.setDatanodeDetails(datanodeDetailsProto); - req.setContainerReport(containerReportsRequestProto); - req.setPipelineReports(pipelineReportsProto); - req.setNodeReport(nodeReport); - return submitRequest(Type.Register, - (builder) -> builder.setRegisterRequest(req)) - .getRegisterResponse(); - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java deleted file mode 100644 index 9006e9175ac..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos - .StorageContainerDatanodeProtocolService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.security.KerberosInfo; - -/** - * Protocol used from a datanode to StorageContainerManager. This extends - * the Protocol Buffers service interface to add Hadoop-specific annotations. - */ - -@ProtocolInfo(protocolName = - "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol", - protocolVersion = 1) -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, - clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) -public interface StorageContainerDatanodeProtocolPB extends - StorageContainerDatanodeProtocolService.BlockingInterface { -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java deleted file mode 100644 index ed704ebf431..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
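With an RPC proxy for this PB interface already in hand, the datanode only ever sees the translator and the protobuf request/response types; ServiceExceptions from the proxy are unwrapped into IOExceptions. A minimal sketch (note that close() stops the underlying proxy):

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;

final class ClientTranslatorUsage {
  static SCMVersionResponseProto queryVersion(StorageContainerDatanodeProtocolPB rpcProxy)
      throws IOException {
    try (StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
        new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy)) {
      return scm.getVersion(SCMVersionRequestProto.newBuilder().build());
    }
  }
}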
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import java.io.IOException; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Status; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type; -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is the server-side translator that forwards requests received on - * {@link StorageContainerDatanodeProtocolPB} to the {@link - * StorageContainerDatanodeProtocol} server implementation. 
- */ -public class StorageContainerDatanodeProtocolServerSideTranslatorPB - implements StorageContainerDatanodeProtocolPB { - - private static final Logger LOG = LoggerFactory - .getLogger(StorageContainerDatanodeProtocolServerSideTranslatorPB.class); - - private final StorageContainerDatanodeProtocol impl; - private final OzoneProtocolMessageDispatcher dispatcher; - - public StorageContainerDatanodeProtocolServerSideTranslatorPB( - StorageContainerDatanodeProtocol impl, - ProtocolMessageMetrics protocolMessageMetrics) { - this.impl = impl; - dispatcher = - new OzoneProtocolMessageDispatcher<>("SCMDatanodeProtocol", - protocolMessageMetrics, - LOG); - } - - public SCMRegisteredResponseProto register( - SCMRegisterRequestProto request) throws IOException { - ContainerReportsProto containerRequestProto = request - .getContainerReport(); - NodeReportProto dnNodeReport = request.getNodeReport(); - PipelineReportsProto pipelineReport = request.getPipelineReports(); - return impl.register(request.getDatanodeDetails(), dnNodeReport, - containerRequestProto, pipelineReport); - - } - - @Override - public SCMDatanodeResponse submitRequest(RpcController controller, - SCMDatanodeRequest request) throws ServiceException { - return dispatcher.processRequest(request, this::processMessage, - request.getCmdType(), request.getTraceID()); - } - - public SCMDatanodeResponse processMessage(SCMDatanodeRequest request) - throws ServiceException { - try { - Type cmdType = request.getCmdType(); - switch (cmdType) { - case GetVersion: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setGetVersionResponse( - impl.getVersion(request.getGetVersionRequest())) - .build(); - case SendHeartbeat: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setSendHeartbeatResponse( - impl.sendHeartbeat(request.getSendHeartbeatRequest())) - .build(); - case Register: - return SCMDatanodeResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK) - .setRegisterResponse(register(request.getRegisterRequest())) - .build(); - default: - throw new ServiceException("Unknown command type: " + cmdType); - } - } catch (IOException e) { - throw new ServiceException(e); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 378a8f389cf..00000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
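The dispatch above is symmetric with the client-side translator earlier in this patch: the client wraps every call in an SCMDatanodeRequest envelope keyed by cmdType, and processMessage unwraps it and answers with an SCMDatanodeResponse carrying the matching sub-response plus a Status. The fragment below is a minimal sketch of that round trip written only against the generated protobuf classes referenced in this patch; the wrapper class name and the printing are illustrative assumptions, not part of the removed code.

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeRequest;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDatanodeResponse;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Status;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.Type;

public final class EnvelopeExample {
  private EnvelopeExample() {
  }

  /** Builds the GetVersion envelope the same way the client-side translator does. */
  public static SCMDatanodeRequest buildGetVersionRequest() {
    return SCMDatanodeRequest.newBuilder()
        .setCmdType(Type.GetVersion)
        .setGetVersionRequest(SCMVersionRequestProto.newBuilder().build())
        .build();
  }

  /** Unwraps the response the server-side dispatcher returns for GetVersion. */
  public static void checkResponse(SCMDatanodeResponse response) {
    if (response.getStatus() == Status.OK && response.hasGetVersionResponse()) {
      System.out.println("SCM software version: "
          + response.getGetVersionResponse().getSoftwareVersion());
    }
  }
}

With this envelope in place, adding a new command means adding a new case to processMessage and new optional fields to the request and response messages, while the single submitRequest rpc stays unchanged.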
- */ - -package org.apache.hadoop.ozone.protocolPB; diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto deleted file mode 100644 index a975cd5605f..00000000000 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ /dev/null @@ -1,429 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdds.protocol.proto"; - -option java_outer_classname = "StorageContainerDatanodeProtocolProtos"; - -option java_generic_services = true; - -option java_generate_equals_and_hash = true; - -package hadoop.hdds; - -import "hdds.proto"; - - -message SCMDatanodeRequest { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional SCMVersionRequestProto getVersionRequest = 3; - optional SCMRegisterRequestProto registerRequest = 4; - optional SCMHeartbeatRequestProto sendHeartbeatRequest = 5; -} - -message SCMDatanodeResponse { - required Type cmdType = 1; // Type of the command - - optional string traceID = 2; - - optional bool success = 3 [default = true]; - - optional string message = 4; - - required Status status = 5; - - optional SCMVersionResponseProto getVersionResponse = 6; - optional SCMRegisteredResponseProto registerResponse = 7; - optional SCMHeartbeatResponseProto sendHeartbeatResponse = 8; - -} - -enum Type { - GetVersion = 1; - Register = 2; - SendHeartbeat = 3; -} - -enum Status { - OK = 1; - ERROR = 2; -} - -/** - * Request for version info of the software stack on the server. - */ -message SCMVersionRequestProto {} - -/** -* Generic response that is send to a version request. This allows keys to be -* added on the fly and protocol to remain stable. -*/ -message SCMVersionResponseProto { - required uint32 softwareVersion = 1; - repeated hadoop.hdds.KeyValue keys = 2; -} - -message SCMRegisterRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - required NodeReportProto nodeReport = 2; - required ContainerReportsProto containerReport = 3; - required PipelineReportsProto pipelineReports = 4; -} - -/** - * Datanode ID returned by the SCM. This is similar to name node - * registeration of a datanode. 
- */ -message SCMRegisteredResponseProto { - enum ErrorCode { - success = 1; - errorNodeNotPermitted = 2; - } - required ErrorCode errorCode = 1; - required string datanodeUUID = 2; - required string clusterID = 3; - optional SCMNodeAddressList addressList = 4; - optional string hostname = 5; - optional string ipAddress = 6; - optional string networkName = 7; - optional string networkLocation = 8; -} - -/** -* This message is send by data node to indicate that it is alive or it is -* registering with the node manager. -*/ -message SCMHeartbeatRequestProto { - required DatanodeDetailsProto datanodeDetails = 1; - optional NodeReportProto nodeReport = 2; - optional ContainerReportsProto containerReport = 3; - repeated IncrementalContainerReportProto incrementalContainerReport = 4; - repeated CommandStatusReportsProto commandStatusReports = 5; - optional ContainerActionsProto containerActions = 6; - optional PipelineActionsProto pipelineActions = 7; - optional PipelineReportsProto pipelineReports = 8; -} - -/* - * A group of commands for the datanode to execute - */ -message SCMHeartbeatResponseProto { - required string datanodeUUID = 1; - repeated SCMCommandProto commands = 2; -} - -message SCMNodeAddressList { - repeated string addressList = 1; -} - -/** -* This message is send along with the heart beat to report datanode -* storage utilization to SCM. -*/ -message NodeReportProto { - repeated StorageReportProto storageReport = 1; -} - -message StorageReportProto { - required string storageUuid = 1; - required string storageLocation = 2; - optional uint64 capacity = 3 [default = 0]; - optional uint64 scmUsed = 4 [default = 0]; - optional uint64 remaining = 5 [default = 0]; - optional StorageTypeProto storageType = 6 [default = DISK]; - optional bool failed = 7 [default = false]; -} - -/** - * Types of recognized storage media. 
- */ -enum StorageTypeProto { - DISK = 1; - SSD = 2; - ARCHIVE = 3; - RAM_DISK = 4; - PROVIDED = 5; -} - -message ContainerReportsProto { - repeated ContainerReplicaProto reports = 1; -} - -message IncrementalContainerReportProto { - repeated ContainerReplicaProto report = 1; -} - -message ContainerReplicaProto { - enum State { - OPEN = 1; - CLOSING = 2; - QUASI_CLOSED = 3; - CLOSED = 4; - UNHEALTHY = 5; - INVALID = 6; - } - required int64 containerID = 1; - required State state = 2; - optional int64 size = 3; - optional int64 used = 4; - optional int64 keyCount = 5; - optional int64 readCount = 6; - optional int64 writeCount = 7; - optional int64 readBytes = 8; - optional int64 writeBytes = 9; - optional string finalhash = 10; - optional int64 deleteTransactionId = 11; - optional uint64 blockCommitSequenceId = 12; - optional string originNodeId = 13; -} - -message CommandStatusReportsProto { - repeated CommandStatus cmdStatus = 1; -} - -message CommandStatus { - enum Status { - PENDING = 1; - EXECUTED = 2; - FAILED = 3; - } - required int64 cmdId = 1; - required Status status = 2 [default = PENDING]; - required SCMCommandProto.Type type = 3; - optional string msg = 4; - optional ContainerBlocksDeletionACKProto blockDeletionAck = 5; -} - -message ContainerActionsProto { - repeated ContainerAction containerActions = 1; -} - -message ContainerAction { - enum Action { - CLOSE = 1; - } - - enum Reason { - CONTAINER_FULL = 1; - CONTAINER_UNHEALTHY = 2; - } - - required int64 containerID = 1; - required Action action = 2; - optional Reason reason = 3; -} - -message PipelineReport { - required PipelineID pipelineID = 1; -} - -message PipelineReportsProto { - repeated PipelineReport pipelineReport = 1; -} - -message PipelineActionsProto { - repeated PipelineAction pipelineActions = 1; -} - -message ClosePipelineInfo { - enum Reason { - PIPELINE_FAILED = 1; - PIPELINE_LOG_FAILED = 2; - STATEMACHINE_TRANSACTION_FAILED = 3; - } - required PipelineID pipelineID = 1; - optional Reason reason = 3; - optional string detailedReason = 4; -} - -message PipelineAction { - enum Action { - CLOSE = 1; - } - - /** - * Action will be used to identify the correct pipeline action. - */ - required Action action = 1; - optional ClosePipelineInfo closePipeline = 2; -} - -/* - * These are commands returned by SCM for to the datanode to execute. - */ -message SCMCommandProto { - enum Type { - reregisterCommand = 1; - deleteBlocksCommand = 2; - closeContainerCommand = 3; - deleteContainerCommand = 4; - replicateContainerCommand = 5; - } - // TODO: once we start using protoc 3.x, refactor this message using "oneof" - required Type commandType = 1; - optional ReregisterCommandProto reregisterCommandProto = 2; - optional DeleteBlocksCommandProto deleteBlocksCommandProto = 3; - optional CloseContainerCommandProto closeContainerCommandProto = 4; - optional DeleteContainerCommandProto deleteContainerCommandProto = 5; - optional ReplicateContainerCommandProto replicateContainerCommandProto = 6; -} - -/** - * SCM informs a datanode to register itself again. - * With recieving this command, datanode will transit to REGISTER state. - */ -message ReregisterCommandProto {} - - -// HB response from SCM, contains a list of block deletion transactions. -message DeleteBlocksCommandProto { - repeated DeletedBlocksTransaction deletedBlocksTransactions = 1; - required int64 cmdId = 3; -} - -// The deleted blocks which are stored in deletedBlock.db of scm. 
-// We don't use BlockID because this only contians multiple localIDs -// of the same containerID. -message DeletedBlocksTransaction { - required int64 txID = 1; - required int64 containerID = 2; - repeated int64 localID = 3; - // the retry time of sending deleting command to datanode. - required int32 count = 4; -} - -// ACK message datanode sent to SCM, contains the result of -// block deletion transactions. -message ContainerBlocksDeletionACKProto { - message DeleteBlockTransactionResult { - required int64 txID = 1; - required int64 containerID = 2; - required bool success = 3; - } - repeated DeleteBlockTransactionResult results = 1; - required string dnId = 2; -} - -/** -This command asks the datanode to close a specific container. -*/ -message CloseContainerCommandProto { - required int64 containerID = 1; - required PipelineID pipelineID = 2; - // cmdId will be removed - required int64 cmdId = 3; - // Force will be used when closing a container out side of ratis. - optional bool force = 4 [default = false]; -} - -/** -This command asks the datanode to delete a specific container. -*/ -message DeleteContainerCommandProto { - required int64 containerID = 1; - required int64 cmdId = 2; - required bool force = 3 [default = false]; -} - -/** -This command asks the datanode to replicate a container from specific sources. -*/ -message ReplicateContainerCommandProto { - required int64 containerID = 1; - repeated DatanodeDetailsProto sources = 2; - required int64 cmdId = 3; -} - -/** - * Protocol used from a datanode to StorageContainerManager. - * - * Please see the request and response messages for details of the RPC calls. - * - * Here is a simple state diagram that shows how a datanode would boot up and - * communicate with SCM. - * - * ----------------------- - * | Start | - * ---------- ------------ - * | - * | - * | - * | - * | - * | - * | - * ----------v------------- - * | Searching for SCM ------------ - * ---------- ------------- | - * | | - * | | - * | ----------v------------- - * | | Register if needed | - * | ----------- ------------ - * | | - * v | - * ----------- ---------------- | - * --------- Heartbeat state <-------- - * | --------^------------------- - * | | - * | | - * | | - * | | - * | | - * | | - * | | - * ------------------ - * - * - * - * Here is how this protocol is used by the datanode. When a datanode boots up - * it moves into a stated called SEARCHING_SCM. In this state datanode is - * trying to establish communication with the SCM. The address of the SCMs are - * retrieved from the configuration information. - * - * In the SEARCHING_SCM state, only rpc call made by datanode is a getVersion - * call to SCM. Once any of the SCMs reply, datanode checks if it has a local - * persisted datanode ID. If it has this means that this datanode is already - * registered with some SCM. If this file is not found, datanode assumes that - * it needs to do a registration. - * - * If registration is need datanode moves into REGISTER state. It will - * send a register call with DatanodeDetailsProto data structure and presist - * that info. - * - * The response to the command contains clusterID. This information is - * also persisted by the datanode and moves into heartbeat state. - * - * Once in the heartbeat state, datanode sends heartbeats and container reports - * to SCM and process commands issued by SCM until it is shutdown. - * - */ -service StorageContainerDatanodeProtocolService { - - //Message sent from Datanode to SCM as a heartbeat. 
- rpc submitRequest (SCMDatanodeRequest) returns (SCMDatanodeResponse); -} diff --git a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider deleted file mode 100644 index 2e103fea7b7..00000000000 --- a/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainerProvider diff --git a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep deleted file mode 100644 index ff1232e5fca..00000000000 --- a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/.gitkeep +++ /dev/null @@ -1,17 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
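The long comment above describes the datanode lifecycle against SCM: a version probe while searching for SCM, a one-time registration when no persisted datanode id exists, and then a steady heartbeat loop that carries reports and picks up commands. The fragment below is a minimal sketch of that flow against the StorageContainerDatanodeProtocol interface removed by this patch; the empty reports, the alreadyRegistered flag, and the class wrapper are assumptions made for illustration only.

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;

public final class DatanodeBootFlowSketch {
  private DatanodeBootFlowSketch() {
  }

  /**
   * Walks the version-probe, register, heartbeat flow described above.
   * The caller supplies the datanode details and decides when to stop.
   */
  public static void runOnce(StorageContainerDatanodeProtocol scm,
      DatanodeDetailsProto datanodeDetails, boolean alreadyRegistered)
      throws IOException {
    // While searching for SCM, the only RPC made is a version probe.
    scm.getVersion(SCMVersionRequestProto.newBuilder().build());

    // Register only if no persisted datanode id was found on disk.
    if (!alreadyRegistered) {
      String clusterId = scm.register(datanodeDetails,
          NodeReportProto.newBuilder().build(),
          ContainerReportsProto.newBuilder().build(),
          PipelineReportsProto.newBuilder().build()).getClusterID();
      System.out.println("Registered with cluster " + clusterId);
    }

    // Heartbeat: report liveness and pick up commands issued by SCM.
    SCMHeartbeatResponseProto reply = scm.sendHeartbeat(
        SCMHeartbeatRequestProto.newBuilder()
            .setDatanodeDetails(datanodeDetails)
            .build());
    System.out.println("SCM returned " + reply.getCommandsCount() + " commands");
  }
}

In the removed production code this flow is driven by the datanode state machine and its endpoint tasks rather than called from a single method as shown here.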
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java deleted file mode 100644 index af56d0643d5..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ServicePlugin; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for {@link HddsDatanodeService}. 
- */ -public class TestHddsDatanodeService { - private File testDir; - private OzoneConfiguration conf; - private HddsDatanodeService service; - private String[] args = new String[] {}; - - @Before - public void setUp() { - testDir = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); - conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class, - ServicePlugin.class); - - String volumeDir = testDir + "/disk1"; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir); - } - - @After - public void tearDown() { - FileUtil.fullyDelete(testDir); - } - - @Test - public void testStartup() throws IOException { - service = HddsDatanodeService.createHddsDatanodeService(args); - service.start(conf); - - assertNotNull(service.getDatanodeDetails()); - assertNotNull(service.getDatanodeDetails().getHostName()); - assertFalse(service.getDatanodeStateMachine().isDaemonStopped()); - - service.stop(); - service.join(); - service.close(); - } - - static class MockService implements ServicePlugin { - - @Override - public void close() throws IOException { - // Do nothing - } - - @Override - public void start(Object arg0) { - // Do nothing - } - - @Override - public void stop() { - // Do nothing - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java deleted file mode 100644 index 04fd3a499aa..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ /dev/null @@ -1,274 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.ServicePlugin; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.X509Certificate; -import java.util.concurrent.Callable; - -import static org.apache.hadoop.ozone.HddsDatanodeService.getLogger; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * Test class for {@link HddsDatanodeService}. - */ -public class TestHddsSecureDatanodeInit { - - private static File testDir; - private static OzoneConfiguration conf; - private static HddsDatanodeService service; - private static String[] args = new String[]{}; - private static PrivateKey privateKey; - private static PublicKey publicKey; - private static GenericTestUtils.LogCapturer dnLogs; - private static CertificateClient client; - private static SecurityConfig securityConfig; - private static KeyCodec keyCodec; - private static CertificateCodec certCodec; - private static X509CertificateHolder certHolder; - private final static String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - - @BeforeClass - public static void setUp() throws Exception { - testDir = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); - //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); - String volumeDir = testDir + "/disk1"; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir); - - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, - TestHddsDatanodeService.MockService.class, - ServicePlugin.class); - securityConfig = new SecurityConfig(conf); - - service = HddsDatanodeService.createHddsDatanodeService(args); - dnLogs = GenericTestUtils.LogCapturer.captureLogs(getLogger()); - callQuietly(() -> { - service.start(conf); - return null; - }); - callQuietly(() -> { - service.initializeCertificateClient(conf); - return null; - }); - certCodec = new CertificateCodec(securityConfig, DN_COMPONENT); - keyCodec = new KeyCodec(securityConfig, DN_COMPONENT); - dnLogs.clearOutput(); - privateKey = service.getCertificateClient().getPrivateKey(); - publicKey = service.getCertificateClient().getPublicKey(); - X509Certificate x509Certificate = null; - - x509Certificate = KeyStoreTestUtil.generateCertificate( - "CN=Test", new KeyPair(publicKey, 
privateKey), 10, - securityConfig.getSignatureAlgo()); - certHolder = new X509CertificateHolder(x509Certificate.getEncoded()); - - } - - @AfterClass - public static void tearDown() { - FileUtil.fullyDelete(testDir); - } - - @Before - public void setUpDNCertClient(){ - - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPrivateKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get( - securityConfig.getKeyLocation(DN_COMPONENT).toString(), - securityConfig.getPublicKeyFileName()).toFile()); - FileUtils.deleteQuietly(Paths.get(securityConfig - .getCertificateLocation(DN_COMPONENT).toString(), - securityConfig.getCertificateFileName()).toFile()); - dnLogs.clearOutput(); - client = new DNCertificateClient(securityConfig, - certHolder.getSerialNumber().toString()); - service.setCertificateClient(client); - } - - @Test - public void testSecureDnStartupCase0() throws Exception { - - // Case 0: When keypair as well as certificate is missing. Initial keypair - // boot-up. Get certificate will fail as no SCM is not running. - LambdaTestUtils.intercept(Exception.class, "", - () -> service.initializeCertificateClient(conf)); - - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT")); - } - - @Test - public void testSecureDnStartupCase1() throws Exception { - // Case 1: When only certificate is present. - - certCodec.writeCertificate(certHolder); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase2() throws Exception { - // Case 2: When private key and certificate is missing. - keyCodec.writePublicKey(publicKey); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase3() throws Exception { - // Case 3: When only public key and certificate is present. - keyCodec.writePublicKey(publicKey); - certCodec.writeCertificate(certHolder); - LambdaTestUtils.intercept(RuntimeException.class, "DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - } - - @Test - public void testSecureDnStartupCase4() throws Exception { - // Case 4: When public key as well as certificate is missing. 
- keyCodec.writePrivateKey(privateKey); - LambdaTestUtils.intercept(RuntimeException.class, " DN security" + - " initialization failed", - () -> service.initializeCertificateClient(conf)); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: FAILURE")); - dnLogs.clearOutput(); - } - - @Test - public void testSecureDnStartupCase5() throws Exception { - // Case 5: If private key and certificate is present. - certCodec.writeCertificate(certHolder); - keyCodec.writePrivateKey(privateKey); - service.initializeCertificateClient(conf); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS")); - } - - @Test - public void testSecureDnStartupCase6() throws Exception { - // Case 6: If key pair already exist than response should be GETCERT. - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - LambdaTestUtils.intercept(Exception.class, "", - () -> service.initializeCertificateClient(conf)); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: GETCERT")); - } - - @Test - public void testSecureDnStartupCase7() throws Exception { - // Case 7 When keypair and certificate is present. - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - certCodec.writeCertificate(certHolder); - - service.initializeCertificateClient(conf); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - Assert.assertTrue(dnLogs.getOutput().contains("Init response: SUCCESS")); - } - - /** - * Invoke a callable; Ignore all exception. - * @param closure closure to execute - * @return - */ - public static void callQuietly(Callable closure) { - try { - closure.call(); - } catch (Throwable e) { - // Ignore all Throwable, - } - } - - @Test - public void testGetCSR() throws Exception { - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); - service.setCertificateClient(client); - PKCS10CertificationRequest csr = - service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - - csr = service.getCSR(conf); - Assert.assertNotNull(csr); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java deleted file mode 100644 index 923440e2382..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; - -import java.net.InetSocketAddress; - -/** - * Helper utility to test containers. - */ -public final class ContainerTestUtils { - - private ContainerTestUtils() { - } - - /** - * Creates an Endpoint class for testing purpose. - * - * @param conf - Conf - * @param address - InetAddres - * @param rpcTimeout - rpcTimeOut - * @return EndPoint - * @throws Exception - */ - public static EndpointStateMachine createEndpoint(Configuration conf, - InetSocketAddress address, int rpcTimeout) throws Exception { - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - long version = - RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class); - - StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( - StorageContainerDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), rpcTimeout, - RetryPolicies.TRY_ONCE_THEN_FAIL).getProxy(); - - StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = - new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); - return new EndpointStateMachine(address, rpcClient, conf); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java deleted file mode 100644 index 5a7c30ca68f..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.test.GenericTestUtils; - -import com.google.protobuf.BlockingService; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import org.mockito.Mockito; - -/** - * Test Endpoint class. - */ -public final class SCMTestUtils { - /** - * Never constructed. - */ - private SCMTestUtils() { - } - - /** - * Starts an RPC server, if configured. - * - * @param conf configuration - * @param addr configured address of RPC server - * @param protocol RPC protocol provided by RPC server - * @param instance RPC protocol implementation instance - * @param handlerCount RPC server handler count - * @return RPC server - * @throws IOException if there is an I/O error while creating RPC server - */ - private static RPC.Server startRpcServer(Configuration conf, - InetSocketAddress addr, Class - protocol, BlockingService instance, int handlerCount) - throws IOException { - RPC.Server rpcServer = new RPC.Builder(conf) - .setProtocol(protocol) - .setInstance(instance) - .setBindAddress(addr.getHostString()) - .setPort(addr.getPort()) - .setNumHandlers(handlerCount) - .setVerbose(false) - .setSecretManager(null) - .build(); - - DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer); - return rpcServer; - } - - - /** - * Start Datanode RPC server. - */ - public static RPC.Server startScmRpcServer(Configuration configuration, - StorageContainerDatanodeProtocol server, - InetSocketAddress rpcServerAddresss, int handlerCount) throws - IOException { - RPC.setProtocolEngine(configuration, - StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - - BlockingService scmDatanodeService = - StorageContainerDatanodeProtocolService. 
- newReflectiveBlockingService( - new StorageContainerDatanodeProtocolServerSideTranslatorPB( - server, Mockito.mock(ProtocolMessageMetrics.class))); - - RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss, - StorageContainerDatanodeProtocolPB.class, scmDatanodeService, - handlerCount); - - scmServer.start(); - return scmServer; - } - - public static InetSocketAddress getReuseableAddress() throws IOException { - try (ServerSocket socket = new ServerSocket(0)) { - socket.setReuseAddress(true); - int port = socket.getLocalPort(); - String addr = InetAddress.getLoopbackAddress().getHostAddress(); - return new InetSocketAddress(addr, port); - } - } - - public static OzoneConfiguration getConf() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils - .getRandomizedTempPath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils - .getRandomizedTempPath()); - return conf; - } - - public static OzoneConfiguration getOzoneConf() { - return new OzoneConfiguration(); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java deleted file mode 100644 index c4b29ba2722..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ /dev/null @@ -1,355 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocol.VersionResponse; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * SCM RPC mock class. - */ -public class ScmTestMock implements StorageContainerDatanodeProtocol { - private int rpcResponseDelay; - private AtomicInteger heartbeatCount = new AtomicInteger(0); - private AtomicInteger rpcCount = new AtomicInteger(0); - private AtomicInteger containerReportsCount = new AtomicInteger(0); - private String clusterId; - private String scmId; - - public ScmTestMock() { - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - } - - // Map of datanode to containers - private Map> nodeContainers = - new HashMap<>(); - private Map nodeReports = new HashMap<>(); - private AtomicInteger commandStatusReport = new AtomicInteger(0); - private List cmdStatusList = new ArrayList<>(); - private List scmCommandRequests = new ArrayList<>(); - /** - * Returns the number of heartbeats made to this class. - * - * @return int - */ - public int getHeartbeatCount() { - return heartbeatCount.get(); - } - - /** - * Returns the number of RPC calls made to this mock class instance. - * - * @return - Number of RPC calls serviced by this class. - */ - public int getRpcCount() { - return rpcCount.get(); - } - - /** - * Gets the RPC response delay. - * - * @return delay in milliseconds. - */ - public int getRpcResponseDelay() { - return rpcResponseDelay; - } - - /** - * Sets the RPC response delay. - * - * @param rpcResponseDelay - delay in milliseconds. 
- */ - public void setRpcResponseDelay(int rpcResponseDelay) { - this.rpcResponseDelay = rpcResponseDelay; - } - - /** - * Returns the number of container reports server has seen. - * @return int - */ - public int getContainerReportsCount() { - return containerReportsCount.get(); - } - - /** - * Returns the number of containers that have been reported so far. - * @return - count of reported containers. - */ - public long getContainerCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.size(); - }).sum(); - } - - /** - * Get the number keys reported from container reports. - * @return - number of keys reported. - */ - public long getKeyCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getKeyCount(); - }).sum(); - }).sum(); - } - - /** - * Get the number of bytes used from container reports. - * @return - number of bytes used. - */ - public long getBytesUsed() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getUsed(); - }).sum(); - }).sum(); - } - - /** - * Returns SCM version. - * - * @return Version info. - */ - @Override - public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto - getVersion(StorageContainerDatanodeProtocolProtos - .SCMVersionRequestProto unused) throws IOException { - rpcCount.incrementAndGet(); - sleepIfNeeded(); - VersionInfo versionInfo = VersionInfo.getLatestVersion(); - return VersionResponse.newBuilder() - .setVersion(versionInfo.getVersion()) - .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription()) - .addValue(OzoneConsts.SCM_ID, scmId) - .addValue(OzoneConsts.CLUSTER_ID, clusterId) - .build().getProtobufMessage(); - - } - - private void sleepIfNeeded() { - if (getRpcResponseDelay() > 0) { - try { - Thread.sleep(getRpcResponseDelay()); - } catch (InterruptedException ex) { - // Just ignore this exception. - } - } - } - - /** - * Used by data node to send a Heartbeat. - * - * @param heartbeat - node heartbeat. - * @return - SCMHeartbeatResponseProto - * @throws IOException - */ - @Override - public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto - sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException { - rpcCount.incrementAndGet(); - heartbeatCount.incrementAndGet(); - if (heartbeat.getCommandStatusReportsCount() != 0) { - for (CommandStatusReportsProto statusReport : heartbeat - .getCommandStatusReportsList()) { - cmdStatusList.addAll(statusReport.getCmdStatusList()); - commandStatusReport.incrementAndGet(); - } - } - sleepIfNeeded(); - return SCMHeartbeatResponseProto.newBuilder().addAllCommands( - scmCommandRequests) - .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()) - .build(); - } - - /** - * Register Datanode. - * - * @param datanodeDetailsProto DatanodDetailsProto. - * @return SCM Command. 
- */ - @Override - public StorageContainerDatanodeProtocolProtos - .SCMRegisteredResponseProto register( - DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport, - ContainerReportsProto containerReportsRequestProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - rpcCount.incrementAndGet(); - updateNodeReport(datanodeDetailsProto, nodeReport); - updateContainerReport(containerReportsRequestProto, datanodeDetailsProto); - sleepIfNeeded(); - return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto - .newBuilder().setClusterID(UUID.randomUUID().toString()) - .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode( - StorageContainerDatanodeProtocolProtos - .SCMRegisteredResponseProto.ErrorCode.success).build(); - } - - /** - * Update nodeReport. - * @param datanodeDetailsProto - * @param nodeReport - */ - public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto, - NodeReportProto nodeReport) { - DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( - datanodeDetailsProto); - NodeReportProto.Builder nodeReportProto = NodeReportProto.newBuilder(); - - List storageReports = - nodeReport.getStorageReportList(); - - for(StorageReportProto report : storageReports) { - nodeReportProto.addStorageReport(report); - } - - nodeReports.put(datanode, nodeReportProto.build()); - - } - - /** - * Update the cotainerReport. - * - * @param reports Container report - * @param datanodeDetails DataNode Info - * @throws IOException - */ - public void updateContainerReport( - StorageContainerDatanodeProtocolProtos.ContainerReportsProto reports, - DatanodeDetailsProto datanodeDetails) throws IOException { - Preconditions.checkNotNull(reports); - containerReportsCount.incrementAndGet(); - DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( - datanodeDetails); - if (reports.getReportsCount() > 0) { - Map containers = nodeContainers.get(datanode); - if (containers == null) { - containers = new LinkedHashMap(); - nodeContainers.put(datanode, containers); - } - - for (ContainerReplicaProto report : reports - .getReportsList()) { - containers.put(report.getContainerID(), report); - } - } - } - - - /** - * Return the number of StorageReports of a datanode. - * @param datanodeDetails - * @return count of containers of a datanode - */ - public int getNodeReportsCount(DatanodeDetails datanodeDetails) { - return nodeReports.get(datanodeDetails).getStorageReportCount(); - } - - /** - * Returns the number of containers of a datanode. - * @param datanodeDetails - * @return count of storage reports of a datanode - */ - public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) { - Map cr = - nodeContainers.get(datanodeDetails); - if(cr != null) { - return cr.size(); - } - return 0; - } - - /** - * Reset the mock Scm for test to get a fresh start without rebuild MockScm. - */ - public void reset() { - heartbeatCount.set(0); - rpcCount.set(0); - containerReportsCount.set(0); - nodeContainers.clear(); - - } - - public int getCommandStatusReportCount() { - return commandStatusReport.get(); - } - - public List getCmdStatusList() { - return cmdStatusList; - } - - public List getScmCommandRequests() { - return scmCommandRequests; - } - - public void clearScmCommandRequests() { - scmCommandRequests.clear(); - } - - public void addScmCommandRequest(SCMCommandProto scmCmd) { - scmCommandRequests.add(scmCmd); - } - - /** - * Set scmId. - * @param id - */ - public void setScmId(String id) { - this.scmId = id; - } - - /** - * Set scmId. 
- * @return scmId - */ - public String getScmId() { - return scmId; - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java deleted file mode 100644 index a4e0028e108..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.junit.Assert; -import org.junit.Test; - -/** - * This class tests ChunkLayOutVersion. - */ -public class TestChunkLayOutVersion { - - @Test - public void testChunkLayOutVersion() { - - // Check Latest Version and description - Assert.assertEquals(1, ChunkLayOutVersion.getLatestVersion().getVersion()); - Assert.assertEquals("Data without checksums.", ChunkLayOutVersion - .getLatestVersion().getDescription()); - - Assert.assertEquals(1, ChunkLayOutVersion.getAllVersions().length); - - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java deleted file mode 100644 index b6584d17017..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
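SCMTestUtils, ScmTestMock and ContainerTestUtils above are meant to be combined: the mock SCM is exposed through a real RPC server and a datanode endpoint is pointed at it. Below is a minimal sketch of that wiring, assuming the helper signatures shown above and that EndpointStateMachine (from the statemachine package this patch also removes) is closeable; the class wrapper, handler count and timeout values are illustrative assumptions.

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;

public final class MockScmWiringSketch {
  private MockScmWiringSketch() {
  }

  /**
   * Boots ScmTestMock behind a real RPC server and points a datanode
   * endpoint at it, mirroring how the endpoint tests use these helpers.
   */
  public static void exercise() throws Exception {
    OzoneConfiguration conf = SCMTestUtils.getConf();
    ScmTestMock mock = new ScmTestMock();
    InetSocketAddress address = SCMTestUtils.getReuseableAddress();

    RPC.Server scmServer =
        SCMTestUtils.startScmRpcServer(conf, mock, address, 10);
    try (EndpointStateMachine endpoint =
        ContainerTestUtils.createEndpoint(conf, address, 1000)) {
      // The version request argument is unused by the client translator,
      // so null is passed here, as in the removed endpoint tests.
      endpoint.getEndPoint().getVersion(null);
      System.out.println("RPC calls seen by mock: " + mock.getRpcCount());
    } finally {
      scmServer.stop();
    }
  }
}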
- */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; - - -/** - * Test ContainerCache with evictions. - */ -public class TestContainerCache { - private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void createContainerDB(OzoneConfiguration conf, File dbFile) - throws Exception { - MetadataStore store = MetadataStoreBuilder.newBuilder().setConf(conf) - .setCreateIfMissing(true).setDbFile(dbFile).build(); - - // we close since the SCM pre-creates containers. - // we will open and put Db handle into a cache when keys are being created - // in a container. - - store.close(); - } - - @Test - public void testContainerCacheEviction() throws Exception { - File root = new File(testRoot); - root.mkdirs(); - - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); - - ContainerCache cache = ContainerCache.getInstance(conf); - File containerDir1 = new File(root, "cont1"); - File containerDir2 = new File(root, "cont2"); - File containerDir3 = new File(root, "cont3"); - File containerDir4 = new File(root, "cont4"); - - - createContainerDB(conf, containerDir1); - createContainerDB(conf, containerDir2); - createContainerDB(conf, containerDir3); - createContainerDB(conf, containerDir4); - - // Get 2 references out of the same db and verify the objects are same. - ReferenceCountedDB db1 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(1, db1.getReferenceCount()); - ReferenceCountedDB db2 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(2, db2.getReferenceCount()); - Assert.assertEquals(2, db1.getReferenceCount()); - Assert.assertEquals(db1, db2); - - // add one more references to ContainerCache. - ReferenceCountedDB db3 = cache.getDB(2, "RocksDB", - containerDir2.getPath(), conf); - Assert.assertEquals(1, db3.getReferenceCount()); - - // and close the reference - db3.close(); - Assert.assertEquals(0, db3.getReferenceCount()); - - Assert.assertTrue(cache.isFull()); - - // add one more reference to ContainerCache and verify that it will not - // evict the least recent entry as it has reference. - ReferenceCountedDB db4 = cache.getDB(3, "RocksDB", - containerDir3.getPath(), conf); - Assert.assertEquals(1, db4.getReferenceCount()); - - Assert.assertEquals(2, cache.size()); - Assert.assertNotNull(cache.get(containerDir1.getPath())); - Assert.assertNull(cache.get(containerDir2.getPath())); - - // Now close both the references for container1 - db1.close(); - db2.close(); - Assert.assertEquals(0, db1.getReferenceCount()); - Assert.assertEquals(0, db2.getReferenceCount()); - - - // The reference count for container1 is 0 but it is not evicted. 
- ReferenceCountedDB db5 = cache.getDB(1, "RocksDB", - containerDir1.getPath(), conf); - Assert.assertEquals(1, db5.getReferenceCount()); - Assert.assertEquals(db1, db5); - db5.close(); - db4.close(); - - - // Decrementing reference count below zero should fail. - thrown.expect(IllegalArgumentException.class); - db5.close(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java deleted file mode 100644 index 5cabef295f3..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeLayOutVersion.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import org.junit.Assert; -import org.junit.Test; - -/** - * This class tests DatanodeLayOutVersion. - */ -public class TestDatanodeLayOutVersion { - - @Test - public void testDatanodeLayOutVersion() { - // Check Latest Version and description - Assert.assertEquals(1, DataNodeLayoutVersion.getLatestVersion() - .getVersion()); - Assert.assertEquals("HDDS Datanode LayOut Version 1", DataNodeLayoutVersion - .getLatestVersion().getDescription()); - Assert.assertEquals(DataNodeLayoutVersion.getAllVersions().length, - DataNodeLayoutVersion.getAllVersions().length); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java deleted file mode 100644 index 0f3e7d12d22..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ /dev/null @@ -1,444 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; -import static org.junit.Assert.assertTrue; - -/** - * Tests the datanode state machine class and its states. 
- */ -public class TestDatanodeStateMachine { - private static final Logger LOG = - LoggerFactory.getLogger(TestDatanodeStateMachine.class); - // Changing it to 1, as current code checks for multiple scm directories, - // and fail if exists - private final int scmServerCount = 1; - private List serverAddresses; - private List scmServers; - private List mockServers; - private ExecutorService executorService; - private Configuration conf; - private File testRoot; - - @Before - public void setUp() throws Exception { - conf = SCMTestUtils.getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, - TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - serverAddresses = new ArrayList<>(); - scmServers = new ArrayList<>(); - mockServers = new ArrayList<>(); - for (int x = 0; x < scmServerCount; x++) { - int port = SCMTestUtils.getReuseableAddress().getPort(); - String address = "127.0.0.1"; - serverAddresses.add(address + ":" + port); - ScmTestMock mock = new ScmTestMock(); - scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock, - new InetSocketAddress(address, port), 10)); - mockServers.add(mock); - } - - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, - serverAddresses.toArray(new String[0])); - - String path = GenericTestUtils - .getTempPath(TestDatanodeStateMachine.class.getSimpleName()); - testRoot = new File(path); - if (!testRoot.mkdirs()) { - LOG.info("Required directories {} already exist.", testRoot); - } - - File dataDir = new File(testRoot, "data"); - conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath()); - if (!dataDir.mkdirs()) { - LOG.info("Data dir create failed."); - } - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - new File(testRoot, "scm").getAbsolutePath()); - path = new File(testRoot, "datanodeID").getAbsolutePath(); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path); - executorService = HadoopExecutors.newCachedThreadPool( - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Test Data Node State Machine Thread - %d").build()); - } - - @After - public void tearDown() throws Exception { - try { - if (executorService != null) { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown properly."); - } - } catch (InterruptedException e) { - LOG.error("Error attempting to shutdown.", e); - executorService.shutdownNow(); - } - } - for (RPC.Server s : scmServers) { - s.stop(); - } - } catch (Exception e) { - //ignore all execption from the shutdown - } finally { - FileUtil.fullyDelete(testRoot); - } - } - - /** - * Assert that starting statemachine executes the Init State. 
- */ - @Test - public void testStartStopDatanodeStateMachine() throws IOException, - InterruptedException, TimeoutException { - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null)) { - stateMachine.startDaemon(); - SCMConnectionManager connectionManager = - stateMachine.getConnectionManager(); - GenericTestUtils.waitFor( - () -> { - int size = connectionManager.getValues().size(); - LOG.info("connectionManager.getValues().size() is {}", size); - return size == 1; - }, 1000, 30000); - - stateMachine.stopDaemon(); - assertTrue(stateMachine.isDaemonStopped()); - } - } - - /** - * This test explores the state machine by invoking each call in sequence just - * like as if the state machine would call it. Because this is a test we are - * able to verify each of the assumptions. - *

- * Here is what happens at a high level. - *

- * 1. We start the DatanodeStateMachine in the INIT State. - *

- * 2. We invoke the INIT state task. - *

- * 3. That creates a set of RPC endpoints that are ready to connect to SCMs. - *

- * 4. We assert that we have moved to the running state for the - * DatanodeStateMachine. - *

- * 5. We get the task for the Running State - Executing that running state - * makes the first network call of the state machine. The Endpoint is in - * the GETVERSION State and we invoke the task. - *

- * 6. We assert that this call was a success by checking that each of the - * endpoints now has the version response it got from the SCM server it - * was talking to, and that each of the mock servers serviced one RPC call. - *

- * 7. Since the Register is done now, next calls to get task will return - * HeartbeatTask, which sends heartbeats to SCM. We assert that we get right - * task from sub-system below. - * - * @throws IOException - */ - @Test - public void testDatanodeStateContext() throws IOException, - InterruptedException, ExecutionException, TimeoutException { - // There is no mini cluster started in this test, - // create a ID file so that state machine could load a fake datanode ID. - File idPath = new File( - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR), - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); - idPath.delete(); - DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - datanodeDetails.setPort(port); - ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath); - - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - - DatanodeState task = - stateMachine.getContext().getTask(); - Assert.assertEquals(InitDatanodeState.class, task.getClass()); - - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - // We assert that each of the is in State GETVERSION. - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - endpoint.getState()); - } - - // The Datanode has moved into Running State, since endpoints are created. - // We move to running state when we are ready to issue RPC calls to SCMs. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - // If we had called context.execute instead of calling into each state - // this would have happened automatically. - stateMachine.getContext().setState(newState); - task = stateMachine.getContext().getTask(); - Assert.assertEquals(RunningDatanodeState.class, task.getClass()); - - // This execute will invoke getVersion calls against all SCM endpoints - // that we know of. - - task.execute(executorService); - newState = task.await(10, TimeUnit.SECONDS); - - // Wait for GetVersion call (called by task.execute) to finish. After - // Earlier task.execute called into GetVersion. Wait for the execution - // to finish and the endPointState to move to REGISTER state. - GenericTestUtils.waitFor(() -> { - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - if (endpoint.getState() != - EndpointStateMachine.EndPointStates.REGISTER) { - return false; - } - } - return true; - }, 1000, 50000); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - for (EndpointStateMachine endpoint : - stateMachine.getConnectionManager().getValues()) { - - // Since the earlier task.execute called into GetVersion, the - // endPointState Machine should move to REGISTER state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - endpoint.getState()); - - // We assert that each of the end points have gotten a version from the - // SCM Server. 
- Assert.assertNotNull(endpoint.getVersion()); - } - - // We can also assert that all mock servers have received only one RPC - // call at this point of time. - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(1, mock.getRpcCount()); - } - - // This task is the Running task, but running task executes tasks based - // on the state of Endpoints, hence this next call will be a Register at - // the endpoint RPC level. - task = stateMachine.getContext().getTask(); - task.execute(executorService); - newState = task.await(2, TimeUnit.SECONDS); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(2, mock.getRpcCount()); - } - - // This task is the Running task, but running task executes tasks based - // on the state of Endpoints, hence this next call will be a - // HeartbeatTask at the endpoint RPC level. - task = stateMachine.getContext().getTask(); - task.execute(executorService); - newState = task.await(2, TimeUnit.SECONDS); - - // If we are in running state, we should be in running. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - newState); - - - for (ScmTestMock mock : mockServers) { - Assert.assertEquals(1, mock.getHeartbeatCount()); - } - } - } - - @Test - public void testDatanodeStateMachineWithIdWriteFail() throws Exception { - - File idPath = new File( - conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR), - OzoneConsts.OZONE_SCM_DATANODE_ID_FILE_DEFAULT); - idPath.delete(); - DatanodeDetails datanodeDetails = getNewDatanodeDetails(); - DatanodeDetails.Port port = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); - datanodeDetails.setPort(port); - - try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - - DatanodeState task = - stateMachine.getContext().getTask(); - Assert.assertEquals(InitDatanodeState.class, task.getClass()); - - //Set the idPath to read only, state machine will fail to write - // datanodeId file and set the state to shutdown. - idPath.getParentFile().mkdirs(); - idPath.getParentFile().setReadOnly(); - - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - - //As, we have changed the permission of idPath to readable, writing - // will fail and it will set the state to shutdown. - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - newState); - - //Setting back to writable. - idPath.getParentFile().setWritable(true); - } - } - - /** - * Test state transition with a list of invalid scm configurations, - * and verify the state transits to SHUTDOWN each time. 
- */ - @Test - public void testDatanodeStateMachineWithInvalidConfiguration() - throws Exception { - List> confList = - new ArrayList<>(); - confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, "")); - - // Invalid ozone.scm.names - /** Empty **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "")); - /** Invalid schema **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "x..y")); - /** Invalid port **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz")); - /** Port out of range **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456")); - // Invalid ozone.scm.datanode.id.dir - /** Empty **/ - confList.add(Maps.immutableEntry( - ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, "")); - - confList.forEach((entry) -> { - Configuration perTestConf = new Configuration(conf); - perTestConf.setStrings(entry.getKey(), entry.getValue()); - LOG.info("Test with {} = {}", entry.getKey(), entry.getValue()); - try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - getNewDatanodeDetails(), perTestConf, null, null)) { - DatanodeStateMachine.DatanodeStates currentState = - stateMachine.getContext().getState(); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, - currentState); - DatanodeState task = - stateMachine.getContext().getTask(); - task.execute(executorService); - DatanodeStateMachine.DatanodeStates newState = - task.await(2, TimeUnit.SECONDS); - Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - newState); - } catch (Exception e) { - Assert.fail("Unexpected exception found"); - } - }); - } - - private DatanodeDetails getNewDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - return DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort) - .build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java deleted file mode 100644 index c6fa8d62102..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -/** - * This class is used to test the KeyValueContainerData. - */ -public class TestKeyValueContainerData { - - private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5); - @Test - public void testKeyValueData() { - long containerId = 1L; - ContainerProtos.ContainerType containerType = ContainerProtos - .ContainerType.KeyValueContainer; - String path = "/tmp"; - String containerDBType = "RocksDB"; - ContainerProtos.ContainerDataProto.State state = - ContainerProtos.ContainerDataProto.State.CLOSED; - AtomicLong val = new AtomicLong(0); - UUID pipelineId = UUID.randomUUID(); - UUID datanodeId = UUID.randomUUID(); - - KeyValueContainerData kvData = new KeyValueContainerData(containerId, - MAXSIZE, pipelineId.toString(), datanodeId.toString()); - - assertEquals(containerType, kvData.getContainerType()); - assertEquals(containerId, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData - .getState()); - assertEquals(0, kvData.getMetadata().size()); - assertEquals(0, kvData.getNumPendingDeletionBlocks()); - assertEquals(val.get(), kvData.getReadBytes()); - assertEquals(val.get(), kvData.getWriteBytes()); - assertEquals(val.get(), kvData.getReadCount()); - assertEquals(val.get(), kvData.getWriteCount()); - assertEquals(val.get(), kvData.getKeyCount()); - assertEquals(val.get(), kvData.getNumPendingDeletionBlocks()); - assertEquals(MAXSIZE, kvData.getMaxSize()); - - kvData.setState(state); - kvData.setContainerDBType(containerDBType); - kvData.setChunksPath(path); - kvData.setMetadataPath(path); - kvData.incrReadBytes(10); - kvData.incrWriteBytes(10); - kvData.incrReadCount(); - kvData.incrWriteCount(); - kvData.incrKeyCount(); - kvData.incrPendingDeletionBlocks(1); - - assertEquals(state, kvData.getState()); - assertEquals(containerDBType, kvData.getContainerDBType()); - assertEquals(path, kvData.getChunksPath()); - assertEquals(path, kvData.getMetadataPath()); - - assertEquals(10, kvData.getReadBytes()); - assertEquals(10, kvData.getWriteBytes()); - assertEquals(1, kvData.getReadCount()); - assertEquals(1, kvData.getWriteCount()); - assertEquals(1, kvData.getKeyCount()); - assertEquals(1, kvData.getNumPendingDeletionBlocks()); - assertEquals(pipelineId.toString(), kvData.getOriginPipelineId()); - assertEquals(datanodeId.toString(), kvData.getOriginNodeId()); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java deleted file mode 100644 index 58892227a65..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.ozone.common.InconsistentStorageStateException; -import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import static org.junit.Assert.*; - -/** - * This class tests {@link DatanodeVersionFile}. - */ -public class TestDatanodeVersionFile { - - private File versionFile; - private DatanodeVersionFile dnVersionFile; - private Properties properties; - - private String storageID; - private String clusterID; - private String datanodeUUID; - private long cTime; - private int lv; - - @Rule - public TemporaryFolder folder= new TemporaryFolder(); - - @Before - public void setup() throws IOException { - versionFile = folder.newFile("Version"); - storageID = UUID.randomUUID().toString(); - clusterID = UUID.randomUUID().toString(); - datanodeUUID = UUID.randomUUID().toString(); - cTime = Time.now(); - lv = DataNodeLayoutVersion.getLatestVersion().getVersion(); - - dnVersionFile = new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, cTime, lv); - - dnVersionFile.createVersionFile(versionFile); - - properties = dnVersionFile.readFrom(versionFile); - } - - @Test - public void testCreateAndReadVersionFile() throws IOException{ - - //Check VersionFile exists - assertTrue(versionFile.exists()); - - assertEquals(storageID, HddsVolumeUtil.getStorageID( - properties, versionFile)); - assertEquals(clusterID, HddsVolumeUtil.getClusterID( - properties, versionFile, clusterID)); - assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID( - properties, versionFile, datanodeUUID)); - assertEquals(cTime, HddsVolumeUtil.getCreationTime( - properties, versionFile)); - assertEquals(lv, HddsVolumeUtil.getLayOutVersion( - properties, versionFile)); - } - - @Test - public void testIncorrectClusterId() throws IOException{ - try { - String randomClusterID = UUID.randomUUID().toString(); - HddsVolumeUtil.getClusterID(properties, versionFile, - randomClusterID); - fail("Test failure in testIncorrectClusterId"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex); - } - } - - @Test - public void testVerifyCTime() throws IOException{ - long invalidCTime = -10; - dnVersionFile = new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, invalidCTime, lv); - dnVersionFile.createVersionFile(versionFile); - properties = dnVersionFile.readFrom(versionFile); - - try { - HddsVolumeUtil.getCreationTime(properties, versionFile); - fail("Test failure in testVerifyCTime"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid Creation time in " + - "Version File : " + versionFile, ex); - } - } - - @Test - public void testVerifyLayOut() throws IOException{ - int invalidLayOutVersion = 100; - dnVersionFile = 
new DatanodeVersionFile( - storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion); - dnVersionFile.createVersionFile(versionFile); - Properties props = dnVersionFile.readFrom(versionFile); - - try { - HddsVolumeUtil.getLayOutVersion(props, versionFile); - fail("Test failure in testVerifyLayOut"); - } catch (InconsistentStorageStateException ex) { - GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java deleted file mode 100644 index c611ccb28e7..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileSystemTestHelper; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This class tests create/read .container files. - */ -public class TestContainerDataYaml { - - private static long testContainerID = 1234; - - private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - - private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5); - - /** - * Creates a .container file. cleanup() should be called at the end of the - * test when container file is created. 
- */ - private File createContainerFile(long containerID) throws IOException { - new File(testRoot).mkdirs(); - - String containerPath = containerID + ".container"; - - KeyValueContainerData keyValueContainerData = new KeyValueContainerData( - containerID, MAXSIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - keyValueContainerData.setContainerDBType("RocksDB"); - keyValueContainerData.setMetadataPath(testRoot); - keyValueContainerData.setChunksPath(testRoot); - - File containerFile = new File(testRoot, containerPath); - - // Create .container file with ContainerData - ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, keyValueContainerData, containerFile); - - //Check .container file exists or not. - assertTrue(containerFile.exists()); - - return containerFile; - } - - private void cleanup() { - FileUtil.fullyDelete(new File(testRoot)); - } - - @Test - public void testCreateContainerFile() throws IOException { - long containerID = testContainerID++; - - File containerFile = createContainerFile(containerID); - - // Read from .container file, and verify data. - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(containerID, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(containerFile.getParent(), kvData.getMetadataPath()); - assertEquals(containerFile.getParent(), kvData.getChunksPath()); - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData - .getState()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(0, kvData.getMetadata().size()); - assertEquals(MAXSIZE, kvData.getMaxSize()); - - // Update ContainerData. - kvData.addMetadata("VOLUME", "hdfs"); - kvData.addMetadata("OWNER", "ozone"); - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - - - ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType - .KeyValueContainer, kvData, containerFile); - - // Reading newly updated data from .container file - kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile( - containerFile); - - // verify data. 
- assertEquals(containerID, kvData.getContainerID()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(containerFile.getParent(), kvData.getMetadataPath()); - assertEquals(containerFile.getParent(), kvData.getChunksPath()); - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData - .getState()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(2, kvData.getMetadata().size()); - assertEquals("hdfs", kvData.getMetadata().get("VOLUME")); - assertEquals("ozone", kvData.getMetadata().get("OWNER")); - assertEquals(MAXSIZE, kvData.getMaxSize()); - } - - @Test - public void testIncorrectContainerFile() throws IOException{ - try { - String containerFile = "incorrect.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - fail("testIncorrectContainerFile failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("No enum constant", ex); - } - } - - - @Test - public void testCheckBackWardCompatibilityOfContainerFile() throws - IOException { - // This test is for if we upgrade, and then .container files added by new - // server will have new fields added to .container file, after a while we - // decided to rollback. Then older ozone can read .container files - // created or not. - - try { - String containerFile = "additionalfields.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - ContainerUtils.verifyChecksum(kvData); - - //Checking the Container file data is consistent or not - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData - .getState()); - assertEquals("RocksDB", kvData.getContainerDBType()); - assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData - .getContainerType()); - assertEquals(9223372036854775807L, kvData.getContainerID()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getChunksPath()); - assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData - .getMetadataPath()); - assertEquals(1, kvData.getLayOutVersion()); - assertEquals(2, kvData.getMetadata().size()); - - } catch (Exception ex) { - ex.printStackTrace(); - fail("testCheckBackWardCompatibilityOfContainerFile failed"); - } - } - - /** - * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}. - */ - @Test - public void testChecksumInContainerFile() throws IOException { - long containerID = testContainerID++; - - File containerFile = createContainerFile(containerID); - - // Read from .container file, and verify data. - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - ContainerUtils.verifyChecksum(kvData); - - cleanup(); - } - - /** - * Test to verify incorrect checksum is detected. 
- */ - @Test - public void testIncorrectChecksum() { - try { - String containerFile = "incorrect.checksum.container"; - //Get file from resources folder - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource(containerFile).getFile()); - KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(file); - ContainerUtils.verifyChecksum(kvData); - fail("testIncorrectChecksum failed"); - } catch (Exception ex) { - GenericTestUtils.assertExceptionContains("Container checksum error for " + - "ContainerID:", ex); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java deleted file mode 100644 index e1e7119727b..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.interfaces.Container; - -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Class used to test ContainerSet operations. - */ -public class TestContainerSet { - - @Test - public void testAddGetRemoveContainer() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(); - long containerId = 100L; - ContainerProtos.ContainerDataProto.State state = ContainerProtos - .ContainerDataProto.State.CLOSED; - - KeyValueContainerData kvData = new KeyValueContainerData(containerId, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - kvData.setState(state); - KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new - OzoneConfiguration()); - - //addContainer - boolean result = containerSet.addContainer(keyValueContainer); - assertTrue(result); - try { - result = containerSet.addContainer(keyValueContainer); - fail("Adding same container ID twice should fail."); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Container already exists with" + - " container Id " + containerId, ex); - } - - //getContainer - KeyValueContainer container = (KeyValueContainer) containerSet - .getContainer(containerId); - KeyValueContainerData keyValueContainerData = (KeyValueContainerData) - container.getContainerData(); - assertEquals(containerId, keyValueContainerData.getContainerID()); - assertEquals(state, keyValueContainerData.getState()); - assertNull(containerSet.getContainer(1000L)); - - //removeContainer - assertTrue(containerSet.removeContainer(containerId)); - assertFalse(containerSet.removeContainer(1000L)); - } - - @Test - public void testIteratorsAndCount() throws StorageContainerException { - - ContainerSet containerSet = createContainerSet(); - - assertEquals(10, containerSet.containerCount()); - - Iterator> iterator = containerSet.getContainerIterator(); - - int count = 0; - while(iterator.hasNext()) { - Container kv = iterator.next(); - ContainerData containerData = kv.getContainerData(); - long containerId = containerData.getContainerID(); - if 
(containerId%2 == 0) { - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - containerData.getState()); - } else { - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, - containerData.getState()); - } - count++; - } - assertEquals(10, count); - - //Using containerMapIterator. - Iterator>> containerMapIterator = containerSet - .getContainerMapIterator(); - - count = 0; - while (containerMapIterator.hasNext()) { - Container kv = containerMapIterator.next().getValue(); - ContainerData containerData = kv.getContainerData(); - long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - containerData.getState()); - } else { - assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, - containerData.getState()); - } - count++; - } - assertEquals(10, count); - - } - - @Test - public void testIteratorPerVolume() throws StorageContainerException { - HddsVolume vol1 = Mockito.mock(HddsVolume.class); - Mockito.when(vol1.getStorageID()).thenReturn("uuid-1"); - HddsVolume vol2 = Mockito.mock(HddsVolume.class); - Mockito.when(vol2.getStorageID()).thenReturn("uuid-2"); - - ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { - KeyValueContainerData kvData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - if (i%2 == 0) { - kvData.setVolume(vol1); - } else { - kvData.setVolume(vol2); - } - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - KeyValueContainer kv = new KeyValueContainer(kvData, new - OzoneConfiguration()); - containerSet.addContainer(kv); - } - - Iterator> iter1 = containerSet.getContainerIterator(vol1); - int count1 = 0; - while (iter1.hasNext()) { - Container c = iter1.next(); - assertEquals(0, (c.getContainerData().getContainerID() % 2)); - count1++; - } - assertEquals(5, count1); - - Iterator> iter2 = containerSet.getContainerIterator(vol2); - int count2 = 0; - while (iter2.hasNext()) { - Container c = iter2.next(); - assertEquals(1, (c.getContainerData().getContainerID() % 2)); - count2++; - } - assertEquals(5, count2); - } - - @Test - public void testGetContainerReport() throws IOException { - - ContainerSet containerSet = createContainerSet(); - - ContainerReportsProto containerReportsRequestProto = containerSet - .getContainerReport(); - - assertEquals(10, containerReportsRequestProto.getReportsList().size()); - } - - - - @Test - public void testListContainer() throws StorageContainerException { - ContainerSet containerSet = createContainerSet(); - - List result = new ArrayList<>(); - containerSet.listContainer(2, 5, result); - - assertEquals(5, result.size()); - - for(ContainerData containerData : result) { - assertTrue(containerData.getContainerID() >=2 && containerData - .getContainerID()<=6); - } - } - - private ContainerSet createContainerSet() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { - KeyValueContainerData kvData = new KeyValueContainerData(i, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - if (i%2 == 0) { - kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); - } else { - kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN); - } - KeyValueContainer kv = new KeyValueContainer(kvData, new - OzoneConfiguration()); - containerSet.addContainer(kv); - } - return containerSet; - } - -} diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java deleted file mode 100644 index fe27eeb02d6..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.collect.Maps; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto - .ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.Map; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.times; -import 
static org.mockito.Mockito.verify; - -/** - * Test-cases to verify the functionality of HddsDispatcher. - */ -public class TestHddsDispatcher { - - @Test - public void testContainerCloseActionWhenFull() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); - - try { - UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(); - - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd); - Mockito.when(context.getParent()).thenReturn(stateMachine); - KeyValueContainerData containerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - dd.getUuidString()); - Container container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - scmId.toString()); - containerSet.addContainer(container); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics); - hddsDispatcher.setScmId(scmId.toString()); - ContainerCommandResponseProto responseOne = hddsDispatcher - .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - responseOne.getResult()); - verify(context, times(0)) - .addContainerActionIfAbsent(Mockito.any(ContainerAction.class)); - containerData.setBytesUsed(Double.valueOf( - StorageUnit.MB.toBytes(950)).longValue()); - ContainerCommandResponseProto responseTwo = hddsDispatcher - .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - responseTwo.getResult()); - verify(context, times(1)) - .addContainerActionIfAbsent(Mockito.any(ContainerAction.class)); - - } finally { - volumeSet.shutdown(); - FileUtils.deleteDirectory(new File(testDir)); - } - - } - - @Test - public void testCreateContainerWithWriteChunk() throws IOException { - String testDir = - GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName()); - try { - UUID scmId = UUID.randomUUID(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = - getWriteChunkRequest(dd.getUuidString(), 1L, 1L); - // send read chunk request and make sure container does not exist - ContainerCommandResponseProto response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - Assert.assertEquals(response.getResult(), - ContainerProtos.Result.CONTAINER_NOT_FOUND); - // send write chunk request without sending create container - response = hddsDispatcher.dispatch(writeChunkRequest, null); - // container should be created as part of write chunk request - 
Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - // send read chunk request to read the chunk written above - response = - hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - Assert.assertEquals(response.getReadChunk().getData(), - writeChunkRequest.getWriteChunk().getData()); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } - } - - @Test - public void testWriteChunkWithCreateContainerFailure() throws IOException { - String testDir = GenericTestUtils.getTempPath( - TestHddsDispatcher.class.getSimpleName()); - try { - UUID scmId = UUID.randomUUID(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testDir); - DatanodeDetails dd = randomDatanodeDetails(); - HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); - ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest( - dd.getUuidString(), 1L, 1L); - - HddsDispatcher mockDispatcher = Mockito.spy(hddsDispatcher); - ContainerCommandResponseProto.Builder builder = ContainerUtils - .getContainerCommandResponse(writeChunkRequest, - ContainerProtos.Result.DISK_OUT_OF_SPACE, ""); - // Return DISK_OUT_OF_SPACE response when writing chunk - // with container creation. - Mockito.doReturn(builder.build()).when(mockDispatcher) - .createContainer(writeChunkRequest); - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(HddsDispatcher.LOG); - // send write chunk request without sending create container - mockDispatcher.dispatch(writeChunkRequest, null); - // verify the error log - assertTrue(logCapturer.getOutput() - .contains("ContainerID " + writeChunkRequest.getContainerID() - + " creation failed : Result: DISK_OUT_OF_SPACE")); - } finally { - FileUtils.deleteDirectory(new File(testDir)); - } - } - - /** - * Creates HddsDispatcher instance with given infos. - * @param dd datanode detail info. - * @param scmId UUID of scm id. - * @param conf configuration be used. - * @return HddsDispatcher HddsDispatcher instance. - * @throws IOException - */ - private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, - OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(); - VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd); - Mockito.when(context.getParent()).thenReturn(stateMachine); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerType containerType : ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - - HddsDispatcher hddsDispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics); - hddsDispatcher.setScmId(scmId.toString()); - return hddsDispatcher; - } - - // This method has to be removed once we move scm/TestUtils.java - // from server-scm project to container-service or to common project. 
- private static DatanodeDetails randomDatanodeDetails() { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - - private ContainerCommandRequestProto getWriteChunkRequest( - String datanodeId, Long containerId, Long localId) { - - ByteString data = ByteString.copyFrom( - UUID.randomUUID().toString().getBytes(UTF_8)); - ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo - .newBuilder() - .setChunkName( - DigestUtils.md5Hex("dummy-key") + "_stream_" - + containerId + "_chunk_" + localId) - .setOffset(0) - .setLen(data.size()) - .setChecksumData(Checksum.getNoChecksumDataProto()) - .build(); - - WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto - .newBuilder() - .setBlockID(new BlockID(containerId, localId) - .getDatanodeBlockIDProtobuf()) - .setChunkData(chunk) - .setData(data); - - return ContainerCommandRequestProto - .newBuilder() - .setContainerID(containerId) - .setCmdType(ContainerProtos.Type.WriteChunk) - .setDatanodeUuid(datanodeId) - .setWriteChunk(writeChunkRequest) - .build(); - } - - /** - * Creates container read chunk request using input container write chunk - * request. - * - * @param writeChunkRequest - Input container write chunk request - * @return container read chunk request - */ - private ContainerCommandRequestProto getReadChunkRequest( - ContainerCommandRequestProto writeChunkRequest) { - WriteChunkRequestProto writeChunk = writeChunkRequest.getWriteChunk(); - ContainerProtos.ReadChunkRequestProto.Builder readChunkRequest = - ContainerProtos.ReadChunkRequestProto.newBuilder() - .setBlockID(writeChunk.getBlockID()) - .setChunkData(writeChunk.getChunkData()); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.ReadChunk) - .setContainerID(writeChunk.getBlockID().getContainerID()) - .setTraceID(writeChunkRequest.getTraceID()) - .setDatanodeUuid(writeChunkRequest.getDatanodeUuid()) - .setReadChunk(readChunkRequest) - .build(); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java deleted file mode 100644 index 07c78c04989..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Datanode container related test-cases. - */ -package org.apache.hadoop.ozone.container.common.impl; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java deleted file mode 100644 index a6ba103174e..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.interfaces; - -import com.google.common.collect.Maps; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.mockito.Mockito; - -import java.util.Map; - -/** - * Tests Handler interface. 
- */ -public class TestHandler { - @Rule - public TestRule timeout = new Timeout(300000); - - private Configuration conf; - private HddsDispatcher dispatcher; - private ContainerSet containerSet; - private VolumeSet volumeSet; - private Handler handler; - - @Before - public void setup() throws Exception { - this.conf = new Configuration(); - this.containerSet = Mockito.mock(ContainerSet.class); - this.volumeSet = Mockito.mock(VolumeSet.class); - DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType( - containerType, conf, context, containerSet, volumeSet, metrics)); - } - this.dispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, null, metrics); - } - - @Test - public void testGetKeyValueHandler() throws Exception { - Handler kvHandler = dispatcher.getHandler( - ContainerProtos.ContainerType.KeyValueContainer); - - Assert.assertTrue("getHandlerForContainerType returned incorrect handler", - (kvHandler instanceof KeyValueHandler)); - } - - @Test - public void testGetHandlerForInvalidContainerType() { - // When new ContainerProtos.ContainerType are added, increment the code - // for invalid enum. - ContainerProtos.ContainerType invalidContainerType = - ContainerProtos.ContainerType.forNumber(2); - - Assert.assertEquals("New ContainerType detected. Not an invalid " + - "containerType", invalidContainerType, null); - - Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType); - Assert.assertEquals("Get Handler for Invalid ContainerType should " + - "return null.", dispatcherHandler, null); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index ca3d29dada1..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * SCM Testing and Mocking Utils. 
- */ -package org.apache.hadoop.ozone.container.common; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java deleted file mode 100644 index aae388dd5a1..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.concurrent.ScheduledExecutorService; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * Test cases to test {@link ReportManager}. - */ -public class TestReportManager { - - @Test - public void testReportManagerInit() { - Configuration conf = new OzoneConfiguration(); - StateContext dummyContext = Mockito.mock(StateContext.class); - ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class); - ReportManager.Builder builder = ReportManager.newBuilder(conf); - builder.setStateContext(dummyContext); - builder.addPublisher(dummyPublisher); - ReportManager reportManager = builder.build(); - reportManager.init(); - verify(dummyPublisher, times(1)).init(eq(dummyContext), - any(ScheduledExecutorService.class)); - - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java deleted file mode 100644 index 03f0cd4d816..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.GeneratedMessage; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Test cases to test {@link ReportPublisher}. - */ -public class TestReportPublisher { - - private static Configuration config; - - @BeforeClass - public static void setup() { - config = new OzoneConfiguration(); - } - - /** - * Dummy report publisher for testing. 
- */ - private static class DummyReportPublisher extends ReportPublisher { - - private final long frequency; - private int getReportCount = 0; - - DummyReportPublisher(long frequency) { - this.frequency = frequency; - } - - @Override - protected long getReportFrequency() { - return frequency; - } - - @Override - protected GeneratedMessage getReport() { - getReportCount++; - return null; - } - } - - @Test - public void testReportPublisherInit() { - ReportPublisher publisher = new DummyReportPublisher(0); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService dummyExecutorService = Mockito.mock( - ScheduledExecutorService.class); - publisher.init(dummyContext, dummyExecutorService); - verify(dummyExecutorService, times(1)).schedule(publisher, - 0, TimeUnit.MILLISECONDS); - } - - @Test - public void testScheduledReport() throws InterruptedException { - ReportPublisher publisher = new DummyReportPublisher(100); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Thread.sleep(150); - Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount); - Thread.sleep(100); - Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount); - executorService.shutdown(); - } - - @Test - public void testPublishReport() throws InterruptedException { - ReportPublisher publisher = new DummyReportPublisher(100); - StateContext dummyContext = Mockito.mock(StateContext.class); - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Thread.sleep(150); - executorService.shutdown(); - Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount); - verify(dummyContext, times(1)).addReport(null); - - } - - @Test - public void testCommandStatusPublisher() throws InterruptedException { - StateContext dummyContext = Mockito.mock(StateContext.class); - ReportPublisher publisher = new CommandStatusReportPublisher(); - final Map cmdStatusMap = new ConcurrentHashMap<>(); - when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap); - publisher.setConf(config); - - ScheduledExecutorService executorService = HadoopExecutors - .newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("Unit test ReportManager Thread - %d").build()); - publisher.init(dummyContext, executorService); - Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport()); - - // Insert to status object to state context map and then get the report. - CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder() - .setCmdId(HddsIdFactory.getLongId()) - .setType(Type.deleteBlocksCommand) - .setStatus(Status.PENDING) - .build(); - CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder() - .setCmdId(HddsIdFactory.getLongId()) - .setType(Type.closeContainerCommand) - .setStatus(Status.EXECUTED) - .build(); - cmdStatusMap.put(obj1.getCmdId(), obj1); - cmdStatusMap.put(obj2.getCmdId(), obj2); - // We are not sending the commands whose status is PENDING. 
- Assert.assertEquals("Should publish report with 2 status objects", 1, - ((CommandStatusReportPublisher) publisher).getReport() - .getCmdStatusCount()); - executorService.shutdown(); - } - - /** - * Get a datanode details. - * - * @return DatanodeDetails - */ - private static DatanodeDetails getDatanodeDetails() { - String uuid = UUID.randomUUID().toString(); - Random random = new Random(); - String ipAddress = - random.nextInt(256) + "." + random.nextInt(256) + "." + random - .nextInt(256) + "." + random.nextInt(256); - - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java deleted file mode 100644 index f8c5fe5e275..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.report; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test cases to test ReportPublisherFactory. 
- */ -public class TestReportPublisherFactory { - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Test - public void testGetContainerReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - ReportPublisher publisher = factory - .getPublisherFor(ContainerReportsProto.class); - Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass()); - Assert.assertEquals(conf, publisher.getConf()); - } - - @Test - public void testGetNodeReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - ReportPublisher publisher = factory - .getPublisherFor(NodeReportProto.class); - Assert.assertEquals(NodeReportPublisher.class, publisher.getClass()); - Assert.assertEquals(conf, publisher.getConf()); - } - - @Test - public void testInvalidReportPublisher() { - Configuration conf = new OzoneConfiguration(); - ReportPublisherFactory factory = new ReportPublisherFactory(conf); - exception.expect(RuntimeException.class); - exception.expectMessage("No publisher found for report"); - factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java deleted file mode 100644 index 37615bc7536..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.report; -/** - * This package has test cases for all the report publishers which generates - * reports that are sent to SCM via heartbeat. - */ \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java deleted file mode 100644 index a92f2361382..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.UUID; - -import static java.util.Collections.singletonMap; -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Test cases to verify CloseContainerCommandHandler in datanode. 
- */ -public class TestCloseContainerCommandHandler { - - private static final long CONTAINER_ID = 123L; - - private OzoneContainer ozoneContainer; - private StateContext context; - private XceiverServerSpi writeChannel; - private Container container; - private Handler containerHandler; - private PipelineID pipelineID; - private PipelineID nonExistentPipelineID = PipelineID.randomId(); - - private CloseContainerCommandHandler subject = - new CloseContainerCommandHandler(); - - @Before - public void before() throws Exception { - context = mock(StateContext.class); - DatanodeStateMachine dnStateMachine = mock(DatanodeStateMachine.class); - when(dnStateMachine.getDatanodeDetails()) - .thenReturn(randomDatanodeDetails()); - when(context.getParent()).thenReturn(dnStateMachine); - - pipelineID = PipelineID.randomId(); - - KeyValueContainerData data = new KeyValueContainerData(CONTAINER_ID, GB, - pipelineID.getId().toString(), null); - - container = new KeyValueContainer(data, new OzoneConfiguration()); - ContainerSet containerSet = new ContainerSet(); - containerSet.addContainer(container); - - containerHandler = mock(Handler.class); - ContainerController controller = new ContainerController(containerSet, - singletonMap(ContainerProtos.ContainerType.KeyValueContainer, - containerHandler)); - - writeChannel = mock(XceiverServerSpi.class); - ozoneContainer = mock(OzoneContainer.class); - when(ozoneContainer.getController()).thenReturn(controller); - when(ozoneContainer.getContainerSet()).thenReturn(containerSet); - when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel); - when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(true); - when(writeChannel.isExist(nonExistentPipelineID.getProtobuf())) - .thenReturn(false); - } - - @Test - public void closeContainerWithPipeline() throws Exception { - // close a container that's associated with an existing pipeline - subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel) - .submitRequest(any(), eq(pipelineID.getProtobuf())); - verify(containerHandler, never()) - .quasiCloseContainer(container); - } - - @Test - public void closeContainerWithoutPipeline() throws IOException { - // close a container that's NOT associated with an open pipeline - subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel, never()) - .submitRequest(any(), any()); - // Container in CLOSING state is moved to UNHEALTHY if pipeline does not - // exist. Container should not exist in CLOSING state without a pipeline. 
- verify(containerHandler) - .markContainerUnhealthy(container); - } - - @Test - public void forceCloseQuasiClosedContainer() throws Exception { - // force-close a container that's already quasi closed - container.getContainerData() - .setState(ContainerProtos.ContainerDataProto.State.QUASI_CLOSED); - - subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); - - verify(writeChannel, never()) - .submitRequest(any(), any()); - verify(containerHandler) - .closeContainer(container); - } - - @Test - public void forceCloseOpenContainer() throws Exception { - // force-close a container that's NOT associated with an open pipeline - subject.handle(forceCloseWithoutPipeline(), ozoneContainer, context, null); - - verify(writeChannel, never()) - .submitRequest(any(), any()); - // Container in CLOSING state is moved to UNHEALTHY if pipeline does not - // exist. Container should not exist in CLOSING state without a pipeline. - verify(containerHandler) - .markContainerUnhealthy(container); - } - - @Test - public void forceCloseOpenContainerWithPipeline() throws Exception { - // force-close a container that's associated with an existing pipeline - subject.handle(forceCloseWithPipeline(), ozoneContainer, context, null); - - verify(containerHandler) - .markContainerForClose(container); - verify(writeChannel) - .submitRequest(any(), any()); - verify(containerHandler, never()) - .quasiCloseContainer(container); - verify(containerHandler, never()) - .closeContainer(container); - } - - @Test - public void closeAlreadyClosedContainer() throws Exception { - container.getContainerData() - .setState(ContainerProtos.ContainerDataProto.State.CLOSED); - - // Since the container is already closed, these commands should do nothing, - // neither should they fail - subject.handle(closeWithUnknownPipeline(), ozoneContainer, context, null); - subject.handle(closeWithKnownPipeline(), ozoneContainer, context, null); - - verify(containerHandler, never()) - .markContainerForClose(container); - verify(containerHandler, never()) - .quasiCloseContainer(container); - verify(containerHandler, never()) - .closeContainer(container); - verify(writeChannel, never()) - .submitRequest(any(), any()); - } - - private CloseContainerCommand closeWithKnownPipeline() { - return new CloseContainerCommand(CONTAINER_ID, pipelineID); - } - - private CloseContainerCommand closeWithUnknownPipeline() { - return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID); - } - - private CloseContainerCommand forceCloseWithPipeline() { - return new CloseContainerCommand(CONTAINER_ID, pipelineID, true); - } - - private CloseContainerCommand forceCloseWithoutPipeline() { - return new CloseContainerCommand(CONTAINER_ID, nonExistentPipelineID, true); - } - - /** - * Creates a random DatanodeDetails. 
- * @return DatanodeDetails - */ - private static DatanodeDetails randomDatanodeDetails() { - String ipAddress = "127.0.0.1"; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java deleted file mode 100644 index 05ac76d1439..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests for command handlers. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java deleted file mode 100644 index 606940b5106..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ /dev/null @@ -1,295 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.common.states.endpoint; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; - -import org.junit.Assert; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; - -import java.util.UUID; - -/** - * This class tests the functionality of HeartbeatEndpointTask. - */ -public class TestHeartbeatEndpointTask { - - - @Test - public void testheartbeatWithoutReports() throws Exception { - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithNodeReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(NodeReportProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = 
argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertTrue(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithContainerReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(ContainerReportsProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertTrue(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithCommandStatusReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(CommandStatusReportsProto.getDefaultInstance()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() != 0); - Assert.assertFalse(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithContainerActions() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - 
HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addContainerAction(getContainerAction()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertFalse(heartbeat.hasNodeReport()); - Assert.assertFalse(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() == 0); - Assert.assertTrue(heartbeat.hasContainerActions()); - } - - @Test - public void testheartbeatWithAllReports() throws Exception { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - - StorageContainerDatanodeProtocolClientSideTranslatorPB scm = - Mockito.mock( - StorageContainerDatanodeProtocolClientSideTranslatorPB.class); - ArgumentCaptor argument = ArgumentCaptor - .forClass(SCMHeartbeatRequestProto.class); - Mockito.when(scm.sendHeartbeat(argument.capture())) - .thenAnswer(invocation -> - SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID( - ((SCMHeartbeatRequestProto)invocation.getArgument(0)) - .getDatanodeDetails().getUuid()) - .build()); - - HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask( - conf, context, scm); - context.addReport(NodeReportProto.getDefaultInstance()); - context.addReport(ContainerReportsProto.getDefaultInstance()); - context.addReport(CommandStatusReportsProto.getDefaultInstance()); - context.addContainerAction(getContainerAction()); - endpointTask.call(); - SCMHeartbeatRequestProto heartbeat = argument.getValue(); - Assert.assertTrue(heartbeat.hasDatanodeDetails()); - Assert.assertTrue(heartbeat.hasNodeReport()); - Assert.assertTrue(heartbeat.hasContainerReport()); - Assert.assertTrue(heartbeat.getCommandStatusReportsCount() != 0); - Assert.assertTrue(heartbeat.hasContainerActions()); - } - - /** - * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy. - * - * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB - * - * @return HeartbeatEndpointTask - */ - private HeartbeatEndpointTask getHeartbeatEndpointTask( - StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { - Configuration conf = new OzoneConfiguration(); - StateContext context = new StateContext(conf, DatanodeStates.RUNNING, - Mockito.mock(DatanodeStateMachine.class)); - return getHeartbeatEndpointTask(conf, context, proxy); - - } - - /** - * Creates HeartbeatEndpointTask with the given conf, context and - * StorageContainerManager client side proxy. 
- * - * @param conf Configuration - * @param context StateContext - * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB - * - * @return HeartbeatEndpointTask - */ - private HeartbeatEndpointTask getHeartbeatEndpointTask( - Configuration conf, - StateContext context, - StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { - DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID().toString()) - .setHostName("localhost") - .setIpAddress("127.0.0.1") - .build(); - EndpointStateMachine endpointStateMachine = Mockito - .mock(EndpointStateMachine.class); - Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy); - return HeartbeatEndpointTask.newBuilder() - .setConfig(conf) - .setDatanodeDetails(datanodeDetails) - .setContext(context) - .setEndpointStateMachine(endpointStateMachine) - .build(); - } - - private ContainerAction getContainerAction() { - ContainerAction.Builder builder = ContainerAction.newBuilder(); - builder.setContainerID(1L) - .setAction(ContainerAction.Action.CLOSE) - .setReason(ContainerAction.Reason.CONTAINER_FULL); - return builder.build(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java deleted file mode 100644 index d120a5cd4b7..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.states.endpoint; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java deleted file mode 100644 index fb2f29b6a13..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.io.File; -import java.util.Properties; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Unit tests for {@link HddsVolume}. - */ -public class TestHddsVolume { - - private static final String DATANODE_UUID = UUID.randomUUID().toString(); - private static final String CLUSTER_ID = UUID.randomUUID().toString(); - private static final Configuration CONF = new Configuration(); - private static final String DU_CACHE_FILE = "scmUsed"; - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private File rootDir; - private HddsVolume volume; - private File versionFile; - - @Before - public void setup() throws Exception { - rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR); - volume = new HddsVolume.Builder(folder.getRoot().getPath()) - .datanodeUuid(DATANODE_UUID) - .conf(CONF) - .build(); - versionFile = HddsVolumeUtil.getVersionFile(rootDir); - } - - @Test - public void testHddsVolumeInitialization() throws Exception { - - // The initial state of HddsVolume should be "NOT_FORMATTED" when - // clusterID is not specified and the version file should not be written - // to disk. - assertTrue(volume.getClusterID() == null); - assertEquals(StorageType.DEFAULT, volume.getStorageType()); - assertEquals(HddsVolume.VolumeState.NOT_FORMATTED, - volume.getStorageState()); - assertFalse("Version file should not be created when clusterID is not " + - "known.", versionFile.exists()); - - - // Format the volume with clusterID. - volume.format(CLUSTER_ID); - - // The state of HddsVolume after formatting with clusterID should be - // NORMAL and the version file should exist. 
- assertTrue("Volume format should create Version file", - versionFile.exists()); - assertEquals(volume.getClusterID(), CLUSTER_ID); - assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState()); - } - - @Test - public void testReadPropertiesFromVersionFile() throws Exception { - volume.format(CLUSTER_ID); - - Properties properties = DatanodeVersionFile.readFrom(versionFile); - - String storageID = HddsVolumeUtil.getStorageID(properties, versionFile); - String clusterID = HddsVolumeUtil.getClusterID( - properties, versionFile, CLUSTER_ID); - String datanodeUuid = HddsVolumeUtil.getDatanodeUUID( - properties, versionFile, DATANODE_UUID); - long cTime = HddsVolumeUtil.getCreationTime( - properties, versionFile); - int layoutVersion = HddsVolumeUtil.getLayOutVersion( - properties, versionFile); - - assertEquals(volume.getStorageID(), storageID); - assertEquals(volume.getClusterID(), clusterID); - assertEquals(volume.getDatanodeUuid(), datanodeUuid); - assertEquals(volume.getCTime(), cTime); - assertEquals(volume.getLayoutVersion(), layoutVersion); - } - - @Test - public void testShutdown() throws Exception { - // Return dummy value > 0 for scmUsage so that scm cache file is written - // during shutdown. - GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class); - volume.setScmUsageForTesting(scmUsageMock); - Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100)); - - assertTrue("Available volume should be positive", - volume.getAvailable() > 0); - - // Shutdown the volume. - volume.shutdown(); - - // Volume state should be "NON_EXISTENT" when volume is shutdown. - assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState()); - - // Volume should save scmUsed cache file once volume is shutdown - File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE); - System.out.println("scmUsedFile: " + scmUsedFile); - assertTrue("scmUsed cache file should be saved on shutdown", - scmUsedFile.exists()); - - // Volume.getAvailable() should succeed even when usage thread - // is shutdown. - volume.getAvailable(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java deleted file mode 100644 index 2e267be01e8..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; -import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.FakeTimer; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult.*; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.*; - - -/** - * Tests for {@link HddsVolumeChecker}. - */ -@RunWith(Parameterized.class) -public class TestHddsVolumeChecker { - public static final Logger LOG = LoggerFactory.getLogger( - TestHddsVolumeChecker.class); - - @Rule - public TestName testName = new TestName(); - - @Rule - public Timeout globalTimeout = new Timeout(30_000); - - /** - * Run each test case for each possible value of {@link VolumeCheckResult}. - * Including "null" for 'throw exception'. - * @return - */ - @Parameters(name="{0}") - public static Collection<Object[]> data() { - List<Object[]> values = new ArrayList<>(); - for (VolumeCheckResult result : VolumeCheckResult.values()) { - values.add(new Object[] {result}); - } - values.add(new Object[] {null}); - return values; - } - - /** - * When null, the check call should throw an exception. - */ - private final VolumeCheckResult expectedVolumeHealth; - private static final int NUM_VOLUMES = 2; - - - public TestHddsVolumeChecker(VolumeCheckResult expectedVolumeHealth) { - this.expectedVolumeHealth = expectedVolumeHealth; - } - - /** - * Test {@link HddsVolumeChecker#checkVolume} propagates the - * check to the delegate checker. - * - * @throws Exception - */ - @Test - public void testCheckOneVolume() throws Exception { - LOG.info("Executing {}", testName.getMethodName()); - final HddsVolume volume = makeVolumes(1, expectedVolumeHealth).get(0); - final HddsVolumeChecker checker = - new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer()); - checker.setDelegateChecker(new DummyChecker()); - final AtomicLong numCallbackInvocations = new AtomicLong(0); - - /** - * Request a check and ensure it triggered {@link HddsVolume#check}.
- */ - boolean result = - checker.checkVolume(volume, (healthyVolumes, failedVolumes) -> { - numCallbackInvocations.incrementAndGet(); - if (expectedVolumeHealth != null && - expectedVolumeHealth != FAILED) { - assertThat(healthyVolumes.size(), is(1)); - assertThat(failedVolumes.size(), is(0)); - } else { - assertThat(healthyVolumes.size(), is(0)); - assertThat(failedVolumes.size(), is(1)); - } - }); - - GenericTestUtils.waitFor(() -> numCallbackInvocations.get() > 0, 5, 10000); - - // Ensure that the check was invoked at least once. - verify(volume, times(1)).check(anyObject()); - if (result) { - assertThat(numCallbackInvocations.get(), is(1L)); - } - } - - /** - * Test {@link HddsVolumeChecker#checkAllVolumes} propagates - * checks for all volumes to the delegate checker. - * - * @throws Exception - */ - @Test - public void testCheckAllVolumes() throws Exception { - LOG.info("Executing {}", testName.getMethodName()); - - final List<HddsVolume> volumes = makeVolumes( - NUM_VOLUMES, expectedVolumeHealth); - final HddsVolumeChecker checker = - new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer()); - checker.setDelegateChecker(new DummyChecker()); - - Set<HddsVolume> failedVolumes = checker.checkAllVolumes(volumes); - LOG.info("Got back {} failed volumes", failedVolumes.size()); - - if (expectedVolumeHealth == null || expectedVolumeHealth == FAILED) { - assertThat(failedVolumes.size(), is(NUM_VOLUMES)); - } else { - assertTrue(failedVolumes.isEmpty()); - } - - // Ensure each volume's check() method was called exactly once. - for (HddsVolume volume : volumes) { - verify(volume, times(1)).check(anyObject()); - } - } - - /** - * A checker to wraps the result of {@link HddsVolume#check} in - * an ImmediateFuture. - */ - static class DummyChecker - implements AsyncChecker<Boolean, VolumeCheckResult> { - - @Override - public Optional<ListenableFuture<VolumeCheckResult>> schedule( - Checkable<Boolean, VolumeCheckResult> target, - Boolean context) { - try { - LOG.info("Returning success for volume check"); - return Optional.of( - Futures.immediateFuture(target.check(context))); - } catch (Exception e) { - LOG.info("check routine threw exception " + e); - return Optional.of(Futures.immediateFailedFuture(e)); - } - } - - @Override - public void shutdownAndWait(long timeout, TimeUnit timeUnit) - throws InterruptedException { - // Nothing to cancel. - } - } - - static List<HddsVolume> makeVolumes( - int numVolumes, VolumeCheckResult health) throws Exception { - final List<HddsVolume> volumes = new ArrayList<>(numVolumes); - for (int i = 0; i < numVolumes; ++i) { - final HddsVolume volume = mock(HddsVolume.class); - - if (health != null) { - when(volume.check(any(Boolean.class))).thenReturn(health); - when(volume.check(isNull())).thenReturn(health); - } else { - final DiskErrorException de = new DiskErrorException("Fake Exception"); - when(volume.check(any(Boolean.class))).thenThrow(de); - when(volume.check(isNull())).thenThrow(de); - } - volumes.add(volume); - } - return volumes; - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java deleted file mode 100644 index d0fbf10269c..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; -import org.apache.hadoop.util.ReflectionUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.List; -import java.util.UUID; - -/** - * Tests {@link RoundRobinVolumeChoosingPolicy}. - */ -public class TestRoundRobinVolumeChoosingPolicy { - - private RoundRobinVolumeChoosingPolicy policy; - private List<HddsVolume> volumes; - private VolumeSet volumeSet; - - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - - private static final String DUMMY_IP_ADDR = "0.0.0.0"; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - String dataDirKey = volume1 + "," + volume2; - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - policy = ReflectionUtils.newInstance( - RoundRobinVolumeChoosingPolicy.class, null); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - volumes = volumeSet.getVolumesList(); - } - - @After - public void cleanUp() { - if (volumeSet != null) { - volumeSet.shutdown(); - volumeSet = null; - } - } - - @Test - public void testRRVolumeChoosingPolicy() throws Exception { - HddsVolume hddsVolume1 = volumes.get(0); - HddsVolume hddsVolume2 = volumes.get(1); - - // Set available space in volume1 to 100L - setAvailableSpace(hddsVolume1, 100L); - - // Set available space in volume2 to 200L - setAvailableSpace(hddsVolume2, 200L); - - Assert.assertEquals(100L, hddsVolume1.getAvailable()); - Assert.assertEquals(200L, hddsVolume2.getAvailable()); - - // Test two rounds of round-robin choosing - Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0)); - Assert.assertEquals(hddsVolume2, policy.chooseVolume(volumes, 0)); - - // The first volume has only 100L space, so the policy should - // choose the second one in case we ask for more. - Assert.assertEquals(hddsVolume2, - policy.chooseVolume(volumes, 150)); - - // Fail if no volume has enough space available - try { - policy.chooseVolume(volumes, Long.MAX_VALUE); - Assert.fail(); - } catch (IOException e) { - // Passed.
- } - } - - @Test - public void testRRPolicyExceptionMessage() throws Exception { - HddsVolume hddsVolume1 = volumes.get(0); - HddsVolume hddsVolume2 = volumes.get(1); - - // Set available space in volume1 to 100L - setAvailableSpace(hddsVolume1, 100L); - - // Set available space in volume2 to 200L - setAvailableSpace(hddsVolume2, 200L); - - int blockSize = 300; - try { - policy.chooseVolume(volumes, blockSize); - Assert.fail("expected to throw DiskOutOfSpaceException"); - } catch(DiskOutOfSpaceException e) { - Assert.assertEquals("Not returning the expected message", - "Out of space: The volume with the most available space (=" + 200 - + " B) is less than the container size (=" + blockSize + " B).", - e.getMessage()); - } - } - - private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace) - throws IOException { - GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class); - hddsVolume.setScmUsageForTesting(scmUsageMock); - // Set used space to capacity - availableSpace so that - // getAvailable() returns us the specified availableSpace. - Mockito.when(scmUsageMock.getUsed()).thenReturn( - (hddsVolume.getCapacity() - availableSpace)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java deleted file mode 100644 index fa280ddb730..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.IOException; -import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.container.common.volume.HddsVolume - .HDDS_VOLUME_DIR; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * Tests {@link VolumeSet} operations. - */ -public class TestVolumeSet { - - private OzoneConfiguration conf; - private VolumeSet volumeSet; - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume1 = baseDir + "disk1"; - private final String volume2 = baseDir + "disk2"; - private final List<String> volumes = new ArrayList<>(); - - private static final String DUMMY_IP_ADDR = "0.0.0.0"; - - private void initializeVolumeSet() throws Exception { - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - String dataDirKey = volume1 + "," + volume2; - volumes.add(volume1); - volumes.add(volume2); - conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - initializeVolumeSet(); - } - - @After - public void shutdown() throws IOException { - // Delete the hdds volume root dir - List<HddsVolume> hddsVolumes = new ArrayList<>(); - hddsVolumes.addAll(volumeSet.getVolumesList()); - hddsVolumes.addAll(volumeSet.getFailedVolumesList()); - - for (HddsVolume volume : hddsVolumes) { - FileUtils.deleteDirectory(volume.getHddsRootDir()); - } - volumeSet.shutdown(); - - FileUtil.fullyDelete(new File(baseDir)); - } - - private boolean checkVolumeExistsInVolumeSet(String volume) { - for (HddsVolume hddsVolume : volumeSet.getVolumesList()) { - if (hddsVolume.getHddsRootDir().getPath().equals( - HddsVolumeUtil.getHddsRoot(volume))) { - return true; - } - } - return false; - } - - @Test - public void testVolumeSetInitialization() throws Exception { - - List<HddsVolume> volumesList = volumeSet.getVolumesList(); - - // VolumeSet initialization should add volume1 and volume2 to VolumeSet - assertEquals("VolumeSet initialization is incorrect", - volumesList.size(), volumes.size()); - assertTrue("VolumeSet not initialized correctly", - checkVolumeExistsInVolumeSet(volume1)); - assertTrue("VolumeSet not initialized correctly", - checkVolumeExistsInVolumeSet(volume2)); - } - - @Test - public void testAddVolume() { - - assertEquals(2, volumeSet.getVolumesList().size()); - - // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; - boolean success = volumeSet.addVolume(volume3); - - assertTrue(success); - assertEquals(3, volumeSet.getVolumesList().size()); - assertTrue("AddVolume did not add requested volume to VolumeSet", -
checkVolumeExistsInVolumeSet(volume3)); - } - - @Test - public void testFailVolume() throws Exception { - - //Fail a volume - volumeSet.failVolume(volume1); - - // Failed volume should not show up in the volumeList - assertEquals(1, volumeSet.getVolumesList().size()); - - // Failed volume should be added to FailedVolumeList - assertEquals("Failed volume not present in FailedVolumeMap", - 1, volumeSet.getFailedVolumesList().size()); - assertEquals("Failed Volume list did not match", - HddsVolumeUtil.getHddsRoot(volume1), - volumeSet.getFailedVolumesList().get(0).getHddsRootDir().getPath()); - assertTrue(volumeSet.getFailedVolumesList().get(0).isFailed()); - - // Failed volume should not exist in VolumeMap - assertFalse(volumeSet.getVolumeMap().containsKey(volume1)); - } - - @Test - public void testRemoveVolume() throws Exception { - - assertEquals(2, volumeSet.getVolumesList().size()); - - // Remove a volume from VolumeSet - volumeSet.removeVolume(volume1); - assertEquals(1, volumeSet.getVolumesList().size()); - - // Attempting to remove a volume which does not exist in VolumeSet should - // log a warning. - LogCapturer logs = LogCapturer.captureLogs( - LogFactory.getLog(VolumeSet.class)); - volumeSet.removeVolume(volume1); - assertEquals(1, volumeSet.getVolumesList().size()); - String expectedLogMessage = "Volume : " + - HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet"; - assertTrue("Log output does not contain expected log message: " - + expectedLogMessage, logs.getOutput().contains(expectedLogMessage)); - } - - @Test - public void testVolumeInInconsistentState() throws Exception { - assertEquals(2, volumeSet.getVolumesList().size()); - - // Add a volume to VolumeSet - String volume3 = baseDir + "disk3"; - - // Create the root volume dir and create a sub-directory within it. - File newVolume = new File(volume3, HDDS_VOLUME_DIR); - System.out.println("new volume root: " + newVolume); - newVolume.mkdirs(); - assertTrue("Failed to create new volume root", newVolume.exists()); - File dataDir = new File(newVolume, "chunks"); - dataDir.mkdirs(); - assertTrue(dataDir.exists()); - - // The new volume is in an inconsistent state as the root dir is - // non-empty but the version file does not exist. Add Volume should - // return false. - boolean success = volumeSet.addVolume(volume3); - - assertFalse(success); - assertEquals(2, volumeSet.getVolumesList().size()); - assertTrue("AddVolume should fail for an inconsistent volume", - !checkVolumeExistsInVolumeSet(volume3)); - - // Delete volume3 - File volume = new File(volume3); - FileUtils.deleteDirectory(volume); - } - - @Test - public void testShutdown() throws Exception { - List volumesList = volumeSet.getVolumesList(); - - volumeSet.shutdown(); - - // Verify that volume usage can be queried during shutdown. 
- for (HddsVolume volume : volumesList) { - Assert.assertNotNull(volume.getVolumeInfo().getUsageForTesting()); - volume.getAvailable(); - } - } - - @Test - public void testFailVolumes() throws Exception{ - VolumeSet volSet = null; - File readOnlyVolumePath = new File(baseDir); - //Set to readonly, so that this volume will be failed - readOnlyVolumePath.setReadOnly(); - File volumePath = GenericTestUtils.getRandomizedTestDir(); - OzoneConfiguration ozoneConfig = new OzoneConfiguration(); - ozoneConfig.set(HDDS_DATANODE_DIR_KEY, readOnlyVolumePath.getAbsolutePath() - + "," + volumePath.getAbsolutePath()); - volSet = new VolumeSet(UUID.randomUUID().toString(), ozoneConfig); - assertEquals(1, volSet.getFailedVolumesList().size()); - assertEquals(readOnlyVolumePath, volSet.getFailedVolumesList().get(0) - .getHddsRootDir()); - - //Set back to writable - try { - readOnlyVolumePath.setWritable(true); - volSet.shutdown(); - } finally { - FileUtil.fullyDelete(volumePath); - } - - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java deleted file mode 100644 index c5deff0fc78..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.volume; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker.DiskErrorException; -import org.apache.hadoop.util.Timer; - -import com.google.common.collect.Iterables; -import org.apache.commons.io.FileUtils; -import org.apache.curator.shaded.com.google.common.collect.ImmutableSet; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.hamcrest.CoreMatchers.is; -import org.junit.After; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Verify that {@link VolumeSet} correctly checks for failed disks - * during initialization. - */ -public class TestVolumeSetDiskChecks { - public static final Logger LOG = LoggerFactory.getLogger( - TestVolumeSetDiskChecks.class); - - @Rule - public Timeout globalTimeout = new Timeout(30_000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private Configuration conf = null; - - /** - * Cleanup volume directories. - */ - @After - public void cleanup() { - final Collection dirs = conf.getTrimmedStringCollection( - DFS_DATANODE_DATA_DIR_KEY); - - for (String d: dirs) { - FileUtils.deleteQuietly(new File(d)); - } - } - - /** - * Verify that VolumeSet creates volume root directories at startup. - * @throws IOException - */ - @Test - public void testOzoneDirsAreCreated() throws IOException { - final int numVolumes = 2; - - conf = getConfWithDataNodeDirs(numVolumes); - final VolumeSet volumeSet = - new VolumeSet(UUID.randomUUID().toString(), conf); - - assertThat(volumeSet.getVolumesList().size(), is(numVolumes)); - assertThat(volumeSet.getFailedVolumesList().size(), is(0)); - - // Verify that the Ozone dirs were created during initialization. - Collection dirs = conf.getTrimmedStringCollection( - DFS_DATANODE_DATA_DIR_KEY); - for (String d : dirs) { - assertTrue(new File(d).isDirectory()); - } - volumeSet.shutdown(); - } - - /** - * Verify that bad volumes are filtered at startup. 
- * @throws IOException - */ - @Test - public void testBadDirectoryDetection() throws IOException { - final int numVolumes = 5; - final int numBadVolumes = 2; - - conf = getConfWithDataNodeDirs(numVolumes); - final VolumeSet volumeSet = new VolumeSet( - UUID.randomUUID().toString(), conf) { - @Override - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskErrorException { - return new DummyChecker(configuration, new Timer(), numBadVolumes); - } - }; - - assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes)); - assertThat(volumeSet.getVolumesList().size(), - is(numVolumes - numBadVolumes)); - volumeSet.shutdown(); - } - - /** - * Verify that all volumes are added to fail list if all volumes are bad. - */ - @Test - public void testAllVolumesAreBad() throws IOException { - final int numVolumes = 5; - - conf = getConfWithDataNodeDirs(numVolumes); - - final VolumeSet volumeSet = new VolumeSet( - UUID.randomUUID().toString(), conf) { - @Override - HddsVolumeChecker getVolumeChecker(Configuration configuration) - throws DiskErrorException { - return new DummyChecker(configuration, new Timer(), numVolumes); - } - }; - - assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes); - assertEquals(volumeSet.getVolumesList().size(), 0); - volumeSet.shutdown(); - } - - /** - * Update configuration with the specified number of Datanode - * storage directories. - * @param conf - * @param numDirs - */ - private Configuration getConfWithDataNodeDirs(int numDirs) { - final Configuration ozoneConf = new OzoneConfiguration(); - final List dirs = new ArrayList<>(); - for (int i = 0; i < numDirs; ++i) { - dirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); - } - ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs)); - return ozoneConf; - } - - /** - * A no-op checker that fails the given number of volumes and succeeds - * the rest. - */ - static class DummyChecker extends HddsVolumeChecker { - private final int numBadVolumes; - - DummyChecker(Configuration conf, Timer timer, int numBadVolumes) - throws DiskErrorException { - super(conf, timer); - this.numBadVolumes = numBadVolumes; - } - - @Override - public Set checkAllVolumes(Collection volumes) - throws InterruptedException { - // Return the first 'numBadVolumes' as failed. - return ImmutableSet.copyOf(Iterables.limit(volumes, numBadVolumes)); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java deleted file mode 100644 index 3328deb06d9..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for Container Volumes. - */ -package org.apache.hadoop.ozone.container.common.volume; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java deleted file mode 100644 index 1d580a09747..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * This class is used to test key related operations on the container. 
- */ -public class TestBlockManagerImpl { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration config; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private BlockData blockData; - private BlockManagerImpl blockManager; - private BlockID blockID; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - UUID datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(config).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, config); - - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - // Creating BlockData - blockID = new BlockID(1L, 1L); - blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - - // Create KeyValueContainerManager - blockManager = new BlockManagerImpl(config); - - } - - @Test - public void testPutAndGetBlock() throws Exception { - assertEquals(0, keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - - assertEquals(1, keyValueContainer.getContainerData().getKeyCount()); - //Get Block - BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer, - blockData.getBlockID()); - - assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID()); - assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID()); - assertEquals(blockData.getChunks().size(), - fromGetBlockData.getChunks().size()); - assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata() - .size()); - - } - - @Test - public void testDeleteBlock() throws Exception { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - blockManager.getBlock(keyValueContainer, blockID); - fail("testDeleteBlock"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - } - } - - @Test - public void testListBlock() throws Exception { - blockManager.putBlock(keyValueContainer, blockData); - List listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 1); - - for (long i = 2; i <= 10; i++) { - blockID = new BlockID(1L, i); - blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - 
blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(keyValueContainer, blockData); - } - - listBlockData = blockManager.listBlock( - keyValueContainer, 1, 10); - assertNotNull(listBlockData); - assertTrue(listBlockData.size() == 10); - } - - @Test - public void testGetNoSuchBlock() throws Exception { - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - //Put Block - blockManager.putBlock(keyValueContainer, blockData); - assertEquals(1, - keyValueContainer.getContainerData().getKeyCount()); - //Delete Block - blockManager.deleteBlock(keyValueContainer, blockID); - assertEquals(0, - keyValueContainer.getContainerData().getKeyCount()); - try { - //Since the block has been deleted, we should not be able to find it - blockManager.getBlock(keyValueContainer, blockID); - fail("testGetNoSuchBlock failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains( - "Unable to find the block", ex); - assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult()); - } - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java deleted file mode 100644 index 84ab56da864..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import java.io.File; -import java.nio.ByteBuffer; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.*; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * This class is used to test ChunkManager operations. - */ -public class TestChunkManagerImpl { - - private OzoneConfiguration config; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private HddsVolume hddsVolume; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private BlockID blockID; - private ChunkManagerImpl chunkManager; - private ChunkInfo chunkInfo; - private ByteBuffer data; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - UUID datanodeId = UUID.randomUUID(); - hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(config).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer(keyValueContainerData, config); - - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - data = ByteBuffer.wrap("testing write chunks".getBytes(UTF_8)); - // Creating BlockData - blockID = new BlockID(1L, 1L); - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, data.capacity()); - - // Create a ChunkManager object. - chunkManager = new ChunkManagerImpl(true); - - } - - private DispatcherContext getDispatcherContext() { - return new DispatcherContext.Builder().build(); - } - - @Test - public void testWriteChunkStageWriteAndCommit() throws Exception { - //As in Setup, we try to create container, these paths should exist. 
- assertTrue(keyValueContainerData.getChunksPath() != null); - File chunksPath = new File(keyValueContainerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - assertTrue(chunksPath.listFiles().length == 0); - - // As no chunks are written to the volume writeBytes should be 0 - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build()); - // Now a chunk file is being written with Stage WRITE_DATA, so it should - // create a temporary chunk file. - assertTrue(chunksPath.listFiles().length == 1); - - long term = 0; - long index = 0; - File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo); - File tempChunkFile = new File(chunkFile.getParent(), - chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER - + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX - + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term - + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index); - - // As chunk write stage is WRITE_DATA, temp chunk file will be created. - assertTrue(tempChunkFile.exists()); - - checkWriteIOStats(data.capacity(), 1); - - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build()); - - checkWriteIOStats(data.capacity(), 1); - - // Old temp file should have been renamed to chunk file. - assertTrue(chunksPath.listFiles().length == 1); - - // As commit happened, chunk file should exist. - assertTrue(chunkFile.exists()); - assertFalse(tempChunkFile.exists()); - - } - - @Test - public void testWriteChunkIncorrectLength() throws Exception { - try { - long randomLength = 200L; - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, randomLength); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - fail("testWriteChunkIncorrectLength failed"); - } catch (StorageContainerException ex) { - // As we got an exception, writeBytes should be 0. - checkWriteIOStats(0, 0); - GenericTestUtils.assertExceptionContains("data array does not match " + - "the length ", ex); - assertEquals(ContainerProtos.Result.INVALID_WRITE_SIZE, ex.getResult()); - } - } - - @Test - public void testWriteChunkStageCombinedData() throws Exception { - //As in Setup, we try to create container, these paths should exist. - assertTrue(keyValueContainerData.getChunksPath() != null); - File chunksPath = new File(keyValueContainerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - assertTrue(chunksPath.listFiles().length == 0); - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - // Now a chunk file is being written with Stage COMBINED_DATA, so it should - // create a chunk file. 
- assertTrue(chunksPath.listFiles().length == 1); - File chunkFile = ChunkUtils.getChunkFile(keyValueContainerData, chunkInfo); - assertTrue(chunkFile.exists()); - checkWriteIOStats(data.capacity(), 1); - } - - @Test - public void testReadChunk() throws Exception { - checkWriteIOStats(0, 0); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - checkWriteIOStats(data.capacity(), 1); - checkReadIOStats(0, 0); - ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, blockID, - chunkInfo, getDispatcherContext()); - assertEquals(expectedData.limit()-expectedData.position(), - chunkInfo.getLen()); - assertTrue(expectedData.rewind().equals(data.rewind())); - checkReadIOStats(expectedData.capacity(), 1); - } - - @Test - public void testDeleteChunk() throws Exception { - File chunksPath = new File(keyValueContainerData.getChunksPath()); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - assertTrue(chunksPath.listFiles().length == 1); - chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo); - assertTrue(chunksPath.listFiles().length == 0); - } - - @Test - public void testDeleteChunkUnsupportedRequest() throws Exception { - try { - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - long randomLength = 200L; - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, randomLength); - chunkManager.deleteChunk(keyValueContainer, blockID, chunkInfo); - fail("testDeleteChunkUnsupportedRequest"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Not Supported Operation.", ex); - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult()); - } - } - - @Test - public void testReadChunkFileNotExists() throws Exception { - try { - // trying to read a chunk, where chunk file does not exist - ByteBuffer expectedData = chunkManager.readChunk(keyValueContainer, - blockID, chunkInfo, getDispatcherContext()); - fail("testReadChunkFileNotExists failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Unable to find the chunk " + - "file.", ex); - assertEquals(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, ex.getResult()); - } - } - - @Test - public void testWriteAndReadChunkMultipleTimes() throws Exception { - for (int i=0; i<100; i++) { - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, data.capacity()); - chunkManager.writeChunk(keyValueContainer, blockID, chunkInfo, data, - getDispatcherContext()); - data.rewind(); - } - checkWriteIOStats(data.capacity()*100, 100); - assertTrue(hddsVolume.getVolumeIOStats().getWriteTime() > 0); - - for (int i=0; i<100; i++) { - chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), i), 0, data.capacity()); - chunkManager.readChunk(keyValueContainer, blockID, chunkInfo, - getDispatcherContext()); - } - checkReadIOStats(data.capacity()*100, 100); - assertTrue(hddsVolume.getVolumeIOStats().getReadTime() > 0); - } - - - /** - * Check WriteIO stats. - * @param length - * @param opCount - */ - private void checkWriteIOStats(long length, long opCount) { - VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); - assertEquals(length, volumeIOStats.getWriteBytes()); - assertEquals(opCount, volumeIOStats.getWriteOpCount()); - } - - /** - * Check ReadIO stats. 
- * @param length - * @param opCount - */ - private void checkReadIOStats(long length, long opCount) { - VolumeIOStats volumeIOStats = hddsVolume.getVolumeIOStats(); - assertEquals(length, volumeIOStats.getReadBytes()); - assertEquals(opCount, volumeIOStats.getReadOpCount()); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java deleted file mode 100644 index 4fdd994fb11..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * This class is used to test KeyValue container block iterator. 
- */ -@RunWith(Parameterized.class) -public class TestKeyValueBlockIterator { - - private KeyValueContainer container; - private KeyValueContainerData containerData; - private VolumeSet volumeSet; - private Configuration conf; - private File testRoot; - - private final String storeImpl; - - public TestKeyValueBlockIterator(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - {OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB}}); - } - - @Before - public void setUp() throws Exception { - testRoot = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - - @After - public void tearDown() { - volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); - } - - @Test - public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { - - long containerID = 100L; - int deletedBlocks = 5; - int normalBlocks = 5; - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - - int counter = 0; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - - assertFalse(keyValueBlockIterator.hasNext()); - - keyValueBlockIterator.seekToFirst(); - counter = 0; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - assertFalse(keyValueBlockIterator.hasNext()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithNextBlock() throws Exception { - long containerID = 101L; - createContainerWithBlocks(containerID, 2, 0); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - long blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithHasNext() throws Exception { - long containerID = 102L; - createContainerWithBlocks(containerID, 2, 0); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - long blockID = 0L; - - // Even calling multiple times hasNext() should not move entry forward. 
- assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToLast(); - assertTrue(keyValueBlockIterator.hasNext()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - keyValueBlockIterator.seekToFirst(); - blockID = 0L; - assertEquals(blockID++, keyValueBlockIterator.nextBlock().getLocalID()); - assertEquals(blockID, keyValueBlockIterator.nextBlock().getLocalID()); - - try { - keyValueBlockIterator.nextBlock(); - } catch (NoSuchElementException ex) { - GenericTestUtils.assertExceptionContains("Block Iterator reached end " + - "for ContainerID " + containerID, ex); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithFilter() throws Exception { - long containerId = 103L; - int deletedBlocks = 5; - int normalBlocks = 5; - createContainerWithBlocks(containerId, normalBlocks, deletedBlocks); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerId, new File(containerPath), MetadataKeyFilters - .getDeletingKeyFilter())) { - - int counter = 5; - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - assertEquals(blockData.getLocalID(), counter++); - } - } - } - - @Test - public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws - Exception { - long containerId = 104L; - createContainerWithBlocks(containerId, 0, 5); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerId, new File(containerPath))) { - //As all blocks are deleted blocks, blocks does not match with normal key - // filter. - assertFalse(keyValueBlockIterator.hasNext()); - } - } - - /** - * Creates a container with specified number of normal blocks and deleted - * blocks. First it will insert normal blocks, and then it will insert - * deleted blocks. 
- * @param containerId - * @param normalBlocks - * @param deletedBlocks - * @throws Exception - */ - private void createContainerWithBlocks(long containerId, int - normalBlocks, int deletedBlocks) throws - Exception { - containerData = new KeyValueContainerData(containerId, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID - .randomUUID().toString()); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, - conf)) { - - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024); - chunkList.add(info.getProtoBufMessage()); - - for (int i = 0; i < normalBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.setChunks(chunkList); - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData - .getProtoBufMessage().toByteArray()); - } - - for (int i = normalBlocks; i < deletedBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.setChunks(chunkList); - metadataStore.getStore().put(DFSUtil.string2Bytes(OzoneConsts - .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData - .getProtoBufMessage().toByteArray()); - } - } - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java deleted file mode 100644 index 81d3065833e..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume - .RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.DiskChecker; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.mockito.Mockito; - -import java.io.File; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Map; -import java.util.List; -import java.util.UUID; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.ratis.util.Preconditions.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * Class to test KeyValue Container operations. 
- */ -public class TestKeyValueContainer { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private UUID datanodeId; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - - } - - @Test - public void testBlockIterator() throws Exception{ - keyValueContainerData = new KeyValueContainerData(100L, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - datanodeId.toString()); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - KeyValueBlockIterator blockIterator = keyValueContainer.blockIterator(); - //As no blocks created, hasNext should return false. - assertFalse(blockIterator.hasNext()); - int blockCount = 10; - addBlocks(blockCount); - blockIterator = keyValueContainer.blockIterator(); - assertTrue(blockIterator.hasNext()); - BlockData blockData; - int blockCounter = 0; - while(blockIterator.hasNext()) { - blockData = blockIterator.nextBlock(); - assertEquals(blockCounter++, blockData.getBlockID().getLocalID()); - } - assertEquals(blockCount, blockCounter); - } - - private void addBlocks(int count) throws Exception { - long containerId = keyValueContainerData.getContainerID(); - - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer - .getContainerData(), conf)) { - for (int i = 0; i < count; i++) { - // Creating BlockData - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - blockData.addMetadata("VOLUME", "ozone"); - blockData.addMetadata("OWNER", "hdfs"); - List chunkList = new ArrayList<>(); - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID - .getLocalID(), 0), 0, 1024); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData - .getProtoBufMessage().toByteArray()); - } - } - } - - @SuppressWarnings("RedundantCast") - @Test - public void testCreateContainer() throws Exception { - - // Create Container. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - String chunksPath = keyValueContainerData.getChunksPath(); - - // Check whether containerMetaDataPath and chunksPath exists or not. - assertTrue(containerMetaDataPath != null); - assertTrue(chunksPath != null); - //Check whether container file and container db file exists or not. 
- assertTrue(keyValueContainer.getContainerFile().exists(), - ".Container File does not exist"); - assertTrue(keyValueContainer.getContainerDBFile().exists(), "Container " + - "DB does not exist"); - } - - @Test - public void testContainerImportExport() throws Exception { - - long containerId = keyValueContainer.getContainerData().getContainerID(); - // Create Container. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - - - keyValueContainerData = keyValueContainer - .getContainerData(); - - keyValueContainerData.setState( - ContainerProtos.ContainerDataProto.State.CLOSED); - - int numberOfKeysToWrite = 12; - //write one few keys to check the key count after import - try(ReferenceCountedDB metadataStore = - BlockUtils.getDB(keyValueContainerData, conf)) { - for (int i = 0; i < numberOfKeysToWrite; i++) { - metadataStore.getStore().put(("test" + i).getBytes(UTF_8), - "test".getBytes(UTF_8)); - } - } - BlockUtils.removeDB(keyValueContainerData, conf); - - Map metadata = new HashMap<>(); - metadata.put("key1", "value1"); - keyValueContainer.update(metadata, true); - - //destination path - File folderToExport = folder.newFile("exported.tar.gz"); - - TarContainerPacker packer = new TarContainerPacker(); - - //export the container - try (FileOutputStream fos = new FileOutputStream(folderToExport)) { - keyValueContainer - .exportContainerData(fos, packer); - } - - //delete the original one - keyValueContainer.delete(); - - //create a new one - KeyValueContainerData containerData = - new KeyValueContainerData(containerId, 1, - keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(), - datanodeId.toString()); - KeyValueContainer container = new KeyValueContainer(containerData, conf); - - HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet - .getVolumesList(), 1); - String hddsVolumeDir = containerVolume.getHddsRootDir().toString(); - - container.populatePathFields(scmId, containerVolume, hddsVolumeDir); - try (FileInputStream fis = new FileInputStream(folderToExport)) { - container.importContainerData(fis, packer); - } - - Assert.assertEquals("value1", containerData.getMetadata().get("key1")); - Assert.assertEquals(keyValueContainerData.getContainerDBType(), - containerData.getContainerDBType()); - Assert.assertEquals(keyValueContainerData.getState(), - containerData.getState()); - Assert.assertEquals(numberOfKeysToWrite, - containerData.getKeyCount()); - Assert.assertEquals(keyValueContainerData.getLayOutVersion(), - containerData.getLayOutVersion()); - Assert.assertEquals(keyValueContainerData.getMaxSize(), - containerData.getMaxSize()); - Assert.assertEquals(keyValueContainerData.getBytesUsed(), - containerData.getBytesUsed()); - - //Can't overwrite existing container - try { - try (FileInputStream fis = new FileInputStream(folderToExport)) { - container.importContainerData(fis, packer); - } - fail("Container is imported twice. Previous files are overwritten"); - } catch (IOException ex) { - //all good - } - - } - - @Test - public void testDuplicateContainer() throws Exception { - try { - // Create Container. 
- keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDuplicateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("ContainerFile already " + - "exists", ex); - assertEquals(ContainerProtos.Result.CONTAINER_ALREADY_EXISTS, ex - .getResult()); - } - } - - @Test - public void testDiskFullExceptionCreateContainer() throws Exception { - - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenThrow(DiskChecker.DiskOutOfSpaceException.class); - try { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - fail("testDiskFullExceptionCreateContainer failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("disk out of space", - ex); - assertEquals(ContainerProtos.Result.DISK_OUT_OF_SPACE, ex.getResult()); - } - } - - @Test - public void testDeleteContainer() throws Exception { - keyValueContainerData.setState(ContainerProtos.ContainerDataProto.State - .CLOSED); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.delete(); - - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - File containerMetaDataLoc = new File(containerMetaDataPath); - - assertFalse("Container directory still exists", containerMetaDataLoc - .getParentFile().exists()); - - assertFalse("Container File still exists", - keyValueContainer.getContainerFile().exists()); - assertFalse("Container DB file still exists", - keyValueContainer.getContainerDBFile().exists()); - } - - - @Test - public void testCloseContainer() throws Exception { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.close(); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - keyValueContainerData.getState()); - - //Check state in the .container file - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); - File containerFile = keyValueContainer.getContainerFile(); - - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, - keyValueContainerData.getState()); - } - - @Test - public void testReportOfUnhealthyContainer() throws Exception { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Assert.assertNotNull(keyValueContainer.getContainerReport()); - keyValueContainer.markContainerUnhealthy(); - File containerFile = keyValueContainer.getContainerFile(); - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(ContainerProtos.ContainerDataProto.State.UNHEALTHY, - keyValueContainerData.getState()); - Assert.assertNotNull(keyValueContainer.getContainerReport()); - } - - @Test - public void testUpdateContainer() throws IOException { - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Map metadata = new HashMap<>(); - metadata.put("VOLUME", "ozone"); - metadata.put("OWNER", "hdfs"); - keyValueContainer.update(metadata, true); - - keyValueContainerData = keyValueContainer - .getContainerData(); - - assertEquals(2, keyValueContainerData.getMetadata().size()); - - //Check metadata in the .container file - File containerFile = keyValueContainer.getContainerFile(); - - 
keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertEquals(2, keyValueContainerData.getMetadata().size()); - - } - - @Test - public void testUpdateContainerUnsupportedRequest() throws Exception { - try { - keyValueContainerData.setState( - ContainerProtos.ContainerDataProto.State.CLOSED); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - Map metadata = new HashMap<>(); - metadata.put("VOLUME", "ozone"); - keyValueContainer.update(metadata, false); - fail("testUpdateContainerUnsupportedRequest failed"); - } catch (StorageContainerException ex) { - GenericTestUtils.assertExceptionContains("Updating a closed container " + - "without force option is not allowed", ex); - assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex - .getResult()); - } - } - - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java deleted file mode 100644 index fe702fc693a..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ /dev/null @@ -1,270 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.io.RandomAccessFile; -import java.util.Arrays; -import java.util.ArrayList; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - - -/** - * Basic sanity test for the KeyValueContainerCheck class. 
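As a sketch of the scan flow that the parameterized tests below exercise, assuming the same KeyValueContainerCheck, ContainerScrubberConfiguration and DataTransferThrottler classes, with containerData, conf and containerID prepared as in the test:

KeyValueContainerCheck kvCheck = new KeyValueContainerCheck(
    containerData.getMetadataPath(), conf, containerID);
// fastCheck() validates only container metadata, so it passes for OPEN containers too.
boolean metadataOk = kvCheck.fastCheck();
// fullCheck() re-reads chunk data and verifies checksums; it is run once the container is closed.
ContainerScrubberConfiguration sc =
    conf.getObject(ContainerScrubberConfiguration.class);
boolean dataOk = kvCheck.fullCheck(
    new DataTransferThrottler(sc.getBandwidthPerVolume()), null);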
- */ -@RunWith(Parameterized.class) public class TestKeyValueContainerCheck { - private final String storeImpl; - private KeyValueContainer container; - private KeyValueContainerData containerData; - private VolumeSet volumeSet; - private OzoneConfiguration conf; - private File testRoot; - - public TestKeyValueContainerCheck(String metadataImpl) { - this.storeImpl = metadataImpl; - } - - @Parameterized.Parameters public static Collection data() { - return Arrays.asList(new Object[][] {{OZONE_METADATA_STORE_IMPL_LEVELDB}, - {OZONE_METADATA_STORE_IMPL_ROCKSDB}}); - } - - @Before public void setUp() throws Exception { - this.testRoot = GenericTestUtils.getRandomizedTestDir(); - conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - conf.set(OZONE_METADATA_STORE_IMPL, storeImpl); - volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - } - - @After public void teardown() { - volumeSet.shutdown(); - FileUtil.fullyDelete(testRoot); - } - - /** - * Sanity test, when there are no corruptions induced. - */ - @Test - public void testKeyValueContainerCheckNoCorruption() throws Exception { - long containerID = 101; - int deletedBlocks = 1; - int normalBlocks = 3; - int chunksPerBlock = 4; - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - - // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, - chunksPerBlock); - - KeyValueContainerCheck kvCheck = - new KeyValueContainerCheck(containerData.getMetadataPath(), conf, - containerID); - - // first run checks on a Open Container - boolean valid = kvCheck.fastCheck(); - assertTrue(valid); - - container.close(); - - // next run checks on a Closed Container - valid = kvCheck.fullCheck(new DataTransferThrottler( - c.getBandwidthPerVolume()), null); - assertTrue(valid); - } - - /** - * Sanity test, when there are corruptions induced. - */ - @Test - public void testKeyValueContainerCheckCorruption() throws Exception { - long containerID = 102; - int deletedBlocks = 1; - int normalBlocks = 3; - int chunksPerBlock = 4; - ContainerScrubberConfiguration sc = conf.getObject( - ContainerScrubberConfiguration.class); - - // test Closed Container - createContainerWithBlocks(containerID, normalBlocks, deletedBlocks, - chunksPerBlock); - - container.close(); - - KeyValueContainerCheck kvCheck = - new KeyValueContainerCheck(containerData.getMetadataPath(), conf, - containerID); - - File metaDir = new File(containerData.getMetadataPath()); - File dbFile = KeyValueContainerLocationUtil - .getContainerDBFile(metaDir, containerID); - containerData.setDbFile(dbFile); - try (ReferenceCountedDB ignored = - BlockUtils.getDB(containerData, conf); - KeyValueBlockIterator kvIter = new KeyValueBlockIterator(containerID, - new File(containerData.getContainerPath()))) { - BlockData block = kvIter.nextBlock(); - assertFalse(block.getChunks().isEmpty()); - ContainerProtos.ChunkInfo c = block.getChunks().get(0); - File chunkFile = ChunkUtils.getChunkFile(containerData, - ChunkInfo.getFromProtoBuf(c)); - long length = chunkFile.length(); - assertTrue(length > 0); - // forcefully truncate the file to induce failure. - try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) { - file.setLength(length / 2); - } - assertEquals(length/2, chunkFile.length()); - } - - // metadata check should pass. - boolean valid = kvCheck.fastCheck(); - assertTrue(valid); - - // checksum validation should fail. 
- valid = kvCheck.fullCheck(new DataTransferThrottler( - sc.getBandwidthPerVolume()), null); - assertFalse(valid); - } - - /** - * Creates a container with normal and deleted blocks. - * First it will insert normal blocks, and then it will insert - * deleted blocks. - */ - private void createContainerWithBlocks(long containerId, int normalBlocks, - int deletedBlocks, int chunksPerBlock) throws Exception { - String strBlock = "block"; - String strChunk = "-chunkFile"; - long totalBlocks = normalBlocks + deletedBlocks; - int unitLen = 1024; - int chunkLen = 3 * unitLen; - int bytesPerChecksum = 2 * unitLen; - Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, - bytesPerChecksum); - byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(); - ChecksumData checksumData = checksum.computeChecksum(chunkData); - - containerData = new KeyValueContainerData(containerId, - (long) StorageUnit.BYTES.toBytes( - chunksPerBlock * chunkLen * totalBlocks), - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - container = new KeyValueContainer(containerData, conf); - container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), - UUID.randomUUID().toString()); - try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, - conf)) { - ChunkManagerImpl chunkManager = new ChunkManagerImpl(true); - - assertNotNull(containerData.getChunksPath()); - File chunksPath = new File(containerData.getChunksPath()); - assertTrue(chunksPath.exists()); - // Initially chunks folder should be empty. - File[] chunkFilesBefore = chunksPath.listFiles(); - assertNotNull(chunkFilesBefore); - assertEquals(0, chunkFilesBefore.length); - - List chunkList = new ArrayList<>(); - for (int i = 0; i < totalBlocks; i++) { - BlockID blockID = new BlockID(containerId, i); - BlockData blockData = new BlockData(blockID); - - chunkList.clear(); - for (long chunkCount = 0; chunkCount < chunksPerBlock; chunkCount++) { - String chunkName = strBlock + i + strChunk + chunkCount; - ChunkInfo info = new ChunkInfo(chunkName, 0, chunkLen); - info.setChecksumData(checksumData); - chunkList.add(info.getProtoBufMessage()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .build()); - chunkManager - .writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .build()); - } - blockData.setChunks(chunkList); - - if (i >= normalBlocks) { - // deleted key - metadataStore.getStore().put(DFSUtil.string2Bytes( - OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } else { - // normal key - metadataStore.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } - } - - File[] chunkFilesAfter = chunksPath.listFiles(); - assertNotNull(chunkFilesAfter); - assertEquals((deletedBlocks + normalBlocks) * chunksPerBlock, - chunkFilesAfter.length); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java deleted file mode 100644 index c3e67c7ae6b..00000000000 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.OPEN; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.mock; - -/** - * Tests unhealthy container functionality in the {@link KeyValueContainer} - * class. 
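A short sketch of the mark-unhealthy flow verified in the tests below, assuming a container created as in setUp() and the same ContainerDataYaml helper:

container.markContainerUnhealthy();
// The UNHEALTHY state is persisted into the .container file immediately.
File containerFile = container.getContainerFile();
KeyValueContainerData onDisk =
    (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile);
// onDisk.getState() is expected to be ContainerProtos.ContainerDataProto.State.UNHEALTHY.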
- */ -public class TestKeyValueContainerMarkUnhealthy { - public static final Logger LOG = LoggerFactory.getLogger( - TestKeyValueContainerMarkUnhealthy.class); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public Timeout timeout = new Timeout(600_000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private UUID datanodeId; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - datanodeId = UUID.randomUUID(); - HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId - .toString()).build(); - - volumeSet = mock(VolumeSet.class); - volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class); - Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) - .thenReturn(hddsVolume); - - keyValueContainerData = new KeyValueContainerData(1L, - (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), - datanodeId.toString()); - final File metaDir = GenericTestUtils.getRandomizedTestDir(); - metaDir.mkdirs(); - keyValueContainerData.setMetadataPath(metaDir.getPath()); - - - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - } - - @After - public void teardown() { - volumeSet = null; - keyValueContainer = null; - keyValueContainerData = null; - } - - /** - * Verify that the .container file is correctly updated when a - * container is marked as unhealthy. - * - * @throws IOException - */ - @Test - public void testMarkContainerUnhealthy() throws IOException { - assertThat(keyValueContainerData.getState(), is(OPEN)); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - - // Check metadata in the .container file - File containerFile = keyValueContainer.getContainerFile(); - - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to close an unhealthy container should fail. - * @throws IOException - */ - @Test - public void testCloseUnhealthyContainer() throws IOException { - keyValueContainer.markContainerUnhealthy(); - thrown.expect(StorageContainerException.class); - keyValueContainer.markContainerForClose(); - } - - /** - * Attempting to mark a closed container as unhealthy should succeed. - */ - @Test - public void testMarkClosedContainerAsUnhealthy() throws IOException { - // We need to create the container so the compact-on-close operation - // does not NPE. - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.close(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to mark a quasi-closed container as unhealthy should succeed. - */ - @Test - public void testMarkQuasiClosedContainerAsUnhealthy() throws IOException { - // We need to create the container so the sync-on-quasi-close operation - // does not NPE. 
- keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - keyValueContainer.quasiClose(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } - - /** - * Attempting to mark a closing container as unhealthy should succeed. - */ - @Test - public void testMarkClosingContainerAsUnhealthy() throws IOException { - keyValueContainer.markContainerForClose(); - keyValueContainer.markContainerUnhealthy(); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java deleted file mode 100644 index 2c71fef11a6..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; - -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_DATANODE_VOLUME_CHOOSING_POLICY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.times; - - -import java.io.File; -import java.io.IOException; -import java.util.HashSet; -import java.util.UUID; - -/** - * Unit tests for {@link KeyValueHandler}. - */ -public class TestKeyValueHandler { - - @Rule - public TestRule timeout = new Timeout(300000); - - private static HddsDispatcher dispatcher; - private static KeyValueHandler handler; - - private final static String DATANODE_UUID = UUID.randomUUID().toString(); - - private final String baseDir = MiniDFSCluster.getBaseDirectory(); - private final String volume = baseDir + "disk1"; - - private static final long DUMMY_CONTAINER_ID = 9999; - - @BeforeClass - public static void setup() throws StorageContainerException { - // Create mock HddsDispatcher and KeyValueHandler. - handler = Mockito.mock(KeyValueHandler.class); - dispatcher = Mockito.mock(HddsDispatcher.class); - Mockito.when(dispatcher.getHandler(any())).thenReturn(handler); - Mockito.when(dispatcher.dispatch(any(), any())).thenCallRealMethod(); - Mockito.when(dispatcher.getContainer(anyLong())).thenReturn( - Mockito.mock(KeyValueContainer.class)); - Mockito.when(dispatcher.getMissingContainerSet()) - .thenReturn(new HashSet<>()); - Mockito.when(handler.handle(any(), any(), any())).thenCallRealMethod(); - doCallRealMethod().when(dispatcher).setMetricsForTesting(any()); - dispatcher.setMetricsForTesting(Mockito.mock(ContainerMetrics.class)); - Mockito.when(dispatcher.buildAuditMessageForFailure(any(), any(), any())) - .thenCallRealMethod(); - Mockito.when(dispatcher.buildAuditMessageForSuccess(any(), any())) - .thenCallRealMethod(); - } - - @Test - /** - * Test that Handler handles different command types correctly. 
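The dispatch path under test can be summarised with this sketch; the builder calls mirror the deleted code, and dispatcher, DUMMY_CONTAINER_ID and DATANODE_UUID are the fixtures set up above:

ContainerCommandRequestProto request =
    ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setContainerID(DUMMY_CONTAINER_ID)
        .setDatanodeUuid(DATANODE_UUID)
        .setCreateContainer(
            ContainerProtos.CreateContainerRequestProto.getDefaultInstance())
        .build();
// The dispatcher routes the request to the handler for its container type, so this
// cmdType ends up in KeyValueHandler.handleCreateContainer(...), as the test verifies.
dispatcher.dispatch(request, new DispatcherContext.Builder().build());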
- */ - public void testHandlerCommandHandling() throws Exception { - - // Test Create Container Request handling - ContainerCommandRequestProto createContainerRequest = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CreateContainer) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .setCreateContainer(ContainerProtos.CreateContainerRequestProto - .getDefaultInstance()) - .build(); - DispatcherContext context = new DispatcherContext.Builder().build(); - dispatcher.dispatch(createContainerRequest, context); - Mockito.verify(handler, times(1)).handleCreateContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Read Container Request handling - ContainerCommandRequestProto readContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer); - dispatcher.dispatch(readContainerRequest, context); - Mockito.verify(handler, times(1)).handleReadContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Update Container Request handling - ContainerCommandRequestProto updateContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer); - dispatcher.dispatch(updateContainerRequest, context); - Mockito.verify(handler, times(1)).handleUpdateContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Delete Container Request handling - ContainerCommandRequestProto deleteContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer); - dispatcher.dispatch(deleteContainerRequest, null); - Mockito.verify(handler, times(1)).handleDeleteContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test List Container Request handling - ContainerCommandRequestProto listContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListContainer); - dispatcher.dispatch(listContainerRequest, context); - Mockito.verify(handler, times(1)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); - - // Test Close Container Request handling - ContainerCommandRequestProto closeContainerRequest = - getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer); - dispatcher.dispatch(closeContainerRequest, context); - Mockito.verify(handler, times(1)).handleCloseContainer( - any(ContainerCommandRequestProto.class), any()); - - // Test Put Block Request handling - ContainerCommandRequestProto putBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutBlock); - dispatcher.dispatch(putBlockRequest, context); - Mockito.verify(handler, times(1)).handlePutBlock( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Get Block Request handling - ContainerCommandRequestProto getBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetBlock); - dispatcher.dispatch(getBlockRequest, context); - Mockito.verify(handler, times(1)).handleGetBlock( - any(ContainerCommandRequestProto.class), any()); - - // Test Delete Block Request handling - ContainerCommandRequestProto deleteBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock); - dispatcher.dispatch(deleteBlockRequest, context); - Mockito.verify(handler, times(1)).handleDeleteBlock( - any(ContainerCommandRequestProto.class), any()); - - // Test List Block Request handling - ContainerCommandRequestProto listBlockRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListBlock); - dispatcher.dispatch(listBlockRequest, context); - Mockito.verify(handler, times(2)).handleUnsupportedOp( - 
any(ContainerCommandRequestProto.class)); - - // Test Read Chunk Request handling - ContainerCommandRequestProto readChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk); - dispatcher.dispatch(readChunkRequest, context); - Mockito.verify(handler, times(1)).handleReadChunk( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Delete Chunk Request handling - ContainerCommandRequestProto deleteChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.DeleteChunk); - dispatcher.dispatch(deleteChunkRequest, context); - Mockito.verify(handler, times(1)).handleDeleteChunk( - any(ContainerCommandRequestProto.class), any()); - - // Test Write Chunk Request handling - ContainerCommandRequestProto writeChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk); - dispatcher.dispatch(writeChunkRequest, context); - Mockito.verify(handler, times(1)).handleWriteChunk( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test List Chunk Request handling - ContainerCommandRequestProto listChunkRequest = - getDummyCommandRequestProto(ContainerProtos.Type.ListChunk); - dispatcher.dispatch(listChunkRequest, context); - Mockito.verify(handler, times(3)).handleUnsupportedOp( - any(ContainerCommandRequestProto.class)); - - // Test Put Small File Request handling - ContainerCommandRequestProto putSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile); - dispatcher.dispatch(putSmallFileRequest, context); - Mockito.verify(handler, times(1)).handlePutSmallFile( - any(ContainerCommandRequestProto.class), any(), any()); - - // Test Get Small File Request handling - ContainerCommandRequestProto getSmallFileRequest = - getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile); - dispatcher.dispatch(getSmallFileRequest, context); - Mockito.verify(handler, times(1)).handleGetSmallFile( - any(ContainerCommandRequestProto.class), any()); - } - - @Test - public void testVolumeSetInKeyValueHandler() throws Exception{ - File path = GenericTestUtils.getRandomizedTestDir(); - Configuration conf = new OzoneConfiguration(); - conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); - VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf); - try { - ContainerSet cset = new ContainerSet(); - int[] interval = new int[1]; - interval[0] = 2; - ContainerMetrics metrics = new ContainerMetrics(interval); - DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()) - .thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - KeyValueHandler keyValueHandler = new KeyValueHandler(conf, context, cset, - volumeSet, metrics); - assertEquals("org.apache.hadoop.ozone.container.common" + - ".volume.RoundRobinVolumeChoosingPolicy", - keyValueHandler.getVolumeChoosingPolicyForTesting() - .getClass().getName()); - - //Set a class which is not of sub class of VolumeChoosingPolicy - conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, - "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); - try { - new KeyValueHandler(conf, context, cset, volumeSet, metrics); - } catch (RuntimeException ex) { - GenericTestUtils.assertExceptionContains("class org.apache.hadoop" + - ".ozone.container.common.impl.HddsDispatcher not org.apache" + - 
".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy", - ex); - } - } finally { - volumeSet.shutdown(); - FileUtil.fullyDelete(path); - } - } - - private ContainerCommandRequestProto getDummyCommandRequestProto( - ContainerProtos.Type cmdType) { - ContainerCommandRequestProto request = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(cmdType) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .build(); - - return request; - } - - - @Test - public void testCloseInvalidContainer() throws IOException { - long containerID = 1234L; - Configuration conf = new Configuration(); - KeyValueContainerData kvData = new KeyValueContainerData(containerID, - (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - KeyValueContainer container = new KeyValueContainer(kvData, conf); - kvData.setState(ContainerProtos.ContainerDataProto.State.INVALID); - - // Create Close container request - ContainerCommandRequestProto closeContainerRequest = - ContainerProtos.ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CloseContainer) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID) - .setCloseContainer(ContainerProtos.CloseContainerRequestProto - .getDefaultInstance()) - .build(); - dispatcher.dispatch(closeContainerRequest, null); - - Mockito.when(handler.handleCloseContainer(any(), any())) - .thenCallRealMethod(); - doCallRealMethod().when(handler).closeContainer(any()); - // Closing invalid container should return error response. - ContainerProtos.ContainerCommandResponseProto response = - handler.handleCloseContainer(closeContainerRequest, container); - - Assert.assertTrue("Close container should return Invalid container error", - response.getResult().equals( - ContainerProtos.Result.INVALID_CONTAINER_STATE)); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java deleted file mode 100644 index e3ae56a3aa8..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_UNHEALTHY; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - - -/** - * Test that KeyValueHandler fails certain operations when the - * container is unhealthy. - */ -public class TestKeyValueHandlerWithUnhealthyContainer { - public static final Logger LOG = LoggerFactory.getLogger( - TestKeyValueHandlerWithUnhealthyContainer.class); - - private final static String DATANODE_UUID = UUID.randomUUID().toString(); - private static final long DUMMY_CONTAINER_ID = 9999; - - @Test - public void testRead() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleReadContainer( - getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetBlock() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetBlock( - getDummyCommandRequestProto(ContainerProtos.Type.GetBlock), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetCommittedBlockLength() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetCommittedBlockLength( - getDummyCommandRequestProto( - ContainerProtos.Type.GetCommittedBlockLength), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testReadChunk() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleReadChunk( - getDummyCommandRequestProto( - ContainerProtos.Type.ReadChunk), - container, null); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - 
public void testDeleteChunk() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleDeleteChunk( - getDummyCommandRequestProto( - ContainerProtos.Type.DeleteChunk), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - @Test - public void testGetSmallFile() throws IOException { - KeyValueContainer container = getMockUnhealthyContainer(); - KeyValueHandler handler = getDummyHandler(); - - ContainerProtos.ContainerCommandResponseProto response = - handler.handleGetSmallFile( - getDummyCommandRequestProto( - ContainerProtos.Type.GetSmallFile), - container); - assertThat(response.getResult(), is(CONTAINER_UNHEALTHY)); - } - - // -- Helper methods below. - - private KeyValueHandler getDummyHandler() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeDetails dnDetails = DatanodeDetails.newBuilder() - .setUuid(DATANODE_UUID) - .setHostName("dummyHost") - .setIpAddress("1.2.3.4") - .build(); - DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class); - when(stateMachine.getDatanodeDetails()).thenReturn(dnDetails); - - StateContext context = new StateContext( - conf, DatanodeStateMachine.DatanodeStates.RUNNING, - stateMachine); - - return new KeyValueHandler( - new OzoneConfiguration(), - context, - mock(ContainerSet.class), - mock(VolumeSet.class), - mock(ContainerMetrics.class)); - } - - private KeyValueContainer getMockUnhealthyContainer() { - KeyValueContainerData containerData = mock(KeyValueContainerData.class); - when(containerData.getState()).thenReturn( - ContainerProtos.ContainerDataProto.State.UNHEALTHY); - return new KeyValueContainer(containerData, new OzoneConfiguration()); - } - - /** - * Construct fake protobuf messages for various types of requests. - * This is tedious, however necessary to test. Protobuf classes are final - * and cannot be mocked by Mockito. - * - * @param cmdType type of the container command. 
- * @return - */ - private ContainerCommandRequestProto getDummyCommandRequestProto( - ContainerProtos.Type cmdType) { - final ContainerCommandRequestProto.Builder builder = - ContainerCommandRequestProto.newBuilder() - .setCmdType(cmdType) - .setContainerID(DUMMY_CONTAINER_ID) - .setDatanodeUuid(DATANODE_UUID); - - final ContainerProtos.DatanodeBlockID fakeBlockId = - ContainerProtos.DatanodeBlockID.newBuilder() - .setContainerID(DUMMY_CONTAINER_ID).setLocalID(1).build(); - - final ContainerProtos.ChunkInfo fakeChunkInfo = - ContainerProtos.ChunkInfo.newBuilder() - .setChunkName("dummy") - .setOffset(0) - .setLen(100) - .setChecksumData(ContainerProtos.ChecksumData.newBuilder() - .setBytesPerChecksum(1) - .setType(ContainerProtos.ChecksumType.CRC32) - .build()) - .build(); - - switch (cmdType) { - case ReadContainer: - builder.setReadContainer( - ContainerProtos.ReadContainerRequestProto.newBuilder().build()); - break; - case GetBlock: - builder.setGetBlock(ContainerProtos.GetBlockRequestProto.newBuilder() - .setBlockID(fakeBlockId).build()); - break; - case GetCommittedBlockLength: - builder.setGetCommittedBlockLength( - ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder() - .setBlockID(fakeBlockId).build()); - case ReadChunk: - builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder() - .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build()); - break; - case DeleteChunk: - builder - .setDeleteChunk(ContainerProtos.DeleteChunkRequestProto.newBuilder() - .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build()); - break; - case GetSmallFile: - builder - .setGetSmallFile(ContainerProtos.GetSmallFileRequestProto.newBuilder() - .setBlock(ContainerProtos.GetBlockRequestProto.newBuilder() - .setBlockID(fakeBlockId) - .build()) - .build()); - break; - - default: - Assert.fail("Unhandled request type " + cmdType + " in unit test"); - } - - return builder.build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java deleted file mode 100644 index 9e6f653e7eb..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
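For context, the export/import flow driven by the test below, as a sketch assuming the same TarContainerPacker API; sourceContainer, destinationContainer and tarFile stand in for the test's fixtures:

TarContainerPacker packer = new TarContainerPacker();
// Pack chunk files, the container DB and the container descriptor into one tar.gz stream.
try (FileOutputStream out = new FileOutputStream(tarFile)) {
  packer.pack(sourceContainer, out);
}
// Read back only the descriptor (container.yaml) without unpacking the data.
try (FileInputStream in = new FileInputStream(tarFile)) {
  byte[] descriptor = packer.unpackContainerDescriptor(in);
}
// Restore chunks and DB into another container; the descriptor is returned rather than
// written to disk, so the destination .container file is left untouched.
try (FileInputStream in = new FileInputStream(tarFile)) {
  packer.unpackContainerData(destinationContainer, in);
}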
- */ - -package org.apache.hadoop.ozone.container.keyvalue; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; - -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.compressors.CompressorException; -import org.apache.commons.compress.compressors.CompressorInputStream; -import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Test the tar/untar for a given container. - */ -public class TestTarContainerPacker { - - private static final String TEST_DB_FILE_NAME = "test1"; - - private static final String TEST_DB_FILE_CONTENT = "test1"; - - private static final String TEST_CHUNK_FILE_NAME = "chunk1"; - - private static final String TEST_CHUNK_FILE_CONTENT = "This is a chunk"; - - private static final String TEST_DESCRIPTOR_FILE_CONTENT = "descriptor"; - - private ContainerPacker packer = new TarContainerPacker(); - - private static final Path SOURCE_CONTAINER_ROOT = - Paths.get("target/test/data/packer-source-dir"); - - private static final Path DEST_CONTAINER_ROOT = - Paths.get("target/test/data/packer-dest-dir"); - - @BeforeClass - public static void init() throws IOException { - initDir(SOURCE_CONTAINER_ROOT); - initDir(DEST_CONTAINER_ROOT); - } - - private static void initDir(Path path) throws IOException { - if (path.toFile().exists()) { - FileUtils.deleteDirectory(path.toFile()); - } - path.toFile().mkdirs(); - } - - private KeyValueContainerData createContainer(long id, Path dir, - OzoneConfiguration conf) throws IOException { - - Path containerDir = dir.resolve("container" + id); - Path dbDir = containerDir.resolve("db"); - Path dataDir = containerDir.resolve("data"); - Files.createDirectories(dbDir); - Files.createDirectories(dataDir); - - KeyValueContainerData containerData = new KeyValueContainerData( - id, -1, UUID.randomUUID().toString(), UUID.randomUUID().toString()); - containerData.setChunksPath(dataDir.toString()); - containerData.setMetadataPath(dbDir.getParent().toString()); - containerData.setDbFile(dbDir.toFile()); - - - return containerData; - } - - @Test - public void pack() throws IOException, CompressorException { - - //GIVEN - OzoneConfiguration conf = new OzoneConfiguration(); - - KeyValueContainerData sourceContainerData = - createContainer(1L, SOURCE_CONTAINER_ROOT, conf); - - KeyValueContainer sourceContainer = - new KeyValueContainer(sourceContainerData, conf); - - //sample db file in the metadata directory - try (FileWriter writer = new FileWriter( - sourceContainerData.getDbFile().toPath() - .resolve(TEST_DB_FILE_NAME) - .toFile())) { - IOUtils.write(TEST_DB_FILE_CONTENT, writer); - } - - //sample chunk file in the chunk directory - try (FileWriter writer = new FileWriter( - Paths.get(sourceContainerData.getChunksPath()) - .resolve(TEST_CHUNK_FILE_NAME) - .toFile())) { - 
IOUtils.write(TEST_CHUNK_FILE_CONTENT, writer); - } - - //sample container descriptor file - try (FileWriter writer = new FileWriter( - sourceContainer.getContainerFile())) { - IOUtils.write(TEST_DESCRIPTOR_FILE_CONTENT, writer); - } - - Path targetFile = - SOURCE_CONTAINER_ROOT.getParent().resolve("container.tar.gz"); - - //WHEN: pack it - try (FileOutputStream output = new FileOutputStream(targetFile.toFile())) { - packer.pack(sourceContainer, output); - } - - //THEN: check the result - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - CompressorInputStream uncompressed = new CompressorStreamFactory() - .createCompressorInputStream(CompressorStreamFactory.GZIP, input); - TarArchiveInputStream tarStream = new TarArchiveInputStream(uncompressed); - - TarArchiveEntry entry; - Map entries = new HashMap<>(); - while ((entry = tarStream.getNextTarEntry()) != null) { - entries.put(entry.getName(), entry); - } - - Assert.assertTrue( - entries.containsKey("container.yaml")); - - } - - //read the container descriptor only - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - String containerYaml = new String(packer.unpackContainerDescriptor(input), - Charset.forName(UTF_8.name())); - Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml); - } - - KeyValueContainerData destinationContainerData = - createContainer(2L, DEST_CONTAINER_ROOT, conf); - - KeyValueContainer destinationContainer = - new KeyValueContainer(destinationContainerData, conf); - - String descriptor = ""; - - //unpackContainerData - try (FileInputStream input = new FileInputStream(targetFile.toFile())) { - descriptor = - new String(packer.unpackContainerData(destinationContainer, input), - Charset.forName(UTF_8.name())); - } - - assertExampleMetadataDbIsGood( - destinationContainerData.getDbFile().toPath()); - assertExampleChunkFileIsGood( - Paths.get(destinationContainerData.getChunksPath())); - Assert.assertFalse( - "Descriptor file should not been exctarcted by the " - + "unpackContainerData Call", - destinationContainer.getContainerFile().exists()); - Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, descriptor); - - } - - - private void assertExampleMetadataDbIsGood(Path dbPath) - throws IOException { - - Path dbFile = dbPath.resolve(TEST_DB_FILE_NAME); - - Assert.assertTrue( - "example DB file is missing after pack/unpackContainerData: " + dbFile, - Files.exists(dbFile)); - - try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) { - List strings = IOUtils - .readLines(testFile, Charset.forName(UTF_8.name())); - Assert.assertEquals(1, strings.size()); - Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0)); - } - } - - private void assertExampleChunkFileIsGood(Path chunkDirPath) - throws IOException { - - Path chunkFile = chunkDirPath.resolve(TEST_CHUNK_FILE_NAME); - - Assert.assertTrue( - "example chunk file is missing after pack/unpackContainerData: " - + chunkFile, - Files.exists(chunkFile)); - - try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) { - List strings = IOUtils - .readLines(testFile, Charset.forName(UTF_8.name())); - Assert.assertEquals(1, strings.size()); - Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0)); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java deleted file 
mode 100644 index 4a1637cb169..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.keyvalue.helpers; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -/** - * Tests for {@link ChunkUtils}. 
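A condensed sketch of the write/read round trip the tests below rely on, assuming the same ChunkUtils, ChunkInfo and VolumeIOStats classes and an existing File file to write into (values mirror the deleted serialRead test):

byte[] bytes = "Hello World".getBytes();
ChunkInfo chunkInfo = new ChunkInfo(file.getPath(), 0, bytes.length);
VolumeIOStats stats = new VolumeIOStats();
// writeData persists the buffer and updates the per-volume IO statistics.
ChunkUtils.writeData(file, chunkInfo, ByteBuffer.wrap(bytes), stats, true);
ByteBuffer readBack = ChunkUtils.readData(file, chunkInfo, stats);
// Arrays.equals(bytes, readBack.array()) is expected to hold, as asserted in serialRead().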
- */ -public class TestChunkUtils { - - private static final Logger LOG = - LoggerFactory.getLogger(TestChunkUtils.class); - - private static final String PREFIX = TestChunkUtils.class.getSimpleName(); - - @Test - public void concurrentReadOfSameFile() throws Exception { - String s = "Hello World"; - byte[] array = s.getBytes(); - ByteBuffer data = ByteBuffer.wrap(array); - Path tempFile = Files.createTempFile(PREFIX, "concurrent"); - try { - ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(), - 0, data.capacity()); - File file = tempFile.toFile(); - VolumeIOStats stats = new VolumeIOStats(); - ChunkUtils.writeData(file, chunkInfo, data, stats, true); - int threads = 10; - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - AtomicBoolean failed = new AtomicBoolean(); - for (int i = 0; i < threads; i++) { - final int threadNumber = i; - executor.submit(() -> { - try { - ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats); - LOG.info("Read data ({}): {}", threadNumber, - new String(readBuffer.array())); - if (!Arrays.equals(array, readBuffer.array())) { - failed.set(true); - } - } catch (Exception e) { - LOG.error("Failed to read data ({})", threadNumber, e); - failed.set(true); - } - processed.incrementAndGet(); - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, (int) TimeUnit.SECONDS.toMillis(5)); - } finally { - executor.shutdownNow(); - } - assertEquals(threads * stats.getWriteBytes(), stats.getReadBytes()); - assertFalse(failed.get()); - } finally { - Files.deleteIfExists(tempFile); - } - } - - @Test - public void concurrentProcessing() throws Exception { - final int perThreadWait = 1000; - final int maxTotalWait = 5000; - int threads = 20; - List paths = new LinkedList<>(); - - try { - ExecutorService executor = new ThreadPoolExecutor(threads, threads, - 0, TimeUnit.SECONDS, new LinkedBlockingQueue<>()); - AtomicInteger processed = new AtomicInteger(); - for (int i = 0; i < threads; i++) { - Path path = Files.createTempFile(PREFIX, String.valueOf(i)); - paths.add(path); - executor.submit(() -> { - ChunkUtils.processFileExclusively(path, () -> { - try { - Thread.sleep(perThreadWait); - } catch (InterruptedException e) { - e.printStackTrace(); - } - processed.incrementAndGet(); - return null; - }); - }); - } - try { - GenericTestUtils.waitFor(() -> processed.get() == threads, - 100, maxTotalWait); - } finally { - executor.shutdownNow(); - } - } finally { - for (Path path : paths) { - FileUtils.deleteQuietly(path.toFile()); - } - } - } - - @Test - public void serialRead() throws Exception { - String s = "Hello World"; - byte[] array = s.getBytes(); - ByteBuffer data = ByteBuffer.wrap(array); - Path tempFile = Files.createTempFile(PREFIX, "serial"); - try { - ChunkInfo chunkInfo = new ChunkInfo(tempFile.toString(), - 0, data.capacity()); - File file = tempFile.toFile(); - VolumeIOStats stats = new VolumeIOStats(); - ChunkUtils.writeData(file, chunkInfo, data, stats, true); - ByteBuffer readBuffer = ChunkUtils.readData(file, chunkInfo, stats); - assertArrayEquals(array, readBuffer.array()); - assertEquals(stats.getWriteBytes(), stats.getReadBytes()); - } catch (Exception e) { - LOG.error("Failed to read data", e); - } finally { - Files.deleteIfExists(tempFile); - } - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java deleted file mode 100644 index afbf274a8fe..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Chunk Manager Checks. - */ -package org.apache.hadoop.ozone.container.keyvalue; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java deleted file mode 100644 index b9b1beabdbd..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.Arrays; -import java.util.Collection; - -/** - * This test verifies the container scrubber metrics functionality. - */ -public class TestContainerScrubberMetrics { - @Test - public void testContainerMetaDataScrubberMetrics() { - OzoneConfiguration conf = new OzoneConfiguration(); - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - c.setMetadataScanInterval(0); - HddsVolume vol = Mockito.mock(HddsVolume.class); - ContainerController cntrl = mockContainerController(vol); - - ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl); - mc.runIteration(); - - Assert.assertEquals(1, mc.getMetrics().getNumScanIterations()); - Assert.assertEquals(3, mc.getMetrics().getNumContainersScanned()); - Assert.assertEquals(1, mc.getMetrics().getNumUnHealthyContainers()); - } - - @Test - public void testContainerDataScrubberMetrics() { - OzoneConfiguration conf = new OzoneConfiguration(); - ContainerScrubberConfiguration c = conf.getObject( - ContainerScrubberConfiguration.class); - c.setDataScanInterval(0); - HddsVolume vol = Mockito.mock(HddsVolume.class); - ContainerController cntrl = mockContainerController(vol); - - ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol); - sc.runIteration(); - - ContainerDataScrubberMetrics m = sc.getMetrics(); - Assert.assertEquals(1, m.getNumScanIterations()); - Assert.assertEquals(2, m.getNumContainersScanned()); - Assert.assertEquals(1, m.getNumUnHealthyContainers()); - } - - private ContainerController mockContainerController(HddsVolume vol) { - // healthy container - Container c1 = Mockito.mock(Container.class); - Mockito.when(c1.shouldScanData()).thenReturn(true); - Mockito.when(c1.scanMetaData()).thenReturn(true); - Mockito.when(c1.scanData( - Mockito.any(DataTransferThrottler.class), - Mockito.any(Canceler.class))).thenReturn(true); - - // unhealthy container (corrupt data) - ContainerData c2d = Mockito.mock(ContainerData.class); - Mockito.when(c2d.getContainerID()).thenReturn(101L); - Container c2 = Mockito.mock(Container.class); - Mockito.when(c2.scanMetaData()).thenReturn(true); - Mockito.when(c2.shouldScanData()).thenReturn(true); - Mockito.when(c2.scanData( - Mockito.any(DataTransferThrottler.class), - Mockito.any(Canceler.class))).thenReturn(false); - Mockito.when(c2.getContainerData()).thenReturn(c2d); - - // unhealthy container (corrupt metadata) - ContainerData c3d = Mockito.mock(ContainerData.class); - Mockito.when(c3d.getContainerID()).thenReturn(102L); - Container c3 = Mockito.mock(Container.class); - Mockito.when(c3.shouldScanData()).thenReturn(false); - Mockito.when(c3.scanMetaData()).thenReturn(false); - 
Mockito.when(c3.getContainerData()).thenReturn(c3d); - - Collection> containers = Arrays.asList(c1, c2, c3); - ContainerController cntrl = Mockito.mock(ContainerController.class); - Mockito.when(cntrl.getContainers(vol)) - .thenReturn(containers.iterator()); - Mockito.when(cntrl.getContainers()) - .thenReturn(containers.iterator()); - - return cntrl; - } - -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java deleted file mode 100644 index 2d679a1cb45..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Random; -import java.util.UUID; -import java.util.HashMap; -import java.util.List; -import java.util.ArrayList; - -import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; -import static org.junit.Assert.assertEquals; - -/** - * This class is used to test OzoneContainer. - */ -public class TestOzoneContainer { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneContainer.class); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private String scmId = UUID.randomUUID().toString(); - private VolumeSet volumeSet; - private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; - private KeyValueContainerData keyValueContainerData; - private KeyValueContainer keyValueContainer; - private final DatanodeDetails datanodeDetails = createDatanodeDetails(); - private HashMap commitSpaceMap; //RootDir -> committed space - private final int numTestContainers = 10; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot() - .getAbsolutePath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - folder.newFolder().getAbsolutePath()); - commitSpaceMap = new HashMap(); - volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf); - volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); - } - - @After - public void cleanUp() throws Exception { - if (volumeSet != null) { - volumeSet.shutdown(); - volumeSet = null; - } - } - - @Test - public void testBuildContainerMap() throws Exception { - // Format the volumes - for (HddsVolume volume : volumeSet.getVolumesList()) { - volume.format(UUID.randomUUID().toString()); - commitSpaceMap.put(getVolumeKey(volume), Long.valueOf(0)); - } - - // Add containers to disk - for (int i = 0; i < numTestContainers; i++) { - long freeBytes = 0; - long volCommitBytes; - long maxCap = (long) StorageUnit.GB.toBytes(1); - - HddsVolume myVolume; - - keyValueContainerData = new KeyValueContainerData(i, - maxCap, UUID.randomUUID().toString(), - datanodeDetails.getUuidString()); - keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); - keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - myVolume = keyValueContainer.getContainerData().getVolume(); - - freeBytes = addBlocks(keyValueContainer, 2, 3); - - // update our expectation of volume committed space in the map - volCommitBytes = commitSpaceMap.get(getVolumeKey(myVolume)).longValue(); - Preconditions.checkState(freeBytes >= 0); - commitSpaceMap.put(getVolumeKey(myVolume), - Long.valueOf(volCommitBytes + freeBytes)); - } - - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - // When OzoneContainer is started, the containers from disk should be - // loaded into the containerSet. - // Also expected to initialize committed space for each volume. 
- OzoneContainer ozoneContainer = new - OzoneContainer(datanodeDetails, conf, context, null); - - ContainerSet containerset = ozoneContainer.getContainerSet(); - assertEquals(numTestContainers, containerset.containerCount()); - - verifyCommittedSpace(ozoneContainer); - } - - @Test - public void testContainerCreateDiskFull() throws Exception { - long containerSize = (long) StorageUnit.MB.toBytes(100); - - // Format the volumes - for (HddsVolume volume : volumeSet.getVolumesList()) { - volume.format(UUID.randomUUID().toString()); - - // eat up all available space except size of 1 container - volume.incCommittedBytes(volume.getAvailable() - containerSize); - // eat up 10 bytes more, now available space is less than 1 container - volume.incCommittedBytes(10); - } - keyValueContainerData = new KeyValueContainerData(99, containerSize, - UUID.randomUUID().toString(), datanodeDetails.getUuidString()); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); - - // we expect an out of space Exception - StorageContainerException e = LambdaTestUtils.intercept( - StorageContainerException.class, - () -> keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId) - ); - if (!DISK_OUT_OF_SPACE.equals(e.getResult())) { - LOG.info("Unexpected error during container creation", e); - } - assertEquals(DISK_OUT_OF_SPACE, e.getResult()); - } - - //verify committed space on each volume - private void verifyCommittedSpace(OzoneContainer oc) { - for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) { - String key = getVolumeKey(dnVol); - long expectedCommit = commitSpaceMap.get(key).longValue(); - long volumeCommitted = dnVol.getCommittedBytes(); - assertEquals("Volume committed space not initialized correctly", - expectedCommit, volumeCommitted); - } - } - - private long addBlocks(KeyValueContainer container, - int blocks, int chunksPerBlock) throws Exception { - String strBlock = "block"; - String strChunk = "-chunkFile"; - int datalen = 65536; - long usedBytes = 0; - - long freeBytes = container.getContainerData().getMaxSize(); - long containerId = container.getContainerData().getContainerID(); - ReferenceCountedDB db = BlockUtils.getDB(container - .getContainerData(), conf); - - for (int bi = 0; bi < blocks; bi++) { - // Creating BlockData - BlockID blockID = new BlockID(containerId, bi); - BlockData blockData = new BlockData(blockID); - List chunkList = new ArrayList<>(); - - chunkList.clear(); - for (int ci = 0; ci < chunksPerBlock; ci++) { - String chunkName = strBlock + bi + strChunk + ci; - long offset = ci * datalen; - ChunkInfo info = new ChunkInfo(chunkName, offset, datalen); - usedBytes += datalen; - chunkList.add(info.getProtoBufMessage()); - } - blockData.setChunks(chunkList); - db.getStore().put(Longs.toByteArray(blockID.getLocalID()), - blockData.getProtoBufMessage().toByteArray()); - } - - // remaining available capacity of the container - return (freeBytes - usedBytes); - } - - private String getVolumeKey(HddsVolume volume) { - return volume.getHddsRootDir().getPath(); - } - - private DatanodeDetails createDatanodeDetails() { - Random random = new Random(); - String ipAddress = - random.nextInt(256) + "." + random.nextInt(256) + "." + random - .nextInt(256) + "." 
+ random.nextInt(256); - - String uuid = UUID.randomUUID().toString(); - String hostName = uuid; - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java deleted file mode 100644 index c3d3b17aefa..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.replication; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; - -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test the replication supervisor. 
- */ -public class TestReplicationSupervisor { - - private OzoneConfiguration conf = new OzoneConfiguration(); - - @Test - public void normal() throws Exception { - //GIVEN - ContainerSet set = new ContainerSet(); - - FakeReplicator replicator = new FakeReplicator(set); - ReplicationSupervisor supervisor = - new ReplicationSupervisor(set, replicator, 5); - - List datanodes = IntStream.range(1, 3) - .mapToObj(v -> Mockito.mock(DatanodeDetails.class)) - .collect(Collectors.toList()); - - try { - //WHEN - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - supervisor.addTask(new ReplicationTask(2L, datanodes)); - supervisor.addTask(new ReplicationTask(2L, datanodes)); - supervisor.addTask(new ReplicationTask(3L, datanodes)); - //THEN - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - - Assert.assertEquals(3, replicator.replicated.size()); - - } finally { - supervisor.stop(); - } - } - - @Test - public void duplicateMessageAfterAWhile() throws Exception { - //GIVEN - ContainerSet set = new ContainerSet(); - - FakeReplicator replicator = new FakeReplicator(set); - ReplicationSupervisor supervisor = - new ReplicationSupervisor(set, replicator, 2); - - List datanodes = IntStream.range(1, 3) - .mapToObj(v -> Mockito.mock(DatanodeDetails.class)) - .collect(Collectors.toList()); - - try { - //WHEN - supervisor.addTask(new ReplicationTask(1L, datanodes)); - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - supervisor.addTask(new ReplicationTask(1L, datanodes)); - LambdaTestUtils.await(200_000, 1000, - () -> supervisor.getInFlightReplications() == 0); - - //THEN - System.out.println(replicator.replicated.get(0)); - - Assert.assertEquals(1, replicator.replicated.size()); - - } finally { - supervisor.stop(); - } - } - - private class FakeReplicator implements ContainerReplicator { - - private List replicated = new ArrayList<>(); - - private ContainerSet containerSet; - - FakeReplicator(ContainerSet set) { - this.containerSet = set; - } - - @Override - public void replicate(ReplicationTask task) { - KeyValueContainerData kvcd = - new KeyValueContainerData(task.getContainerId(), 100L, - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - KeyValueContainer kvc = - new KeyValueContainer(kvcd, conf); - try { - //download is slow - Thread.sleep(100); - replicated.add(task); - containerSet.addContainer(kvc); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 5c905e02870..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for the container replication. - */ -package org.apache.hadoop.ozone.container.replication; \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java deleted file mode 100644 index a136983415b..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background - .BlockDeletingService; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * A test class implementation for {@link BlockDeletingService}. - */ -public class BlockDeletingServiceTestImpl - extends BlockDeletingService { - - // the service timeout - private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0; - - // tests only - private CountDownLatch latch; - private Thread testingThread; - private AtomicInteger numOfProcessed = new AtomicInteger(0); - - public BlockDeletingServiceTestImpl(OzoneContainer container, - int serviceInterval, Configuration conf) { - super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, - TimeUnit.MILLISECONDS, conf); - } - - @VisibleForTesting - public void runDeletingTasks() { - if (latch.getCount() > 0) { - this.latch.countDown(); - } else { - throw new IllegalStateException("Count already reaches zero"); - } - } - - @VisibleForTesting - public boolean isStarted() { - return latch != null && testingThread.isAlive(); - } - - public int getTimesOfProcessed() { - return numOfProcessed.get(); - } - - // Override the implementation to start a single on-call control thread. - @Override - public void start() { - PeriodicalTask svc = new PeriodicalTask(); - // In test mode, relies on a latch countdown to runDeletingTasks tasks. - Runnable r = () -> { - while (true) { - latch = new CountDownLatch(1); - try { - latch.await(); - } catch (InterruptedException e) { - break; - } - Future future = this.getExecutorService().submit(svc); - try { - // for tests, we only wait for 3s for completion - future.get(3, TimeUnit.SECONDS); - numOfProcessed.incrementAndGet(); - } catch (Exception e) { - return; - } - } - }; - - testingThread = new ThreadFactoryBuilder() - .setDaemon(true) - .build() - .newThread(r); - testingThread.start(); - } - - @Override - public void shutdown() { - testingThread.interrupt(); - super.shutdown(); - } -} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java deleted file mode 100644 index 4e8a90bf1d4..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; -// Helper classes for ozone and container tests. \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/additionalfields.container b/hadoop-hdds/container-service/src/test/resources/additionalfields.container deleted file mode 100644 index faaed06d2dc..00000000000 --- a/hadoop-hdds/container-service/src/test/resources/additionalfields.container +++ /dev/null @@ -1,14 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: 1297e8a9-2850-4ced-b96c-5ae31d2c73ad -originNodeId: 7f541a06-6c26-476d-9994-c6e1947e11cb -metadata: {OWNER: ozone, VOLUME: hdfs} -state: CLOSED -aclEnabled: true -checksum: 61db56da7d50798561b5365c123c5fbf7faf99fbbbd571a746af79020b7f79ba \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container b/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container deleted file mode 100644 index ce3294750c1..00000000000 --- a/hadoop-hdds/container-service/src/test/resources/incorrect.checksum.container +++ /dev/null @@ -1,13 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerdir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: 4d41dd20-6d73-496a-b247-4c6cb483f54e -originNodeId: 54842560-67a5-48a5-a7d4-4701d9538706 -metadata: {OWNER: ozone, VOLUME: hdfs} -state: OPEN -checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/incorrect.container b/hadoop-hdds/container-service/src/test/resources/incorrect.container deleted file mode 100644 index 38384c8e697..00000000000 --- a/hadoop-hdds/container-service/src/test/resources/incorrect.container +++ /dev/null @@ -1,13 +0,0 @@ -! -containerDBType: RocksDB -chunksPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -containerID: 9223372036854775807 -containerType: KeyValueContainer -metadataPath: /hdds/current/aed-fg4-hji-jkl/containerDir0/1 -layOutVersion: 1 -maxSize: 5368709120 -originPipelineId: b2c96aa4-b757-4f97-b286-6fb80a1baf8e -originNodeId: 6dcfb385-caea-4efb-9ef3-f87fadca0f51 -metadata: {OWNER: ozone, VOLUME: hdfs} -state: NO_SUCH_STATE -checksum: 08bc9d390f9183aeed3cf33c789e2a07310bba60f3cf55941caccc939db8670f \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/test/resources/log4j.properties b/hadoop-hdds/container-service/src/test/resources/log4j.properties deleted file mode 100644 index bb5cbe5ec32..00000000000 --- a/hadoop-hdds/container-service/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# log4j configuration used during build and unit tests - -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl b/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl deleted file mode 100644 index 7f2aedf8675..00000000000 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle-noframes-sorted.xsl +++ /dev/null @@ -1,189 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -

[checkstyle-noframes-sorted.xsl markup lost in extraction: the stylesheet renders a sorted "CheckStyle Audit" report ("Designed for use with CheckStyle and Ant."), with a "Files" list (Name / Errors), a per-file error table (Error Description / Line, with "Back to top" links) and a "Summary" table (Files / Errors).]
- - - - a - b - - - - - diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml deleted file mode 100644 index 1c437418ccf..00000000000 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ /dev/null @@ -1,196 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-hdds/dev-support/checkstyle/suppressions.xml b/hadoop-hdds/dev-support/checkstyle/suppressions.xml deleted file mode 100644 index 7bc94797df8..00000000000 --- a/hadoop-hdds/dev-support/checkstyle/suppressions.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - - - - diff --git a/hadoop-hdds/docs/README.md b/hadoop-hdds/docs/README.md deleted file mode 100644 index 8d5cdb714fd..00000000000 --- a/hadoop-hdds/docs/README.md +++ /dev/null @@ -1,55 +0,0 @@ - -# Hadoop Ozone/HDDS docs - -This subproject contains the inline documentation for Ozone/HDDS components. - -You can create a new page with: - -``` -hugo new content/title.md -``` - -You can check the rendering with: - -``` -hugo serve -``` - -This maven project will create the rendered HTML page during the build (ONLY if hugo is available). -And the dist project will include the documentation. - -You can adjust the menu hierarchy with adjusting the header of the markdown file: - -To show it in the main header add the menu entry: - -``` ---- -menu: main ---- -``` - -To show it as a subpage, you can set the parent. (The value could be the title of the parent page, -our you can defined an `id: ...` in the parent markdown and use that in the parent reference. - -``` ---- -menu: - main: - parent: "Getting started" ---- -``` diff --git a/hadoop-hdds/docs/archetypes/default.md b/hadoop-hdds/docs/archetypes/default.md deleted file mode 100644 index f4cc9998dc6..00000000000 --- a/hadoop-hdds/docs/archetypes/default.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -menu: main ---- - diff --git a/hadoop-hdds/docs/config.yaml b/hadoop-hdds/docs/config.yaml deleted file mode 100644 index 7b75888fb28..00000000000 --- a/hadoop-hdds/docs/config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -languageCode: "en-us" -DefaultContentLanguage: "en" -title: "Ozone" -theme: "ozonedoc" -pygmentsCodeFences: true -uglyurls: true -relativeURLs: true -disableKinds: -- taxonomy -- taxonomyTerm \ No newline at end of file diff --git a/hadoop-hdds/docs/content/_index.md b/hadoop-hdds/docs/content/_index.md deleted file mode 100644 index bb1bf9a744e..00000000000 --- a/hadoop-hdds/docs/content/_index.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Overview -menu: main -weight: -10 ---- - - -# Apache Hadoop Ozone - - - -*_Ozone is a scalable, redundant, and distributed object store for Hadoop.

-Apart from scaling to billions of objects of varying sizes, -Ozone can function effectively in containerized environments -like Kubernetes._*

- -Applications like Apache Spark, Hive and YARN, work without any modifications when using Ozone. Ozone comes with a [Java client library]({{< -ref "JavaApi.md" ->}}), [S3 protocol support] ({{< ref "S3.md" >}}), and a [command line interface] -({{< ref "shell/_index.md" >}}) which makes it easy to use Ozone. - -Ozone consists of volumes, buckets, and keys: - -* Volumes are similar to user accounts. Only administrators can create or delete volumes. -* Buckets are similar to directories. A bucket can contain any number of keys, but buckets cannot contain other buckets. -* Keys are similar to files. - - }}"> - diff --git a/hadoop-hdds/docs/content/beyond/Containers.md b/hadoop-hdds/docs/content/beyond/Containers.md deleted file mode 100644 index ea7e3b17c43..00000000000 --- a/hadoop-hdds/docs/content/beyond/Containers.md +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: "Ozone Containers" -summary: Ozone uses containers extensively for testing. This page documents the usage and best practices of Ozone. -weight: 2 ---- - - -Docker heavily is used at the ozone development with three principal use-cases: - -* __dev__: - * We use docker to start local pseudo-clusters (docker provides unified environment, but no image creation is required) -* __test__: - * We create docker images from the dev branches to test ozone in kubernetes and other container orchestrator system - * We provide _apache/ozone_ images for each release to make it easier for evaluation of Ozone. - These images are __not__ created __for production__ usage. - -

- -* __production__: - * We have documentation on how you can create your own docker image for your production cluster. - -Let's check out each of the use-cases in more detail: - -## Development - -Ozone artifact contains example docker-compose directories to make it easier to start Ozone cluster in your local machine. - -From distribution: - -```bash -cd compose/ozone -docker-compose up -d -``` - -After a local build: - -```bash -cd hadoop-ozone/dist/target/ozone-*/compose -docker-compose up -d -``` - -These environments are very important tools to start different type of Ozone clusters at any time. - -To be sure that the compose files are up-to-date, we also provide acceptance test suites which start -the cluster and check the basic behaviour. - -The acceptance tests are part of the distribution, and you can find the test definitions in `smoketest` directory. - -You can start the tests from any compose directory: - -For example: - -```bash -cd compose/ozone -./test.sh -``` - -### Implementation details - -`compose` tests are based on the apache/hadoop-runner docker image. The image itself does not contain -any Ozone jar file or binary just the helper scripts to start ozone. - -hadoop-runner provdes a fixed environment to run Ozone everywhere, but the ozone distribution itself -is mounted from the including directory: - -(Example docker-compose fragment) - -``` - scm: - image: apache/hadoop-runner:jdk11 - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - -``` - -The containers are configured based on environment variables, but because the same environment -variables should be set for each containers we maintain the list of the environment variables -in a separated file: - -``` - scm: - image: apache/hadoop-runner:jdk11 - #... - env_file: - - ./docker-config -``` - -The docker-config file contains the list of the required environment variables: - -``` -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -#... -``` - -As you can see we use naming convention. Based on the name of the environment variable, the -appropriate hadoop config XML (`ozone-site.xml` in our case) will be generated by a -[script](https://github.com/apache/hadoop/tree/docker-hadoop-runner-latest/scripts) which is -included in the `hadoop-runner` base image. - -The [entrypoint](https://github.com/apache/hadoop/blob/docker-hadoop-runner-latest/scripts/starter.sh) -of the `hadoop-runner` image contains a helper shell script which triggers this transformation and -can do additional actions (eg. initialize scm/om storage, download required keytabs, etc.) -based on environment variables. - -## Test/Staging - -The `docker-compose` based approach is recommended only for local test, not for multi node cluster. -To use containers on a multi-node cluster we need a Container Orchestrator like Kubernetes. - -Kubernetes example files are included in the `kubernetes` folder. - -*Please note*: all the provided images are based the `hadoop-runner` image which contains all the -required tool for testing in staging environments. For production we recommend to create your own, -hardened image with your own base image. - -### Test the release - -The release can be tested with deploying any of the example clusters: - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -Plese note that in this case the latest released container will be downloaded from the dockerhub. 
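
Returning to the configuration naming convention described above: the real conversion from environment variables to `ozone-site.xml` is performed by the python scripts shipped in the `hadoop-runner` image, but the idea can be sketched in a few lines of Java. The class and method names below are invented for illustration only:

```java
import java.util.Map;

// Illustrative sketch only; the real conversion is done by the scripts in the
// hadoop-runner image. It mirrors the convention above, where an environment
// variable such as OZONE-SITE.XML_ozone.om.address=om ends up as the property
// ozone.om.address in the generated ozone-site.xml.
public final class EnvToXmlSketch {

  public static String toConfigXml(String filePrefix, Map<String, String> env) {
    StringBuilder xml = new StringBuilder("<configuration>\n");
    String prefix = filePrefix + "_";               // e.g. "OZONE-SITE.XML_"
    for (Map.Entry<String, String> entry : env.entrySet()) {
      if (entry.getKey().startsWith(prefix)) {
        String name = entry.getKey().substring(prefix.length());
        xml.append("  <property>\n")
            .append("    <name>").append(name).append("</name>\n")
            .append("    <value>").append(entry.getValue()).append("</value>\n")
            .append("  </property>\n");
      }
    }
    return xml.append("</configuration>\n").toString();
  }

  public static void main(String[] args) {
    // Prints a one-property ozone-site.xml for the example variable above.
    System.out.println(toConfigXml("OZONE-SITE.XML",
        Map.of("OZONE-SITE.XML_ozone.om.address", "om")));
  }
}
```

The prefix before the first underscore selects which config file the property lands in.
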
- -### Test the development build - -To test a development build you can create your own image and upload it to your own docker registry: - - -```bash -mvn clean install -f pom.ozone.xml -DskipTests -Pdocker-build,docker-push -Ddocker.image=myregistry:9000/name/ozone -``` - -The configured image will be used in all the generated kubernetes resources files (`image:` keys are adjusted during the build) - -```bash -cd kubernetes/examples/ozone -kubectl apply -f -``` - -## Production - - - -You can use the source of our development images as an example: - - * [Base image] (https://github.com/apache/hadoop/blob/docker-hadoop-runner-jdk11/Dockerfile) - * [Docker image] (https://github.com/apache/hadoop/blob/trunk/hadoop-ozone/dist/src/main/docker/Dockerfile) - - Most of the elements are optional and just helper function but to use the provided example - kubernetes resources you may need the scripts from - [here](https://github.com/apache/hadoop/tree/docker-hadoop-runner-jdk11/scripts) - - * The two python scripts convert environment variables to real hadoop XML config files - * The start.sh executes the python scripts (and other initialization) based on environment variables. - -## Containers - -Ozone related container images and source locations: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| # | Container | Repository | Base | Branch | Tags | Comments |
|---|-----------|------------|------|--------|------|----------|
| 1 | apache/ozone | https://github.com/apache/hadoop-docker-ozone | hadoop-runner | ozone-... | 0.3.0, 0.4.0, 0.4.1 | For each Ozone release we create a new release tag. |
| 2 | apache/hadoop-runner | https://github.com/apache/hadoop | centos | docker-hadoop-runner | jdk11, jdk8, latest | This is the base image used for testing Hadoop Ozone. This is a set of utilities that make it easy for us to run ozone. |
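
As a rough picture of the `image:` adjustment mentioned in the development build section above, the hypothetical fragment below rewrites the `image:` keys of one generated resource file to point at a custom registry. The real adjustment happens inside the maven `docker-build` profile during the build, and the file name used in `main` is only an example:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical sketch, not the build logic itself: it only shows what
// "image: keys are adjusted during the build" means for a resource file.
public final class ImageKeyRewriteSketch {

  public static void rewrite(Path resourceFile, String newImage) throws IOException {
    List<String> rewritten = Files.readAllLines(resourceFile).stream()
        .map(line -> line.trim().startsWith("image:")
            // keep the indentation, replace only the image value
            ? line.substring(0, line.indexOf("image:")) + "image: " + newImage
            : line)
        .collect(Collectors.toList());
    Files.write(resourceFile, rewritten);
  }

  public static void main(String[] args) throws IOException {
    // Example file name; any of the generated kubernetes resource files would do.
    rewrite(Path.of("kubernetes/examples/ozone/datanode-statefulset.yaml"),
        "myregistry:9000/name/ozone");
  }
}
```
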
diff --git a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md b/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md deleted file mode 100644 index f4f5492cf17..00000000000 --- a/hadoop-hdds/docs/content/beyond/DockerCheatSheet.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: "Docker Cheat Sheet" -date: 2017-08-10 -summary: Docker Compose cheat sheet to help you remember the common commands to control an Ozone cluster running on top of Docker. -weight: 4 ---- - - - -In the `compose` directory of the ozone distribution there are multiple pseudo-cluster setup which -can be used to run Ozone in different way (for example: secure cluster, with tracing enabled, -with prometheus etc.). - -If the usage is not document in a specific directory the default usage is the following: - -```bash -cd compose/ozone -docker-compose up -d -``` - -The data of the container is ephemeral and deleted together with the docker volumes. -```bash -docker-compose down -``` - -## Useful Docker & Ozone Commands - -If you make any modifications to ozone, the simplest way to test it is to run freon and unit tests. - -Here are the instructions to run freon in a docker-based cluster. - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} - -This will open a bash shell on the data node container. -Now we can execute freon for load generation. - -{{< highlight bash >}} -ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10 -{{< /highlight >}} - -Here is a set of helpful commands for working with docker for ozone. -To check the status of the components: - -{{< highlight bash >}} -docker-compose ps -{{< /highlight >}} - -To get logs from a specific node/service: - -{{< highlight bash >}} -docker-compose logs scm -{{< /highlight >}} - - -As the WebUI ports are forwarded to the external machine, you can check the web UI: - -* For the Storage Container Manager: http://localhost:9876 -* For the Ozone Manager: http://localhost:9874 -* For the Datanode: check the port with `docker ps` (as there could be multiple data nodes, ports are mapped to the ephemeral port range) - -You can start multiple data nodes with: - -{{< highlight bash >}} -docker-compose scale datanode=3 -{{< /highlight >}} - -You can test the commands from the [Ozone CLI]({{< ref "shell/_index.md" >}}) after opening a new bash shell in one of the containers: - -{{< highlight bash >}} -docker-compose exec datanode bash -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md b/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md deleted file mode 100644 index 154be5332bf..00000000000 --- a/hadoop-hdds/docs/content/beyond/RunningWithHDFS.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Running concurrently with HDFS -linktitle: Runing with HDFS -weight: 1 -summary: Ozone is designed to run concurrently with HDFS. This page explains how to deploy Ozone in a exisiting HDFS cluster. ---- - - -Ozone is designed to work with HDFS. So it is easy to deploy ozone in an -existing HDFS cluster. - -The container manager part of Ozone can run inside DataNodes as a pluggable module -or as a standalone component. This document describe how can it be started as -a HDFS datanode plugin. - -To activate ozone you should define the service plugin implementation class. 
- - - -{{< highlight xml >}} - - dfs.datanode.plugins - org.apache.hadoop.ozone.HddsDatanodeService - -{{< /highlight >}} - -You also need to add the ozone-datanode-plugin jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar -{{< /highlight >}} - - - -To start ozone with HDFS you should start the the following components: - - 1. HDFS Namenode (from Hadoop distribution) - 2. HDFS Datanode (from the Hadoop distribution with the plugin on the - classpath from the Ozone distribution) - 3. Ozone Manager (from the Ozone distribution) - 4. Storage Container Manager (from the Ozone distribution) - -Please check the log of the datanode whether the HDDS/Ozone plugin is started or -not. Log of datanode should contain something like this: - -``` -2018-09-17 16:19:24 INFO HddsDatanodeService:158 - Started plug-in org.apache.hadoop.ozone.web.OzoneHddsDatanodeService@6f94fb9d -``` - - diff --git a/hadoop-hdds/docs/content/beyond/_index.md b/hadoop-hdds/docs/content/beyond/_index.md deleted file mode 100644 index 2a29a5810aa..00000000000 --- a/hadoop-hdds/docs/content/beyond/_index.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "Beyond Basics" -date: "2017-10-10" -menu: main -weight: 7 - ---- - - -{{}} - Beyond Basics pages go into custom configurations of Ozone, including how - to run Ozone concurrently with an existing HDFS cluster. These pages also - take deep into how to run profilers and leverage tracing support built into - Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/concept/ContainerMetadata.png b/hadoop-hdds/docs/content/concept/ContainerMetadata.png deleted file mode 100644 index 48bd1c43c0361d2b2d23d2bfb8bab16b1dcd72e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 98493 zcma&N1yr2N5-y6nlQ6iuTX1)GcXxMp3BlbhxCaTrCAb8FySu{x!QoBz-r47zd*55@ z&0387E!EXk^;K0@6Q!geiHv}Y00stzEG;Fb0tN>06ATQz91aHb#BpM}1`G^I%34%Z zNm^8tSjolF!rIOp3`{C2RTEZAbp$I0q4ujV9Cg z0tt-gD_;dWU=EsCQH3}<9?n=^THZ9V4IDfJ9IBuvVJKa8$=P`dZ1nQw0{DpA6y~xd zK5{$x@J8zcQJ8}Yh5_pwAv7@=dJh1*ROwRF00WzXJ+83?NTqt=_QSAd5cjK4OsMRD z-jsG|Q{_$gV7K|f60G_1=n=uru*xmp0~?9se<6R=r+}!30wZAU5B5V>1al_uHw6MM zXjqycQE&_|&0b>JOv93g#&CO&b<{*q-j9{Ec5uI0^1goC-J^{#UF;Rc93kCF{m|F7 zHk|psq@Vrc6FCk`J=%%rvj)A=l4y*I_@`m=ulzQn>8?hTV3;PF;7sfYCaNF%C|X&j zaQ!7pxpEL$^hm4{TY|F7uk>kCA}Or6Y|R_DO(UI4WBTUrIe=!BEizBZpK{2XBiD=Qx3?*D(soFP0*<<79~nQYVJEi*5@l>pnu*UvM?L{ zzVqQ%1K)#F=b}nt23}F`9Q5aYDdb5)0mamA^nL~rcwXxaN$PwIe!XZRY~0dXf)g~E zB=%bq{ppYduhkMeur@VJJ2U%(ipJ_r($F?^I&JBYG0yQUnh zxbBS>V&4o>>VZJuV(p<%K<+>D|KWd`?n8-x8H8j8PU19W6~V8f#)jb~4>qu=_%TdK zU>oo`>wSnw`5V_~bwhojQk!O_y+lYt;T+h{LX6KfO5V~EpD98kmB?Nt-2}!z08sW; z1>Nn+qvoyN^>xta8q1nsv!q~4BT?~eQ+g4f$4mCvGUK1Opq}&QoLTyQCljIC0^g!7 zjCOJ@Z4vvW&QH$_@74>qm6gTTRgLp4v^viVju7mDgma=_J_u2OP{L$Kn7Ns1)`dNj zuIer8$?=2VlzS|}Kx&AJgmMTCilMI!ueER77i!-}^T^p&i$vN)KaR08tHbBK%%YK3@VTeR#a@;6Ml|(mk#6)5QGeRJ; zN^rGsWnzsKKakA@jXN$!oF_4R*uNOy9o!+wk`jSiU{OxMgK{gpWc4oVqk?4PProK? 
[remaining binary payload of ContainerMetadata.png omitted]
zwwB4cs1x^+whl!l4%;EPprArx%?5n78Tg5xlsc~^VuG)nKr};2V)lt7VTTXRva38u zl*%>Of!$#us5|UCHJ(8Lb_R;1&JlIkGQ6P^WwNaZ9{EQ>=FMRlz9quvJz@R|L~N)_ zh2aH}`D(;e#@JmMSZa5?)SfvK%Hw!V5t?BFA3Sn0)F)8t>9Z)M1qI4KGN$1Y5?bx4hhVRESevH@+k}$K@vN zb-%eem-E(x;qJG5$HVmSxHyk6V)QFGf^LD;t1{lb#7Lr4iPK`QhbpGjs=6+jIcF<^ zJwW$;$-DP}k(CXPSe-253X4FKz)tWxiyzfVbaj43Np`;gr6{MFJ#CEeGKPC#k2N=YC&iJq4v!H(+$cUd~VGx+*=CR^%k@t9!l zn^Av;vzFp3jLhgviREi%5LiX>!9BF`LhYK%j}I3K&~tOZ*ubZ(@q9r=!%@AJ219<} za1k)?uFG(O`-#5w*RrA&(aVTpAY=FAt>a$&w+$HoqxUK*I;4g##|wKQhz2CGQ?N9N zC?dygrb7x3>vvsDf{#$ad=##yPwLxyi zPy0@c)aZiXeW(ua_9%5YKbI;>|k?cjI_-mo?Z;8Y3rnK`)qKIa;!Z9IL zR)!#45)l}PtITgR7r%Ad2RR|FLAz%+qZ~?}j1i&>`Ig4~NKu>sokQf&zwGZWPnzIC zsN#YrnH)SHOiU6)%EYPDu7>~MBc385@t}QAj{0)K*$<@7~C6k#B)38?P#rRaGT5H4GJ#t403FY03eYp+uwM z06pS2(3yhdzR+Fn)nwqn1Mo;O5aud?YC-?5&gF{EumbAwA$GC~<-;5|4{v(`XE%B@ zxUAWkwJp2iFU|$SIkJZ`#7V>#Na9m?`r2%{I4czd0+F?CqgwO?#w5;p2w3;c5$jyZ z3$N@&Ayj}ELpsy8wJCmJ=qzBRND2%m;;LvA>6$9Bbf&?pZ0P0opwE>yfH_bdOnOj| zQ6<6~4Rj{y>WtCD&2lKgt2BDfti+JXzW2*iXtP3qa4>pUE72m51#~QSCpu?n&P(($ zC_4f6G5&^Uj+l44Nv~syk!*$_+NBM+>}&>CY-8C?v@*Woa5MqCuP_H+DSdg(zT=d* z2C8XhfVN~ax6<}(e5 z4`j6Q@pe2v|H$vQgB(jf&hkb|!DowgJHnP$2;YXfcdnHd*O2f=F?n;6R=HT@W?bKa zP{J@=2wdbmKEd0)qRLCD3q0eIN+5H&{Aqk9+nRu#4=P{+^WPrqA9l+Gf&m@eK+1Bf zRgaao%6D0Y{PzuB*9_l=VFfP*{FjQFxYYwqGY{+h-yI~2E*~Xa$HB83zzzmwoqI%y zBIF_}yTIQ>=dK;R7S0$#e<}EanPj-^ph3N;!&P|Bo@>i{QF+0n9K9v0E=ngSE)pX5 z9dtgoLEQ!Mv*cLEhirgoe+t~+sEeA#z%X7qKk$>;;9RshrSf@yv=+f~0XT+PTKhYD zG?+#Yav$n&6CE|(BM2D(7VqKfWnMfP8BxUKPADI%+sRQS{8@$F=j4XBXR#~F<5#UT z=N2#dcQDB`hJvMhza!}b?lwJUmS3Z9%y}%t; zAxY`mwY$|SL$yn;AghB>!&hL?y6u3h`Fe-<;_>qb;8wh!+MGx^LWAiL%I`xiP>KHJ z);wV{^`r=w<$)?S|1Cfw-!zMyP>raz7xDJ_vjcYxJ1Wl-hw zWc9WJ4RqMxBA!T{a4^_;z67WaRW2-uXucI*il5W8fb>E4#)Ka=;7fB+&xI-?{!ATPYYes_#?6YNIViEkw`Ri@O z=9@w|ec8m{5ZtGcWQeF+i^yx==jJ>(JDs2Q6nu(IM$GZKce82co{n%LxKI4p(opg} z^{6b#1_?=)6?34{nSr`0MmS0=^ooY2xR&~Q%kr98Bl?LVs6VVfvYW3MGc08=F(s(h zOQN3pJMwl$b5$g78`n9P)J=8U-th(Phs&(j06DMo+9gIe5*csisF~L0pNP*SI38{N zb{qarzba}>bczcYS)y3Z-uAhAsIn7a5kGwDYa| z#rmvFSngAkNKe23D+{uwgk~Hjf$AKuKE5E#(f5pQ5+_oq#dy8$_R|Jg^P_3iUHuEc z$kX2Mot~Xfoi1ZPI4c!chirHRWZmk5MhA};U#4OY@XG`*U+)SpBHjA%1O(|cYkz(L zm7_y)y!UZ{%w;7ru>&oJ3Rk-CF{hM?f6Rz)c|$oZ$5y-bzF> z1!=WXah5zIM%^d^UP7)_Qr%@zjPU+fu^VJ{Xk%z^(U2(OYFBcjR!CcJ)UR%$xdoCI zMpYfakCxg+f%-~tPa^_Q019|@ukZ7mK)ZbI}b=r_N2 zhgF9VUXw}c?ncdjd1r(#sEo(uCnRYE;Gt*UrinC;v2es}&gqR7GdED81(vUeJQb;B z{yxUjjoPIKPlX?85sn&KpR}OXY7_pHU&3vr!dY9=mp2{`pj@Oh( zCQFq{<-Qut*2LJPs))WGg8$qo0!^+)rP!yiPlz^B{oV@S2lI&x-?+zw{no^KeIYOM z%d0cp@^76$H88Rl{g-F=oBZ~cqc}hCqNa4-1!qiDh07F-MyBhW06jlRReQfJV_s$y z!vra}qaoL0%w*zgpNSl9o93}Z*JiwuQ9hKR`n=aRR)@+qZb46c1wyQOG%jnXqT1qwktsNb5jk-$u4C#6<^<7FX9ouo?6Z>p{ z*Q=!>+UfMJdqvvJQt^0DaRHYpcj1t2`0418w6fRUatzT&*DL%I~p8_ek znTJu=K8C_E>?xl}e!9uaY<$Q44zD;B# zyt;+SF&W-pG@qA8@wwkel=$~mQ}&-6H}LHu+(IUJ8UTb?o}M$mgh52p-hG9h)yrOq;pp-J826<`P=Y6#E=SXK%|?L;bT&6*~O1 z2E*(wDU;N<`P+!WY#E%zkyPCKa$#mAKT&1xY8tp}5BQ~+bU2JPr_Ztm>hoj=~YdZLzE=oYJhNuLMLb<(nai=4C7OT zTLDTKiI1)hzbjGo3Z+EtS6v;$Zb z8cw5gW*^)?gYP#`gO}=rtCnpd;=Kf1XKz?xS4uJqTxUg24zDim(4Lw|L@~p&{HRxs z4OtOEU3fUz7xX;3KN)hpbG7$E4UFE}9*EwZ*L6txrgy6W zimTsbU$+qRlb5uZhYwCZ)OG0zTz?CsI+@R{jn>Y*o+|q4vLnwq@;MywcWHdKYtn5J zRBXzsc>=|TrH1$mIc<+&(6_^lfEd%QrUvVl%R!sFm=d2?g^Wus7^7=t%d~;Pl+QC- zu-Fzj#Zz>MuDcFWiDQNFC-x@a+gKtrhq_F71nqcZeH96!t7yd;DMhxO(2h1by5%bo zAXpojt9{xo&)wIgUr-*eBuVAjyrN|xTnsTiM4*}puRRj=0y?G175RV75u>A5Y=8%{<6W?TiGa zcSOqD9(lN(HiJ&r8a3nac}TN49YClWKWca$sFby<^+v0?A=HYp?QHxDNfl>AG_jo+c7pQw3g*J}Ht6KZ<%=j&s1X&cUg&+wDZxtI6c=gUXj zwK^!$;OkdMF2zc3$77fieA{bemOXrNM$RzR+b90Cp%g|@UF`&@*aCCzrezgS>uM{w 
zE4zFCilZ~Sc{jtql%eigtyM_sI&L6na+PW66m>~l_kScl%w3yX%*kbH@W@l-zrrSA zM!qel&z9#sQc=IPj`iQ>Z>5MEjtd3QknEH424Fb^*~{zX6cAB4B$+aHnC9;y!bmes zRuBqDVX0Hye`c}#g`awp%=dQg*`55Y^UNNYTIVNDpF ziR-R?yx4M)lRqwJN7?mjv;kG zT|=+LiZbTP@why?PJEzx`9zFu(}y)p!F56A$Y}ORRwkR|EK|}-{RK(o&0d}qHoMKt z1N{vD?ttp`Ig_>(G4d;%8lw-hgGV6}Ra!7x>46sPYY%ZTBs@#5U-?9*+5+uKrPT%Q zS=yOun^mq-p< zlIxLu%66!%&=fo;DIJf~Rg!FjRq{!`hnUsNoeh&af_#|)ajV=`(y?x4+4%{5Ds;Yr zDL$zLSz?Mo%LFeJIFkZh43b~i9u)Cjmsisadj3U%L^_=}A%)4_j(#0xf%8vX$Lg#x z0Ac>2D@EYZ^dosx$F)nz<_Ad@hxKsLH`sUk5u52%7l(utO!B)Ut5D6XaeYqwgiTxm zz3%MpQdH!t1hKg;H$5Z|FNwP19>kr7?Q+b+GOZogeVSvL2RSrVBww|aI)wyll3MBs zBt}&C(W9@RM&;C_)g= z54mVm>_Y@Al@jPx=R)qhC+Z*)oyhl0cokacd80mXg5ex`#qV9W2j5
>M6iH{D); zMHDsiLzlWMFTExbK3Zi{w8dL6c#>8ZcO_r;buh|9!Pvcc-MAQT#LG+O_q{t#&vw_s z_8lU6BfeYr%;&pY=r*yJiCm8Ex}DG7ot?`I%X9_Hcdez;?#jt07QZd9Ztcq~msL00 z9|P>`_-T_GccFO{Gj-jQ`nL+`xV{U3mf2Sw;K%7!4*P1z!nNxXR{)K;R*NT+>R;mw zXZ;0{5YFx`vBGe6hcQ9!qPo%y$I$$wb%nTe)2%x7q1|`gI(}Aocn1DG64Ltmv#*{) z1%HNMZshw{(pZlD_?qWM2N{O1-*M?pU#$|AygbQ`1;9uE-9ke_!YWey#lXjvAoP3m z>~geo#ekmjJ;J^_pW9NQC06eI)8D(E^BaOka7)ZwL%p%a?%t!20ZF->O4 zt{hWHlMbrnN$8zfEXEcwA=+7xm+hH#@1$?oY;v3U$k;l{>_MU&QDWzt&sp*PC9rl4 zzRqXTSFuBA=VhVdm5)5?Q;4V>;2xLj10z;6ozi+dQW}S)K2U?H+K9Ns}gy z)99$CDG&x@BY5HUBjklL?l}BU=_p%U8+QN;hG~+9Oww6L)JAIqe z7<4}ke2u57P6=wtIKsMy z1s&0-_jywd8xqcwzXv~1^AG0|mKD3UTsF>ewLR%1P)ZJqbTB=mgJ@j%C9MiaUiNU! z(FY@xlU)z+az-C=Qv5Q*8CWK5meZ+^cL+dFA<8Or0ki)i2-8^LHFEOvZ2k%1Lkh>G zaH*>gGBb53s9@bXC2QRz!#*&xMcf~0JyQw*-5GoQ$oK2sl+?DK_AbBDd>?^6Bs*Af zPn`J;`a?8SmoT#*DLBL)p_N}<0g-R?g7>rj$Ivsdp!jQSr$AP{HlEg_uxeuUA4s3d96k{qCG_c{jK~MJDLeQ~ z#=1;}1GZcbQBzV-hTVS3Hs{Y!QNr)Hs%93XWK0|L5TX(ZRGVdDQqNIf$6ZOsNPn~r z4|yC|ea+1FWJaPG&k>W3;f8PMj_vn;8IL{^yek7;RzPG0#rQl)mXZaXDW`QL2^4TG zEZ*;``UoJ7o+t6@9^?{dff>;U*>?}F-)aG130%zA@UbNawDCwnxHwnXbpOzkj_>g1Au}?`=p^Gr#*%Xtm z^>!y$xi8wEedAje;J&YiY`E=CHgq9<=;#oO%ux=9Ub1SGTiVWUsTasCA0}HlnZ+3W zJ}Pqd%{LnpxXW_-bMYfnSpqHQJ3xF4UQ{Iwg94T36q zWIwZLD{B`s(Z-RmOi1bfzRYt#c$yf(wtl6T$zAYr|Cmqh-F44vDZ7>R10%O90qVs)gTPE>YANZW4s4p#b2v*13Ih3nLmusjr9SS#RnP+_!I#EQk_9AtZ<$8o~JfxWc&oc zG>Y}CQhNq|7r0U|U7k*E55O^I?};=xZVmDlrq@*lJkmYs7PEOt3nBYADoUbkqO`NG z3@&569+{y7AM>j}4Vy=o(R4jeGvi{LF$7QCz;dczxI>cNDseh4R|~hf6motNjzdKU z+$ayFDAEa@T;}uKgcaMcX@7047a@lKMz~1q)|2fozXBaAl)VU#)TB3#(?6Qr4*IvD z{fYc4sH}M!>IVvzQW#xaztcM+N8b z4RoQ>h!_IjHMy_1N7vMu|Gse)u(IPS4~Ts1(t~>meCu{RX@jcty&QXIIIk#{mnqxk z7SGgs_)uKF{tBm;K-SJFxZlw)IaV4CLC}bUxmPvgC{fD*0 zTq>d?xuIw083Q{gXlZ8(u^nxD$NM7l|ijYKhcR<3~dD zfnxgD{@30#P6Mt|dqZ>8%I2{mfrZ0b$Ts<9YEJssAW+Vh6 zYFnQ58{A{LSHi8dnB6Gq{W$A8dkdp3hX;`E? 
zg{51%r9-+)y1V0De17jgF!$b>J11t&_jCLl+roLIjAlKLkORcs6Y6)=VhqXYWsD~}Dg!awpxE9z?vu>Jl;`4U5EpMPa|S!7)lyOz3XIn0)JyuM`gRgHuIfeW0G{Zeq!=#rS1TJ2W258ew+JK5eIs6Q%6=qc@CI>T~*sNmfbkYNwTt4C)u{DW-;{uuNqWMAtuiE z!luDjWpvSl=kqI{NSNRYnC`zCK`(JrC$vbs3CG%4=-hjCPQ}|L4cZ;UtXtia_@5MX zIF`e&D3zUwowUw4Q}q$Nzb5BW^rVi_@MVE5h_AXXE<8-n$&P(@>0Kw< z$0$%n{Q_67ny08gy227x<>j}ZEQM0z8g7ss{r;KHG`F3P+`?(?JQbiIE|mpI%JaQ) zibt8*9&~@H1`&H(2M*I0B{sBdF}A8RJeuOU9e=N?*ElaEU|Eh^=~f!SOAdWiD0Fa> zu}J?`0t@m+^AVaq8jo<3aoEpiM{(nx4jIw|T7S$ck8z`mUuywVg+9MY%Nk}FM{p-1 z?X^}8VP*~rYwMLb|0&j&UqBC2jZz&I9CAcz5rE*CE=98ku%|T4J2fS>H|8ym*wu~t zWiy`PB3}8%QBF;vL03_aszCAJ>(Qn*p)E1Gn4jZHmy!Diq(@iuzD?`o0vidpxfrA0 z{XoG;oM9Wlx$ITB`S^JU+LV50@gVt$qhm6>ZO2k~Gt*!FmzQ5PO%o^Fgjp=A$~a_Y{FZKT&clcgSh zq?Z{eiW>C*(-sVWI==U&f6mL}t~+bfA9sHTsbOdRp#;`@HpML!GQYl)-=}S&d$B&T z9Gncd!RO;esOB8K>kqc^b`Xo&Vl^pKiGe8OPW zj4aP4Dv=1rHoF;+xtiB=HQt)Dhvb0dIMl74bI7Qy#)+>O)pMcXYzwE@y+wo< z%>xXdWch_d6#?W>;P#~ro0-WEA?7>$p8+^Lj4law-_Q|NyNtlln3rE^*_jvp(cxLV zge^6j9L*J(O;$-1(Z~L~0!DTlcPtrbyw7oc1xc(#OH=%sna|Adkd=VW=n^;g%xrA@ z7q>{UE{tx=9lZR3ma!-`Ud!yj`Z@{iYhw*DjGcNty(fpr(Sd{jccm0hwE}E^8r%*V z?{&h%IVAotMtdb>P7ZGF+wnLK3r9f46I z9z{Lm-p^dW4Y%bQ-yq?^6F(j*aAA21x$(#!>v4bYJ>BEtI05*isuEpNhqor>{89G`AO{4l}A!WJ!uf4iTALO?w}h z)GR|Q7R^BO-I0lt4<(xz?G1Qe9{9(d0Pbs@T`pw^;sAS&DM12U8(}R`&`XEEW&p}( zETR}BE*xluT&*CUvHzUg&#>Pb6QL9&pRm3_Y_JW4ZuWjAf#A}-d#bFHH!Cji>QzFt zVUb-9{9P;r7IG}#%JJ!#t{!pF%8W+Qh-xmRp?FS6{1m5r2PR^IZ4ANY!|T)omHlxP z<`*HBV0if8Gb-z$1`wBvu&XSi=!8yTj@&)gZ%&A690|KQ#M2+ zzZ4HxF5-IvMjDTcY{yxCw4ZzUQwivG3ykWns3A^dv~0>Z)j&NUpyGJlTn(BE9>Mu7nNH2gMN8ZsG5C`(ZIn*L@!Sa4kTHZ&bT~FNqJS= zm{7w#chd7&ag)`3V$^IsTV+FRAZUjE&AkzC?vyfgE_%Ok3;!5i|E*t;4pP(%iG5YSFP$QkG z?kWA!x7!ail4g2yTxa|8>!a2Ttq(D%^j&Zh{9M-qy33sQ#&9Bt@EV5?!91L}- z4-H89mp^_Z8V(5J^`gKHRJSp>DZ8}^)0BI0fA+A4`THg_yDMR}Y8fw9$`M-K&a#EE z%SC?Koa~?&sTJ(}NEAG!77)?%j&$vAE}Xo$Z?<2zh}ZI-u)8Yk-Nb zaSGRb6k)8%8O&b(z{XF?(}EoEg#@up+h{!P{G>@^h4x&dO0huWOvdl3{rq4#psn;T zwuKzGQd{hu!_v0BcE;O|FZx9%oCxohjxl$2odD1%$?<&K?xAf5gVd&ccka@ zi^|U!6FI&b8s)=GSz!|Cm};%~rKs{aY0z6iYLwxi-wJ}lv_oqiisw|6S)gsVGFMSFyZ(x0pbMblh#FyP?OZCWi zt@JL6RFlhS6R2IIrS^NyX1i3q7;PFvLBXGf$yhH?^x%#7P^ki9V`XAc0Ajjfk+-r! 
z9k#r7vV0A(6ZRNp7zS!8osHUb(knlm(&^}6=cqOsLE}X7-zzL}kxG)Y(Rx(J?57mW zTe6J_PloA}`5N+|8%kChzU6F;fW)IOWYg|5B`kgg=q5m*`aOO|H!l}$Tue z-z$Z5-tp+~XG{oexLLf2I&hw!Tkr=Wx<|foGdIz82Ls*3dEa~9=k7Tybe)@E%({D- z+I>Olbg1N)F-sVvy+w6`EWnKYsdnaPdHT~b!zmn)oVqnp=jfMGkxLu_G(!~0u0Wmo zU^u4DD(35LYUFhaQwxg_w)BqBE-GaRT1v_LkW9vvh>ZTTGv4rO?S!_RWTVJK`8ah; z(xA6*>y02j{AbjS12tQ$R_gLxU^W*j?w{q0=)+2H!xk@9BYEG#zFQ`U4E;0*pMW$Y za&<&Je%`ZT|}is$=?v z5lCba)65*)#+Kv+VO4Y}i}M&-(^EM=T4yIEl4D?d`T9|JvFRSeAhYIE<>XGw69;!& z$Z4lc#z-~t%lBtsD==X9>2J(V#T3sQ1>M#Y&XoaZ`qeAMfVl2ER zTZ4tb(E4WR&V@?Exzmo9Y^H=>%vFe}AxxlmWSSOM-PcZbOS+aU4s25n4#Q@=Pum!{ zS^_E`K4VS)80FF^Y{}|kvw40WVXZu}p8C|m!>5(6x~^i9rt~G;pG4VJ5SVehVV|y2 zF=gYTJBLl&6OB{zi=HLJvrq+odgPn(U7CJ9#hS54>rXN7QvAUGb3=9p5Kp^$D7dUzKtl zMWh`ANypSePbm!gZL+W01Aqr$(?Ipz4~eiIgSho|DdM;jpPPsC8{Zo(T4v_HSvk!D zWvT(K99I{hiq*fcDI@6y+dVmU@}z+!^fcT+rV&6}};^|5%1hl;12mYwufq2)#)hA};#(N!Y+_OUY_ zn{OeeF+xvT-`cj+TRW51oS2?6oIVc=(PQkY#R!La1~5xohVBlPN`Fz;b;#UTn}W_c>VH3M6rBFV$>>Q-?cevA(;~l}heZjSNAC z)D@`JThEgVwXfrb!w8kE`o26>{_4oMrh>yZTgdW~6Q^Yu`y-Lv)<(>15i_Xgy38mc z(Mvih*GfpECNG(-k^?^Sal5wY!*j8p+6dnNh)x*9%}EMm*_?MTOo%p&!TsiipL~sU zRxtmId-yomz=8r1hbKfWQ;BH^sl?z9w5*ym0yA3zp~?5*Uc>YTDwz69^1>cr#b>Yf zaPx9I7B;lxFlq_EJ(@zX`ncUb1l3pBKh9)3bHEAE@N2oSNry{|>!8;gm+Zq^QyBmK zA(|YaB7;A0PfmzCWB&oXEui`SjTPSAM@(e9zZiPe{Or14MPmRB8VdroU;9Fh+zqm7 zEOctD+k6@r9y+|16O@iv(Bsl$w6&s{Gw%LL$g`2S)uJY6GO28#m)-vgaHeKvI_Y32 z3&aRZ-w*j9DMA#&9NEL4;O#;$klV!_%r(lZ@R|Fz|8|M&8=WjQe&9^YQ|53i>@oPM zg_|2=to6HORE3H}FctDbn6R;7l)_o2`h45RS9>2Q&;XT<6el^gvrLR?0gd?L!phMx2=CS&}iyH{*pY7`QxWC9dc@3xaBdle9|en zHYj_RwP+UcaVoR3aowqDV_0H;nIDLJ|7;3FYnXPawHKUaepLAo%s9B zkBEp!GP@?vq7EmTe`k9;KqvjSaYP$VpqO%8kco%NJAMh*#OXO9!C^Db!@{dk!pv{i z$X3>52Xngc>B!+W&X~@-5EA6`d)&d$5+=pQ{~gjaUsGz6T2_;jbPo3LsEX)f7-N@J zmPFH{Hd)*)BtlUCMVjA}K8I?~v-<@PIplN42hapxGS5?AuRWvk0h|GcC=a)TW%WFW742K65 zOb6bgnv#!#wavXbxX~x_iRE_VOycaSG7ukzv#zpDln{vgi7h>$V&2RttliIM28V=4 zQoN?u_wNQWaKK_Q)6%_779gK_e-A7kl_JK(!Llzefcf!3p7u+^PaThwhYLem*w2N= z_-de)#8~ee!nmlHdNFdgF6H`qMYLu2_gx2mr7vfoVs7tLC&B}8lI=3}+03sg>!PHY zH$>Iq<=BiG{Z&blVj}}**4(pk2go#OifKw-%?J9QR+~PPLZ22yh{I}k}_ac_jh8e za2ceD)j_tw%`gS)I7-{J67VcMps6bH+x%0g7^|aqBlc)CWRzVc&*Yc8D_{Qwin6_7 zRq1^{`9PNc!G9Z!vZ+L>c!0RL^#^)snC9E`0lYg64!w*PaFiXmp!H>w!qMZAa>!|x6@8f!vYrOn zcjR6+9@&M0cjh%nTidmYl(*{9o%<=Wd_?M>En~1pm}4z|V&|ksY^}!#GNZ)Jw8)r$ z)k<|~pmWc>Xxh_GMC$h@$lphQSw^Y8GjHBZuP6uWVjK`d65kuYtwnZ>^)desB+?HD zpPtW(0x?qKMuwL!-L%IFQ~0XNitlKFi6rju15d@9yop64HG5DP7YQL~v)a(OWSF}| zHo$#=2@HtiAo#?-G>FddV{e=q4R;Gf3*0}yQIn}9*VyOefB^X5ospVI8C*AU+)gZh zT(+HaS(%QQCjyn6+u%hYDKB8xVG$o2tiT`1Cy9cB}N z-5Ar|A!8&RAWhD*Q~i$TR*?~CnUV_OdxvZ{j~}dDn=QOx)sj$zaZhJa0D@nRqmCAl z!Uxn*@>d7ZA+%9S_l}(1NimgD*YAGrqWJ0F5__j1pj`*w{*c+w?p;B&W z#DvwdV4wMJ6xA?@6#2Oo-W)PQVHZOG;ubtOI)x*X}1_@>S8uC&vgLIyUbRI1*KN?U$+YWxOA)ka|jtIn<-#`ZbNhZjZq zixt%{iBQd5+@Rmp;rMPhZeDv&@Mlf;j5MC~%u`1UCAUNjI`BQZk@bGX@q2CV#hk$&%oecy-iObZ^ zy{;P)J@?5-n*gVL-}p#FPs%jfWSpaRFDsq6{C>qpvHCK;!LhCNH2E?aN?ouG7&CvN zo(QBB1Z7EQrfmOd_kaKf`n9n@WzxBgz_X6-I#A}=-L*q8f4qI5$H~^}NmBPshga5T zL;ZvKVyOLpPXPNe$8*?^L#Bdh2jE5QB5_n9@Gm`r)fMNqSmuu9!cxQ1$J(&7FK2kV z)t&czz?^rz!BilR==IVS-pOx&-^U%yq9%oF6=lJ{&mkq;_J*{KNU;?tyd2QfP4{Rj zoE|^L>O&wqu{9D{7nwjww@0;pTyz9}awRE9*FEe+fVKFAJe@&>5(j5H(5>xuQqSQ%?|9WVo@{sB&^5u$fPjWT z6vFqp`?SpuLcGT?bCqQhSGg!VThb3JOdK{+dizv!-&U>{y_IiFP`+Enu#1xLtY9Wt z4282Z$<5at+QyIm#>*e!sJ?1`n9f=(t{#rFX!Ou)FOcRE#rHwqTFY+NW4bW~B*LgmjneO?KThb8Bt5LQiDcfzl%3C_+r~1!)M?1S>|<$m z(kNETesKQjWfFbZT&8~PxyUwATl29QOFl{28s+)coO5VWf06vAbxR9lzEan%`xC8R z3bTUGF(prAZZROnMm$egpc^2TllXt`J4w$_-OlyNw$a*P4lfOn-=lLcQ* 
zGx=SPsNDBUXoffl^6kE~wQTg26xi9T61Th5ObMdv%t`OvWg9H~heCl;y8Qj9RX-J) zn1V>zuIN(gc*g-D@iS3Me~Pt5BCy%#U1pqzR1w6~^n|mev8UzFP1O{0{EE4N?gJS^ z*(gup2MTI+?;wJ9xE<9WB^*{7G-T+%f7P&RU~uz}2CFKxzHt}sl9_alSYg+}rJ>4p z)McTEQ0q+Uay*x8ZhWce2m|;eI5O zOK#QC%x=bE9og82aGTnwj4WWW zZ-(o)B(?5)JgZS0)J(9LIQgDrNc3s1ro9b|kF;Kf>^s*1KcS?I+otX%p$08TlAx#8H&Et zOfs5FVG6+)j0(N9C}*~9s$l33RCb*iY4oudnbO)`|CYZoQT^{s%^sdbJY`4_p2ola zsJW&>Pf9rtdRPch*`6TvgY`2}&*(;$0!ttzC%`l(XJ$!-XJk>dz2D7^zlG$_xRbJk zxwV_R{gUcB1T-WG2#B@XXZS*_*I1SMDlMmVO1{8+ zeBbi*$ORg-NMmH%m6w#r7*%Pm;*LuzyU=g%lD`6_0ONU!O3^Z?fGqurOO!;+=f7*8 z(V;TmS4PsIsGpUcXin@Q^ep3*wC1IZa!_63{=1b^tK^M9ahYADCl{V%2w((hG)fk3 zX)6W|ht+BFWBTq+?UUuZxF?II_=q;Ayk$d%|P>KRnjt8wYdV zuhh#T_mnWKvIIkb(KGqvEUP}B1v`IT>)9yA>Vd@RrqJ#TIhf|bQPs@1_5UBMgmeHT z{IGTmSjMS^Q}eGXjk5S(zdedS%WG#hNfgZQICR(~7o~h*-O7GqiRg(l&__kP^sWU? z=va8ynmS&#Z_yZQ5ZxVzm9WaJ3mDU7!FzSRn)mai@Fo3bR~~y6#q_`A^K~Ya#QgNN zq;u80?WdIMf~Y9qpcAkx(?MliVxpPSh>uJ;mVx?t{ZjO>%r17!)rADh&Lg`?s$)qrHUp2`@x`3_pZP z%`3#w!ZQb2boNRV{15@t2XNF1_2aOb;n@35|Dz6TH?-#`@xeIpjq;+$TN{I&ylCEHy*jU5{&*y6gkWWhBZWFbx6_^4i^cE z24o^b$Qfd;c>N4UF_QO@=7eK21YiQa(5R}VZHh?wbj+5l66)Z!Q>IObI#~BUY9ONQ zel~6uH+Pif+pRV2#bO#r(4tufHwSXpSw3sW;tB58;=r_R5^Y8qZ{yV16 zY4oN8&D+xQ+)~m0-%r|}Vy6Tak*?{Rf^VSle}aWfM79D^hx&^Byw6|3updDMCKfsT z*Ku;nPsemx&(D^WgU-yHgKf<*z@re!Ai4g?njw?*G&AWxk6sE`0>?2*`XTXkLs!!s zqyIxJO^~fIl1Cb1=1{|ca7Q0&NMQRfpjg&GQW!~~H!~>uVu<{c-OdqWzNvmQQ*JhP z>ck#Qez)mRM84g(Ht^axNAGK**H*y%)T5i?BUubyHD+cX2s~M($#B-I-d8~}z50o> z$GM2^ed}KA-KI3niFHP7!8=8z<6E!5`DB;$BTEC&J1xs4byY!fKYpQnAs)!^vgY*& zF@2G%b9h3WJvX!Cn>D+Vjhf6 zyOF`4Kna4wqLsWg{;c)XD8c#=um>t_PBp4N;_@`TzbEH8$kHGeL=vb@v)^WSzbDW! znax5QD89gT=bipwVo38Q?HeT8M%`A-zzDOl)4=GAByg#?N#uh|jrqvLRCDv+!okmP zxfK!tP|l4);#;8B==*qG#hv6t4hIy`>PLZ#gi7K^tFpt}KV*CRDd*65+LEhPq^L;Ot_B5 zKZbwrDb@yKEmd8aOv;J38SBu?EWaAi|ImStP}1ggUL5UCN`HqU71&^3a144*MmI(i ze@p$Bv`%lrC12%w)mJt1VIOVpR#P@nc{U|qr~R8dmIWxhF*%kAPqKkxQnh}-4v5<{ z>6CG$2gSlDBvt=v{rG5&C0_Ee7TivS`J3^+gw#3acXwQ^ee8 zl9mOEWc>F&2C5i#`oC;&yOjJLC#a0zJtjDpZ*kJWUvhjtyIJrjDvHX4IsM{>f7%qK z)zd9%n$Yq6rLf&zI9P*N7Q%`dDvmFI72i6E?HfNe>8l7AoqgFI1Ga8zv`uB2%puQJ z=i<*x*CyUK49&UEF}smPE7hDQ`JXfSBhs-bPDz7`8a4Nuk0v-cxG8xYbN|mkuW||l zbcQk+7&(--;O`0q%pj_EZxZYb%YRhRse@Df!X?pzy8I1d#0MP`#82Ws<&--JUd6A| z@NC10pU>;Y>hNmMXX^(X2{(ap1iQu)3Mr7U`OY`(EHqm|3bbJNB#kgon!VVO z1RJ+BXMgMO-bSAPUJF|GSBpXFj}|QIaC>U_%J&0 zMH@QQNE#Z_WB}I1U@*7^or_LKJ>C!=91=UaN&N&;aFg570Q1%mbSGP$l>SNcWgm*g&_36EZnLzQo1;_kGOZ!>3q~)jS{G^<2s!xjdz;># za$4ThMAW}#_4COXt*DSN;cJK;KxhyB13F439Bef14;lp>ZH9{5l}|kP zvaN(o?UL6o3Q_P8*`j|kD24<%r05Lg%T36fax7rq^fVhCvGZ(oJDMyipaJo~+k%TPjxh=Z@JEr-k^ z?S#FLC$q|-pQtM!B{!lpEf&UoD41mMXT92WOrv$nfIa)hS&8&K$5Qgj%4}Z%;sdy= zN%ZF>T0*We!Njp`!Mz{-C9Q_OtWN08YF)8{h{82GN}$`vqcZeAxUzvR#u_$TPWs6cds!K87>j%iH%K`3P(VTY9WxS7BH;YIvYU}W>3D2%82H1k5J`lz!DJb|Et%E zw@}0=9R-EhLvABYcD8p!g#V<_b-aCas8=DiZL+q}A0=KP4wta`1z_+qi$}L82rd>03nA$p5>GA%2uFsvlS^;qo*pt)zmb2bK9tIncJo+H4TCUt>DU>LJjv%GIQv$0GhDI zWljml_^dmRs#Y~8xqRa;y%u$sAh+3$ktA{akGSuDCM=B1{`(B7fUs~OoU9Smh%2ib5lIBskU8%FkXHeue#v6yx|)9a7sH^HJ9pJx;*}CBcimvHn-?zj*4=a2DoX=@F&+XQ)-|w}h7!qAp2u{hq+~F`5jH zgSeBhcyMeiDdurRYV)2%pZUgj+dht7e6-0eMU^2PZPIQoAMkDhM;0i$8gNGyIZ>?o zK^$#IQ*bX#%>DBDhPh4P;puEGp)b($$&(qG37Hx05<{lVfevtW>v2S@0!zYzT@W+e$Idj3cmZQG#;TncV*ptkPI1=+h(IA*5ze zOhjwm;B?9V&(*TNg^iwWoEv=T=nTYy@?Tq8benQMj8rWyO^HEI`q+2TfMc5nQ^z9w zPnVOXi`O>iJJ7q{<6nDdjSnX$*B(15n1Y3ma9 zeA=N~2gJOE4FY}VWfe}J7<%~YPEq-A!hcUfO%ka+_MC89jvdw}l=&0OB)vM*>y6fMq?uHl`F78SmPfm?{>d*6xLJ;2EnDp?3>Cgl z2d2kMjr;Du@y+`!er8aO3EZSF-9PATZKv6)lY5Wy>>%kUC*xl2&3%Eq98>;FQVgoe zv;Pj8Ft#M{eT3r`6#H0_lcO|Sud1q=|IW-u1>PTOq3D#-BYT+V&u+7sv|7Xyb(_V@ 
z+wLvr+l-s}uX=pRtP{k%^)9jE_9syaS{w?oaT9^-m*mJFg*ib_dzCj7 zH!?s%Wz|^r$F-7|kJP%vQV{1;DLlf2Aar+D(%tum(cZm;^C>cYM4_?o0-!3@ywjgs zXB)J%g(RH5dyM4aX>UBzC*KTv1a#e??};;f9lP{nac$@=C@5D;P2Jzp95I}~a*F8J zZ@IkX0^FS+TdhzWthhI&yWCq;Lz$f^9FB=^6%Xxx8zX^Hu=b$5)MD{e!2Ou-a}Acw zy^?ciG<~y{JjKMjP$;~fiW-jhT6Q#7r+AX0qbmJCD?r32nw=}*HS-j}FBp4{2Eu(* z=%GX^*hyg{v;TdvR5O;aooIF!>iZNhMOYR`#OKn#l5{heN#wcp$5xPP%uoHsk+6gL z18pXSvbrk6#{$Rv{1)-tUM1TdL68nv>!?2n=qFB)mb3{^(;PZUF>05e2sc0#o-cnw zJqB#qEAtuba;LzL?Q84!WUT3{%ldo?4j$L>_Vp0^ikVEkib-lerc~TKOYpV61`&l# zI&x6)^D?cg6v4{NLgQp**OZapuSjI`p#WyaSoer+ z>8k+C6fHGMHoB7h$|8)cn^Sz!z4U3+FVzA(rxP)O%wdy?B+k}E?9Sl}sEo`llx)ITb3Q^Nc4w&m@ zj9Zj4!6U`=f&?q{r+KEtMVgZVD@$N6TDtPsmH@A?%~0o(BLjGK!0DK@WMi5E4d`=2 zk~NZ!!;S%;E#=5%H;kAvvGA|;CU{QE_0bSsP5W$7td{7&Lrb62gJVB&fyNveIA((P zIHCHvd}l8;u0(!n*gBVm8sKry6S#NEA7iPqvz65jX|8RV=R^*#7Wp`_t4NV{ZywY- z%Gs5|!x2OjJrsarLOaK4eM2$|D>C!L9cDL2@&aspj@!o>eBjjd5DJOw<7K9_8xnZ? zUNK;TDSGrY_w(<#7%Q^u{y#SPR~?BHcOgs@MfPH02yapCw*euW_3CA*;MiCsGM6vN z@Y2jg(iNQpfNoE~nD);ijT!1Nr-pCKKY(Kyd>SjhFW&e99<_>f-B=cZ=>j_N5K=hG zlWT|pnN1=i+%zui4c6DBDOzb7aCC@NDlx3Mm=zRylLNcMz!~W%Y2n-b3P$A4WjEh%=*%GAE#E$6RzDT z>L>lGm!1xDI#J`4`7@kqMKR24(q<^?v@eUX= z9sqSk38~kg*WpZ=bHx099yO(BpWBO_zM%6y8$jJuCSkvOLq zq(TpQf5COolf_Qp+h(rfS|}(DDZW@~bllO!vcTH&d3unRWf$r7>Y+GiIqKw+_q4&R zMAC$3fa{d%*b9kZOGXf4<_r#(=xBvspr|X=bbFK5#LUCiGm@o>?;tTDwPD(ywB4gM zmPoCVqLl)19r{&WQ=MQJ@AgwT!1HuXgmMGmIW@ACGPX;zwbE$(6H994r#A8vn@yJj zCi`EwfJrXVUZ(o7%nek9+ryhq1c-A(@L zJu2Y==*bz^q7d{VCRRT$w5{Yc@BQRXA8A1U9^vuxm9!V027ps<589h#ANX?nH|7<% zIUPsL^(`IiFgbZiJoQiNJrgOi=OVemzYm4`4%0_rg~;pYr@meU8YPDS4IQ&r55Bgk z$GuNtNekglsj;;aJM}u*a<%CK%id!RYBGf9sWx+u=6x>P|1>qP?)-tIywu%(&dENP zZxhiz`EgIK2Qwhp7tp@4dN?q+3kpaWh~Y{4Yyll7U+&9o8V(;#+?Oh&;NeUGofYpJ zJ$xRAe27bOZ^)&>xlNZRMJ+B4Y=oag2RQ~d^l%ofJ9#fsq5yv>0L(~Wfk3(jtur(` z*Hz`CMeStUEGzouo%O-kR(q`))FtpZGpnyyF0j<7dUs6*)h{Db##2U2aWOOCmbqN+ zPs^NB{=X5wU?8HEo06;sK z6+rrxC|rUjGLP`%xOk5ov>~$+eyY9pcOrHkj)H#8-&gR-7Z;;+aM`T!JGJwC@s2!^ z#kYP@zKM&SF;~C@1!{o@94xeguAVA2 zJcE6?e4U=1mYz)Za`_uQyMzVmZ(_n>>`v_&y~$JUd?s~)5vUeYsVHcD(M0sM6i4lI z%_2^xOEgNf5ep`MX1Jt+AW`BAKpBR((WswKrvLsYPyoyW6q9p*)UfJ| z3aT-aOORb`s{_z9^;JNN;-~#x6e#K_2crDQCPMHWt8%Iq&Yr?gOs|`3OreOICJDEb zFRjy$B*xjk#8h6#nn%q4R>lC6<{fOg2o%&t%Xa|bf`iW2^@Y~exB2PpOUeCpnS9Ud zeOdvJ>+#+Y+xDk=Chq)br<>#Dvgv5caVl#}MmQY*HFDtu)SoAH0E%Bgjpw;#nQVHY zYtB#{e?QVSnf$1rl*K?2pWF3nsBT;BD0?d!lR~n{Qp$_JA!Ku#JsO#cJ`N4YF$ZLT z4rS>FLpk1bCc;5e!f3Sq`gg+qIP;y+Kv|7*r_ktE zh1Gk%_~-UST%rDl86x1EvL-woES!tI7G)898bXBdDTW=RfBhzpTh#C0x{a6yi`4E^ zaobRwb$(+F*~n_^%kzWiRrF`$-@BtBOR>SDSbKwd6`ZbW?_nQ;+r!l~&#EJsK1O$i zZ`?JsEk*c2r`oe*IB}R<5!%K{HKivx+9ltj?(cVsXbkNmH)zYN&8u1cBxtuDjWG7C z$vCIUdFi4qgO^vkhXczDcZgg3oS0#Xi|HX!T!;&u?M)b$5KXzXI>vftILW?w3}Y-) zP(B~gjC3NSJ{7blRy5dxiSux|xq*q(#EqhpbKU!I?d-o@oZUnwT+D`mr7ML+M6~sh zQLjAJBSk(igW@s~whj?w!bd!V4IwRa2nE|htSR`$n z;C@k_YOC=pm#btQ$~&KmW^X`?iHMjKA*Ux$=Q|w(tReCcDMCX1SY!f2OuYW0wj{#OKNwTgFdei+YU?ZT`Z=T#y7B8CMU z>X#5Lk*8x>q_*c-_NCdj6^q~hP?1-vahtIWJxB{;%!Zo4SadjS&dQ3VW>@CX_3_~7 zi-pOtzUI~~2Y6kX?6P;ELptS*Upg>Zlua+(!Q!dB6S{U^4}oLJpT$N6AFNx)cAN-R zckxzu|81i+b@;j2;LuPg2wq3Ej`8zn5#(B(A*&%ZZn$#jj;aBzf3(fghJPpey4}@K znW*xeqrd4j4}_m+6tSl&TR>bc_a+fb{^W7ciRIsQgZBc)>uyR=;nWcq64c7Ncs}B2 z`uo$<|AG&8VDx8#eLGX~-xq`c35;Y*hycSsfGSd{d<4*)t|Bk=qETC4<6M_XVp6+s zeR(#B7^Oi{mmZg z?)g}IJdWGDg$&R>b<+>3PuYLV^>p3D%s0kK zu45!RMvye4w+0uSA=6+K4fSuxC|l63D<6!Ggp(>6HBWH-jmU(t4z20}Uqg!fVvo=` zAhTwruw)Ez8P%CrnMVYCIsA+AGUf2k;3V?aF;Vfl7gs z;$t9m*E{ugSg1gmFhcOdMk09^vS8~&V1;g@vh?NS-G%Ax={h(rE{C8#)_M}^_?{Lb z7^@_*A>_j6!$-{o>_ruDZyqGdATuTg=QhYr26nYo&|N102VFj8S-wBIU+10%V!$)HGu;NHC&>w 
z#+XM#K|K;YDlA(z9y!qmqCB0T2Af57XvaK0cjz#tBdHt?M|HD?-0!vD+VuQqOWq?Tx0StDn#*PqCY?est(17R3v^^~fL{JcODS>jp#1#&{+_T8 zTjkBiU_GrSTFYk=D(;ZLRLYJFs(89=c!&zT^0NSCmi%oAuU3>1?6*n zTS9v3e9LOLChY!r*5~{oF_gqdGg{zUGlk1GGymsLsNjX= z*aIkdyY`0XWa(n_>1I_!zykc0qO)GSiFLMgBuygQgS5kk23vpN`f#3pU|_(dVR~Qb z2qAF-YZor33jKACg(-xu-h+*5s^0lh1E~5d4ZAl7sIz{sjq-~Td!35l^Sf2H@a^bP zE`$Ktm=l4BO_J7L&`A`Q#xNkkeoO15nOvYi**OyBb3`u6H3MOcm&gW_8%*)YUPC)C zn`)KF=H~HjGg`|cRFPTy9r}uk{6%|v(DpCUt%wg9tV?9=5|5k>S25Efv zEs*_LM`Hxj?g)F(HMVTq**d}&vuFwl51DOMi=<*mkr4*YqCtV)PUF$cXx{J5ZLO zw<@`S@$dKU&{lXRT|vkb&91h^It}Ifq@U_A#2|WaBN*k?Bsx+N%OM;ZZf(v+J`U%tvBoao7)ixt6E9uDDN>>2M_g*{~?$>cx4{^RGjohEY)xzUYCFm^f$qu$0^h zzq53s6B?D2!8e6-$)$Nm_yzirbxE$KK(iDJ?c+5w1}ILd0eVHQKocjp9`s>Kl~_zU zK+arM>@xJp_`lFA4CWzscv#kABEJd}V`^r)@LcdivxzL(v)vgwnFI?z83QFcB47}i zi6zNLtiZd_Zd&wV+vMGBhHp=5#ss^m5Stwt+1d9=SNacwp>GHcyPz#|WNtYkt=Rb; zFO_?Y0)N52W`$f+(XT_1$0EXFne&Q;)5Z~hHVS%eOvcoF=+JmHv$&aT939uN;vs`Z z$eo?Ps6hl!2>SQc|79JZ?|zB&J3UtS2=VBVPcV?)%M}Pl*3JF(pYO}uIit&SX z`KqMRdH%Y9{TlQ+NOfQsVp%|30RTXecq-P{g}AnzOwS*h?RVR05PBO`kz}Pj4ZY)( zdSq>AETV=6dyT8QJa;>*U+vw87V{}|>yuhJ`I{exvshm>E#6~@bAEb#G#%7b+c@R( zj7vyJ40QOvF@=I)LAmaQ?DvxjLJ1jst`PXJXA5Xry$4b3=8VDd50~;YJFK2JH!;=l zGP6db!-g&CQp!19?0J1jVG0AMeCcr7gmR@kXxJb69-o}u7rNLt1NAnGoA(3prttM> zKhZccln}@;8%F=-ZJVJmxM(2;8$_QhAHp6_i{)>0>HW*)6TS|;x&>$vgm|jQAE@09 zyIizwFH5UxK@Veh<vr_*$0$FwXyzs*6hytSC zX7lY$j9-E0JL2sscp+yNV?C$o~0Bo50BP`=nG&)GMuRP{lfRxNuu+IX+j&Dh0w+6r45jh zzZwTUal9UN<2-0|UmN0AbNGm7O0`sEWL^Lh zl6_6@#0+X)|G6Jnu-7C@Q7l7DRkTuZE!hQrLmdqcjlf|9LuXj#E3gPHQ}SbvgyLK-JRB@v#XIJCS&qx!l|aIZ#E{|Hm5q-=H$t?&B?ZnnQWeHyYAEPe*K^ItfS9$ z?Y-7}?Tr>LPzVttA+RNVW-w+jl1QrDBVG_ z;4s-(Ye8S*N`$kb4zU>nie3QinqJJs5Xw3>EcD~KbD=#qYW#FO0eM-&0iXEC;v|Te zWzlPDYAhOd=G67)f`VC0$73KZ%>NGC&7J^4gua^d8a19P0SjA5p}`BCK^o8NC@bRN zAXWL6JBl}no74Z%U4?)(Fcm_z15iKC4#+us!0B)SWPX#=ly?uSV*}iYFkacUNqshmg&5>2CKI-RF@?N7R zsN;W-SNshgTI~C&biwn@Aw^Z!TkERZPNY!i&B4F?-JNLur22YRhS~?7+DJwfss4Pp9EJN$cV{EQnX3PJ8+XIcjT(eP1rGCRfgf z?@m|d7+&K=w8>9l3Oec$RiI!qTB<1VRddSxvqbN)4swJ+*#BQ2LxTqP1E+Z*r#9s@ z3_MB4?ESDN-kz83|N*`ynpN`5r z$RrU_a!ZC+iHwv8u9Vg}t6x=uHs~J z#K~fda-+=>InCmO5KSjU*uHYeZq5I zRsN&rrWN#l!b*bd*eyNVk+)QrmP@K@@h5VXHU{-N3=J&Aobh>%RObp8+V2d0Q6nls zGfPgsbJur&q;z+kT>U&I?@rC@F)Qh?C7Rq#*iTAk&*GNg`oqd&o3fYK>9B-C*8D?W zcH-!x&h}C&{CB_RnoZALAd(j4ECxcb2^BeG124u8N?S{$L^zfD*WwizXZmGnXZmzG{NcU5VR67=Pjh=rR4;%G zX=4lTrU=nJbU| zwup4^=pWqAa>Eh{sIgI@p_ftx>XkaBM78aggG9`Ry|9N1RS=NKI@GBrMQxN?&Q*h^ z&Xh%!9rnby(dwavvL52^6}D;gsu|uI4-@hKi{o`YSQdVJy}6#wGoeL7j=tCn*Z2Kq zFkK}uaAKE8T4P4?{*=!2grmN5(*fFM_qovSWAc9L`?|q0;3&fwq9xQMk z){iP6kx5LH?n-nV%p`@BjRuW4<0C!9 z;_G2r1pBl8gDB&_UM(QameuM*vWU3&AvlTq-yI*ykGn)BIEca#X7$%=WPG#P?4Qw} zd!BP&jVR9AbYdCFK3lEgZ`zKGu%R{86;kQL#@;^3K%`(+oPYNfm`NAtNP^H#{qQJ! 
za%Y~~TrHPZ;b6!jbwzW+Yh69(pVN;=_DOwn|2GSuys91+dy-2&jn(oy4GjWJ9757S zg7{4Dxs^O1<%xmJY2e!CdpYCAI8hdd`Mk~~@41k>>t6ZQn(M}4@+WHsH=7F@I>KkV z2ShV%L6g)w8~`A)3Z5cC#`pDhLCE~(Be}ev;TUyuwnpS}dJ6>3Dkd_6SLRJFj!XM# z5(;4G?ZNBkH5-BCd#gUh*zmj&qqs2F#@e;bA!qd+kqKD?XqCUouxx{=>UE~ZCJvB-;8B_KCBCJ3_dLNn`= zG@hr61FF6A{sO&xiVgQ0@af2TUxu@u4vMpVl%RF>U^;B|IBXV4vGnz|>TQsp+X(cg zkCBn92&fRi*#qCkYfy4|6{2}1jAXpU0z)i$+wmld-DlXhp$M=f;=YN8WyworsF0zj z5~NPoi%Lsc{nKN&T`xF|j+4>%xXNl2uW z5=)8l?G8%7B&{blwC=%@xJl#xlZi!;GyJHEXVVY z&4#G$N!gvL_$n_--NES4!TqM#m{{1qu?p%5$Z-Fx77IE>{O!PHFkL8^eT->Xecr-( z6AkC7N~T6~C+NLt;>O0X7t}OgB<1AhTN#nNA(@6bdEr5dn5MrNGv@mxT$Fpc-?l~a zRI5ytDn}GbnT=o)k)eyC8PB;_uudiJ&o}gw=rzArO}eRm^z8&}76~_}D+`t;LDqhT z;TGt-N{H#hxpMtRTi;Hyp0c`8%jlVdf$o;CO>gWjgPgu60IU>T-?FU@;BU;n<}&pXm&c zDvq38OlP0|9wFfI>Q0}+kvQsKZ>#a%=JonWE*a483|>?=7|4r~5`ROI*M#_~AM%449<7AS;wBz5x72KKh9HAHU%7%3+xy+#Xp^1{ zSMba`PkZ>WLFC%do4|sc|+KHJPFm?w+Mxp#f0K&c+tq zxDSMV`S9o4Bra>M?qJEQQcuyV^D^hMdwE!Z86zL z<&Etuan#w-X0i+}zY)CuA&!j1ii8tzMT|{Ez2Q|OLE>h?J**q-bmG-a_uy5kmGH?H zhQsLxl>*_ZbUPbbj5t7q22M;zY^tf+FL7(UyW{4VVK-q1o{2h*pB}9uJV+*pYi0yT z>!z0+ag-&#yge2h8EcaP*O%P)Whfb(Ig(JvlJf}8;^2_WWI+jiS#2^MJzdZx!=}8zhbONIY(|BA}AXNqPD>YMuUECX57MFLrX zlK#9CXruuZhG=e@v-;ZjZ2%W*!!F0)HjG~Ku@oWR72K>Q%^V~82|k%v76+)#M48%`DGKq??~Sb z79_4d2Df|bmc3p_a2S!T^1J*GyQV>P9KiKRux)+f!?Gqm7BbEC2uLo$k-i-#y{Dh& z3EY`e3)>yESn^2z;xu|xVtKMuBYa-Y;V~e%cI<>%ETckyRQ4QP5JnN6X0xtRtluS@F4bVyTixQ^nFcD;yg%&evI9aJG^gPcm z&<#ObPflfMxb_E?!?Jz(jyIOi_*?S>KO%|#e*O0 z`7&5qYF>Leb$fMivj7fc+Sz?=sRP))1x|C=6?Wc1ac6tguJ(rmlNYat<)vhNf&cMW zX;M<(oYPcIyi35NkQ6Z$38OC`Z$<;%R+4;i+7|TP2(fzqi&Lpfir{{9B5-|Lnl6*o zZb;ZN+*;2Wr=jMwug;Wx;raSc7Arok+Kfqb1-398_^*mguIw&Q!rON7h2b-U=bPcl z+GyCXL%bCJMsaTXMZgcQW)uQuksps>>{NE^G)QMDZqu@h+pyfzIS}}Bg&E5l)#`r+ znJ9VpeTaYj(*raS31;PvaI9Yi!U9B*fSwQ>K*3cly)$#6L+xjy<5fl?{v1pDOs5Rt zqh8>QJYOmIMH~wM1F5KU(@jQ>AUu*5t2e}{hAYAYY z+jQI;5}-q?!=!UO#ETU5a7w&Qc5Y%xezMPZZ=9VCNK0Pj8cpnwdV|3yNclWWGnNG- zO_lJ*qvRp=I}_`gA;2WgE?N#>qw`8G3_qBKDa5{v!UdiSKYdzioC9 zcFil%NP{ipfIqa9V@KQQO{oV)HoCB4QcNhZUt-hU*2{$nHRl zjYqo`!wm8RDyE${Hy6m!AxJ*uhyOxi8{&F<5TYIFoe+J0Rv5WHMgKuXZM3Bil(?Pa zkWWM>mFzq&vMe;e#$|p9RD_$%BS0_A0>%+Y#K+mTl5?8Xyp{}Iz3sq&)(ZfKLa?|v zN(1EdKL~-ionE~$JEjFZ$b2o;@lk<6QO$uu4po`oW%e&8lf(H{uU>8WwjzA@TFWVw%)3Q${ z87KmYCwwl)T;!iaf|?Oq>%48;4fVKY?EmwtbU-BvAX)3=Yui1S^*uBw3LLHGh@qi> z%dcnpto{3F5O85TNr>-@AIv@3mK)ABNu44xTX$ZI*-^?e%^q6oNk<}0<3R(fK~&Sm1=6O6=#{Q2;}r&@F}mechuQjAOv zd!h7QQF&6xYM)1VLm3NQvVE^&b-V^2cr{XpRqqtG#L~WIi)o0MVwL;_Vqe5S6s|GwNc?U;wp>DBKWxx2 z57~GXlNtxcn9_Rtnz^P?ogpn0v=;|wu+4Hkfq#qy8-aPCOd_c#JMwPCFUr5WC~kry z1bNH87YYQdCvKD)K%*jbDzcr*U}r~^c09!)Q`kZeyao~{5TbLviKKcx>tez59!c!+wIJl?=s74&q@@IMnx3L1j){2v z&Ltuj;-^ji?{YE0C8nt39MaH^Ania@?6tz_Q~^NPRErUDlnSGYk4}w@P1_!`E+Y3a zf3{Rz(jTV~@fnwwzP-280!xR~=hW9-Q`kkH#i&K<0}+%{>x<2v={k^n!?IvF;Db)k zahzbAZgB)9N?@Z0Z}l(^SpKvTs{xz9IO(BClPm|9L?CjW#|)DomTVi=l+sriUS_3M zs15S2@P7FUb>~WH52+?#|BybhF&JdO!I*3hBy+XaJ`^1B+a2Og{~f#S%BHn6gK4!# zw29(7rEA%|vQz>1X)EN5t5>Cwz>Coq#93TYD*|)|a^Hw>-5hY^kNpBBay__=#oK1K#D_bg`B2w(IYEZsc3|#i zGpT}km8X{Kq2h_NFG*&!UM@o%w#Q;Q>tW`R-GF4}FC;nHFjP-Lr;D~HD2fbqld+?v}&&okp5 zlm4c0s!qp;mxS%}RD)P42L`SX;!Q)nT?1%J4?wZKvvWi2?<$0#Z%gUf%i;r0Hgx1# z&!2hDvZQvL^v2fdD-+-EJlDV7xhzq1tCUsB%1-MPGb90r_B-~$c26no6WF`uk1+62 z?Ot}rx3KHUF6un|&8}j}a+}(3EpTTSl;zCe!WHjZ1<|3&MTI#)?%~d6TsWa1esNL4 zTHE17GzGVo&x9rI_m9xEWNyTL@i%k99sA*xt$IyKKd^b`>@kVv1jxzFgrCqG-t2%b zyeBhFzHjEFa#&WyKXVF086^hz)TsZ)22o^X5EuK!{!%9Z>CJjE%gA8ITO11U=_!BD zj_eC>u*KibPhxj-slcm7BVxBMQ&TAc>D9Sd6EJzSW0~1VIF6=bGsQ|Oyj!Zhl5*(T z*JS+K2lc3s>Z@UC$K!^6K0jUTlqJbm598*-_tL4dWW2> 
zc1Y)uiZG_^fBjzVnu`0p!vCg$4kZRVkIoj0q9tGiveHcy{j`>omSeq?7vyxZi22RE9FmC*Qud+ zRQuMXuX^92x^j1Qp{vYBcN{TgHtajL;XAgJLW&x|B{Z+>b6HIndAyM5y5Xa&)nrZ! z15ZpLD|mwn#cWtM%d>-=&_Rz4UEwO%zJ&I1>8ICx@SJ!=Kb}wOTHCfvl68wbW^^Kk z88bNRP?uaB5q$y%en#_CS}s)f^iGJhT=ZV6<`q6bbGXF5bx8Z%m$IbgZ-O|yNyK~{~Fb%XPlk@u!BJJ(wzkAbd_;x7;WVkGsDplF=NrpV- z8l9n+`1bof{UO*<$NwBZzVEYMUs*=c?NaZ&!tQ?dh<9_alrJ7D3IUj1U&~iFdOSy0+}(h>@@7DuW&`($7r>ucrjy z3@SLvd=}Zx=eXWu^}L^HMt*t~?Qmq_4?&H07wI_$^>4F$nJ8xS9|(aOLQ7s^(I4<1 zd|d<@2ed!jc@;QvZn_K?q=u@?9b8X+IaeC+Gikq!dNzzC#l&(IZ<^TL^4%+L2d2Dp z-=190VZkGa&-bvA_D32j24G_xv9VMl(|2j11(4hJ2t&0mpfJD&EP9qyJu zBZeUWs(30{rEnC8=Q-0jWM_)Cw}E>VIkWBrfVPr%3jRRE_Rh^3b|6a@;yuqh)Lq%z zuSRKAl}>46O*vO=(7Yqb-F;4K#0&H{IxYV67)WPS&=No||H<{;69MnY|43Z1N&gP0 zc|Lov-*Lhwov}@%n`eG7PEoX_rWZhte0)ss(+2aJSHeF(}fWy3i2t1@2>jLVmoy?y>du}G1*kUvPQmTck?PL$B zk%)=3iGgc6D(YhynLHfP#REL(QAhUTKv z;QROc!>igSiL&q_0>Uv{3@0`XDO8MlWgSW!1SK7)w#lL%3;S9Rr*9>!cD#XEb)Sj- z^N}82wpc(IBm5-PU*&pM8m$pV7PTOouq|&LzZHg2XW02W9ZrinKg-m^?T+v^f$qr# zYKNXUPP4Utp%Zf|cnFYR?P}9I@VLQ#j|Avr*vboRlk{Sb_f2ZcK2T z{Gt7(^Iq(Ab=WHuey#_tYgR+MAgo4st9#60p;7^7c=I)2?O>afEHCX7WnzL`Twpq! z332y9bU!`r{8x+mz({oyEueE$8bUT6>)%U)oq{qkdU2Ay)QpRM!6flHJLBA-{aJ}H zjkj&iqu~7-8~wIU)y=?s?;yanDAa0ulnfv3a z;{x+|D$*;0TIXZ64=sS?V`FB-SKnHv)8)Cg8nIl=<&a9TU)FLffOVLeB(9!rh$n|(%*&8`Nm6c;3y<$0An-8@#h zOPCTpglKZ}PX;x|6`Hmq5HJ%sQm1U=Yz8tB|2wFF1MQVu8mC#Mr>km6;kTFk;GaSH z7miGM@?O(EL2UXwZn3~df$28AlY}H_3o@_|L|OOvGEwI zkS${mofp+#k3XsJZ~HsIRv{qP7J!4`JJ`ehm(!a9>`gqLn-w0rfOGQ84Q-Y~$S`yV z^35kb-5!@aMPzSDUSzxPY4XYO-KuawGHnglIg!e`vq#B|ubSQ%jMmANrrHHNxSJn7 zFgpK>!jJbCM|TL!2QEz3n~v0koYx{N5;aSE6F>!+wwGwlJ|QFV_OR!yHsYjTIMEul z3^Fnfoo**9`z7^E4jV3nG5s&(hGSE6`^>WZ{5>z4{|*uAtOg?6+8J}Wss9|<^?V0( zk$wTM@~}}YtBxnu*WI48e*SEG_M8=t9BG^KQW;Jpzk3lPrl?XS&a8zZWHBl%&#G%t*YKdygMYLKJ|p)1Py&3ptSL9 ziQ-Xo+w4Dr?E?OjY>V(uD=6qJrTI^|e_TG!C1&R%m6r3jXXAsvxmR~9wtZ5)F1KSy zDVOJJn_e3|!V9W)pUR-W6j=X_m)8pYjy{wIj_X0Z$!9GXV)1;vDl;tVyJ$0q1Q(t4n*&maX0*yjhn);wz**#UN=b|gY0*-{7ne_$?6xV`vvWKa)_0Y#HgAS7m@*Bv~~5h6)x`4T9WcV?-<)F*M3W zC4fU^IX=-JuQ$r7mpS@78?hT1H4)6uA+dO_)B{yei!uMRbA%G^n33#ZLZxSqc}r=aqc+6FTUf=PgcIl&b4_= z->TRZ-b&PylT_b^)wXcTZSzYo0fnXrrM31D1=j-b2HCFm_qMDU`1@Wm0umN0>IeNL zT|`B=91%q;vzu6{Kp=tAHc>J<6$#a%Ko#iIW(=lT1Vu zpN{cCISqZzMu8CSy@#37{|TGai-*9uG_Ig~xqdt*Jp%5P78JQF{{v1okh806xLY_6#0q zT`7AcNaYvJsYD*Oz&ZL68m8!880xnZ|Rr+j1ciq zlo%4@1nHfgAa1m(gM|4Fk(f~=oVXL?nWhknP=J1g zB$37-JZag}ZGwU_IRvzk#iuU%e%F-qgR+~#1K%}xVkm*KX0NiuOFz`yovR3R#M{N1 zs{VvjPg{i*OqL5+T(I(#sPFG4S49K%te(^19Kn!bDv~zF#z7@Q?@@QF{<&m&#i<_R zXQrs$As?S%228-*(RCYL_{LP}rYb~l!{Binjns|A>(?+F4^w};7OQiK;^GxAjtxk4bUQ0A&u#M8=1KH$1EcNRl@O2}SSuS5qp z6ONn!ZkafvedTx8SPdFUIL$&h47r%Gv47ltk5Y;HnU|XGd(<(zrr5@}vE4-i-lP;1 z+Da_7K-p-S1X^k49KUopeW*XC%82q$1f=7hf}~s|#YGg}1;=0t?E9Q_Ybq7E20?87 zFF(G*z9rOSJ8p0fcc071m@=zkr~Nc}#G7s~J#n}=q>zwpduRZ=*9k2RE4FwceGhPy zAh`DLLBsO@&FB@j4N^6or0BVD`Mvxt6E6jjmn7(k_W;6=_I-Cy70|w$^1tUOWU*IJ z-ke{c!yCgu8Trl8Lrzcp0tifrW*n~o&;fd_mP3U_IERA30luL;Xv*k=U_HZIyKl4= zx>9At9j3ut5NZiLOcAX@)CswhL#0p-vkzk=?g!{CAjFA7kdgDZ_q(z(GJ_}hnRUqm z2$srekVB{0DndEk=5iVr&d{j=k3yWI&2sH+@H`bqD zm+F66=4El_m8~8e1IFE~sXC1)M@!=65{hKgzx#1unP>4opEy2lg?z2qGa_f_mF&-g zC0OLDK4ES%i1^QaTO`vVFv9N>AFM`^(N*tTysq6hJ6V{!SUUNHVq5m^OB8-Ml<>|@ z2i@mlbiLmU3rv4q8+NlUZyuR<>?QQ8#@QDGal9wpK4t{6`b_K9pUPs_@{% zE_adqk-%cmsGtmKZ}+hVf>mAW3H2{Q4Pc;WR&6SaZ9+X7uVTUh7(y;=fm7#xrA76h zIi()3k_*H&KkH~63G&+E&Lor+L2sr8hNIG!SARnt8~FMd(WEL+W5=ps&;(pyaD%Y+h{xA!4uw(A(vprOT1R;{Ock~_ z^LQ|~WoYEC{Jd*XpN4o`t<)*bD$t*HO}VT%>Z24X$OJ7GN@;8CFXcK6HVb}cI#e4I zge#NG0}wt9PDL{4dEcs<2cmjCv}E3z!*`cgb!we6+1SAM?4$ItLMX)qSg0BLC9y-J 
z9V3%X0T8GUvL{t2%je159ULp}B%Iw7fZ$1OHDEnMIU+rG9qx+uDneLszlJX-?El#=Oy+& zc$tPxv~1vWSeSgvj`Y^DpOwG!`8C}QLY1hyUzHZ`MzF{UKau)g0DiAiVA!WzU5r0C zF=g3u*)!k}RN~PnQL{-m2DFR%wb#Jay~_mM)ahyj_Xj)31d)EepJQTYhyqa0)o`{?Z}&_H>P| z8J&5E>+=-q_4Ky-60Ta@a>JFE@8fU1o>w?KWD|m#&YCEqVOKHyq@=c zQ#hf}jPmyj@n-!>`4%UZ!$$*NfvWPa^K=Rd&R zlIOd{L0ljZqGdD%2@502K5Q54lJK8M0)&XPDFiuc$Vg7cT7;J&&=7g_kGtmfn{8xf z3?;80EP*N5ws&mx`*K@QeY0h%($wG+S|v0gJO3XsckQM6k>CG$kWH!}NcM&M1*}fEgicvbS111r}zk*8a3n*-aq~D3Hh4b@Fh?rYorZwZF!Wthk zcn;Y@2hVJ+3P>V^@^AP_r@MeLDCz}baVTlkDC;VL6Q<=#F-e)T}evGpP_fTiy zF12W4^SGawwLDMq07;Yv;ZceFewz&AF2b2ok4Cv(7j=O*T;)a*Ym_0l0iWkr$`EHR zA+1L9@E(knuz-kK8L{b1S^=(iMgU(tAB6jILe`*<^L)Du*)2;i9{r0+E!7s4IcxOm z-)_GCs{`imCe(9&{xhbpzzJeIC9)CSwj%`18nAXMziSlT;|Vqu&3=Y^u#WSallekt z$F>9#bX|8;Dj#{FOge{kFZHtQ)wAs}czwm@uicB|N$8I8MU8NwbQ49P>@ylZ3XP-- z6>AZy8{e}oqb*!4HicwfB`bn1TBp-oeE$O!$Ttb%90tp*$--X4}&pQwdyN)vCnpa*|`5UTsI- z;dIEH==Kp%n{IWs1R#4o&l1<-aSjf=Z6#?J4`;Y;%ul2&W=|_h)H~kOQ#yN42>43; zf!>vfQ)U)6W^yrz{vDYlu~{@531EC0!oV;~liFEKI*XWg>%Qa`ng5Bok)0Z);Ab6g z?Ddrjz%=q~;#J2;=MexTjQzrzVaAOptQy&XAh7L7P6>7HAK!@`<{v``ra-_fWMPi} z>EX#(`QvZ3@Qreoq1*7^6f4*Oz}eiy%YP*91uSb4v^fb=%(O(O;Obp&@jI zR(BloyX90mqBSd}B@Jw1ko!s(SoIjm&7(6JN}_MCNvwD1Pg;?XH~b`LB;nQv1^D?v zCUQ3R8VIO4WfbLaxev#XT|Fl2kG#MkgRu^tIvz)z2z&=c2po?Nrwfl`vCtEt*|FkC zSaMQu@H?9)QRoNxc?3y-Jw)^GWE%{xa_ec2X>w@42^nOo%#MgpP0 zEu=&856U)OFmVxlwe~yknsufP?fmC_EpLf9+L@-OO& zld~|cOZidfOp87bfRi|O>t$B=n=EsOx7~%|;eR`Y|1sv&2Lh;gyx%k7UEUG5B(*Gux0W7W&QKb%6+_vW5^-uXZ$@NjIa?867#yz&0l)b8gRXdQ)RUe{20 z-ljI|sY(A0Z+75|Qx{6=wsFAgC2iGsOvwA|ZyDb$O#(!uPT@S_yzY@FNNVOI{}=o2 zbHks?y{p#pkcYerAtvJ>vwbv6w3r@Sk5!zH;~_#J{ln+EB3a9cFMZ5!E$0(6q5LB? zrC&W5-O?Q){XaOPZYqvr!wdOt2M+>S|8(w+FOM2kST{g0`BIvI_5IUxt&r+Qk~SBQ zN`!nVT~FP+Mg~lWnQ)=IiwXuBX_Q^Rs)8@Ftsck-=ok1KIUG)qilRjj@!9fj$M%lv`VS0O|hjoouZK^5J%fp|!|@Fjd(!8>4gF>*u(?MAU=8HNJ!Xp&ytgNLOq&ZtcG*Xx7n!a91`G}c3}rD{VM zAkoI~4Ja!?+O#WIQPz8tiGb6onbjYHUgC@s+e4YaanA>kd1j*V$SiMolt%z~VXk){tXDwU>`(NXdP;-;3gQC4RbmR1HYjwk>Kz6VYDB+O(% zOE>!-nop+-SA&*SqqPjU78fBTUJCGrN}}c%w_)Cy=Q%r`*$YK{qWO{aX8&~1wb(g# z##we7ONqc^3|%qeUz^4EIv&$lkN=8Rwm)ed&Ya_$&*xDB)fnHERVc3H|6Zr#SL*<0 zL!kJ5T#8v9FWu^>!v#t)8AX=s6&snP3iKiPpk}>UEbsL=iv#WY%JW&9JcorLzd7*= z^@>qYqxc=nUCBc4n*uL0zW1u z>jz?hVmkX&F3tYs1?hN=bQ#~2C^|fB>4Np&TUBn0hoZmIq7t$y5Yx0|aoBe9-k;3G zsPvuD7p%KqI)5HwNJ!Az@EDVI*pY2A%Ye)hn&pKv>dB#rKIi&X?U9e_m%xn7-GD%L z2-FN0NLQ)}DAo&C?Z9`f!(9KX8*G0Rc{-D>(utRwa6o|$DY=Gw*=9ib%)n}mLkdLa zV8<8)9rO5EyO%!DW06h|2UjO~F;|R$axbwzbU{9eL=J@?l=yO-LAOiVM6*}efF5l0PuY8ZfDj0MTxLKK#lkt+0$9uIe6#&RlZ2JQ>mzD=T`YVsRn!U;z;b7 z-3(jaJyUi`Lr+SB)6R~Z@w3wPKsX*vPlv%^cv_}(We3vjr|qTo!da0=BO@b96hpU? 
zF=dR^P3iDtewgXE4o{bnY~L3$ecw)erLz^bVK)gA`fMiQCz3V1Y+-G#OSiW)Zo8wJ zgFA~C7S2B4Hu4~*IC16BV$Dy%g049C^NCnXhO`nI-q+=o=^ACcQ2#Zh;@5*yqjx57 za6vFG*TIjsho5ulK)+%%E^0~m9V2loJ}uZ6h|c=kIs<`n`a`!z2@G_8yxRQ;0)3F9 zcO3N?eQzPB@!(h`9*!nqpS<&>x{y_`u|MBq%pMFAjDEOn@RQiMEnc2s>;CXB4YaztEJcxf9R@OLZ!ksqvG2g};`|7Jx=pzwvxM-VG zZeN)hf6su&;b7elnY#r3H2xvpCM{>!h;Hr#>~~RvRt{R z9`g&}nkHorvm7@;Zw!wLS@Xh8ul<|nVLFHjeCMn_mdTv*GsdJ!B@VRv7sIZT=hMiVzi7T_(Qfqqj{L)Ur`?5 zhbrfs_85L>FeD5HrTL5WyCCL2fM^HO zkbw)T`)TMhJ0@8&ovZ>-zMrptZg1s?w)N1B8Au3E26tXU^7*@!=1s!^oRuBcX@fcn zyl_CGgq86Cjtw_K$vhs{GuY4@jmO9RY}GK&=hGQ*YRLKAP4rjSG^fDX2HCc|@2PtN znB~Ye7t5R%$D5g91Dx7TFBIju$8XFo`YW&d+^F)Zu{|;DuD@&?GPxT%Os*2s5^?Z+ z-#d46%SK12qmzF|1IXj)n-%nY>|5pjw%&0Q?molC*D_gsTK7r-`yc*gdiBluDo-bo zQyx)T6NfYZ^hW`9anLQLabjfUQ;GiV$&xZ&{q#p}3j*E9vGte%rw6;*1MOirs`Eb@YFT)yVCiEZ-7?J2S9|@|3YpLt{2Rvs$>{2s z`XXBQ!B#v}ST=MFF0AHZyE2?^SQ_5FQ1tuWI%Z=5nW&rK5v1T(7D|!M)nuyYYBzd9 zku1aOz%g_{!9b8p1O*)|mNOWk35azouli*=xcv!Jc3bXKK5t5I<#iy z_$inNIcld=iX>1>vKUO`<1E@~2?8L97#bH^7 zEP;Xv+Hdq&v#mA1WxkaTiXgIS7KHa3cHzsTUyfULj^IxAf;0`Bar`z3G{xHC^-%j47n&xEqx<906>mr~Me_LyHNxY=aF5CM9 zbT6)pYd;|Qwo$>Pm*jU=Q8zi=y3yiTtR@MFsY167 z_ipK2ZG`DnVPHEioFS1ahB!NVz)z*&=TfLP;V@+V%xNyS8&n0Qp&`@YXWk2}7)Y-0 za6yosig!2R(qW=uVaz1?6?yOQZp4TV++G7D@;~-;+n@w@sNRoi=Vp4Ju=8Ej-C523q zzzPZ+8s3XwTuP_ATFF8Q6>{Cc2@r1UXOvKRb#~13X^`J>qikVvoAha9s((%s0gZb4 zo+oE&+CNoj014d@{X&;dtq>4;jVI_q*Rk+SWYd-KoZ`eIZibXdCoC*yOlm%_g(JfB zQTtT7MC?`xbDO-DYE_s%Gz3g$F|4+$Y8I4~CDqlKSF5l;&)c*Oj|refq;3Ess$1@0 zpeVT!I6Fc?qF&e!>ck}f%`CNmk4iFB;Xl>#?^d5ooFG3%%M1y{B#JGIFwdINmK9is6jUV`x8GS}YecBgZ2v(fB={wuB_2lH(RG zo?0#txDQ1)yafNeR%*d+!J}f#X`c(uj3enR_j)vU2XL30r^g$e3-Q_+HL?qU+A$IGmWGYmmLGsP$ zPb=7znMJzSuY|m2jKE@V=bc17){CUz>)wxXtDffl`bBk!Kc70&(B+lEUB@h()Qg#a?Ojrab=DU}CQ86_O6Ak28Xhg~9* z>=&G9fhPHnQ5C^7H8|kXaVGBUmfZMpGN6`;PsoU@cea)Y4b+Us_q^VBzNYa0a2}pH!>xG_-BnvzSmzpCwf$(s?Nz}q6Pdt(uv5trKD$a;!$5j#edu`!~}e7$U?JI&gX2FDC_bD|Ro8rL5Q`-_yz z0{b&aZG#3PkEu7l{fiO!>bq(;U;?Z1ZLjk_i%zo^$AVsRIa@Jpd=08=u=hMvp@HIX zkMBrX->v7-0?U@+?!V@zBU(f4ABu3WkTC3G{-h&Sme+9WQ|I_X zmSM$cJ-(Y3UtW_(>so~*POm;J-7>vuIugy8w21IQ>h~z5EmFHhI0ftPeZ_d6Ht-$v z<6&&O2Wf%UOcB_T4fOLlUe&L^h}BqRm*=8_qbgaV(qmG;vlrs5wCt343P5X92$8#V zdl7Bt;}GY{#Q0B1_|o2TZsu)flI8tVQv>{3YU@HD{XNeXri_BBzshd~GpLBii_$vg zRImDWT-8;o$M@f{g2p02S^T~b=fK^NYf7`DP_m#O&N7E8FK20PD^dgfvLQz*L*pjb z8R!t0hmTKsD*LQh%qe_(uj zI6#5IvWbtc!Z7?8!e);P=1;koT^`52wow84r<#&(-%T{}XGLV2&=Cw{uRrHdL;hxP0M2}QwRuV|zSsa#p-C3O`duU9%#vQ~k&1_> zVor2@9ut|5BNuLt^{cY@3=DU$T5j}{$57*|X5db`-rquaFE;yHZ(U?$>bJ#t-T2J@ z3b?-o9hSL>eQCFos8wcBx;79UPj$f+O|knvFjFq<3oqnt_3~iP?0MHV-Kr`r?%;7? 
zU)X+iz7)!|Sa!yll1e~D@Q^uB%hQDXUqEO$P7>ihuXz5%|D)*~!|Uq0E*#rwY^ULg zZJSNg*tQ!xO=H_mV>PyIH@0nj`}wZx{d<1xbI#gpt~KWv_ps}U(fzvo(S}m=?6+iecdU`re33YmXwc@^yyyA&$Q9y~t-s$Ov=xO{@8h>5Z)~ES-O;@_a@1PEdz{NbLOjOUulS|!8TZ6)WdV3!OS0+X@;WQ7MKnxlV7L&Wxk zN((yG&%r|*=?f-EvV$d%nqD%MfPlicA!?;gJsDO;3kO33K-sqUCUYo5;7z5~=^aw> z<2At!<7T*vB+#X^OSvOrFC|*M7|<4JE^olR><1a{_dT~~kudc~K2Zr*dqVu;>q=DB zk{vRStk3eo$K$pI-xQUtQn+ zCDL-6S@qM@nSw&wHO}%J`_(XJ%LgCW1!AtzOZ35@JGCMzZO%di2>$`UGGEtj(Y>&M z9=KS#4}6juq=1vt55|mIvKeNJKFG!n*2{4*OWlvgAFInY&Hfb1$)m^~X0mvB1fvGO z^}dZ^M0$JeihuT1Be~R@Pid@msGo0v+4E`#l*jI1*WTYv-yqxLQ!F9H;OF(tYvR7ylfn6~ ze=UjSb*Po_YVKyLbhTTaiwQWm=S(~01)hg~%x)Hy@cmRX(xKG!BE_r1gA_gb%IX@o zWaQp^p0+%KvOfYvK=HN6kWW@*4sk1wTCg0Jk`$?^li@LX&n0|nJoe>!*%EDv@cAb1 z?MxsQo&(X_G`g^=3-cMKl(ZrU3t^E6X|BxH!p5$&ANOQo(#|I=f7mrdx0dW)(wm!> z>#*=J>>YOhj#qP7>X_}edmOtj?eVvsOzMhRsib&^XZZzi@kgP<|KnO;3ACYx!P>n5 z#M10PrWL(X>i$0No$XERUn6$&BFN)9rGAs_JZYMLxuiG?C8TLrm3VI=e9Y!sVM;Lj zyG2I(GhZ`953egAjldwXlatAWXi7!^%e!M%v>QoT^B&5#I{gc(Y4N61b^<;yI(!iVN9`p`KnQFB+GM0l)z~vis zD{K>I)k0Z<*@b#FE4(MpC5!&Y0!UU%I51++XWh-<@=ZS6*`j4=VD*(w_fYnnc#M5) zdCD}8*Am=aB3}*fd_K@tXZ3-a=PK3LCH!vTi{@@P6@lp|;tK)I)kf~Xu>wo|a27w| zq&`P^JWs3Q>gczcKtcZ)I{MOKX(d5{$hT)3sCP{q+xkLk>PkvHAq+*i*~t!>sgPf5 zf##-U^0f(jJeP-Ky1f(-K$n{)vdjMJhY*w%P&jso@_aq}dtOJ+p$7A}n#PfI#*h3g z=C#Z(t11-IC~hW;G|rELA*GZ{;+)2EsC`IZzf|Vzw=_B@D_4{EyhBvPpL7Fh|u+y9(0fX?D9Aa$yDL!d+SVK}d)qe_NTw=uOI?s7`*H-kzS z{Y7Sf9!gEAA+f5B>wBJA*M+G zYNvx3iiEP>hoQ4}_Lt7nZYUV^=fhDF^c1Y5+g==7xhHpZM<~X6H%HKZY_@r?6NNb6plwn(g4+puI>=|_fj8wi8 zJli!Rsoe|N^p-sI_qzAm^Q+S^OFuK6E2W{=j1Z}fw6lXfo1ZS$>6u<=51$SRuDKdA z${&@alU>FbAcTD(n5Q%Ou|#OExIRU z=l@i&{LZDZYxq(Tvp#E2Lm``KZRI`$ojjQla0a=bHZdz^A3vWRb`F z5Z4`fiSX)D{x;2m63X0Ud@aC4Y|@PEpxMHP=?ea}(+G|HHNgF~R6cF?tK#Z>FUkbz zW8w$uJKvA&cOHocQ=@u3>7yu9RwnLoF!*Yc3{qW!`9zA0erqBLNE!_9lU5RGK~iMV zlZ-++A2)zFJX@vu7oK36T-gdLaz&g82$nGl9u$l3MGk5{_X_7-Qa$X2X@DaX?UvR> zW-0Ff2{`L|&%1>H>1YdpBsK+J^VWJY_@+2%wos!e3Sta5T8XJ|B9FHo44VX$JS(Y3 zPPi;vQi^bYgQD<9}v zm=8<-iq8<=msS|=f377%x~`O#NjeT28m&D~sDX<^Ky)E<67S<~`J%GfViIE$R+vmj zJU>x9(#|gZ-BKz|oMlujQ-&#OHI!^TQbu90X9q!HTNU}2HMH;Rx%)Hm2_DeXAe%_% z3}L)xs<{-9JTVN!f^MV@HP_Djo&8g2+$6~{c^(}jnV!hwQnHgE7o|7XDd5v7cJ}_v zC2d|nr8ReaJwGG*035bKak_GnrjnafH$UI-x?cq+kLdX7PJSTsdKQX|20M*(=QkS0 zdZTknu;}lLh!9DgULin^b4CdWngG}-4ES6vt1HSR|MCc+!<-sjb^In@W>fwR3p^T@ z-+PUiYBqDvv4^bEZ5&_#3%*^6@$5#jUHxngOZRaZE@rdH;Qk#YQbZg`$S`X#(8rxz z-O7W(ARs)hYOoJ3$B^`EFu(c*kGJ1gTT&YGh_NJaJ%5edePE}_hN@VXU_j=NetcdV zVR6Jl^DrYGB8qcwDMJ4U&U^ek8E$l7mef1U@!YI9-Oge3FE0_=m`Jqac~Xs$YP-Hl zbUtiet@-Gr4_B07-P5@ucbmdO5Fwj%778QQi(WCR3=`XKgO5~HSiIT85M}SWPkb8$ zz*w2fS8BpDZMZYQFouU)1o(aAXqr&VEp>edX~DsV!1PZ7^BxY%#=gSK6Np1x|`>Cq8;!=Z#K)gY2}IHZ;#qw)R$f4VNlh7|W&8 zL%ruLMPO`|a9SzA#dLRgggkA)J~b-{PR*)M71Eaau!+346WIi*^)I|8=!r|(zI~QM znn$ExY)0~Ed0u)NrLlQ^#r47~n^zw7xNidn<>Eq)bp?EO+2iPq4qY<%WDS!N%5o^c zK+U^yJGQELobx0K+dUVims5^hen;LAW(h2oy}CzZHIZ9NT+(1SLvb0y7lWaKeHvMD zc`-_{UX8f$^7dm7NFnwM+2ekyXW(}UGd9qMMKq@HQDdQ)P9((6Lc=Y9Vx6Hk^KS zVZe>`pHAb{A^lmEU)WmF48!M5YDh)(?E;}OY?=YL3{qt`luAbvZjny=v8Zg-f5zGE z2^$(RkB)I8AwD|mA~&U?AA`4lx>iBKwL*!T8}#?Np8YVK0#AcrW;n>iXw9-XCalqM z!-G^;;cS3L;wulAZ8FR3l%Vv8At#hj%>xvMb!gI_x;?RU`p;M-wb^#~a#^9zw}=ZQ zToj*OrmVK%mBUP`C;|@EB;rm)$_dGmwBWfyym3#{)*s4kzov<-Nu{KI)YYaYv@&Eo z=JC@SBVj}2%O;}TW{EATpBoc!hFQjju-=df!v__5+smf2p&J!%FR6o%VFbuP;Y^#6 z+kKd=7BLn8ra-u!sjh!8^+e0f02~us2udDy0cd9BrSY9)%ob{DALY7UZs_uXV7}VY z9HeBvTcXpVL)f@e_EF-w{QvQ~Xm&mhI1Ohcsq6gQPih4%o*l`2B8>&v>SP`6z18>y z1qg42x=M{pvRP?AmJA;Va2pxA&7m@<~on_O0f(5wD=Kd+nUE52uTf>_gA-CBM^bk?Q9(q5kJFp;e?|O=$6J# zf{vH-I^?GPPufDHEBZkUDIUni%9nDgU7InUR3R?2vs{KmLd8 
z=al~o#x>~D$5QqX0CiJM7g?fkMoN%g!PSTr8xKu|$ay_pB%umE3IAtESl~eR{>>>F zAv}T(D(pcjeOp(MW7V;d4rg~Q-1OK*ZaHsS0|4|~ba`yzT7I}1{Un`BFVi3JP!MKS zs*BCb%qkq@#I5x)F*aqqP34>h}sDIL@#Ym2>2lh%BHb2vQWK%P_9zxU14P-iVOn z!ppGfN=l}i6rIJpA9_>EB{QjsZgjlHEz5cZ+WBLm)<Et^AFAUL=SX&f z@{g!lzoXdD9X>j8K3ywhKmSSk0vXP7b=R(rG$avlezFh?ZQisX5xY0)az}!ibydhM zeZC#Gx6U0c(pV!$bcaVnmcWAWSPKOL?c0=j>q_$bne2Zq_X4n4n%?Rq0C{j_#g7iHrt@XStlVtJn~~ij zzz$YEL;_RBy?`y9IWg&Pv#Iy3aL0@r?&FuYv-I;f{2Wx69~F>W{$~eH6T-P-VwmdO z#>8JVVEdVgsWo_br~{(IuOp_#RmbarWQK3swEz17{EsW1Tc(%SeE>tu^zmmQU;n_M zQw-xNfXjpg$FM|c4s8fFM%)Duu_s1it8u;uY3Ay53JWNT2r><2_OgdbiQl60o2hXs z5XJK4JD~C%8m?lyj6I7vonMs`tcI0*bMWPfox^CKa4KD6*8w88U;F7^kVy*cZ6QG> zKuvoYA4{(8PU-LgtYMjni7EOCVtz-nrZ1-P5K=z8!xl8 zG`zcAyg3)-9rJj$LIJSJal?+U8R4Aee?W@}sR(^R!%~SQS15dBlD_Pa zyL`&oY|j`Qb4<~RZ`*TP2^sUU-@G`xuwOfCU-?nvfXhtABB32z_GrQbqopO|9 zR3Hb$SCHUd`DCMuO_QQ2KqbP95DcPf@Zqyt2m`0@0wAvW*FyAvyzEk=1l5J|Zzgpx z6a|h2X@&3?!eAajhJ&Ke%f0)jWH#JCZz#`K>Qg=rs$D@sDa`tM z<$9I)SYK6Bb48&Hp;85dp*4QX)NJoqps;j;*y~VfXh_bW;Tv3K&HEpM++YkR4<@uJ zvoP9^XA{_v8fZ)+yF|K4x16C|gum+wfpB80y2Y0x@!0nIMZMF-x#baf@t*i8LUTn( zL*Zzh^K;y;%i3iUsMWiCeLT-dav#wMR^LNw&L!PpU9MukUXx`lXfKa zh0zIERWSw;{)2~>$j0oTHqHklEp=@~fLS zox#J|D6mT#8feGCe@)Gz1ybP+^tBGC$ zc$O*Q*aAeXD4&aUo~F+FQ~-vD-Eq~6SaE>fIV93|#NmNwG4MmqstN`?NVuCcS1W`r zd@`AlpwV(p@+<}gpZ!-%wCyE7!a8UMT+No6f)lFYJTOj@V4%eCNK`4EKb+U)Ncdmt zeIdXtPj)&SWyJi8wIDCg2`8|QKrnLIn?_mMOUUc-tJqs836E58BGEPUkXe&9(kxIk zF-^ygVODS2?n;ag;`Yxud{L+8K7#G5H6NqS6D->R*?g^-ww+YV5jERr2A93*w`c+& zdGeQcD%=iV^eSYEMWz&#%XORUn!e4VeTimpuaN{1112tvM`GKj3CE<}6uIGjtMGDE zpacOLf)|n_@yTpk`Zt14$>9_B4&LGPa<+WJ`vAp7}tYfw#H82_d>%|nx zF8wu3N8#(VpQGNGISTengC)kHi3Wxl$Qd+c{r+;FVAR7qwBF+U?6ETRRV)-q(Ndu% zaV3$W8Eu*qDG52#3|RFhi{AwpW+gyNg9C)%NrWea2(kgZf4PIlS+gy}XE?7yAZF!{ zHjEOE-8i{m;mrZCdl|sJya`gQM~G7q^!f?eGOjf#+$AbQjpf(lk3}@js5~Q_C5%w{ zj2ssjEFVu;=E^X9AIOB#1EyEhhU5?D!)fAtXzD-qE;$dR(x z3DzkK%5$S(VmNmo0oF~%IQJBY&@mx`%yfha5S9e9)f{@w7`BE||KVx)G=L7jBM=ixdj%C|u zx1w$DZ1Dd!9Ym-GT_Xq$GhXaH`FY?$25ILx2B~ah)!ai|)BdXh_=CNm2v!?@ zzx#zwVF6rndxh6%N=a#pzlz3^Z=F*T77n1k7#Zrp_8}JIIcM`~BKCO4>~50T3VPnr za@ejW4gxt?<={1|`6%p(O2S^?jJt1PU^SsZt4Qh`fZ1Z87%T$rf~7~Weepfz&?65? 
z7e`O5Uv5=eoe%ora6FCTkK#Jl<#JZt7bZrcm?YjEmayR{`>X|aM)*74ro-2P6n&L} zn|+I$d6LpEYY&aaJ8Ox^jRq26wlLHe$;|qS+}%3>IWvD%C0`uAq!1P}#u3;V{6)Gq z;bt&56BK zttZm4+L*hf?~_FBeV;_QS<&3z(39>F4hwD?Ib+r>-e+jMm@g60Z^lmjbr%WZzvmM| z?|v#!9HP|8n?vH8px3BCBU=G<+Q;EE*|AXpSj!)OPoJO`*!ugXyd~7ccMpH_hOCBm z*crzs01pKo$@!29hMB=Ft^GYIHnXSWV-i-qRp)2ZrUf4VBlIo9dpi7A#`A+0!VK1a zcBWa$XSI+})s8TdyqM0X&}@R`ln?p`I+) z7I?D`SgPQ$nSJr$oU7XDik3$~g6IKODn?uVE&5;kf5r6QtI<;T%hXDKdrAUDazJ$& zK)XxmsvP=GgE10iAkEoGarj;|GC2;cKx)qS4?;6Sp6}5a2Pq=$T7{wOmR4$)UQkq) zD$neA-%Joyfe24eB&SAZX7c4{YXckIoov6HluW72-mv;h;6Mgo1=M$6?ZlXC5Vh&l zLNKf4#f$f%PjH)bFa5l&gTp}D?`9UfMaz17A+Jd0B1^&hsg&HmnwmgUZAyYKMPm%h zA^%S#i-Ig=3g_O&0v8GiqE-wntbQve3mis5nl6Trhum{QdFTz8mctQb8rorwJJTXY zobgUCdRy*8@5bY07=35{_zeV6*ae1y$t&hhx>iLhdIIko&GYx>_9KJs+nUl?>vpY) zp5n-w)46g|)J4b|^q$&)?#>VBi1MlCuniR7Wu2r*E~fR}$x1%y?J#$bhom{lB!(YO zhuwkfF8c?T4$`BLu!=DVrV>8Q+JEg{=;T=PgZK*)JVxy!e^L8OYw0oiK@2+;{+NHj z#m1J5z+uJ?M|Eoz~BMnj7E7 zX3Z*W3Y`GVJHyI-tNj0Tr58I=ro&Y_lJ|hs&rb!`7~FvLMC>; zm(4bAiu| zu*-GZ+(gtSgMaW*FTn(KOM&f@9x!HEyV?m))}o`G;2Bh)5DAKZ{aNL8HzRd?T#}Pq z`^D^Ne&tY*DCI7z1%?`;lxAK?C-iP{Tjzx^fe@A%k_eOi4GmVl+996UqTohURCW5g zAnbVP@PRtSelxsQ6H?B|+WgY}Tx$y_J&yZwvJC`ocoG|_Wd zYmd;NyyX7}lLDbIza3l%5u z@8+k?OM%c2qXE%txZw5yYE+B0)TgV3BBdO`$*lS1P@jm z*ya^jV*2~>V0fuZpV?7(T`*clbMl6KuacE7R?Yo&RA!QKQo_UhHw6r!+>{4>rbRFllFQo4^N;}rW3Slx;tVgH#|Pi_cs67(ia-j8uqyoaT~j)Q*>;DN zty4*qnwg2BbFcw8J7PQm^HEc^DZtV|SYtc;D2@C3zkQkp3Iuz^Z;*3a1CIAfvskUK zgow?MJFT8K->CilWTT_4)VxqOX;elv&8(kGz8*6f$}dPj>-FKBq9LTFrUo}@2sF(S zIFS-}B?FFB_YG^R{SM}yh+Uj=@{!2Q{;4r2<|F01{9?;FkMm((P4CYiL+J~&sX=b6 z;-rH_L@1E3y+IE|ob>LGRgJT1x>v<$Ohg@gh<1z1v9%}40i zGn(wAVmM1hHoXNyWmBq!d7Sr~=J^PZWp}`@q+lljD+Ao^y7~0n-JUbE@}kX3<@(Ca z9}MS-CZlNcjv=f`j*5MP>*~RPYOd=u!!t(;)u=ZaOKI8$NVmpQj$B6)tX#n1s|Lsd z_U`CQzff&UXa~CMK+UrdQgdaVgm6~gS0VEHC|&`_AP{i-B9{;zf`n2H{V@by>P5us zA}wzKY!a&T-0AF=qV^s4$u{CmBo2X32ASJvo1jSu3FY^I4Ys?rYiM!?1_nt}L@gMt zd8~FLSEJwzdx{kgJasE8JvnLj9N_*qOa*!9z8yDYXm!=k5^;mkGPz!zYhvC&QXx>!l*T{hh48%5v!M z<`ZA)?EXOBKX~ld83j=ia?0f!`M0rJ#aFct@uBR&(PPop-o-F(!x`Fq;LrbbZI591 zMZvHtT-E-w%s58x?%Yo(A)(g6n}Hf~;!lDNLVbBH1zFKkSl7Ha0M5UGZ!+i! 
z6j57*(vqNFg8}a%zv^DLmP0n~0saR|z*5EjpIzbfYQXC4=^Bb2=_%0EJ=C(P0Db*& zE5_;SgY-(6#TD4j`36E+B&f4@L{5;h2whCx&b+g3kBe zvQXzZSu7akuo@wSgy+Q9x!J40@p;G2+E$2k_^Mjxif+ETL*;k&jPe-O#%Ed{y8|!S z5zTs7)DGR-pP0LIG>&YJt*I?f^-7mG3AW8;&>nzV<1gkzMvTn?QiA*E8WD{Qj&ixN z@^)@Wh;5=AV=y0={XuEpWFalL3LoZ}6rZbivC%>wkz*4gcYQFTrn)q24dO9?ENR|F zP*eNQBIoyAr+@B^1z3?73=l`V4$b*~r?_B3<2gWc9Opg~0Juy8LsnCk_fh)lVVq|9 zG*+_Ixvz}VAmb3s)g}K`1|Vk`bK0MT9d`c@$j07KPF68#`DD1McxDM-&@*k9nz&Lug zKL+c&3Sli}YEyy@o+~-M(o42iAYcN^7_Vll`5PS9j2mP>SL`k4?3<-%IagF5#1BOZO)nkfDfirej##8}iw0{M@-Tb?}z(6LjLkTn; zur3({=d1Q5$X^M#78D6BRht7-CEYfSyL40%9(h-*C9)%qu9uTPz<_fKZV-~24P{wH zX8qCP`*c8P#E7>o?h%-!&}s|K%^?39e~Z1}dsh0M?I`4LEb`1H=k;r7w?e~OxKi-9Ck~nm@vD76yxn!WI5{|=^e3sq{la7{rm zu?@Xh{J!$@+`R}`wY{WbOxL!BJMW7pggR)jcJXABY^lmF`Ox*|vvOjvs&P9-7MmAc z0tfEb#aW45e&B&E4eXZoYzbzyMKvP=pMUi&K=!J^dh8ZfVf`Kd%DVnd`Nmvm?2w`O1<5``k#ZDP@xKM%c;8~b6F|OnN-#IlE zyWyPxc1b=Ky@s-n9${avs-`CH{zw81L{Q|_FGTz&Jae=fir8rAz)|78O+ov$zMrjJ z|8?Ycmi1wOg(_$(LvA#~YG~Z)$DtB~T@hPBw%qJt6*JDBIhsi4Z(~oUeHBvLw%=@J zZ0OM1-SN_A(F=Pe|b{&FHeLtEn(P5Ef zYO%Sy8+DjWTyH}+GbQ~#3*ilD%r~Shy^~%xbb)(h&IjX@;hc^ICbITuzeu1!g5}Oc zh^VzAJVzYNKkCF$cq~`Ssa%hy1IpFQ$V(tjmLr~y4o`%KzAcDtru`8CJBM^;UX_j` zmpB4~Qn#4fxU%?#UCrvJ-^_vIW>8+Tel)kD9MK~463uH$@(cX> zrd2oNT-`2%=P{SgIt>R&SVM-6u}2kgFt@ZTSTu2}v5WQ1(3&zUZ+e4m2ZM7l)Fb7X+s zzqXk|nS{)eD65vU+o%U;A9J8fY>(FM?YlRO&P8z55k7HJw*k$T_x2m?)^YUAm=q0r zxrx<1r3r=K32S4;DE4XuK`Ov}B0DG)$uH!fR_|g_fgshesg%-~=!n7w=l0){224Vx8VI!h{ zXHdWziGl|nWsyxX3SUBbq`R!v^>i@_6TP8O&XaPK_c;4{VcF;FZ?g~fkkMO1x{FzG##CkNl`0A!HajF_g6Ry! z9A1f{(d0vsE2c{AmdwtlJxEUDN9v4TdMrC51*gQ=584dEY;%{=ShPjsPSjhMvlyW* z0W4!!RJ|G#9>HJK3Qw;N7z{6nX_VJarL*KhIyHOxU)5mE>&g z&X8b45IqvP+EI}GEzb^kDOIn85fC)N?YO%WrW;EBCY!`a7l8X-i5r3t!zwEg+2p^gqdl5(C=L=^q0dRxZ8kEY9mIWPj_-@i zczC1DO1!}zJkPR+f`%F9Ye?j`NH#bikvR*sMv58;Pjh)V`@!OltJ?}3-GjDeK+!zJ z%RZrU6B+Z$Ew~_lUf+`Vp~SDMPMRNy-9c&(>(6^%#7a@S+woG@Bu+o`FQ2axX-H1@ zD3|KQ%+q-vQOngEL#3f~@L&s3=Ukr=f@hR&vF8GLx@J;mE@z^~KH;Kxk%_U1EK11= z6%#a4t(F29LEDd+^ykX4R~M_b6D>Tb(1YEmL}l_7Y!S%W-H)pa^yJ~#ZVQsw-2N&5 z+UDx^W)c}KOiVc=JyaO&S+3PsF27i_U+EhT0i`A# zof z5ZkTfHZJSBsf+6o23()ipYyi_e$^9QnMQNI9G?f?(JyZW7sv3nt4t*%LGEkRS!u$^ ze6vx0QwXdaPF>H@Zdeb4?jBUt1_&EV;oSm5e{2S(#M}z+Y=7Qq6|LO72yLJ@1_SO#;qMIYG9F{6HS*3QlPiXM zNCZ&|Yr`LO3&Ula$+M>9vVLN`%|7OOhy`37Bis6D0Y= z$m5%i#E0N>*)iOQJnG#i^&=O3^&uAq^8ugy_Y_v0|Ad5sh~M?`@@Ls5G_eJslc*O+ zBKMLnTqGt>syw(KY>=X*zzK&y7AtHMXD(4ph3ivI$6j?03_&HL(R3bd?J-sG3F&WL zhF7Uqg$bd!&GxN5-5lsh4C4}$*JVtUhcDNt#78FPPX(HTG#T~m5u>2f`q^o7XAZc24hRN>1WGbcz*eE( z=f@2eEAMUxXqxoPN9`)tfG}(i3>UZeMOcL5Af+h7M=Aa#);h85viGl`Y=pD4`J(v-Y*=$pB<>S#1JmyqAs37AMz*#Zjs zzAxyNdY!?7pBGXpQQ*4=>Da@(-}^F2xOG5jBDy8EC3ftODaUt2_O$%YC(Wf!N0eB4 zbm3+n;sO3UNV?4cIzmHKEWHU(b1H@?6WLTHN6|_vF!2)M5ublG zNyK-uKYPMOf$3q2my2+)O7Xv+(C&e0n<}&}TIby}Ut{43aN(e+F%Q$}**V89 zPZq`?2vL~BZCK!`v2ERUwFGfP7U(>_(MFyx&}(=%thBjb)Hg=)-_77)mO0ml&NrAr zkH?Anui;swVW)8ME^1+Z``&U?P-95o+1N+{>HKrfG(r0O+X`JCq`*@i|ram2Ct?abM-9B23GTP z$6;G}kG4sc1jm&-cDaTrPy?h*ZgmG}bQ4a}wuG-TG}) z@|>2kftG~O;CGi*&T%^X#-VNboD*?0oCoO=j(XlL6C9M5#`L`mLHkyk>;U55{nw*R zoSl!`-eLFWdR5OEk-uSmucHgr)2a5w(+`!}{gQtlzI!!a87(6cUszv|ljkmr*gIdW zxAfxLtT(H`EEDoLaSt{9svS=dA>Ag=m1j=Xwf&?;o=>~NGTUhXnLnq~>SAfNP>zKR zWO~iBof&=u!3g!S)2uhe;yPz%Jqm0Sf~EfG&!d}hjANa?fXl02;frnqtp?D2D;GB& zjLZXoQRz(e@C^VvYZ!(IL>kA^EDdRNA4R~4@SG~0Pmw_0I#z!zhXhyiWig5r`@GWZ z--~*Iq5$5TE2{?>x^&J8gv`wz*?gHfiELnJcLK^?@gL`PtHx8_T?(d(vM>8;aEP^y zQEl#a5hl1+5Eh@(kk{+?R8Kt(hG{ogAuN)9f%u6J^zBjrO&hNc&z{_7woW0~g=F}z zNAuU_SO4(U@28NOeXNs7^xya9*f}M8^eScdF*u})ui){)Bi&D?H|}ADKodakfSdt8 zFMkF=wXG;4b{vpP{Ju)XELWlMCgXhXg^y^FA%#2AjCp?Q4MB9->IUy!N*wl3TgsQn 
z2ymm?y1>IzqsGno8QTLmS>uZpvrd>i5`7;~NK6f+!1`7E+oN`09TtT{+WQNUI;WJx zf3r*^a5aab&Y;glZqiJ(U>tloUrdkSWRzv`Vv*M9{|HJMiI8BYxj zn`F~T1L z66xIUm0DC7HcKd%81v`G@;l8`UkH>2g<;P2P1t{^nZo1TJThGGsO{pS$~xzB|$~ab~a^B;KFl8zI;K#?}k;}9D)2)@$-4` zFdbj`sBZ)18kZ39=uVlsOYzmduGFe8uo1FFve@kOhoeX1E1qel-M~^qu$tZCLL82s zW%IhqMl%l*@)-{zi4qKrWE%5EqiauSa+f=2{n53w)baK}{hf@VEH%t~d+E44VBuy^Qw%7kjVR4l|ZnFz~;pg{n zx%KytrmC@w@REMZ10*dCyqBjU#;Jcqx`ludpd{Y_p>dA+v>Bi`5d(TQan(IFy#CDn zKvpq$(pNR=ou~6u`spf#wFe~uyz=V^uosvY`1yFc?^O)E(=!t)6)37AJ5?ya$_?{+ zvebO#TWWrZtfAGBAOB5@?A}^6>F(&^oP|1_`Y=anHNS^muJSiqQ_o*rMG2w-)!}0} z*jPQ@jT0+n&gQ$@+MLlOVW+L*bm6v z)NVW@Du2`jCX%UP%MJ}*_b3~851t?|l4s7I!O}wilFG;Gr(O*lD&JliqR-5S35)v1 zO=&cl$)nz2p=Ah#V3T0?tHHZr9{V!crOOVQ|B_KIAuOXW*Bq{VG5X z?Xei4KF6l!3KAiAtSimbTsJY%@eGqdsanj2dOB)vw~VVnVIIYwq|$5eI9(!TGD{Rg zuYp+X9OF41*vHEv4*KNTA^VOC`C`AW&iS_vbf0QjQX;e zL!UWpil>tTWn?*v8Wkp%qgh-e+sbI5V@=#&P~xI6_)^zffm1(nbGu~5O&=BZjVNe3 zH@cbbaz%oqg`7{xHD7D(RoQ^lN)-zKTI}1<4>U-99>XQKf32?UyGwZdOf*SzV6|yo zHY;_gr6h)8tgAImyQOLzWPk)D%s=}(Mppa)JS0&T6112ml3A)e3n_=iw7+Z;9nckI zj5k&=e*>lQKAuP^Xf+i7{${OAEeH)eE?B`-3I&GS|10f)JxM{816lg|@oZf&W3jsE zN0(v=R>T2(KXVkW$UV1xEJNRuK&lUrh`|J-0eWuM>vRccVq^1pTXt-MYz^_vdcYL8 zeWJ`;C|7eY-#jwp{G$Mc?SZ&(42MIBKDJflK?ofv13+&W>5;vg$IPcnikJMEb-9!$o?t4eq!G zAuYyT|A+eo(K(6>%EhLq_hz-x`sN2=TJ7Vr$5^He_Yqpn6yyUZj9=;oV7KR)$PvnM z@H_xQs>Okfcq&4d__%MEXNu75z3glA{AY&a9U`PGWV!EUIx9}>rZ-<|8=%^zc?YFJ zLNdiIE^{B?WY;guyN0}$npFmBhm?>Wf1^5#WN@M08@s|w!z454ihgEXQ3)*kiRcC$ zJ)^5vn%M%P61e^GB&qRissYj=!bHsp2WjHoeN=4ESaN^I&@(&&h!`Rs@ik7{eCV#J z;+4716k%?wM%y+w^(q~nQ4<1n4*Qu(*hDI^(7QK!1yI#ybd2M`iS5gj(40220CQJK zXcxl?9rqO^Q?aE4m;FVb_Ry>>F(G#=D$d_d4ULL-@KyUet0@_ePIS&p6=$49V&#^{sMgE^LU=C7QTYo0tKH`Hy4uBqn_zA2`LuZWMyGKqZJl;A`{ln>3V>^gM1RzUH zdhwBL5=eNQ#5+T~C3z|=zA$KlL_Lb2`_;9G){p*=sCSO9(WJv@G%JQ9i3?KcAS=pHyh*yVNC4seRUNURvs zG3Rmt4%jfKMFSRtY7t5$1)Tm@z21XS_@b_L01}xNHwK_CnGMfURC%^NKSV+ zM8}GUlBR{zsL|9<=vrD$*HI8|=LqNcyml$7Q!@ROdLrlK9xU=@m`NT@{i>dQtK(B% z@8_l?ZJM+NjWgmVl<%C)VV!N*UUBf~ar0NMt=9^?2eW*8myCMRLmE!zg(afPi{LwC z*=#;>;l87WKFPVm6KGgmkEDb-EQrO|C>HFwx7(}16N6v0MEB*#T$`M=QXWJ>U>9ts z%AAZ>)7xU|jN&B~1ow~ah#&d3(9d^qA-$(WJDV2r21lZP2pVUf;eDMgM#83H zq@$DdL9Hi$C@aY}3h$?W&-#SdA>eiirpxOq#7>~MJ?`~z?nrU7o5k&IMWb25eM0(R znkXu^J6CGM`-jFUgf&@nf%WX^a-MJ5AB4#pS1?{^6W)g~`!8pTr3-kTYz1QR=`u`PLpEmKq_b_`{)PAXkzdxTa)~fE<>PIB`-)C0b z!C(U6Fk)h~bq`EZU#hNgG`k8sN+5Gkis4Ya=)jQmn$Q)jpOpVjG%*yHqK3OjA z5|O}8EHWv3T+Xrv#pQf_;(KNF2F=_Z^N9ii;zW&0!RvdkXD%N3Yv3qPoXqrozRj~t z{E&bQ;g^WcXfEC^8mZyxjT)2ONF4}o!q?wLZ@7ef=Nu$N~LJCUyXD(Y(yX{uH6{SjEX~ zt_HnP@(O_3Cuo|wE>>H|l`eRg{6=CO-bKJ{N$x_bVF1s?LV$i90d@5peUkLW6U7oy ztNML3IFINL++mvE4+GLvhn``;5Jiqwy`s=kK%oGHQ#@y^ z)D$ZwlX)<|JW%w4c)OmBEj6Cx2Y^;a8YKb@%O>OTv8psoli?Wa(S@eNpCNDfF1aX) zgx?zDFtYf(Bbw}%L}z`|gq?~%z6yoZ$^9)h<}bXqfuN^l=_DgtuQa__@094*cT-}= zu-zWdA5D3cy6AW%&C@IwGCqFRAZ4f0NOasC89#E;-n#Ecj@ZynP#(`|eZ~Q|y|JM3 z*c<_}+^DErn{vi;Zv34H9~PCMoBAXE(jnF@ z6*7R@Ee9~JdF3*01*}%v_}HxHLq^m}MO$z7N)#pixiH((rman$gr7jQQn0%(85MPP zt_$*7%~sIfY+H!jb=EwlQbM;&<~d4$5~1wV38;8KI{F8$rY^0|u=^KkcH}Ks@k-$I zx*e-I00=s2w|A-fQ*r!jj}`Z~xBKmOO7!d*8V#bV{r-|2pTtX$ zps&`30ptrJQ)9KyE?POyHdVd;et!k3?uYS;0-x6aHqf4GPpi{G#4kcR^9~T;W~ATs z3TbvA2q{3U9RNwDf)FTZ@Hx(Zp1YO`Ufw;mbnr+L-@-NC#V-sI&R&*b6;SH@7DZ*> zYeA9DzBZg}XWq{4gZb6niRAxtsPq~e&Qy@3VJ-hH>8p~C55SBo1WYIEiBE=ED?W^s zlbY5%RCdh9%QV6hDtNN`E#E}%M*2<1wkTlhk=f8n<#afGV}4vL%HJGJxwQRZQqdfY$uggSMB)3s=c)}aEns(55W?&2qYQxT>vG{KFz_p+PBCdftj(D42{ z!oRaQkspPa9o0n&7yP$du%yZwy`ZD+doNnzNB@wS+E6B7TmZaK=#rDg?&{aAIE$fv z)qf8omF~L#-62gVAx|n-+(C681B8E>W?bwXx^2!KWR;btHlbA!&F(#ZFh{7F%?@74 
zz9LliXf>`-XL!&EyCo3NZ7Rn62}JFy>eYI3n1eo|-97?J%z9<=BENcM6{Xmz(gIQ2 zq=vcIEYl3-27lp8L(W)gjQ14xjN2q!62?@YzddY}c+lB0D0Q?rCE~AJ@`z@iKFzqD zS7rB6)Bo-jZxZj2(U;Y6Ig#J+6$sYzvH~j_PRXds;uE6gX>cfYYLV%Ed?EmEK#mf? zi=vYINqz@;f>6qGsy)TNd*Y1D_wmKAZI<-8n0Z1HU-kx25M;q-8VC;>Spg0YbxPC# zGXR|#j@y13AN4aD2o<1X;b4*@?2l z;7VDP_Q{V6>J7=I{0@;@1Mh5~2~xtc|CFerwuhJcURw|iDlayqc~)>O<2hDJE{zig zeB)E@8>%=e0xmTXy~~h=Os;PIDPu8KaGzlCY!{I7B~c`uY(xt+boe_Qv#xES_Ya%E zbJU~j#ahv=7uqmPQzb9Zl(8sMbMDi?2X?z_+;k#(n8Js+MhQL{C*Osx@u9#~PSdDl_4`+ynpLUIbeI8^hJyo<5Y5oWVRzdAhD(D>$+YL_9KBIJ1jd4VbCu!Jj8?Y^J7kJkr=8~ z$OV2lq4NQ2M;9s-eBwPZXr`rkCDBD4X6HZvYKq}Bkg3arh}UguXQe&k4HWmh%+>+>T%JmzxFP$Auw) zr^)g=Jx4*!GI7;$0yy(MxLEiiEcV^agBs#CgFt*y^rZn2C=H-+g`q%LI91sapAD-J zI^*(lA3f1l?91GoX;JzXSs(I;fz?RxZ6ZWcXK4r!53x-GEv}jkLHKbT!w-@uTd!At zzkACws(aR4=9cek+|#xp4P3tOuX8};xd&h{PtCESIV7&=vfO>U)P<)m@LxFsfeTLe z!vXBCZBy}HB_=k?!UUvSd$l~Fk2RKgmOG~(; z+0wh1Fw2+@J9faI|LJW|Kj)6&;_U0nRuM@sv|`sypJ35GwIzfwI7A5LN0z3y zX`6bke~>8bdb&skOVtwqLq4fk)_Zk=O`{qYoh7Yr`Kc=_q>JM}Z6qgJ4<|U!`Fczz zzi}>`P!^ufR#10{gSUA$u>OF})kqY~__GflsYEmZ1%`!<2^S#H-J3|vNT5O#XrN-+ z#!O^hIl$liMfmjTXrtad7BtSu3eV|!v7W{M^oTi<#FdzqJ9m5fm*w;yjrAY*wKo*3 zi@lcygRWDk1wC1&D$rFG{0{KKem?bxjv|5i{C|X9LOsG@#`!UkRt)Grf(;eiHoCl3 zTkH)FJOk6sp>)#Ckm1GgiuZBowL}454&pZ;^Utw#JdR96bVI4=gFsJ>HRINvJvYLJZ$+v>@)~Y2b^jb53!s^`* z*?G#JMb-(_h^B%QD0qtA;&OtM!|OHr8H?*f*2gfn5e}~qKuIp_@_vc%fKfG4 z#fD_X;xMq;r?As5gf~Z%)PWPMlu>ApvC2e)(SpTMPunC_pWt^8O)f5Z6$iu_|5W2h z5dMH}e0ey5ya0Gil0W{1jWIQ{dp%x?7EFK_;~Vp(8JGrv4_;voS>FAc&tK>;8z-op zd_63PodHr%sA&l9pP`XA%LHC{oc{Oz>!5$tsL3TdK>jH0dgh{-nl?;v@oT0!rS9VK zb-fZAP3$fif09{ngNsqHXTng6@ll)tXV6e@hW>k6fSlDh*lTaQ zmK`9}0o(_9Jl7igp|tJrNoVG%?-rDB)qfCDKOq<}TmH9RS3mg)X~UiaLvc)>MWP4b z9i8p*hYZg9E3_eZ3$+u>L?i%e*U~c~X-aqKfgeFcB)B_SDEYqDru6atM!HBA`xp_d ztCeLS2*Ur@s6jx0XJUBX9joCd=8q@_80Inw68zhYfPeD~#0XJ9FQ73lQITLIlh!yh zZe_qoa^QE^hB`AD#Rn1tiW*P;DgzLH8Bpuj8Tf^lA>lJ~mXkUs;cOEQF2P5){v-Aq zg@FyCgQoGE3LKe?a+xfS{7T?k`1|!?Igod%%Zqn@!n!`)s*@n;{!_;}LQRo2R*RF& zbhwCt857n7hPMOsUqnb`&*`N)(*l5bFnS=a4qrtZ{pi)S&`CRss^SgefW1R1H4(gM z2jYTmhpBQnITV*~zT1gZHJok%0BFy@$pl$b)Q&xM@td=TEPRn#iblp6rNf>`lA+Vu>!D7@QDN0CA zbmIP*LMahUN?Ei>qABc<--SSUisK=|Wg|jgfsLduY&4Z2bR{6B@Uhk?R|pp}3*|Vz zaHn2fsi+9DnolUYAR)^4f(0^Xj-+pu5V#xf!h%C#*6P*MK)UXi!`f%saf1>>+1U~W z#(^N@U7#qL0p3Yy?I_oXRsjb=g2z^Fo_?sRvgu3S2Pi3)NVQ-CXQ2xhS0tcx*saD(BK&>6?A z`u;}Lu-AE}w+-(;v>Nt8{>>=Cd}`-%JC5}`vF6l+8*Yue`2L(jSLaAQUuHqacP5w6 zPGs#~(Gx^>11(`#pOy!T37-j;I!0UNn^#7vKPY?dJxezMpq`))%O#2!NOf~>&TI;# z&_A_1 zRmgpf#sveLATXr39j=f4rJW0Z1+w0rMqp8IC(UrePH+xqN&Ws5$BC{OVzqFt3jtPG zQhYH`MGq#xRu9D%15j#ImYppJ9HB1<%q4xJM{^@dkLPvyglGO4I0VG7pQ?+j6#~on z_Vs2`rA)c#t5^iqcf{B)qib9Xtfz_0eJdL$ogohVJZTIi0+jDzyumk`{fjr-ez@3+Iz@qO z_F2Cdnh3M@8V}u9n%|r$kzw?d%Y{S^wa*?@J_y^BmW7p~s8V0BKaRc3&Me`~?pt{~ zb1ul=dDh$cR{l*+^8REY7Dy2l2Uzy03OV=A|1I84Qm7t5+oif-@fad%_ox%&yd2l`c;;UiRVVAntz7NK*1a&;wMcd0}^8FczTE zECHmLvm&VTMJUAF%(FTP*9Vh~?!S-xIliy%5}!Y0CMK|bH{QSH4VC=7uh8)&*Xb9_ z7YoD*Sf?FCC>^Gz=TOG4gaq+qV5>~U+oO8VNJj%EU*!YkMyPT<9HlfWxAYaT3u#dA z|IFy-FB$6`wX>t~yp^8oT8d%oZA|`TlNloev~A8n!C*2ap=TtmVwv#|h(tpNKie^g z11c+_ouSxNu*ay421}KvTR5NL+s?3-fnE3=R(lwyHpe~b>EMY(s#s7<-0|^C+SFgp zT>rK{RK)x{LiQ;Hvm}5mXvL&>DqwzoL6B+ntz6nL7(r zIN|0fKq`@|U&hmrVA?gtt_xLE)%QgW(p_4j->RjwN-4OxyLp3Fx1IESOXp^Pcmu(f zo?B<%Ca6B^lt}Mcje>iyMf1I zC2+w8Gu5N)^BIRYC$YO&rQ3X8?+{DX;P!sMRx?XVR7S`SQVt?F7uOdBZQ@2D+Cx>) z7!AhYa=$d$#jrL9OQ%vTY5$H2Q|ph1;b0_&<7C<&_|w&$-_xS%9=<2%0^^XIP)2fmJ;USwUx6*leE6m8qPDIog0WfGK&At#ls-REx(W+IVbBefF5XEuvM z|G<1BqFBd!Y->8WIAh%_XSCS5s25vAXur+YX~45^Mm@m{pmIEa>ooOXdX(7fDB6=A z;%{x~VwnU^EWZ!*nsV?*0k%X4w8eH;M(+WPoqr@91gx)U^ir&YvQf>cnr}w0Ee;Jq 
[... GIT binary patch data for the deleted image omitted ...]
diff --git a/hadoop-hdds/docs/content/concept/Datanodes.md b/hadoop-hdds/docs/content/concept/Datanodes.md
deleted file mode 100644
index ea63fe46b14..00000000000
--- a/hadoop-hdds/docs/content/concept/Datanodes.md
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: "Datanodes"
-date: "2017-09-14"
-weight: 4
-summary: Datanodes are the worker nodes of Ozone. All data is stored on datanodes, aggregated into storage containers.
----
-
-
-Datanodes are the worker bees of Ozone. All data is stored on data nodes.
-Clients write data in terms of blocks. A datanode aggregates these blocks into
-a storage container. A storage container holds the data streams and metadata
-about the blocks written by the clients.
-
-## Storage Containers
-
-![FunctionalOzone](ContainerMetadata.png)
-
-A storage container is a self-contained super block. It has a list of Ozone
-blocks that reside inside it, as well as on-disk files which contain the
-actual data streams. This is the default storage container format. From
-Ozone's perspective, a container is a protocol spec; the actual storage layout
-does not matter. In other words, it is trivial to extend or bring in new
-container layouts. Hence, this should be treated as a reference implementation
-of containers under Ozone.
-
-## Understanding Ozone Blocks and Containers
-
-When a client wants to read a key from Ozone, the client sends the name of
-the key to the Ozone Manager. Ozone Manager returns the list of Ozone blocks
-that make up that key.
-
-An Ozone block contains a container ID and a local ID. The figure below
-shows the logical layout of an Ozone block.
-
-![OzoneBlock](OzoneBlock.png)
-
-The container ID lets the clients discover the location of the container. The
-authoritative information about where a container is located is with the
-Storage Container Manager (SCM). In most cases, the container location will be
-cached by Ozone Manager and will be returned along with the Ozone blocks.
-
-
-Once the client is able to locate the container, that is, understand which
-data nodes host this container, the client will connect to the datanode
-and read the data stream specified by _Container ID:Local ID_. In other
-words, the local ID serves as an index into the container, describing which
-data stream we want to read.
-
-### Discovering the Container Locations
-
-How does SCM know where the containers are located? This is very similar to
-what HDFS does; the data nodes regularly send container reports, much like block
-reports. Container reports are far more concise than block reports. For
-example, an Ozone deployment with a 196 TB data node will have around 40
-thousand containers. Compare that with an HDFS block count of a million and a half
-blocks that get reported. That is roughly a 40x reduction in block reports.
-
-This extra indirection helps tremendously with scaling Ozone. SCM has far
-less block data to process, and the fact that the namespace is handled by a
-separate service is critical to scaling Ozone.
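To make the _Container ID:Local ID_ addressing above concrete, here is a small, hypothetical Java sketch. `OzoneBlockRef` and its fields are illustrative names only, not the client classes shipped in hadoop-hdds; the point is simply that a key resolves to a list of (container ID, local ID) pairs, and that the local ID is only meaningful inside its container.

```java
import java.util.List;

/** Illustrative model of the two-level block address described above (not the real client API). */
public final class OzoneBlockRef {
  private final long containerId; // resolved to a set of datanodes via SCM (often cached by OM)
  private final long localId;     // index of the data stream inside that container

  public OzoneBlockRef(long containerId, long localId) {
    this.containerId = containerId;
    this.localId = localId;
  }

  /** The datanode-side address of the stream, i.e. "ContainerID:LocalID". */
  public String streamAddress() {
    return containerId + ":" + localId;
  }

  public static void main(String[] args) {
    // A key is made up of one or more Ozone blocks returned by Ozone Manager.
    List<OzoneBlockRef> blocksOfKey = List.of(
        new OzoneBlockRef(1024, 1),
        new OzoneBlockRef(1024, 2),
        new OzoneBlockRef(2048, 7));
    // The client contacts the datanodes hosting each container and reads each stream.
    blocksOfKey.forEach(b -> System.out.println("read " + b.streamAddress()));
  }
}
```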
diff --git a/hadoop-hdds/docs/content/concept/FunctionalOzone.png b/hadoop-hdds/docs/content/concept/FunctionalOzone.png
deleted file mode 100644
index 0bc75b5e1fdbbec9e912a24cbb9557a0efa32ff0..0000000000000000000000000000000000000000
GIT binary patch
[... binary image data for the deleted FunctionalOzone.png omitted ...]
zM$rb;>6~Gcmymjzk3_kPvTd-wM=( zboE+-yUxdgXVNt3S_>E`P8vlBRKW&*+Xy1zl&RE8#Cr%BYdq+TW5!)d_>Er(qDLjj z=T0MAt5uqCjM>e0UH7g&2iY-DXi_J@RW(tp(Hl=*lLmbuVS|EsQivycGXDvko|RQ~ z;n+~5pRRwmY+FMaR^CpcxpHEEN;{foJUN62pr{A1-g7iA&r>|7q_{s1NGzgcD(1Bf z^d4BAWmM+WguZxuf0qPK{tRceo1>|l?L8qnVPC%NS&%BJD zVx$~RMfts!ixOII$flwd8cL>=a4vj%i5z=DVdKhnn=#(6G6TuhnZ2*2^UUA&yv+N`9sU64 zRS-Q)i~(kGeDWW=@eD#BjeDg1T9EY-&9Rg*@2)${dNhIj4+q`3uV3!&q+af8Dfc-M zMqN6V>SGC!^w;43wmPRU({6UsNTeEj*`6!rLo(tM4x)&K zY1J`Z7xFFlv**ACHyFa{>JC4isW3`$Y^mGLt&?{kk>_d$=Tgjvpfr@3WV6(%ezDv^ zG_H!-ZhLKQbdP}67Kmk;2nV67FvraK)a{i2fPo%h&rovqXtStdv~Tc5X~A4hbA`m%d~=HWrQZBlDi>ipMQ;EVagKiX5~u zqa#!?pr%G9?f9jmmw!pM(ho&MB)s)Dx@0O}+AQPE`F?j_Q_~0TX5j=GFa+iib^4 ztcd+v!Gtwuul*e~TY}|OdeCDy#`NtgL-TzxxpwyYA z)q02i!>Gm0c*z05%5hL_yS5%5EHSoAsSE+OKi{nR9(lu(dObmdFpATy^*#~kN~{Ka zc5QifaG!owaMii^OyIY9c?KawT6|abl6E;M*dbM-SoIUCO2T&@3LC*fDUbvnv4_Mb zg~Rk2R`iFeMoYQ8Nx}h|9BH(t$p_dMu9~v(s6ufCZ;#)0a5*jN6=gZ`!p{ono^q~q z2c%eGoO+IP_Z2C~KlN{KCy3RlBGiWS>-}*zkfo+4h9fg_f(eNioCaoRT=vRTb-TnY zxsC&#liDPIghT@ErHHLNT&?T5xy?6tcte-8d}yZFxGA?LH@7cjA<<{Z4GWL>aS229aC z=WDdOKYDNi5$#z?S`hEcOXzfDI#aG`Qio<1QfD0nxrG2za=@uNq7_U_0_#idby#KNr}tdKe`1FP8QJY6=Q8~*I=bl;kTB= z>C)dVz%|J*s(MjkE1ihV1xiSKZ;_tpav>-&|t0LK) zxFVBQPXj3{+dOax2jOwq7Q6-+tLjeqS?VUd#Oy@f{t9O6O8`2=dEGW*BQ7W|ZWez$ z=x@!vssg!YeqFl_RG3COT2hy!MG zH$n739tnPJruZLqYDV;k5HsAobd-)~9aa2HLO3X9#5NwoQMr-5TYk|o^QRR zn6jvuKXC~*`?5q@UHXgYfq+DN-<@%<{c`ohkomFX8<$^8(&I-jnGiOy<5^r10Zy?RXU(>kw&Q zzo<2*bsrW{CSK}#t(Ys9N5x?DfH&Ee6Y}(M)HAw4AsZG{SU|`nXdtR=jED}Dr>Nh@ zl;={Zni+f~k8zRO{5+7rsJ4iD8>!H0tb6Dbk#Q!Q)iWIy5?upF_GAZp$eIJKO4ZOh zy?`cV+_|%;>_i4;1w@!J!!Fbd-Z~*0KB`T1r=zJ0FF1BZv8D;F33O+|f!0s+>IC)l z7z{`AGv+7s4bIEPJ#&5yp$6bXxH_)456~X-M{}1LwbAYMu{70J5Uw>VrJbjJg$Jc8 zd$J?#EYwT_{2aF_`3kk%=dx9nH;P+3ATzUw_giX&Z9W%K2VAZnI4)08uLMJ!up{#q z*mQTY&IrN6vMfax8zU}nwuVAk%7}Y_dLA3&dX(!SCbNAuiB7BzapzA%i4<{fuTe3^ z{0`h>K0t7pJ*r*q&!BLObczlJx(3^8x!0c^$=k@)29KMWf9rR(`(n+~Ifvs+ps2`3 zgbK^Mr+e_Acz4}sFxns-Vk`u=>G;g3}F4=fHh98+#0WI%_x7I@8B~BMc*zGHZTYnZk*+a2s_67y*eL`T?B250!*7 zdg%w#Rm2SxEzL#Hb|t;Pls){bdpk=H)$6v;Pr@jrp?-;N;l#c=Muf)bv~OF-5GHnS zA{YXnj4Q$!OUZavZ?UjzunJ-`Q4~<@ohg{@F7E+vOh~rTYq|<4CEEGT&lfb&n=F=6 z_(($m;+rk;JSi7Z-_^y;krWlSvqtvU@|S>Ue{$hKdj>Y4)$b8QJ6^V8J;Mx-Np@nM zbs$6vyFEO_^}JmmBW_8(MI$CX~f zL=mAI=X;`_7Z}RmzM9!h(t;xa+{>r_`yqfGqfjAAUCWSOT9v8V2hkrGm`>sKG$t9q z@=&g`cd6iC;8z(BClHuKG`49PerSi_ht28I`$HivmFh`B@E`~WPxQq7D3lwtEb5gr zH~_oQY~cZX5%z8}Ybb`C!3c{IJ48NWo8tPb*)GP#KF^5GEE!58I}rl|hhu;G$xu(o zlH2XviI+O(y?rJ7<2tUxy2oLu(e)SAxw}22kT`}As{Gcc6(_VD=*yy!K2<}b^}Bp> zFZ3Q9!#kk66xuiT)&*7tvt7L6E!bH4W)FMw6(Q@peWI`CtrDFdcILOw;tE>j)u{p| z>q^@wS1oJWleUaQirI?i*^W=P9XE;IEXAaH*7k69aenMdw$D8un*lxTBt!QhKK5`n z3o~=ZaLO5K4>|cH{meSN*FHU%xX{{Jc%)$gcf9nmp8Y^C9>J%6!#kynBgKXp2TjTyxm3%IiEOT3LKcZ8&?b=8H2z3T@ zzFLXk;ogL%gPNLH-so;8h)aCJH)h>!7FwV+RSc|-T3ftg5kyN}TfXLY8F^Y@G_hOi z_P8iz1SAy!v#Ic0#FtW-EGNg1Ra!MGePQI_;v8-G{XaZ{E|wv9E>HgUbv}&GH+c^B z<;i^?sKxiiY+Zd)yOUgEl_vL@6tX#<0P+7U1k1&MA%upQ5*T&K7fq&h8&qz4-|wFlEAce)|6%7MVxG%{XlXm$6TTC$WXC6x(IvKHPVLvojY0 zZ5DbCaIgMNXQ9;)$i;!eNAkm;0cO)f>@_N(jc*jFv$AMRGPC6zNT@$JFRkxLcWtta zV=eq$yMtk-F@*K&83tpbqc4Laup<6>mR01wcQr)sWp&Lko_Xyb&I!Ea=i;I-bz4fB zaBy%)W}Tt&D~p&Nyr>XALa5&xfja8H)tuAYa?Bw4v)bdfK#06=q>dm7?4QWU=RlgD zaAP=nD55lGrhq)}Y^`WTrE0E_uCd93%X)=8+l8Bh!o!4- - -Storage container manager provides multiple critical functions for the Ozone -cluster. SCM acts as the cluster manager, Certificate authority, Block -manager and the Replica manager. - -{{}} -SCM is in charge of creating an Ozone cluster. 
When an SCM is booted up via init command, SCM creates the cluster identity and root certificates needed for the SCM certificate authority. SCM manages the life cycle of a data node in the cluster. -{{}} - -{{}} -SCM's Ceritificate authority is in -charge of issuing identity certificates for each and every -service in the cluster. This certificate infrastructre makes -it easy to enable mTLS at network layer and also the block -token infrastructure depends on this certificate infrastructure. -{{}} - -{{}} -SCM is the block manager. SCM -allocates blocks and assigns them to data nodes. Clients -read and write these blocks directly. -{{}} - - -{{}} -SCM keeps track of all the block -replicas. If there is a loss of data node or a disk, SCM -detects it and instructs data nodes make copies of the -missing blocks to ensure high avialablity. -{{}} diff --git a/hadoop-hdds/docs/content/concept/Overview.md b/hadoop-hdds/docs/content/concept/Overview.md deleted file mode 100644 index 9e5746d8461..00000000000 --- a/hadoop-hdds/docs/content/concept/Overview.md +++ /dev/null @@ -1,81 +0,0 @@ ---- -title: Overview -date: "2017-10-10" -weight: 1 -summary: Ozone's overview and components that make up Ozone. ---- - - - -Ozone is a redundant, distributed object store optimized for Big data -workloads. The primary design point of ozone is scalability, and it aims to -scale to billions of objects. - -Ozone separates namespace management and block space management; this helps -ozone to scale much better. The namespace is managed by a daemon called -[Ozone Manager ]({{< ref "OzoneManager.md" >}}) (OM), and block space is -managed by [Storage Container Manager] ({{< ref "Hdds.md" >}}) (SCM). - - -Ozone consists of volumes, buckets, and keys. -A volume is similar to a home directory in the ozone world. -Only an administrator can create it. - -Volumes are used to store buckets. -Once a volume is created users can create as many buckets as needed. -Ozone stores data as keys which live inside these buckets. - -Ozone namespace is composed of many storage volumes. -Storage volumes are also used as the basis for storage accounting. - -The block diagram shows the core components of Ozone. - -![Architecture diagram](ozoneBlockDiagram.png) - -The Ozone Manager is the name space manager, Storage Container Manager -manages the physical and data layer and Recon is the management interface for -Ozone. - - -## Different Perspectives - -![FunctionalOzone](FunctionalOzone.png) - -Any distributed system can be viewed from different perspectives. One way to -look at Ozone is to imagine it as Ozone Manager as a name space service built on - top of HDDS, a distributed block store. - -Another way to visualize Ozone is to look at the functional layers; we have a - metadata data management layer, composed of Ozone Manager and Storage - Container Manager. - -We have a data storage layer, which is basically the data nodes and they are - managed by SCM. - -The replication layer, provided by Ratis is used to replicate metadata (OM and SCM) -and also used for consistency when data is modified at the -data nodes. - -We have a management server called Recon, that talks to all other components -of Ozone and provides a unified management API and UX for Ozone. - -We have a protocol bus that allows Ozone to be extended via other -protocols. We currently only have S3 protocol support built via Protocol bus. -Protocol Bus provides a generic notion that you can implement new file system - or object store protocols that call into O3 Native protocol. 
diff --git a/hadoop-hdds/docs/content/concept/OzoneBlock.png b/hadoop-hdds/docs/content/concept/OzoneBlock.png
deleted file mode 100644
index 9583bd5ee78f14cf8c9b20ad3e013d0499d3d438..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 4650
[binary image data omitted]
diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md
deleted file mode 100644
index 1ebdd4951d2..00000000000
--- a/hadoop-hdds/docs/content/concept/OzoneManager.md
+++ /dev/null
@@ -1,87 +0,0 @@
---
title: "Ozone Manager"
date: "2017-09-14"
weight: 2
summary: Ozone Manager is the principal name space service of Ozone. OM manages the life cycle of volumes, buckets and Keys.
---

Ozone Manager (OM) is the namespace manager for Ozone.

This means that when you want to write some data, you ask Ozone Manager for a block and Ozone Manager gives you a block and remembers that information. When you want to read that file back, you need to find the address of the block and Ozone Manager returns it to you.

Ozone Manager also allows users to organize keys under a volume and bucket. Volumes and buckets are part of the namespace and managed by Ozone Manager.

Each ozone volume is the root of an independent namespace under OM. This is very different from HDFS, which provides a single rooted file system.

Ozone's namespace is a collection of volumes, i.e. a forest instead of a single rooted tree as in HDFS. This property makes it easy to deploy multiple OMs for scaling.

## Ozone Manager Metadata

OM maintains a list of volumes, buckets, and keys. For each user, it maintains a list of volumes. For each volume, it maintains the list of buckets, and for each bucket the list of keys.

Ozone Manager will use Apache Ratis (a Raft protocol implementation) to replicate Ozone Manager state. This will ensure High Availability for Ozone.

## Ozone Manager and Storage Container Manager

The relationship between Ozone Manager and Storage Container Manager is best understood if we trace what happens during a key write and key read.

### Key Write

* To write a key to Ozone, a client tells Ozone Manager that it would like to write a key into a bucket that lives inside a specific volume. Once Ozone Manager determines that you are allowed to write a key to the specified bucket, OM needs to allocate a block for the client to write data.

* To allocate a block, Ozone Manager sends a request to Storage Container Manager (SCM); SCM is the manager of data nodes. SCM picks three data nodes into which the client can write data. SCM allocates the block and returns the block ID to Ozone Manager.

* Ozone Manager records this block information in its metadata and returns the block and a block token (a security permission to write data to the block) to the client.
* The client uses the block token to prove that it is allowed to write data to the block and writes data to the data node.

* Once the write is complete on the data node, the client will update the block information on Ozone Manager.

### Key Reads

* Key reads are simpler: the client requests the block list from the Ozone Manager.
* Ozone Manager will return the block list and block tokens, which allow the client to read the data from data nodes.
* The client connects to the data node, presents the block token, and reads the data from the data node.

diff --git a/hadoop-hdds/docs/content/concept/_index.md b/hadoop-hdds/docs/content/concept/_index.md
deleted file mode 100644
index 8f0aeb07c96..00000000000
--- a/hadoop-hdds/docs/content/concept/_index.md
+++ /dev/null
@@ -1,33 +0,0 @@
---
title: Concepts
date: "2017-10-10"
menu: main
weight: 6
---

{{}}

Ozone's architectural elements are explained in the following pages. The metadata layer, data layer, protocol bus, replication layer and Recon are discussed here. These concepts are useful if you want to understand how Ozone works in depth.

{{}}

diff --git a/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png b/hadoop-hdds/docs/content/concept/ozoneBlockDiagram.png
deleted file mode 100644
index 7fb738f68a9eb50f359d48f8e143b6e3bde998ec..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 45218
[binary image data omitted]
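Since the key write and key read flows removed above are essentially a small client-side protocol, a compact sketch may help; the interfaces and method names below are hypothetical (the real Ozone client, OM and datanode APIs are different), and only the ordering of the calls follows the deleted OzoneManager.md text.

```java
import java.util.List;

// Hypothetical client-side view of the key write / key read flow.
interface OzoneManagerClient {
  AllocatedBlock allocateBlock(String volume, String bucket, String key);
  void commitKey(String volume, String bucket, String key, AllocatedBlock block);
  List<AllocatedBlock> lookupKey(String volume, String bucket, String key);
}

interface DatanodeClient {
  void writeChunk(AllocatedBlock block, byte[] data); // presents the block token
  byte[] readChunk(AllocatedBlock block);             // presents the block token
}

record AllocatedBlock(long containerId, long localId, String blockToken) { }

final class KeyFlows {
  static void writeKey(OzoneManagerClient om, DatanodeClient dn,
                       String volume, String bucket, String key, byte[] data) {
    // 1. OM checks the volume/bucket, asks SCM for a block and returns it with a token.
    AllocatedBlock block = om.allocateBlock(volume, bucket, key);
    // 2. The client writes to the data nodes, authorized by the block token.
    dn.writeChunk(block, data);
    // 3. The client reports the written block back to OM.
    om.commitKey(volume, bucket, key, block);
  }

  static byte[] readKey(OzoneManagerClient om, DatanodeClient dn,
                        String volume, String bucket, String key) {
    // 1. OM returns the block list and block tokens.
    List<AllocatedBlock> blocks = om.lookupKey(volume, bucket, key);
    // 2. The client reads from a data node, presenting the block token.
    return dn.readChunk(blocks.get(0));
  }
}
```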
diff --git a/hadoop-hdds/docs/content/design/decommissioning.md b/hadoop-hdds/docs/content/design/decommissioning.md
deleted file mode 100644
index 8d620be515e..00000000000
--- a/hadoop-hdds/docs/content/design/decommissioning.md
+++ /dev/null
@@ -1,624 +0,0 @@
---
title: Decommissioning in Ozone
summary: Formal process to shut down machines in a safe way after the required replications.
date: 2019-07-31
jira: HDDS-1881
status: current
author: Anu Engineer, Marton Elek, Stephen O'Donnell
---

# Abstract

The goal of decommissioning is to turn off a selected set of machines without data loss. It may or may not require moving the existing replicas of the containers to other nodes.

There are two main classes of decommissioning:

 * __Maintenance mode__: where the node is expected to be back after a while. It may not require replication of containers if enough replicas are available from other nodes (as we expect to have the current replicas after the restart).

 * __Decommissioning__: where the node won't be started again.
   All the data should be replicated according to the current replication rules.

Goals:

 * Decommissioning can be canceled any time
 * The progress of the decommissioning should be trackable
 * The nodes under decommissioning / maintenance mode should not be used for new pipelines / containers
 * The state of the datanodes should be persisted / replicated by the SCM (in HDFS the decommissioning info exclude/include lists are replicated manually by the admin). If a datanode is marked for decommissioning, this state should be available after SCM and/or Datanode restarts.
 * We need to support validations before decommissioning (but the violations can be ignored by the admin).
 * The administrator should be notified when a node can be turned off.
 * The maintenance mode can be time constrained: if the node is marked for maintenance for one week and the node is not up after one week, the containers should be considered lost (DEAD node) and should be replicated.

# Introduction

Ozone is a highly available file system that relies on commodity hardware. In other words, Ozone is designed to handle failures of these nodes all the time.

The Storage Container Manager (SCM) is designed to monitor the node health and replicate blocks and containers as needed.

At times, operators of the cluster can help the SCM by giving it hints. When removing a datanode, the operator can provide a hint. That is, a planned failure of the node is coming up, and SCM can make sure it reaches a safe state to handle this planned failure.

Sometimes, this failure is transient; that is, the operator is taking down this node temporarily. In that case, we can live with lower replica counts by being optimistic.

Both of these operations, __Maintenance__ and __Decommissioning__, are similar from the Replication point of view. In both cases, the user instructs us on how to handle an upcoming failure.

Today, SCM (the *Replication Manager* component inside SCM) understands only one form of failure handling. This paper extends the Replica Manager failure modes to allow users to request which failure handling model should be adopted (optimistic or pessimistic).

Based on physical realities, there are two responses to any perceived failure: heal the system by taking corrective actions, or ignore the failure since future actions will heal the system automatically.

## User Experiences (Decommissioning vs Maintenance mode)

From the user's point of view, there are two kinds of planned failures that the user would like to communicate to Ozone.

The first kind is when a 'real' failure is going to happen in the future. This 'real' failure is the act of decommissioning. We denote this as "decommission" throughout this paper. The response that the user wants is for SCM/Ozone to make replicas to deal with the planned failure.

The second kind is when the failure is 'transient.' The user knows that this failure is temporary and the cluster in most cases can safely ignore this issue. However, if the transient failures are going to cause a failure of availability, then the user would like Ozone to take appropriate actions to address it. An example of this case is if the user puts 3 data nodes into maintenance mode and switches them off.

The transient failure can violate the availability guarantees of Ozone, since the user is telling us not to take corrective actions. Many times, the user does not understand the impact on availability while asking Ozone to ignore the failure.
So this paper proposes the following definitions for Decommission and Maintenance of data nodes.

__Decommission__ *of a data node is deemed to be complete when SCM/Ozone completes the replication of all containers on the decommissioned data node to other data nodes. That is, the expected count matches the healthy count of containers in the cluster*.

__Maintenance mode__ of a data node is complete if Ozone can guarantee that at *least one copy of every container* is available in other healthy data nodes.

## Examples

Here are some illustrative examples:

1. Let us say we have a container which has only one copy and resides on Machine A. If the user wants to put machine A into maintenance mode, Ozone will make a replica before entering the maintenance mode.

2. Suppose a container has two copies, and the user wants to put Machine A into maintenance mode. In this case, Ozone understands that availability of the container is not affected and hence can decide to forgo replication.

3. Suppose a container has two copies, and the user wants to put Machine A into maintenance mode. However, the user wants to put the machine into maintenance mode for one month. As the period of maintenance mode increases, the probability of data loss increases; hence, Ozone might choose to make a replica of the container even if we are entering maintenance mode.

4. The semantics of decommissioning mean that as long as we can find copies of containers on other machines, we can technically get away with calling decommission complete. Hence this clarification note: in the ordinary course of action, each decommission will create a replication flow for each container we have; however, it is possible to complete a decommission of a data node even if we get a failure of the data node being decommissioned. As long as we can find the other datanodes to replicate from and get the number of replicas needed back up to the expected count, we are good.

5. Let us say we have a copy of a container replica on Machines A, B, and C. It is possible to decommission all three machines at the same time, as decommissioning is just a status indicator of the data node until we finish the decommissioning process.

The user-visible features for both of these are very similar:

Both Decommission and Maintenance mode can be canceled any time before the operation is marked as completed by SCM.

Decommissioned nodes, if and when added back, shall be treated as new data nodes; if they have blocks or containers on them, they can be used to reconstruct data.

## Maintenance mode in HDFS

HDFS supports decommissioning and maintenance mode similar to Ozone. This is a quick description of the HDFS approach.

The usage of HDFS maintenance mode:

 * First, you set a minimum replica count on the cluster, which can be zero, but defaults to 1.
 * Then you can set a number of nodes into maintenance, with an expiry time, or have them remain in maintenance forever until they are manually removed. Nodes are put into maintenance in much the same way as nodes are decommissioned.
 * When a set of nodes go into maintenance, all blocks hosted on them are scanned and, if the node going into maintenance would cause the number of replicas to fall below the minimum replica count, the relevant nodes go into a decommissioning-like state while new replicas are made for the blocks.
 * Once the node goes into maintenance, it can be stopped etc. and HDFS will not be concerned about the under-replicated state of the blocks.
 * When the expiry time passes, the node is put back to the normal state (if it is online and heartbeating) or marked as dead, at which time new replicas will start to be made.

This is very similar to decommissioning, and the code to track maintenance mode and ensure the blocks are replicated etc. is effectively the same code as with decommissioning. The one area that differs is probably in the replication monitor, as it must understand that the node is expected to be offline.

The ideal way to use maintenance mode is when you know there is a set of nodes you can stop without having to do any replications. In HDFS, the rack awareness states that all blocks should be on two racks, so that means a rack can be put into maintenance safely.

There is another feature in HDFS called "upgrade Domain" which allows each datanode to be assigned a group. By default there should be at least 3 groups (domains) and then each of the 3 replicas will be stored on a different group, allowing one full group to be put into maintenance at once. That is not yet supported in CDH, but is something we are targeting for CDPD I believe.

One other difference between maintenance mode and decommissioning is that you must have some sort of monitor thread checking for when maintenance is scheduled to end. HDFS solves this by having a class called the DatanodeAdminManager, and it tracks all nodes transitioning state, the under-replicated block count on them, etc.

# Implementation

## Datanode state machine

`NodeStateManager` maintains the state of the connected datanodes. The possible states:

 state                 | description
 ----------------------|------------
 HEALTHY               | The node is up and running.
 STALE                 | Some heartbeats were missed for an already registered node.
 DEAD                  | The stale node has not been recovered.
 ENTERING_MAINTENANCE  | The in-progress state; scheduling is disabled but the node cannot be turned off yet due to in-progress replication.
 IN_MAINTENANCE        | The node can be turned off, but we expect to get it back and have all the replicas.
 DECOMMISSIONING       | The in-progress state; scheduling is disabled, and all the containers should be replicated to other nodes.
 DECOMMISSIONED        | The node can be turned off; all the containers are replicated to other machines.

## High level algorithm

The algorithm is pretty simple from the Decommission or Maintenance point of view:

 1. Mark a data node as DECOMMISSIONING or ENTERING_MAINTENANCE. This implies that the node is NOT healthy anymore; we assume the use of a single flag and the law of the excluded middle.

 2. Pipelines should be shut down and we wait for confirmation that all pipelines are shut down. So no new I/O or container creation can happen on a Datanode that is part of decomm/maint.

 3. Once the Node has been marked as DECOMMISSIONING or ENTERING_MAINTENANCE, the Node will generate a list of containers that need replication. This list is generated by the Replica Count decisions for each container; the Replica Count will be computed by the Replica Manager.

 4. The Replica Manager will check the stop condition for each node. The following should be true for all the containers to go from DECOMMISSIONING to DECOMMISSIONED or from ENTERING_MAINTENANCE to IN_MAINTENANCE (see the state sketch after this list).

    * Container is closed.
    * We have at least one HEALTHY copy at all times.
    * For entering DECOMMISSIONED mode, `maintenance + healthy` must be equal to `expectedCount`.

 5. We will update the node state to DECOMMISSIONED or IN_MAINTENANCE once the corresponding stop condition is reached.
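To make the state table and the steps above concrete, here is a minimal Java sketch of the admin-related node states and the transitions the algorithm implies. It is only an illustration: the enum values mirror the table, but the transition helper is an assumption, and SCM's real `NodeStateManager` models health and admin state differently.

```java
/**
 * Sketch only: admin-related node states from the table above and the
 * transitions implied by steps 1-5 of the high level algorithm.
 */
enum NodeOperationalState {
  HEALTHY, STALE, DEAD,
  ENTERING_MAINTENANCE, IN_MAINTENANCE,
  DECOMMISSIONING, DECOMMISSIONED;

  boolean canTransitionTo(NodeOperationalState next) {
    switch (this) {
      case HEALTHY:
        // step 1: mark the node, which disables scheduling on it
        return next == DECOMMISSIONING || next == ENTERING_MAINTENANCE;
      case DECOMMISSIONING:
        // step 5: every container satisfied the stop condition
        return next == DECOMMISSIONED;
      case ENTERING_MAINTENANCE:
        return next == IN_MAINTENANCE;
      case IN_MAINTENANCE:
        // maintenance window expired: node is back (HEALTHY) or lost (DEAD)
        return next == HEALTHY || next == DEAD;
      default:
        return false;
    }
  }
}
```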
_Replica count_ is a calculated number which represents the number of _missing_ replicas. The number can be negative in case of an over-replicated container.

## Calculation of the _Replica count_ (required replicas)

### Counters / Variables

We have 7 different datanode states, but some of them can be combined. At a high level we can group the existing states into three groups:

 * healthy state (when the container is available)
 * maintenance (including IN_MAINTENANCE and ENTERING_MAINTENANCE)
 * all the others.

To calculate the required steps (required replication + stop condition) we need counters for the first two categories.

Each counter should be calculated on a per-container basis.

 Node state                             | Variable (# of containers)      |
 ---------------------------------------|---------------------------------|
 HEALTHY                                | `healthy`                       |
 STALE + DEAD + DECOMMISSIONED          |                                 |
 DECOMMISSIONING                        |                                 |
 ENTERING_MAINTENANCE + IN_MAINTENANCE  | `maintenance`                   |

### The current replication model

The current replication model in SCM/Ozone is very simplistic. We compute the replication count, or the number of replications that we need to do, as:

```
Replica count = expectedCount - currentCount
```

In case the _Replica count_ is positive, it means that we need to make more replicas. If the number is negative, it means that we are over replicated and we need to remove some replicas of this container. If the Replica count for a container is zero, it means that we have the expected number of containers in the cluster.

To support idempotent placement strategies we should subtract the in-flight replications from the result: if there is one in-flight replication process and two replicas, we won't start a new replication command unless the original command has timed out. The timeout is configured with `hdds.scm.replication.event.timeout` and the default value is 10 minutes.

More precisely, the current algorithm is the following:

```java
replicaCount = expectedCount - healthy;

if (replicaCount - inflight copies + inflight deletes) > 0 {
  // handle under replicated containers
} else if (replicaCount - inflight copies + inflight deletes) < 0 {
  // container is over replicated
}
```

The handling of in-flight copies and deletes is independent of the decommissioning problem, therefore here we focus only on the core model:

```
replicaCount = expectedCount - healthy;
```

### The proposed solution

To support the notion that a user can provide hints to the replication model, we propose to add two variables to the current model.

In the new model, we propose to break the `currentCount` into two separate groups, that is _Healthy nodes_ and _Maintenance nodes_. The new model replaces the currentCount with these two separate counts. The following function captures the code that drives the logic of computing Replica counts in the new model. The later section discusses the input and output of this model very extensively.

```java
/**
 * Calculate the number of the missing replicas.
 *
 * @return the number of the missing replicas.
 *         If it's less than zero, the container is over replicated.
 */
int getReplicationCount(int expectedCount, int healthy, int maintenance) {

  //for over replication, count only the healthy replicas
  if (expectedCount <= healthy) {
    return expectedCount - healthy;
  }

  replicaCount = expectedCount - (healthy + maintenance);

  //at least one HEALTHY replica should be guaranteed!
  if (replicaCount == 0 && healthy < 1) {
    replicaCount ++;
  }

  //over replication is already handled. Always return with
  // positive as over replication is already handled
  return Math.max(0, replicaCount);
}
```

To understand the reasoning behind the two special `if` conditions, check the examples below.

We also need to specify two end conditions: when the DECOMMISSIONING node can be moved to the DECOMMISSIONED state, and when the ENTERING_MAINTENANCE node can be moved to the IN_MAINTENANCE state.

The following conditions should be true for all the containers, and all the containers on the specific node should be closed.

**From DECOMMISSIONING to DECOMMISSIONED**:

 * There is at least one healthy replica
 * We have three replicas (counting both healthy and maintenance)

Which means that our stop condition can be formalized as:

```
(healthy >= 1) && (healthy + maintenance >= 3)
```

Both of the numbers can be configurable:

 * 1 is the minimum number of healthy replicas (`decommissioning.minimum.healthy-replicas`)
 * 3 is the minimum number of existing replicas (`decommissioning.minimum.replicas`)

For example `decommissioning.minimum.healthy-replicas` can be set to two if the administrator would like to survive an additional node failure during the maintenance period. (A small configurable sketch of these conditions is shown after the examples below.)

**From ENTERING_MAINTENANCE to IN_MAINTENANCE:**

 * There is at least one healthy replica

This is the weaker version of the previous condition:

```
(healthy >= 1)
```

### Examples (normal cases)

In this section we show example use cases together with the output of the proposed algorithm.

#### All healthy

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount
 --------|---------|---------|--------|---------|---------|----------
 HEALTHY | HEALTHY | HEALTHY | 3      | 3       | 0       | 0

The container C1 exists on machines A, B, and C. All the container reports tell us that the container is healthy. Running the above algorithm, we get:

`expected - (healthy + maint.) = 3 - (3 + 0) = 0`

It means, _"we don’t need no replication"._

#### One failure

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount
 --------|---------|---------|--------|---------|---------|----------
 HEALTHY | HEALTHY | DEAD    | 3      | 2       | 0       | 1

Machine C has failed, and as a result, the healthy count has gone down from `3` to `2`. This means that we need to start one replication flow.

`ReplicaCount = expected - (healthy + maint.) = 3 - (2 + 0) = 1.`

This means that the new model will handle failure cases just like the current model.

#### One decommissioning

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount
 --------|---------|---------|--------|---------|---------|----------
 HEALTHY | HEALTHY | DECOMM  | 3      | 2       | 0       | 1

In this case, machine C is being decommissioned. Therefore the healthy count has gone down to `2`, and the decommission count is `1`. Since `ReplicaCount = expected - (healthy + maint.)`, we have `1 = 3 - (2 + 0)`; this gives us the decommission count implicitly. The trick here is to realize that incrementing decommission automatically causes a decrement in the healthy count, which allows us not to have _decommission_ in the equation explicitly.

**Stop condition**: Note that if this container is the only one on node C, node C can be moved to the DECOMMISSIONED state.
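As a quick sanity check, the repCount column of these tables can be reproduced by calling the proposed function directly. The snippet below restates `getReplicationCount` with a local variable declaration so that it compiles on its own; the class name and the `main` harness are illustrative only, not part of the design.

```java
public class ReplicaCountExamples {

  // Same logic as the proposed getReplicationCount above.
  static int getReplicationCount(int expectedCount, int healthy, int maintenance) {
    if (expectedCount <= healthy) {
      return expectedCount - healthy;   // over replication (zero or negative)
    }
    int replicaCount = expectedCount - (healthy + maintenance);
    if (replicaCount == 0 && healthy < 1) {
      replicaCount++;                   // guarantee at least one healthy replica
    }
    return Math.max(0, replicaCount);
  }

  public static void main(String[] args) {
    System.out.println(getReplicationCount(3, 3, 0)); // all healthy          -> 0
    System.out.println(getReplicationCount(3, 2, 0)); // one failure / decomm -> 1
    System.out.println(getReplicationCount(3, 1, 1)); // maint. + decomm (below) -> 1
    System.out.println(getReplicationCount(3, 0, 3)); // all maintenance (below) -> 1
  }
}
```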
#### Failure + decommissioning

 Node A  | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 HEALTHY | DEAD   | DECOMM | 3      | 1       | 0       | 2

Here is a case where we have a failure of one data node and a decommission of another data node. In this case, the container C1 needs two replica flows to heal itself. The equation is the same and we get:

`ReplicaCount(2) = ExpectedCount(3) - healthy(1)`

Maintenance is still zero, so it is ignored in this equation.

#### 1 failure + 2 decommissioning

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 DEAD    | DECOMM | DECOMM | 3      | 0       | 0       | 3

In this case, we have one failed data node and two data nodes being decommissioned. We need to get three replica flows in the system. This is achieved by:

```
ReplicaCount(3) = ExpectedCount(3) - (healthy(0) + maintenance(0))
```

#### Maintenance mode

 Node A  | Node B  | Node C | expctd | healthy | mainten | repCount
 --------|---------|--------|--------|---------|---------|----------
 HEALTHY | HEALTHY | MAINT  | 3      | 2       | 1       | 0

This represents the normal maintenance mode, where a single machine is marked as being in maintenance mode. This means the following:

```
ReplicaCount(0) = ExpectedCount(3) - (healthy(2) + maintenance(1))
```

There are no replica flows since the user has asked us to move a single node into maintenance mode, and asked us explicitly not to worry about the single missing node.

**Stop condition**: Note that if this container is the only one on node C, node C can be moved to the IN_MAINTENANCE state.

#### Maintenance + decommissioning

 Node A  | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 HEALTHY | DECOMM | MAINT  | 3      | 1       | 1       | 1

*This is a fascinating case*: we have one good node, one node being decommissioned and one node in maintenance mode. The expected result is that the Replica Manager will launch one replication flow to compensate for the node that is being decommissioned, and we also expect that there will be no replication for the node in maintenance mode.

```
Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1))
```

So as expected we have one replication flow in the system.

**Stop condition**: Note that if this container is the only one in the system:

 * node C can be moved to the IN_MAINTENANCE state
 * node B cannot be decommissioned (we need the three replicas first)

#### Decommissioning all the replicas

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 DECOMM  | DECOMM | DECOMM | 3      | 0       | 0       | 3

In this case, we deal with all the data nodes being decommissioned. The number of healthy replicas for this container is 0, and hence:

```
replicaCount(3) = expectedCount(3) - (healthy(0) + maintenance(0))
```

This provides us with all 3 independent replica flows in the system.

#### Decommissioning the one remaining replica

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 DEAD    | DEAD   | DECOMM | 3      | 0       | 0       | 3

We have two failed nodes and one node being decommissioned. It is the opposite of the _1 failure + 2 decommissioning_ case above, where we have one failed node and two nodes being decommissioned. The expected results are the same: we get 3 flows.
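
As a sanity check, the `repCount` column of the normal-case tables above can be reproduced by calling the proposed `getReplicationCount` function with the same inputs. A short sketch; the scenario values are copied directly from the tables above:

```java
// Each row: expectedCount, healthy, maintenance, expected repCount.
int[][] scenarios = {
    {3, 3, 0, 0},  // all healthy
    {3, 2, 0, 1},  // one failure
    {3, 2, 0, 1},  // one decommissioning
    {3, 1, 0, 2},  // failure + decommissioning
    {3, 0, 0, 3},  // 1 failure + 2 decommissioning
    {3, 2, 1, 0},  // maintenance mode
    {3, 1, 1, 1},  // maintenance + decommissioning
    {3, 0, 0, 3},  // decommissioning all the replicas
    {3, 0, 0, 3},  // decommissioning the one remaining replica
};
for (int[] s : scenarios) {
  assert getReplicationCount(s[0], s[1], s[2]) == s[3];
}
```
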
#### Total failure

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 DEAD    | DEAD   | DEAD   | 3      | 0       | 0       | 3

This is really an error condition. We have lost all 3 data nodes. The Replica Manager will compute that we need to rebuild 3 replicas, but we might not have a source to rebuild from.

#### Last replica is on ENTERING_MAINTENANCE

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 DEAD    | MAINT  | DEAD   | 3      | 0       | 1       | 2

This is also an interesting case; we have lost 2 data nodes, and one node is being marked for maintenance. Since we have 2 failed nodes, we need 2 replica flows in the system. However, the maintenance mode cannot be entered, since we would lose the lone replica if we did that.

#### All maintenance

 Node A | Node B | Node C | expctd | healthy | mainten | repCount
 --------|--------|--------|--------|---------|---------|----------
 MAINT   | MAINT  | MAINT  | 3      | 0       | 3       | *1*

This is also a very special case; this is the case where the user is telling us to ignore the peril of all 3 replicas being offline. This means that the system would not be able to get to that container, which would lead to potential I/O errors. Ozone will strive to avoid that case; this means that Ozone will hit the `if` condition and discover that our ReplicaCount is 0 (since the user asked for it), but we are also going to lose all replicas. At this point we make a conscious decision to replicate one copy instead of obeying the user command and getting into a situation where I/O can fail.

**This brings us back to the semantics of Maintenance mode in Ozone**. If going into maintenance mode will not lead to a potential I/O failure, we will enter into the maintenance mode; otherwise, we will replicate and enter into the maintenance mode after the replication is done. This is just the core replication algorithm, not the complete Decommission or Maintenance mode algorithm, just how the Replica Manager would behave. Once we define the behavior of the Replica Manager, the rest of the algorithm is easy to construct.

Note: this case is why we need the second `if` in the model (numbers in brackets show the actual values):

```
  replicaCount(0) = expectedCount(3) - ( healthy(0) + maintenance(3) );

  //at least one HEALTHY replica should be guaranteed!
  if (replicaCount(0) == 0 && healthy(0) < 1) {
    replicaCount++;
  }
```

### Over replication

For over-replicated containers Ozone prefers to keep the replicas on the healthy nodes. We delete replicas only if we have enough replicas on *healthy* nodes.

```
int getReplicationCount(int expectedCount, int healthy, int maintenance) {

  //for over-replication, count only the healthy replicas
  if (expectedCount <= healthy) {
    return expectedCount - healthy;
  }

  int replicaCount = ... //calculate missing replicas

  //over-replication is already handled above, so
  //always return a non-negative count
  return Math.max(0, replicaCount);
}
```

Please note that we always assume that the in-flight deletions are applied and the container is already deleted.

There is a very rare case where the in-flight deletion times out (and as a result the Replication Manager would assume the container is not deleted), but in the meantime the container is finally deleted.
This case can be handled by including the creation timestamp in the ContainerDeleteCommand.

### Over replication examples

#### 4 replicas

 Node A  | Node B  | Node C  | Node D  | expctd | healthy | mainten | repCount
 --------|---------|---------|---------|--------|---------|---------|----------
 HEALTHY | HEALTHY | HEALTHY | HEALTHY | 3      | 4       | 0       | 0

This is an easy case: we have too many replicas, so we can safely remove one.

```
if (expectedCount <= healthy) {
  return expectedCount - healthy;
}
```

#### over replicated with IN_MAINTENANCE

 Node A  | Node B  | Node C  | Node D | expctd | healthy | mainten | repCount
 --------|---------|---------|--------|--------|---------|---------|----------
 HEALTHY | HEALTHY | HEALTHY | MAINT  | 3      | 3       | 1       | 0

In this case we will delete the fourth replica only after node D is restored and healthy again (expectedCount is not less than healthy). As `expectedCount (3) <= healthy (3)`, the replicaCount will be calculated as `0`.

#### over replicated with two IN_MAINTENANCE replicas

 Node A  | Node B  | Node C | Node D | expctd | healthy | mainten | repCount
 --------|---------|--------|--------|--------|---------|---------|----------
 HEALTHY | HEALTHY | MAINT  | MAINT  | 3      | 2       | 2       | 0

Here we are not over-replicated, as we don't have enough healthy replicas. We will calculate the under-replication number as defined in the previous section:

```
replicaCount(-1) = expectedCount(3) - ( healthy(2) + maintenance(2) );
```

The main algorithm would return `replicaCount = -1`, but as we return `Math.max(0, replicaCount)` the real response will be 0. We are waiting for healthy nodes.

### Handling in-flight replications

In-flight replication requests and deletes are handled by the Replica Manager, and the problem is orthogonal to the decommissioning problem, but this section shows that the proposed model does not conflict with the existing approach.

Let's say we have an under-replicated container and we have already selected a new datanode to copy a new replica to that specific node.

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount
 --------|---------|---------|--------|---------|---------|----------
 HEALTHY | HEALTHY | (copy)  | 3      | 2       | 0       | 1

Here the Replication Manager detects that one replica is missing, but a new copy shouldn't be requested as one is already in progress. The Replica Manager must not select a new datanode based on the ContainerPlacementPolicy implementation, as the policy may or may not be idempotent.

For example, if the placement policy selected a datanode randomly, each loop would select a new datanode to replicate to.

To avoid such a situation the Replica Manager maintains a list of the in-flight copies (in-memory) on the SCM side. This list contains all the sent replication requests, and they are removed after a given amount of time (10 minutes by default).

By counting an in-flight copy as a future replica, the Replication Manager doesn't need to request a new replication.
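
To make the bookkeeping concrete, here is a minimal sketch (illustrative names only, not the actual ReplicationManager code) of how the tracked in-flight copies could be subtracted before new replications are scheduled:

```java
// Sketch: in-flight copies are treated as replicas that will appear soon,
// so only the remainder needs a new placement decision.
int newCopiesToSchedule(int expectedCount, int healthy, int maintenance,
    int inflightCopies) {
  int replicaCount = getReplicationCount(expectedCount, healthy, maintenance);
  return Math.max(0, replicaCount - inflightCopies);
}
```
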
When a datanode is marked to be decommissioned, there may be in-flight replication processes at that time:

 * On the datanode, we should stop all of the in-flight copies (datanodes should be notified about the DECOMMISSIONING/IN_MAINTENANCE state).
 * We never ask any non-healthy nodes to replicate containers.
 * In SCM, we don't need to do any special action.
 * In `ReplicationManager` we already have an in-memory map of the in-flight replications per container (the `inflightReplication` map).
 * During a normal replication the number of in-flight replications is counted as real replication (2 real replicas + 1 in-flight replication = replica count 3). During this calculation we need to check the current state of the datanodes and ignore the in-flight replications if they are assigned to a node which is in decommissioning state. (Or we should update the in-flight map in case of a node state change.)

### In-flight examples

#### Maintenance + inflight

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount | copy |
 --------|---------|---------|--------|---------|---------|----------|------|
 HEALTHY | MAINT   | copying | 3      | 1       | 1       | 1        | 1    |

Here we have one node in the ENTERING_MAINTENANCE state, and one replica is missing and has already started to be replicated. We don't need to start a new copy and node B can be moved to the IN_MAINTENANCE state.

```
Replica Count (1) = expectedCount(3) - (healthy(1) + maintenance(1))
Containers to copy (0) = Replica Count (1) - inflight copies (1)
```

#### Decommissioning + inflight

 Node A  | Node B  | Node C  | expctd | healthy | mainten | repCount | copy |
 --------|---------|---------|--------|---------|---------|----------|------|
 DECOMM  | copying | copying | 3      | 0       | 0       | 3        | 2    |

Node A cannot be DECOMMISSIONED as we have no HEALTHY replicas at all.

## Statefulness

SCM stores all the node state in memory. After a restart on the SCM side the datanode state can be lost.

**Ozone doesn't guarantee that the decommissioning/maintenance mode state survives SCM restarts!**

 * If SCM restarts, DECOMMISSIONED nodes will not send container reports any more and the nodes won't be registered.
 * ENTERING_MAINTENANCE and DECOMMISSIONING nodes will become HEALTHY again and the decommissioning CLI command should be repeated.
 * IN_MAINTENANCE nodes will become DEAD and all of their containers will be replicated.

*Ozone assumes that the maintenance mode is used short-term and SCM is not restarted during this specific period.*

*Reasoning*:

Neither the node state nor the container state is persisted on the SCM side. The decommissioned state could be stored on the SCM side (or on both the SCM side and the datanode side), which would provide a better user experience (and may be implemented).

But to support maintenance mode after a restart, all the container information would need to be persisted (which is too big an architectural change).

To make a replication decision the Replication Manager needs the number of healthy replicas (they are reported via heartbeats) AND the number of containers on the node which is in maintenance mode. The latter is not available if the SCM is restarted, as the container map exists only in memory and the node which is turned off can't send any more container reports. Therefore the information about the existing containers on the node which is in maintenance mode **can't be available**.

## Throttling

SCM should avoid requesting too many replications at the same time, to leave enough network bandwidth for the requests.

The Replication Manager can easily throttle the replication requests based on the `inflightReplication` map, but this problem is independent from the handling of the decommissioning / maintenance mode because it should be solved for any kind of replication, not just for this one.
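
As a rough illustration (hypothetical names, not the actual ReplicationManager logic), such a throttle could simply stop scheduling new copies once the number of tracked in-flight replications reaches a configured cap:

```java
// Illustrative sketch only: sendReplicateCommand and the cap name are
// hypothetical; the point is that the in-flight bookkeeping already gives us
// the number needed for throttling.
void scheduleReplications(List<ContainerInfo> underReplicated,
    int inflightCount, int maxInflightReplications) {
  for (ContainerInfo container : underReplicated) {
    if (inflightCount >= maxInflightReplications) {
      break; // leave the rest for the next ReplicationManager iteration
    }
    sendReplicateCommand(container); // hypothetical helper
    inflightCount++;
  }
}
```
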
## User interface

The decommissioning and maintenance mode can be administered with a CLI interface.

Required features:

 * Set the state of a datanode (to DECOMMISSIONING or ENTERING_MAINTENANCE)
 * Undo the decommissioning process
 * Check the current progress:
    * This can be a table with the nodes, the status of the nodes, the number of containers, the containers under replication and the containers which don't yet match the stop condition (required replications)
 * All the commands can support topology-related filters (eg. display the nodes only for a specific rack or show the status of the nodes of a specific rack)
 * Check the current replication information of one specific container (required to debug why the decommissioning is stuck)

## Checks before the decommissioning

Decommissioning is requested via a new RPC call with the help of a new CLI tool. The server should check the current state of the cluster and deny the decommissioning if it's not possible. Possible violations:

 * Not enough space to store the new replicas.
 * Not enough nodes to create all kinds of pipelines.

In case of any violation, the request will fail, but any of these rules can be turned off in a subsequent request and the decommissioning can be forced.

## Maintain progress

We need to show the progress of the decommissioning process per node and cluster-wide. We already have the information about the under-replicated containers, but we don't know the number of containers before decommissioning.

Instead of saving the original number of required replications beforehand (which is very fragile), we don't provide an absolute progress, just the number of remaining replications:

```
 Node   | Status               | # containers | in-progress replications | required replications
 -------|----------------------|--------------|--------------------------|-----------------------
 Node A | ENTERING_MAINTENANCE | 2837         | 12                       | 402
 Node B | HEALTHY              | 1239         | 0                        | 0
 Node C | IN_MAINTENANCE       | 2348         | 0                        | 0
```

`# containers` means the total number of containers on the specific datanode. To get the original number of the planned copies we could save the original 'container-to-node' map in memory, show some progress and provide more information for the users.
diff --git a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md b/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
deleted file mode 100644
index cc7569eb2cf..00000000000
--- a/hadoop-hdds/docs/content/design/ozone-enhancement-proposals.md
+++ /dev/null
@@ -1,197 +0,0 @@
---
title: Ozone Enhancement Proposals
summary: Definition of the process to share new technical proposals with the Ozone community.
date: 2019-06-07
jira: HDDS-1659
status: accepted
author: Anu Engineer, Marton Elek
---

## Problem statement

Some of the bigger features require well-defined plans before the implementation. Until now this was managed by uploading PDF design docs to the selected JIRA. There are multiple problems with the current practice.

 1. There is no easy way to find existing up-to-date and outdated design docs.
 2. Design docs usually have a better description of the problem than the user docs.
 3. We need better tools to discuss the design docs in the development phase of the doc.

We propose to follow the same process we have now, but instead of uploading a PDF to the JIRA, create a PR to merge the proposal document to the documentation project.
- -## Non-goals - - * Modify the existing workflow or approval process - * Migrate existing documents - * Make it harder to create design docs (it should be easy to support the creation of proposals for any kind of tasks) - * Define how the design docs are handled/created *before* the publication (this proposal is about the publishing process) - -## Proposed solution - - * Open a dedicated Jira (`HDDS-*` but with specific component) - * Use standard name prefix in the jira (easy to filter on the mailing list) `[OEP] - * Create a PR to add the design doc to the current documentation - * The content of the design can be added to the documentation (Recommended) - * Or can be added as external reference - * The design doc (or the summary with the reference) will be merged to the design doc folder of `hadoop-hdds/docs/content/design` (will be part of the docs) - * Discuss it as before (lazy consesus, except if somebody calls for a real vote) - * Design docs can be updated according to the changes during the implementation - * Only the implemented design docs will be visible as part of the design docs - - -As a result all the design docs can be listed under the documentation page. - -A good design doc has the following properties: - - 1. Publicly available for anybody (Please try to avoid services which are available only with registration, eg: google docs) - 2. Archived for the future (Commit it to the source OR use apache jira or wiki) - 3. Editable later (Best format is markdown, RTF is also good. PDF has a limitation, it's very hard to reuse the text, or create an updated design doc) - 4. Well structured to make it easy to comment any part of the document (Markdown files which are part of the pull request can be commented in the PR line by line) - - -### Example 1: Design doc as a markdown file - -The easiest way to create a design doc is to create a new markdown file in a PR and merge it to `hadoop-hdds/docs/content/design`. - - 1. Publicly available: YES, it can be linked from Apache git or github - 2. Archived: YES, and it's also versioned. All the change history can be tracked. - 3. Editable later: YES, as it's just a simple text file - 4. Commentable: YES, comment can be added to each line. - -### Example 2: Design doc as a PDF - -A very common practice of today is to create design doc on google docs and upload it to the JIRA. - - 1. Publicy available: YES, anybody can download it from the Jira. - 2. Archived: YES, it's available from Apache infra. - 3. Editable: NO, It's harder to reuse the text to import to the docs or create a new design doc. - 4. Commentable: PARTIAL, Not as easy as a text file or the original google docs, but a good structure with numbered section may help - - -### The format - -While the first version (markdown files) are the most powerful, the second version (the existing practice) is also acceptable. In this case we propose to create a PR with adding a reference page *without* the content but including the link. - -For example: - -```yaml ---- -title: Ozone Security Design -summary: A comprehensive description of the security flow between server and client components. -date: 2018-02-22 -jira: HDDS-4 -status: implemented -author: Sanjay Radia, Jitendra Pandey, Xiaoyu Yao, Anu Engineer - -## Summary - -Ozone security model is based on Kerberos and similar to the Hadoop security but some of the parts are improved: for example the SCM works as a Certificate Authority and PKI based solutions are wildely used. 
- -## Reference - -For more details please check the (uploaded design doc)[https://issues.apache.org/jira/secure/attachment/12911638/HadoopStorageLayerSecurity.pdf]. - -``` - -Obviously with the first approach the design doc itself can be included in this markdown file. - -## Migration - -It's not a hard requirement to migrate all the design doc. But process is always open: - - 1. To create reference pages for any of the old design docs - 2. To migrate any new design docs to markdown formats (by anybody not just by the author) - 3. To update any of the old design docs based on the current state of the code (We have versioning!) - -## Document template - -This the proposed template to document any proposal. It's recommended but not required the use exactly the some structure. Some proposal may require different structure, but we need the following information. - -1. Summary - -> Give a one sentence summary, like the jira title. It will be displayed on the documentation page. Should be enough to understand - -2. Status - -Defined in the markdown header. Proposed statuses: - - * `accepted`: (Use this as by default. If not accapted, won't be merged) - - * `implemented`: The discussed technical solution is implemented (maybe with some minor implementation difference) - - * `replaced`: Replaced by a new design doc - - * `outdated`: Code has been changed and design doc doesn't reflect any more the state of the current code. - - Note: the _accepted_ design docs won't be visible as part of the documentation or only under a dedicated section to clearly comminucate that it's not ready, yet. - -3. Problem statement (Motivation / Abstract) - -> What is the problem and how would you solve it? Think about an abstract of a paper: one paragraph overview. Why will the world better with this change? - -4. Non-goals - - > Very important to define what is outside of the scope of this proposal - -5. Technical Description (Architecture and implementation details) - - > Explain the problem in more details. How can it be reproduced? What is the current solution? What is the limitation of the current solution? - - > How the new proposed solution would solve the problem? Architectural design. - - > Implementation details. What should be changed in the code. Is it a huge change? Do we need to change wire protocol? Backward compatibility? - -6. Alternatives - - > What are the other alternatives you considered and why do yoy prefer the proposed solution The goal of this section is to help people understand why this is the best solution now, and also to prevent churn in the future when old alternatives are reconsidered. - -Note: In some cases 4/5 can be combined. For example if you have multiple proposals, the first version may include multiple solutions. At the end ot the discussion we can move the alternatives to 5. and explain why the community is decided to use the selected option. - -7. Plan - - > Planning to implement the feature. Estimated size of the work? Do we need feature branch? Any migration plan, dependency? If it's not a big new feature it can be one sentence or optional. - -8. References - -## Workflows form other projects - -There are similar process in other open source projects. 
This document and the template is inspired by the following projects: - - * [Apache Kafka Improvement Proposals](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) - * [Apache Spark Project Improvement Proposals](https://spark.apache.org/improvement-proposals.html) - * [Kubernetes Enhancement Proposals](https://github.com/kubernetes/enhancements/tree/master/keps) - -Short summary of the processes: - -__Kafka__ process: - - * Create wiki page - * Start discussion on mail thread - * Vote on mail thread - -__Spark__ process: - - * Create JIRA (dedicated label) - * Discuss on the jira page - * Vote on dev list - -*Kubernetes*: - - * Deditaced git repository - * KEPs are committed to the repo - * Well defined approval process managed by SIGs (KEPs are assigned to SIGs) - diff --git a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md b/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md deleted file mode 100644 index dd23e049416..00000000000 --- a/hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "GDPR in Ozone" -date: "2019-September-17" -weight: 5 -summary: GDPR in Ozone -icon: user ---- - - - -Enabling GDPR compliance in Ozone is very straight forward. During bucket -creation, you can specify `--enforcegdpr=true` or `-g=true` and this will -ensure the bucket is GDPR compliant. Thus, any key created under this bucket -will automatically be GDPR compliant. - -GDPR can only be enabled on a new bucket. For existing buckets, you would -have to create a new GDPR compliant bucket and copy data from old bucket into - new bucket to take advantage of GDPR. - -Example to create a GDPR compliant bucket: - -`ozone sh bucket create --enforcegdpr=true /hive/jan` - -`ozone sh bucket create -g=true /hive/jan` - -If you want to create an ordinary bucket then you can skip `--enforcegdpr` -and `-g` flags. \ No newline at end of file diff --git a/hadoop-hdds/docs/content/gdpr/_index.md b/hadoop-hdds/docs/content/gdpr/_index.md deleted file mode 100644 index 9888369023b..00000000000 --- a/hadoop-hdds/docs/content/gdpr/_index.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: GDPR -name: GDPR -identifier: gdpr -menu: main -weight: 5 ---- - - -{{}} - The General Data Protection Regulation (GDPR) is a law that governs how personal data should be handled. This is an European Union law, but due to the nature of software oftentimes spills into other geographies. - Ozone supports GDPR's Right to Erasure(Right to be Forgotten). -{{}} - -

- -Once you create a GDPR compliant bucket, any key created in that bucket will -automatically by GDPR compliant. - - diff --git a/hadoop-hdds/docs/content/interface/JavaApi.md b/hadoop-hdds/docs/content/interface/JavaApi.md deleted file mode 100644 index bb18068f400..00000000000 --- a/hadoop-hdds/docs/content/interface/JavaApi.md +++ /dev/null @@ -1,156 +0,0 @@ ---- -title: "Java API" -date: "2017-09-14" -weight: 1 -summary: Ozone has a set of Native RPC based APIs. This is the lowest level API's on which all other protocols are built. This is the most performant and feature-full of all Ozone protocols. ---- - - -Ozone ships with its own client library that supports RPC. For generic use cases the S3 -compatible REST interface also can be used instead of the Ozone client. - - -## Creating an Ozone client -The Ozone client factory creates the ozone client. To get a RPC client we can call - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getRpcClient(); -{{< /highlight >}} - -If the user want to create a client based on the configuration, then they can -call. - -{{< highlight java >}} -OzoneClient ozClient = OzoneClientFactory.getClient(); -{{< /highlight >}} - -and an appropriate client based on configuration will be returned. - -## Writing data using Ozone Client - -The hierarchy of data inside ozone is a volume, bucket and a key. A volume -is a collection of buckets. A bucket is a collection of keys. To write data -to the ozone, you need a volume, bucket and a key. - -### Creating a Volume - -Once we have a client, we need to get a reference to the ObjectStore. This -is done via - -{{< highlight java >}} -ObjectStore objectStore = ozClient.getObjectStore(); -{{< /highlight >}} - -An object store represents an active cluster against which the client is working. - -{{< highlight java >}} -// Let us create a volume to store our game assets. -// This uses default arguments for creating that volume. -objectStore.createVolume("assets"); - -// Let us verify that the volume got created. -OzoneVolume assets = objectStore.getVolume("assets"); -{{< /highlight >}} - - -It is possible to pass an array of arguments to the createVolume by creating volume arguments. - -### Creating a Bucket - -Once you have a volume, you can create buckets inside the volume. - -{{< highlight java >}} -// Let us create a bucket called videos. -assets.createBucket("videos"); -OzoneBucket video = assets.getBucket("videos"); -{{< /highlight >}} - -At this point we have a usable volume and a bucket. Our volume is called _assets_ and bucket is called _videos_. - -Now we can create a Key. - -### Reading and Writing a Key - -With a bucket object the users can now read and write keys. The following code reads a video called intro.mp4 from the local disk and stores in the _video_ bucket that we just created. - -{{< highlight java >}} -// read data from the file, this is a user provided function. -byte [] videoData = readFile("intro.mp4"); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. -videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. 
-byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey("intro.mp4"); -// read intro.mp4 into the data buffer -introStream.read(data); -introStream.close(); -{{< /highlight >}} - - -Here is a complete example of the code that we just wrote. Please note the close functions being called in this program. - -{{< highlight java >}} -// Let us create a client -OzoneClient ozClient = OzoneClientFactory.getClient(); - -// Get a reference to the ObjectStore using the client -ObjectStore objectStore = ozClient.getObjectStore(); - -// Let us create a volume to store our game assets. -// This default arguments for creating that volume. -objectStore.createVolume("assets"); - -// Let us verify that the volume got created. -OzoneVolume assets = objectStore.getVolume("assets"); - -// Let us create a bucket called videos. -assets.createBucket("videos"); -OzoneBucket video = assets.getBucket("videos"); - -// read data from the file, this is assumed to be a user provided function. -byte [] videoData = readFile("intro.mp4"); - -// Create an output stream and write data. -OzoneOutputStream videoStream = video.createKey("intro.mp4", 1048576); -videoStream.write(videoData); - -// Close the stream when it is done. -videoStream.close(); - - -// We can use the same bucket to read the file that we just wrote, by creating an input Stream. -// Let us allocate a byte array to hold the video first. - -byte[] data = new byte[(int)1048576]; -OzoneInputStream introStream = video.readKey("intro.mp4"); -introStream.read(data); - -// Close the stream when it is done. -introStream.close(); - -// Close the client. -ozClient.close(); -{{< /highlight >}} diff --git a/hadoop-hdds/docs/content/interface/OzoneFS.md b/hadoop-hdds/docs/content/interface/OzoneFS.md deleted file mode 100644 index fcfef6dde3d..00000000000 --- a/hadoop-hdds/docs/content/interface/OzoneFS.md +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: Ozone File System -date: 2017-09-14 -weight: 2 -summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. ---- - - -The Hadoop compatible file system interface allows storage backends like Ozone -to be easily integrated into Hadoop eco-system. Ozone file system is an -Hadoop compatible file system. - -## Setting up the Ozone file system - -To create an ozone file system, we have to choose a bucket where the file system would live. This bucket will be used as the backend store for OzoneFileSystem. All the files and directories will be stored as keys in this bucket. - -Please run the following commands to create a volume and bucket, if you don't have them already. - -{{< highlight bash >}} -ozone sh volume create /volume -ozone sh bucket create /volume/bucket -{{< /highlight >}} - -Once this is created, please make sure that bucket exists via the _list volume_ or _list bucket_ commands. - -Please add the following entry to the core-site.xml. - -{{< highlight xml >}} - - fs.o3fs.impl - org.apache.hadoop.fs.ozone.OzoneFileSystem - - - fs.AbstractFileSystem.o3fs.impl - org.apache.hadoop.fs.ozone.OzFs - - - fs.defaultFS - o3fs://bucket.volume - -{{< /highlight >}} - -This will make this bucket to be the default file system for HDFS dfs commands and register the o3fs file system type. 
- -You also need to add the ozone-filesystem.jar file to the classpath: - -{{< highlight bash >}} -export HADOOP_CLASSPATH=/opt/ozone/share/ozonefs/lib/hadoop-ozone-filesystem-lib-current*.jar:$HADOOP_CLASSPATH -{{< /highlight >}} - -Once the default Filesystem has been setup, users can run commands like ls, put, mkdir, etc. -For example, - -{{< highlight bash >}} -hdfs dfs -ls / -{{< /highlight >}} - -or - -{{< highlight bash >}} -hdfs dfs -mkdir /users -{{< /highlight >}} - - -Or put command etc. In other words, all programs like Hive, Spark, and Distcp will work against this file system. -Please note that any keys created/deleted in the bucket using methods apart from OzoneFileSystem will show up as directories and files in the Ozone File System. - -Note: Bucket and volume names are not allowed to have a period in them. -Moreover, the filesystem URI can take a fully qualified form with the OM host and an optional port as a part of the path following the volume name. -For example, you can specify both host and port: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:5678/key -{{< /highlight >}} - -When the port number is not specified, it will be retrieved from config key `ozone.om.address` -if defined; or it will fall back to the default port `9862`. -For example, we have `ozone.om.address` configured as following in `ozone-site.xml`: - -{{< highlight xml >}} - - ozone.om.address - 0.0.0.0:6789 - -{{< /highlight >}} - -When we run command: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com/key -{{< /highlight >}} - -The above command is essentially equivalent to: - -{{< highlight bash>}} -hdfs dfs -ls o3fs://bucket.volume.om-host.example.com:6789/key -{{< /highlight >}} - -Note: Only port number from the config is used in this case, -whereas the host name in the config `ozone.om.address` is ignored. - - -## Supporting older Hadoop version (Legacy jar, BasicOzoneFilesystem) - -There are two ozonefs files, both of them include all the dependencies: - - * share/ozone/lib/hadoop-ozone-filesystem-lib-current-VERSION.jar - * share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-VERSION.jar - -The first one contains all the required dependency to use ozonefs with a - compatible hadoop version (hadoop 3.2). - -The second one contains all the dependency in an internal, separated directory, - and a special class loader is used to load all the classes from the location. - -With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from - any older hadoop version (eg. hadoop 3.1, hadoop 2.7 or spark+hadoop 2.7) - -Similar to the dependency jar, there are two OzoneFileSystem implementation. - -For hadoop 3.0 and newer, you can use `org.apache.hadoop.fs.ozone.OzoneFileSystem` - which is a full implementation of the Hadoop compatible File System API. - -For Hadoop 2.x you should use the Basic version: `org.apache.hadoop.fs.ozone.BasicOzoneFileSystem`. - -This is the same implementation but doesn't include the features/dependencies which are added with - Hadoop 3.0. (eg. FS statistics, encryption zones). 
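
Regardless of which implementation is picked, applications written against the Hadoop `FileSystem` API work the same way against Ozone. A minimal sketch, assuming the placeholder volume/bucket names used earlier in this page (the OM host/port can be added to the URI or taken from `ozone.om.address` as described above):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OzoneFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "bucket" and "volume" are placeholders for your own bucket/volume names.
    FileSystem fs = FileSystem.get(URI.create("o3fs://bucket.volume"), conf);

    // Create a directory and list the root, just like with any HCFS backend.
    fs.mkdirs(new Path("/users"));
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      System.out.println(status.getPath());
    }
    fs.close();
  }
}
```
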
- -### Summary - -The following table summarize which jar files and implementation should be used: - -Hadoop version | Required jar | OzoneFileSystem implementation ----------------|-------------------------|---------------------------------------------------- -3.2 | filesystem-lib-current | org.apache.hadoop.fs.ozone.OzoneFileSystem -3.1 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.OzoneFileSystem -2.9 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem -2.7 | filesystem-lib-legacy | org.apache.hadoop.fs.ozone.BasicOzoneFileSystem - With this method the hadoop-ozone-filesystem-lib-legacy.jar can be used from - any older hadoop version (eg. hadoop 2.7 or spark+hadoop 2.7) diff --git a/hadoop-hdds/docs/content/interface/S3.md b/hadoop-hdds/docs/content/interface/S3.md deleted file mode 100644 index 6a8e2d7c53b..00000000000 --- a/hadoop-hdds/docs/content/interface/S3.md +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: S3 Protocol -weight: 3 -summary: Ozone supports Amazon's Simple Storage Service (S3) protocol. In fact, You can use S3 clients and S3 SDK based applications without any modifications with Ozone. ---- - - - - -Ozone provides S3 compatible REST interface to use the object store data with any S3 compatible tools. - -## Getting started - -S3 Gateway is a separated component which provides the S3 compatible APIs. It should be started additional to the regular Ozone components. - -You can start a docker based cluster, including the S3 gateway from the release package. - -Go to the `compose/ozones3` directory, and start the server: - -```bash -docker-compose up -d -``` - -You can access the S3 gateway at `http://localhost:9878` - -## URL Schema - -Ozone S3 gateway supports both the virtual-host-style URL s3 bucket addresses (eg. http://bucketname.host:9878) and the path-style addresses (eg. http://host:9878/bucketname) - -By default it uses the path-style addressing. To use virtual host style URLs set your main domain name in your `ozone-site.xml`: - -```xml - - ozone.s3g.domain.name - s3g.internal - -``` - -## Bucket browser - -Buckets could be browsed from the browser by adding `?browser=true` to the bucket URL. - -For example the content of the 'testbucket' could be checked from the browser using the URL http://localhost:9878/testbucket?browser=true - - -## Implemented REST endpoints - -Operations on S3Gateway service: - -Endpoint | Status | -------------|-------------| -GET service | implemented | - -Operations on Bucket: - -Endpoint | Status | Notes -------------------------------------|-------------|--------------- -GET Bucket (List Objects) Version 2 | implemented | -HEAD Bucket | implemented | -DELETE Bucket | implemented | -PUT Bucket (Create bucket) | implemented | -Delete Multiple Objects (POST) | implemented | - -Operation on Objects: - -Endpoint | Status | Notes -------------------------------------|-----------------|--------------- -PUT Object | implemented | -GET Object | implemented | -Multipart Upload | implemented | Except the listing of the current MultiPartUploads. -DELETE Object | implemented | -HEAD Object | implemented | - - -## Security - -If security is not enabled, you can *use* **any** AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY - -If security is enabled, you can get the key and the secret with the `ozone s3 getsecret` command (*kerberos based authentication is required). 
- -```bash -/etc/security/keytabs/testuser.keytab testuser/scm@EXAMPLE.COM -ozone s3 getsecret -awsAccessKey=testuser/scm@EXAMPLE.COM -awsSecret=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999 - -``` - -Now, you can use the key and the secret to access the S3 endpoint: - -```bash -export AWS_ACCESS_KEY_ID=testuser/scm@EXAMPLE.COM -export AWS_SECRET_ACCESS_KEY=c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999 -aws s3api --endpoint http://localhost:9878 create-bucket --bucket bucket1 -``` - - -## S3 bucket name mapping to Ozone buckets - -**Note**: Ozone has a notion for 'volumes' which is missing from the S3 Rest endpoint. Under the hood S3 bucket names are mapped to Ozone 'volume/bucket' locations (depending on the given authentication information). - -To show the storage location of a S3 bucket, use the `ozone s3 path ` command. - -```bash -aws s3api --endpoint-url http://localhost:9878 create-bucket --bucket=bucket1 - -ozone s3 path bucket1 -Volume name for S3Bucket is : s3thisisakey -Ozone FileSystem Uri is : o3fs://bucket1.s3thisisakey -``` - -## Clients - -### AWS Cli - -`aws` CLI could be used by specifying the custom REST endpoint. - -```bash -aws s3api --endpoint http://localhost:9878 create-bucket --bucket buckettest -``` - -Or - -```bash -aws s3 ls --endpoint http://localhost:9878 s3://buckettest -``` - -### S3 Fuse driver (goofys) - -Goofys is a S3 FUSE driver. It could be used to mount any Ozone bucket as posix file system. - - -```bash -goofys --endpoint http://localhost:9878 bucket1 /mount/bucket1 -``` diff --git a/hadoop-hdds/docs/content/interface/_index.md b/hadoop-hdds/docs/content/interface/_index.md deleted file mode 100644 index 254864732fb..00000000000 --- a/hadoop-hdds/docs/content/interface/_index.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "Programming Interfaces" -menu: - main: - weight: 4 ---- - - -{{}} -Ozone is a multi-protocol file system. There are different protocols by which - users can access data on Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/recipe/Prometheus.md b/hadoop-hdds/docs/content/recipe/Prometheus.md deleted file mode 100644 index 310d078567b..00000000000 --- a/hadoop-hdds/docs/content/recipe/Prometheus.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -title: Monitoring with Prometheus -summary: A Simple recipe to monitor Ozone using Prometheus -linktitle: Prometheus ---- - - -[Prometheus](https://prometheus.io/) is an open-source monitoring server developed under under the [Cloud Native Computing Foundation](https://www.cncf.io/). - -Ozone supports Prometheus out of the box. The servers start a prometheus -compatible metrics endpoint where all the available hadoop metrics are published in prometheus exporter format. - -## Prerequisites - - 1. [Install the and start]({{< ref "start/RunningViaDocker.md" >}}) an Ozone cluster. - 2. [Download](https://prometheus.io/download/#prometheus) the prometheus binary. - -## Monitoring with prometheus - -* To enable the Prometheus metrics endpoint you need to add a new configuration to the `ozone-site.xml` file. - - ```xml - - hdds.prometheus.endpoint.enabled - true - -``` - -_Note_: for Docker compose based pseudo cluster put the \ -`OZONE-SITE.XML_hdds.prometheus.endpoint.enabled=true` line to the `docker-config` file. 
- -* Restart the Ozone Manager and Storage Container Manager and check the prometheus endpoints: - - * http://scm:9874/prom - - * http://ozoneManager:9876/prom - -* Create a prometheus.yaml configuration with the previous endpoints: - -```yaml -global: - scrape_interval: 15s - -scrape_configs: - - job_name: ozone - metrics_path: /prom - static_configs: - - targets: - - "scm:9876" - - "ozoneManager:9874" -``` - -* Start with prometheus from the directory where you have the prometheus.yaml file: - -```bash -prometheus -``` - -* Check the active targets in the prometheus web-ui: - -http://localhost:9090/targets - -![Prometheus target page example](prometheus.png) - - -* Check any metrics on the prometheus web ui.\ -For example: - -http://localhost:9090/graph?g0.range_input=1h&g0.expr=om_metrics_num_key_allocate&g0.tab=1 - -![Prometheus metrics page example](prometheus-key-allocate.png) - -## Note - -The ozone distribution contains a ready-to-use, dockerized environment to try out ozone and prometheus. It can be found under `compose/ozoneperf` directory. - -```bash -cd compose/ozoneperf -docker-compose up -d -``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md b/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md deleted file mode 100644 index 9f9d3478c9b..00000000000 --- a/hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Spark in Kubernetes with OzoneFS -linktitle: Spark -summary: How to use Apache Spark with Ozone on K8s? ---- - - -This recipe shows how Ozone object store can be used from Spark using: - - - OzoneFS (Hadoop compatible file system) - - Hadoop 2.7 (included in the Spark distribution) - - Kubernetes Spark scheduler - - Local spark client - - -## Requirements - -Download latest Spark and Ozone distribution and extract them. This method is -tested with the `spark-2.4.0-bin-hadoop2.7` distribution. - -You also need the following: - - * A container repository to push and pull the spark+ozone images. (In this recipe we will use the dockerhub) - * A repo/name for the custom containers (in this recipe _myrepo/ozone-spark_) - * A dedicated namespace in kubernetes (we use _yournamespace_ in this recipe) - -## Create the docker image for drivers - -### Create the base Spark driver/executor image - -First of all create a docker image with the Spark image creator. -Execute the following from the Spark distribution - -```bash -./bin/docker-image-tool.sh -r myrepo -t 2.4.0 build -``` - -_Note_: if you use Minikube add the `-m` flag to use the docker daemon of the Minikube image: - -```bash -./bin/docker-image-tool.sh -m -r myrepo -t 2.4.0 build -``` - -`./bin/docker-image-tool.sh` is an official Spark tool to create container images and this step will create multiple Spark container images with the name _myrepo/spark_. The first container will be used as a base container in the following steps. - -### Customize the docker image - -Create a new directory for customizing the created docker image. - -Copy the `ozone-site.xml` from the cluster: - -```bash -kubectl cp om-0:/opt/hadoop/etc/hadoop/ozone-site.xml . -``` - -And create a custom `core-site.xml`. - -```xml - - - fs.o3fs.impl - org.apache.hadoop.fs.ozone.BasicOzoneFileSystem - - - fs.AbstractFileSystem.o3fs.impl - org.apache.hadoop.fs.ozone.OzFs - - -``` - -_Note_: You may also use `org.apache.hadoop.fs.ozone.OzoneFileSystem` without the `Basic` prefix. 
The `Basic` version doesn't support FS statistics and encryption zones but can work together with older hadoop versions. - -Copy the `ozonefs.jar` file from an ozone distribution (__use the legacy version!__) - -``` -kubectl cp om-0:/opt/hadoop/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar . -``` - - -Create a new Dockerfile and build the image: -``` -FROM myrepo/spark:2.4.0 -ADD core-site.xml /opt/hadoop/conf/core-site.xml -ADD ozone-site.xml /opt/hadoop/conf/ozone-site.xml -ENV HADOOP_CONF_DIR=/opt/hadoop/conf -ENV SPARK_EXTRA_CLASSPATH=/opt/hadoop/conf -ADD hadoop-ozone-filesystem-lib-legacy-0.4.0-SNAPSHOT.jar /opt/hadoop-ozone-filesystem-lib-legacy.jar -``` - -```bash -docker build -t myrepo/spark-ozone -``` - -For remote kubernetes cluster you may need to push it: - -```bash -docker push myrepo/spark-ozone -``` - -## Create a bucket and identify the ozonefs path - -Download any text file and put it to the `/tmp/alice.txt` first. - -```bash -kubectl port-forward s3g-0 9878:9878 -aws s3api --endpoint http://localhost:9878 create-bucket --bucket=test -aws s3api --endpoint http://localhost:9878 put-object --bucket test --key alice.txt --body /tmp/alice.txt -kubectl exec -it scm-0 ozone s3 path test -``` - -The output of the last command is something like this: - -``` -Volume name for S3Bucket is : s3asdlkjqiskjdsks -Ozone FileSystem Uri is : o3fs://test.s3asdlkjqiskjdsks -``` - -Write down the ozone filesystem uri as it should be used with the spark-submit command. - -## Create service account to use - -```bash -kubectl create serviceaccount spark -n yournamespace -kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=yournamespace:spark --namespace=yournamespace -``` -## Execute the job - -Execute the following spark-submit command, but change at least the following values: - - * the kubernetes master url (you can check your _~/.kube/config_ to find the actual value) - * the kubernetes namespace (_yournamespace_ in this example) - * serviceAccountName (you can use the _spark_ value if you followed the previous steps) - * container.image (in this example this is _myrepo/spark-ozone_. This is pushed to the registry in the previous steps) - * location of the input file (o3fs://...), use the string which is identified earlier with the \ - `ozone s3 path ` command - -```bash -bin/spark-submit \ - --master k8s://https://kubernetes:6443 \ - --deploy-mode cluster \ - --name spark-word-count \ - --class org.apache.spark.examples.JavaWordCount \ - --conf spark.executor.instances=1 \ - --conf spark.kubernetes.namespace=yournamespace \ - --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \ - --conf spark.kubernetes.container.image=myrepo/spark-ozone \ - --conf spark.kubernetes.container.image.pullPolicy=Always \ - --jars /opt/hadoop-ozone-filesystem-lib-legacy.jar \ - local:///opt/spark/examples/jars/spark-examples_2.11-2.4.0.jar \ - o3fs://bucket.volume/alice.txt -``` - -Check the available `spark-word-count-...` pods with `kubectl get pod` - -Check the output of the calculation with \ -`kubectl logs spark-word-count-1549973913699-driver` - -You should see the output of the wordcount job. For example: - -``` -... -name: 8 -William: 3 -this,': 1 -SOUP!': 1 -`Silence: 1 -`Mine: 1 -ordered.: 1 -considering: 3 -muttering: 3 -candle: 2 -... 
-``` diff --git a/hadoop-hdds/docs/content/recipe/_index.md b/hadoop-hdds/docs/content/recipe/_index.md deleted file mode 100644 index 47053ab6fbb..00000000000 --- a/hadoop-hdds/docs/content/recipe/_index.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Recipes -date: "2017-10-10" -menu: main -weight: 9 - ---- - - - -{{}} - Standard how-to documents which describe how to use Ozone with other Software. - For example, how to use Ozone with Apache Spark. -{{}} diff --git a/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png b/hadoop-hdds/docs/content/recipe/prometheus-key-allocate.png deleted file mode 100644 index c934fc09d3c2a74c034e8205b6ec8ed4a1b16adf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51155 zcmcG$1yojR)Ge&X01L4|S_A_@Fz6OZQCd0_6_5t$G*DCo1XQGz4gu)~Q4s;@l16Ee z?!I$-zHf|w+`q>6-Fx3L&H-QG-FrX#S!>NT=Un@lf}GU8-PF6cY}v9;Mp|5H%a(1) z_)nDdAG{+^N85xi+wO_UsF0G9cKnj>#XqTRB-Cw`EevezZ(85kVrXt*cIWK9Th@2( znBOz9u$kOeBD`hGu`M#4}PH-I?dxSX9fN zZL?BIj)^c%%D$eYpJdyV`1AKa+ZS8Tkf|v6u{?ZwlvRveeBq==|6=*N>r4eD`PNHG zjzSEsbAHa&6M2)J(Z0pjyo*l^Oii!iw6<)yEu*PPz3$rj_a9UA$4Iu}yZdZnWIKs> zG(2ec5?|9v`Hm7_Zy&HeM|}N$y#EIA^`*r3QsV0|ua}Ru;;SeHQ!Ce){OYug((p%?S66k*jd)H^KUW6<_)X26`;^2~}5CR<^eC;O|*jWEyyJg4Eu2 z8*9sCWRWp3Cpb82hi08)zwrz=C$+x2^yuoy9@ zmzS55GZQbZ(LbaoPQ)-?e)9J1+wWCXfq{W>F)=dTfj*(PFJ8PT9Ko@!*Xa4VrN!>+ z(*q&gX8z`(BJOUIlHbNVa`bXqAMV&=-c!8ZTly%R0~gX;oqyBS^3I(*a&kV;pFa-{ zZtw8jIi8W3$#(WEF3dIL(%RZuQ*(1cQnXxsQ&XsrGk0K>H(owIK3-mp&CUBrNtZ-R zK3xswlu=V##B!XH3;kYMIWsp$E9h`;zKKRYJu>n)rJhUaZM%Vj(^$E}qKRd9%UidY z7{zf=@(cD8ooj0@e$q`?_V)Jn_0`4JmKJJi>imL&dwJSv>e>nlm?W@s6g*e)=+l z46IFUgvjLBSW-;P{AhDhiudKH`uv=aJ~Tp3HZn3YHa48|PX-4Dgj|*^y9*ud?ChRB zdp4Prm{Q-+(9qo6Rpe~1p+UvyQQo)f@#jmAc6R04dfD_cmz3KR;UR55aDzRN_24s{ zRQ$Dj_hw^bW22*)u_4;qHF5hzL`r66W|o#5c0RgkVIg8W+H|YjXMMI-#D8ODWhE^w z?YN)O($p_EH#aUWF8OdlKDRZ861Vlxu&|`W#EGr~DbnTs0Qt4`byil^zTdw|8MCvq z8{_1CeSB)FstR0JvlUKfrl<2;{T6q0YZxe5G_MXc8Y~1AvX5g_wPniy`^jvW7r~H))zW%nwxibcCHQAGuhkQ3kVb>sikI5 z+%hyA8XZlHj-HztsKPr+6>f->{S3mHIXM=lrXNyMTkGnM3fO&k@SwEEeRF(Z;EuU@ zjegq9>@0^~F&m$)&?RMMWj#Ij&5d<`3oZ}x2cqN$4qQt1`k0w%V`KB-!w1gu=kx5w z+aHna-?^XO)XXgT!v}3;<$$+urSA;n-5Z*55M*Lv!fN6gyeK)Y2s?lK_U+KgtG$-I z%gf7#HK8N*CrHHJza)=LOuRC1LrfZ%e(BOy#@p?gcl`rgetq7dqto^NvZr;PHnsP+ zp;_z%z3bPrY(|vCzDG)U#ZQ{j)6)ysPegwU;=HS5Wo3nndj7mJUNLI#-o3#gAq*mJ zw1*B|rkm?Yt*(8K0A~7W${kYO($sXAbh}!ruIs{|w$(F7kVadylD*zFju;yoGfDaM zTj%ld@il!o$0#l%!@|$Mf<+#x2}8ER4{_CRU%mPxOu(Luj-UDP?z3!c-MlKRO-)U?+UDBY zoyb6qjg7UTd;wu$>*g&U>KYnCt}80c2M7B5)wQ%5N6z*XBaX>vxY@Y5xe@B8P8n9d zV!>8#QbxX^JN>6E}T z)HF0WxO4w^KVjFE+}zx`Vb)Q$Z<{CXS`6JgebdY=EGP)~q-Lx&6(@ZlDG{ls2dhDGhP6ZLr)zyXMo$yIhRrO@h6Knux z4{3R_>uPF<>O*3j2z^dYRDtT-7fvLdaTM&1i*Rd4xFsegw#2BasPqpGD*2k2m{cL; z1z(Bl=!7Ypwz0NOOi9@oig1swTlf~Ru<+{j>wWw7)rJenl2+8zY|J&vW6d1}!)+KS zDFcy`M(OQ#?b_wKGMAQ<6L+!@xd|oh%m~ZDXX3~Gf`fxakHyEw8=0CGI!x=Q>J~Z8 zoj_XU=H!&-CfP^L{4CMY(XokcV%TOS%cM2l9Ep9XHX_ddj+oeI)Gk~>g1oe`@tG{P z{+47dGBUC!Po6XxvT$?1S(%ECiP6y1To|rm^avGpMM-J@_RZku$D3DAW_bm{;nT8bG;)+~R z?nBoRd0t4UsIT>!op)$xr+trW>!9+u3)zYI6X!i%#>qzv4h?m*wPIFW9c__^n&FaKi%wTd@bRme%*~Wn^Sv ziJhHXKgPtwNHa%1oJ9UgOiCL6^GCD)hVie`PY<>wT-*7GkuSji^|l5jW##JXYV4xm zh=|Rl>AvvdmnCUwe$@uD+zf&aqIXnDNJvKSiIFCxrUu>~DEDPRlsPy!7$M)7m}IE4 zRRn8RymCjPxN|2aHuhxm@#W~|*47O54~URgD5pPuTs;}ETyZKQH$Pu4=)xUjBmWX_ zaxq+zl9JNO;$$jLGa(@%E=~=Jp|ljKNn~^F9t}+hR?)zac;!r7E;4CQT%z+pp2eoV zeLLEw3j0q^$Os|A=z8$Vl`GiF>rao7^^ETpKk5nAK*2y&lA$I}@~P?8d|Bh5UO% za$_+bsRjCfY?S@Q;fYQQN!oveGP# zh;!HXZ{MDL`SrED{E(Q3*e?{_*_oM#j~wWq;`5E=K*d+_FTy3ioKB1&{al5UpElMG)T8i{T 
zVGY2opXwZ)`~o3p$ht;;t&F+yA)s&62ztD7*Mp8g5|yGRG|N`I^DgKDF!gq1fLrlq zOIBPV8_)eMelVW2Y2;a3JSf0FqF%)J;-7X(c3texj(hr+3sBrPg_%&G_kRBi^Uf|U diff --git a/hadoop-hdds/docs/content/recipe/prometheus.png b/hadoop-hdds/docs/content/recipe/prometheus.png deleted file mode 100644 index 12bbe55f5899768b33e83af89d9b481f7d1516ec..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38962 zcmce;WmFtnv@RMafk1*q@Ze5xha_}x3Bi2_cXy3KLjnYMNN{)81PLy|o!~B^alNzm zx#zz7#yDfV`_35e)sNkwcXidOwWfS?uJ0@VSMn0rm_(RYu3W*El0>||a^)K8%9X2c zZeN3M?4J~?z{hom7gEZ%Z{MDnRrmvcK6ZSm?xGz=dbffqg(UZ`=oJSOtn3RmS z`w?GH8Qtq#Z$5t>K>5w~>gs)W?1U>%K5@Mec(*sbUVa$-&Q1;k@7g-*{OIS{SP7>I zZCC=`a(v9}=;-Ku7|vVmS1YUN@n3O#Oic_QU&$qT;DbW+t0jCq^QIVskGE&fim$@w z@jJuo@UeTn{T6&IV1#1BN87#s*FW^r_oERHCfl5y{aGI_Ni8QQ_wwaSMMcGXWI`)F z)lQMVzT#F^c?ilU$r>fb+&ny6({(i&OgG_X!rw})c>ezV`}^OcsHi9=CZ^7PKbFrf zxu(XWLN%XrV`D?);>dtCYGQy8#{4#M#4Zkdes{66tgLK9hJ_{Xp6HqF&Yz!lc6L)r zMLitc+$yT7TVn!_ybm8n-NC2TuC#r^!4aCk@4jb>sWtr3S5a1ez@}Sk)EU{|-_M$5Vv?DaRrmK7 zv9q&tM@Prv;^N`flz|Ux!dQDyP!K;qe{q9c!DkV#le^?1p53wB#hfO+i3wl7-X|lQ z9?n?mioqu%BO@oDIVZevg~ENyjwd1_A}cG4h=@o)KtM<9iD~=TSXF6hsX~$nTk%(a ze7d!@HD{HGMF0Bwi>??>Ia%3!?W$s%nR;Sg>qo@IaN0R{V5(BOEISWRz31_62$e)= zXy~G8B92F2<6kn!^49Y5*mt!>vc5iH#Rng=bs7} zgvk_i-pI_$D=RE~#AW)`-``)EvAesw&TjtJX~jmaM#(M;^*JEm#BbrwV~d?p zSXfwoIHcwl7EA<~VWFX{diBf?KF93R($apwxW~iIy=co%U9Bvt5^~P{! z)Yf`lUYsLRv$F*(N1wuN2?--$`cDtH{rvnmKX_CVCiPYn74bW*4Su|XfA{X)fX|= z0!T?o>Ez?pb#=W_OLDAH>$9_edjz*h1RUu(Im1|NjE$WaI|%&|9F8k}u&Bd>gQ;n0 zq#~Z9)6?ER-d=@A#l^)%7@C0E9q2V_F3!8FnAfvZtyS@h2rZt!xp7S$;5)7J3 zA}Bm6ic^M%i;INUS{shnuS0Wk)^}!`*4Nfx@yGR){U!vBItais+S=M;V~LoUn0$PE z$bwytcNdqZ-z6--*dKf*7jk`S+D8GiP*_+PX*4-C<>loCPlJht)zsJ+oy+5KumKb7 z;p)o8!!tZGvc9?+NGdoyIeE4*R?_5)?O<;|_tzEdXSp}Y#6`!YZ)Aj*lXIfT03%Mx zAH2j2JVVwdyriV$8AWDpZfQwLjpOQTPtV$hhK8J+oa$l3FXHZHfASBeOvVSQi zW-yqkxU{sSU&Kd4v)K3vt4mtXE04@Ccecq7b?hmUR8UYbGdM`_;K2b*Z(=s%m9`?2 zt-#l>UiJO{{g_?xMd<@v*VABw^RTQA1*6vK3(l3BPGQkDk{p%%*@2JKG*U&EiDavRJI+Q zSye^l{Ni*QtOE}4PkJ^s2rPWKi_6QLCX3)Pjg2py+rbH|ZD$+vv??ynPzoWEgJ9P< z4~Im+D)I5dAXI$x^J{HsArxj9iGLmjG^ zBJN~y4uO=In0PFV#II@AzDk=_&G~B}tf?fmfc-*SlixiI9Fmo_HFsD|J)d)KMn=Y` zPhZK%d}dghowbAe$;I)&gZX{@SXk3IFn~HcM!`LY`uj0&-(DH+W?*A$@VUHLfC-k1 zU07Ys%E(aD(@RQ7kVC*|3|u&ZTe0rkVW6iUEw`FT6!sV#9E>K*2_)vl`wgBQ8y!75 z%;<|vJUlvjbh_O@&S(2$e0;pKGe@gpMZ@kgC}F0@uFc-b31aLdtQUkgI)%h9^_Evw za1yLa-dqfat7vO`ZcXwZ7`eIepQ`0TgsiSU`GoZ_Ha0eze}8`;j1Qt22N#zirKE<& z;|b+g8X8iFVxJ!>YHDEd_#yT6^@)kPuCA5I$&`WuPlSc1CML9$luE!-jZ~Nz8H2ySux~x&%4-`SR3w``a@O)ztzJM)L{YUTQ=%{dZ9>Q|h{xK#dW)!QAegN7T1)KufFahe9JGRpsU7jf`M=kH3rgeSGnz zlLL(Hv7obMNem?S1aid-&+zc@`lGqesx#)xe1`0#A&BshkdTCgsSJfAi;;XMi%(bo zkY|>((~5{B9{WZ}r(t7bA0Hn>L@!Q}HN3nrGCWM1Y#f#IbeS4#dC5{)Pp`MWg;~Nm zNSV=yHo9TW6M~?ZSDcEJj7-iD-)ki$s?*o>1ROq-qOHm5Fl`T6wIA_Vj;2b5!T3~F zRqgHV_4V~#f=le2og*cwWy9OSqVn^dU0iG;Eu*&*-(APxU;|I6p&Pp((f|vAN5p!_ z+SSt|FD13Q^;wmOm>43>DF+N#>Iuwfm)#2l*plUml#~=ZFE2#tLNJ@}-=74x8nmVt zcH8S0RTdY&2E&hOA~SO}6(MM)xrm30P>iW7MxG%)LlFcXtIQTb;sG%AE(E~X^D%A!*jsV$`QC$r2;=65cqHwXp zv?of=I5}bdD@_f)w6yfz{(enK@s*u>`|)aC70t%D<_6K!_-vyMs)8|hq7|m9opOw# zJhcqWCNy^)n5E0tm#6DIAO_&z-~eWdjEp=uH~vEju%_()F+Y5{+L| zQAvrCqGE`!2|Yc%c9lI13rk=^6Hi1DelaBba?3G&6ZFCBy7LcYEifQwJp?2HKzk4_ zdwP56lGoPEX>SM$2o$%Amuock_V#vmGCg?$j&{B_l(XG%DRLUQVb`)~KPKSfy5aPM zmzNikWE`&z9ymuwTbnBcCWyaa8A?h@WBL%&IZb+Ub8}@C6cl7+IC*%=D=KWtGmRy1 zv``RTAhZa83yzJ+ML(T|r-Vdkqk%tWi?Y|p#+6FzH%>7bomp`X5{ukWkWf_oOLO-J z##^6n)>S9rW{iCU0|u>FoNTMxa)JqZ1h_?{vIvOD0C7%on-~SP!@*-od4J5>Dyyi( z99)5vpax%lvbD8kY-}vsAm!3GGz74^-t{jfaSPzbblI4>jSUh~ z((VULEG$ECFE$Qv>WH*aNRZOi1$F_0P|@27}|ZdI^DES63G@ 
zT)o%nD0$5L_b;@33J$ktE;}Nax^6+F>d<{`ZD`o+=#0+sW@b+HgsE76U4_`#3Lsb) zJx^+#hY)V=0w2&bV2>Xk&PFW#y^dE1TNC;(%Uc|2<~v zy)f>PO_~>f&zSwy*f_V?9DCrj#K*#iga;;bF`h*UBy4 z_AVbn_DN2j)94k4dIM(1>-0cHNy%V|o{>>L`stg=O1mzp_s1m#oQD)!wu;=`+_!Gs zvP^&@#$w#R4KhL9;k1L3laX#fi~bVzU>-J4PfriT3@Aog6w)#>mY0@zYKr6#2t*Rh z*uiE_Ru;eQpJ!kX&OgF()6;1Ii-KEWV+Z0=iATr9S(uwwS5-mmD;qZ(`XnFE_dO>^ z&|;XGjg3u0q7{NcPb_z5Pfz|3zLuES2Pky{0s;~frvbDgk)#BR5T{mFGBYxGjk}`f z=H|$%*8h3&;sxL&NLZq9UknVti}O>3I35;J(WE<-8}insrmrBt^71aDj=>n<=z7vo zD$mcyXKHG?ungaN+SqJ^lLk$Hm-QK|gX4A>h5#SyC_r|s7-rn97iP}2a+IaZ*ae+L60Rcp!-nH-E zT@|DQ$Og_jJv|Lz6dq~f*!H!Uz{Do^4+;}2kni`U$~_tC3hh64G;Hp zbO^u_R18EWh{Y&h{)X9TEwiM8pkgzDaq&W z+)HC`h$oho8(*J@rM@;(Rvv_@^z!tCT!W8?H+of}i!55iOTe$m?P%xw2H&}whDlHp zWNrA7sAvP&L7m4T4>2Cbl|CB2jg}#+H;Kq9@ADJqUt^w7!9fb>_I)r8wtRg2n21Q4 zIy62$9%33t#rd}fy?Ld@#rj*$O+m`h0#4bnvEzV^qN5;f!^A>0gHQ%cvp&pHE~xkN zmwG6&vfmInF(nj7J zZw*n^V#QbAMx|)|LN_F??A2lCDfV{lAS`FX-&=W8%c^3&t_Wg$sSeTgZ)s*mUpEgGOul{}xA)(r#ii_>b zi)~1wh=tkNBDjNU=ZF@;Yj57jU6qepw26E5SWX65DOma}JPXJBJ9RSwI5UI89K!vRYCBrdbi4mYyz-0Bh)q7xp+fJD9NPDg{#oxKmbI zik`#%eyEI)E06-oea`+u?tmzt_(lyHThHhm4T_=tLx69=N77@G|-IC_Mj#W`Fz{ufqgrwzapTTa=QL zZJe75uu?0pc!MaW8+=4L*^N5Fp@%pIs2tiH7Q?x3$mQA?BO?Pa6*@9yn&M(&DS%VE9HE@cF@hz5T3!nvdT(%Gpiz1hn_hnA zXzgGB)_Jev#i%^ilJxW?sO!7-mXmy@*Y1N&fXqoYsjO zHLPXRoh0frl&zBFb)aGL;5Lk*H?_t+<;e4*SM+l74LiU$;}`S<<(fa>C*|zEE#+>N zRaHd>+Ob82g*$tDUxI?Hc|>Vx(~F8ikl;ix@`-FMm>FedWw7V}=%SrKOX=sY`(vY{ zPx0@Oi=08|fO^RU3T&+(V1@u2*KPUX);pfM2M#Ha)up7`AqXkGeywj{Kt)LzeGHAl zq22Bie|6{+bbZXpu@KBzAIMPXG;H#YMAie0au(@1_yz{$L_4Ogsp;olsWcZpZB~LqrMX6(3lYAh}7|kj8Mo&jl(;LFAa4n>)fS2FlIE#CnrnGP2;2 z0zFY5pG&B_Ip#y4%l@s?$)>oN)kb5W^@V128~im^f8>L^$-t_!(~gm%2S&j zyta4km!ZSa&a6s~Y7ra!niTxx-(zEQ6Pj1Yo)_(m)$#?g_BxCKG^8#+J$Cr5HM1{QN*bK}TwJ>M`a zF0RITQ`307rNwBh*eGurrxIE?$C16$FVmq5nxNm((!yd=EUXQ|u(#qs9?%drpky4J zNa-{N1_nqhU|dEgjnIV0BP67`f4jO2y)ksjHRUvPn5C2v&6LK?#l>SaJ~*s|1NBy- z&GAO}Cuoh2GtdgEN4Eq?Lrs*Qoo&865AEXH_wMeW8hT%+`W5&Y;uIL8f$kCszSlctoBU^ zA3l^27cY-zyMH^{V?~NO)LH!g4B*EJn+Bc2e||#W!VslY(rVZd3n3P8vPpL=AraBI z`BlKM_wL;@u7-@7w+Q$9fm+QsY{Ch)VseK5FFbIl*Vyp6<@nsnN}H}PB+#|Uhk-I_ z{eXHah@zq8e(n18m{lmpGBPqMm@5X-^A5gi4smT)im_vn7x ztz*bTvab3U34C^x_^Ys+0XejbN1GiYziO^}NL-u@x{E93hvPLY2L|)@z-i?69J|4< zjmfI8$KoXr+2Ly7jtXjO6}h?03=IC8lZt9;Ciy>EnV39=RltN7y@)lqc=)6q>mWSHB3%`w+ zh$xzdS*JSOtCS9A*D=D{v$_%5o)4JSe@u4Ka-t8E4U?*JJikMFN(vR~M%u@ZAD7iT zzdYuIzBsSVG&2F_rx&x(XBew+6`F=z8W|q$IM;k9bd}WL`1I6fs(K$9M{av7xVVG_ z1YE>Dxyi}9s~Jh?kqqs#YL$P9F}@{Tf_8Pv2x(sH$Ii8g0}?R_36u6v8ih_2Q5v_eD$LD48v>m#=N{0&~Mj; z4Kheu;3u#s&CW5nK>X4vHoSA%4`FU}R9#Lk5A68KlXqU% z(w60FbeB)C#PZ#}r@#@$&-~ee%2L;q7kj%kv713=B zFHsBik^H}TSC`&gJ{+}Nntk~}QnwDaw8R_-=}&_g%QYbf0FWRo@dcNgXsb7i4SF@dAD!7EJC^T2v-^6&MTJqO*oOZSJYXU8|HAJwnh z;74c(JpYF^M3Rk;FrPn$I#SYKGN$$Xgr$N)EoEH%_d(~X-?lM+2xSzfP4s-UcRD6X zKo_%?14mYBbqF$Eu52mT){%W+&X&(ExZ@H(_SLK3Kk^qVG_L3F4b{7Kb~n9Vb~b0n zl|z+yyL{zNY88GMb=cHS?S;Lll!VbWYJci2PlKDq;J|jlWV>YJ5{k-pH*SEZZp+HI zEO|V?=qfBC!yKYp9mPL95t6ebU20GnZElvm^Xtsk>G#%3$QG>)A2A0C<8VtwcVh3t z#wFRYSg>oC$`>8&3n7$(19_9 zJhD30f)h1>navw7BgK|EjTbpgPcT7jwo@5@pS+V?p0-7V(MMhTv^&JrL6}>?LWhuM zE0OpfZ-PQMOMyaHmJo52i9sf74Q7ZsBMqKZs;tFVgtQ*%7nzQ{yE4p`af)-M$x?`A zebhCbBd}Fq=mbE5*~9zps-vm5so22_h9x+(Fm_v-{5W1;Es$%r=Q&NV9beh5v z%1s+fxGb})Sj>e?;ePZOv4!U0YZvx`4(4353~819#t!${uvLNY9KkY(H~u%l+G}FX zgc-B;iwE?4Xpe95qT{9JTBG57+dSjs7+0KI){oiRS?j#KUj>;~rk5YsGOM3wCO0oK z+D0GX`9?ApRO>d&er|p}w!1Me;i8;5}CdBsiHsNx=tX+XY+onoDxHOLKuK!M7|VHr0;nCu&LH9v55u4G9J>!#u-7)#i@tBqD zWG3JOeT#FHD}F58SCpM^cNuX+I~(SmR65yT+4UYGM~&c^Vbt)HEW-tBv%%rM-&k6i zv3vIRk*0(wvqAm~cUtTh?IFlkEO7yqVQ!i;AE48r5 
zx9IToSO(gH@n87DHE#eySuz2(&-RfUj@kPBrFlD&XGci&d zs&!4^j!_WkA$t!C2ai{$yfI07K1_CyD~PHKRaa!=sU3Wf*b>nds^-=6_bA#th`#lG z;p77)m6i8%qr-zfh^1!+cB%abCyKePUiqnJKNSDiLt1EBy&&$Fl46XIrhe^TnU$^H zn|Ex0uj`^E^5}l#*WO9NjIfVGX?DcZtM!ac2%{U z+VqD<hrom!~5;JEBbWIVFb+{NP;W8{<+TZ20J2Dy^+Z35d`!6ij#XSXx1BL7p_<{$)lptiB}I~wrNF8>`qj=BTLq*JLs7yw z6?ueLo}>SY{)?Y}6O`8J1ZqQAFY(ND-Oi_Xa$~ftN4SvHWlL0(fkP35_oltWRbNfz zF~QP&y|!`c}4x7Kx*Xnx|Jbmbu|fBGL|w#Tkf6ueZF**18WwjBQF$}bb1 z=A=gUD%o=UCJ0E&QJ9R9_Rskdvu3g1!nZt~9*fv3tUT};;jAg!^(1pq*2DJ?=Sfi5 z{EsKXI9A}1pd^E=a6;7K#o~m_RHUwo(3C|C-!ivIKk%}6dz^j$o*)%AWdO|4z)>nT zEl}|N&jXdQj@Ubnp3d$F@bNVfX3YPs zqDD3_viE`4GJ+uKqx?M?QtWztrw=PDQya%rBok+Mz{H->g<*3>Mta34s)s2?KZyL= zU^M*9N=4Ts(w2kvu%-_Yg6ODa&a~tsit%E9;?s2ZU0hYUWqXO8+vA7km6#;{%HtH_ z@t-?;S@TY3NNkp-Y96OyxxD1{r?!;iNmQtcQ!IW{{Whmhy0V?A08XN!eAgW&Sf7a^ z6eZE8(-NnTq+L?R3yH)GevA|Y?}#=`5kolpt~oc67>b4}syBTpELuyOj_J0U`6(C- z*_dPCQv`N{Ubkd>f#zX89eyng;vQ21aeE7-r=~8TcUq=&HbdyIhFmwK@E{< zqYmFVwtDF9yxr=vvl%SPB%vKP&DvSQaX8Yi{alju05|R7@Y$)P5H2ig&h)`QmvIzN z4Mf_O?feeepCorB$6k`j5J;tBl3@NV(q{i1_;~#F6vfF|uZNS5%|RP|ZVdbA(ytM* zURI9}X1}YFlw!iQq!4nH?EzwcP896`SPPB->`<2;9{)i7n#_&12*qUEND( zUn}vic~Sk+*~I>)seX6Df`~rOEz~mRU(3)~(?>SHc1HB9)YFn~{lEeAK)P+_?0x)| z>hrCtSTo$;-Qjbv5SCZ9#IPx)y9@GjAJ!kOp<|VLs})a8z6oB)H9zk-?zfMFIi?3T zHnDk2q|I%pADR#hj}m2LR1vX%;>5J57Y_X)>vJ9%_o3((?q1$Tlk@r4ZxUwfCkAjf`o9 z^6$%}vKnj3y2LRTAx;*5R<8INAr{Q?i2EiAn@C5WP_bO>{$&l&4oa-<=l$vReq@Fs zne)(I!cE9u?tuc+Wo2slP$3OdTldQO&vHC}W$KVz;r%~ocbLc$25Q$(}KM&Q)3HlU;1agzBG7uZU$a{X<-j58;~DxnyWMgY*d zbKX5hqd;WpMkx@5)GzKZ3=0HQ<9zh_V4(g<`swO(c^VW47sqM;`3vLf+H!y7YpiVQ z*XCp7Dyjp`xv{+#X-?)dc@m#L+C7I)$SlB%1s&{+@T_82>fmU2J~!YYjOi_cz#D&o)w$6kD_WzA%3dU^Kz8u8TWScJv&YK7%?-KfN(>WTS;=z+UW=Vs?2mM7^R6{mC^ z>0Zv-C&)@$sZl5VL}%_r5uL>s+j1j2;d6_p4g`dJIT;6}%VtCo8azD%mV;|d`u=+# zu4vII<+ak3buUe>UR)i4dhXuH^Wv;r4|_)LKIO;+n{UU43pPUo*Yy1zkLyP#ciky> z?fNbe#{WngC45CX4n`0dC1ce5$Zj|+%%r~@l5NCE2)U>;l=eo&)gYVut7${3?NQxX z*3h|WcsYQDyV-EAmV5lheCDFuN7&uJDlE`1BHz-@KfGS(hq6uZdBGmf!NGhf*6aeO+@!mG#Fqp>N6v8B7y68Ptk?t?$npplpG^U=hgv!mMH)H*w-p`g|+EY)FH*Q z>6zKbc#k%@-e-zn1108m?Z*B@0l!#6fh3|agDObl;T&ot(ft`hh}EQ9@cfJfd$gh% z&+LWsO3AtqyVI;Ikj_aS+nv$f=IMk~dg_NJ_ zoN`&m#f9>ExM6v0Bj&pGgK-3PsAO=b_CHR0t9{tUcM@hz&r$_9Mr~(&cy~cO_XM`5=xG|V>!mEMzR`M&jrgxjHsJ>QOUitz zFdBY)V1V6TidW=cs7URmt~_8qyV6zsK7ekP_DRiqDIT0(v6fd4NK5e;7;WXf z$bLK#a9sWyoI$?FGh|*idLA!LWhH_?#k{9{Iij0|z*#Q0vMXJRLverwx;fMLT91XlU=5%_dSOP{mQ&2v?`hdw zJp){>-&$5Zs_+=jZ8;cnDt}E&a#GeUczTN9J~sa=;TG*&oPSY=9V1w?(U4%2*>YGq zEKHW*z{;A0Oh4ARxwkd6OLZWwu>0IvLs*b(C1c6+%AEu#6xrQF`C)FC~`^II5K z%U={NjAUSuPaPIag(_)EKvcwW2vHV9;H{pNv_tTsBalgbeX?ZhnbTrUgh;w{vze8o ztH)DgjK~*?Y-=87I>eeGHtfF9irjlGNEK+jDV2LkBBw1deEjE{=*^owWvR(2A!7(f z&a{<|Ai+B?Xwxbpmg6wT-@5Z) z3g17i>;7ZkgzqO^S0+?N8+mTiEUI#}FDXi~Oe`p=uo=F`T@U+?qd@*}UrlXrtWwAs zsi11d61Mh8eEdu`EU+%b_jAnfd3;Cgt>4L0zY)#z_MRkrHfiM?ABSHka97k`#AS3# zA_)FiwgolNChs+xQ`&P4TQ2*4%q?twZ>%E;K(Ay#=Aij0K9n^kmQ_<5C3nUM-(L#0 z$`;(UqCqx`Fq&8!E4`koblk2p-oCJB-;DZfw%H42wxUX!fr;Z+A2(Oe*lh4NFEJfc zK}zxG@T>E%?*|j2yZy?mgH|U<)8eR>ps{JFSg^Sw2&+h_SiEm1eyyd=bfH=K_0==P zg?ycWLkMi<{t%5nqa&6hoOblFpu`L`KfiOgfFlIq&Os~jP7LKcuMrJHA`QDV6hmy| z6mO+vc{0*Q)p?#c*|Cc>RA&F}zE~Q*l_yGyF24=iFZqb8{Or*E?_4M>(WUsAn4_mC zvFrYw92t*lbeUUl<3D=0_A40&}Iy^A2TP73Z znGBE&`OXUQzhTVo7_^V&e=X@YyH)q^p1^-o5w?Tg>brUEIqZ02u}UyM*ap4>^8**D zJa+`_*uV}8?AmW`ZVCf|1vUrOR8Rc=6AhRtV@(+y64ME}74NyhrozcMzREhit=H%?CbWfmjAJqC&j@J=;=F$A0l zki0D{7@C+YC+1Iy5{*L#<{K6gk<4u|4DzeV&Q1&1D+AHQKTCA9v}oex%#0`n#Xmrh zko^|JZXgaEeOw^ZeERfh->@zz3M5fcoq52}2n>vnqy{brh`{tJtaS=>>yVO^A(pT= z1ac7(5fRu5s&hlp;(|8p_Ko*IJ7U!+Zl0OBgl!$GP1uu4$A0wm5AfrGa&)-AuT$^o 
zCMSoD0&;s#Mw0j1{5(DZK`Crh!7g6n`1Jy2wAgOt8W390{7^?HCy6hQ6m@jq4oe?D z-UyK_1!V^aJ^uXp1M&+1uRLj;o=BV&EONitjg z(F;92QD9&Kix)Hka0c$}8^k=8uU@|%uX4zMZ(&c>V}H#F2pO>3)X^bLyLbOL(0|?- z8;b)+y>L<9!NI{GfCY9u? zo!bK$_i!12tIE$mRqrK8N9VWNDG1CR&>M~EgH#Tf25dGZdT!`#vc8+c`8qW|m)=ZF zIY2Rk|Du+Ands^7{|1_iCu}Rij$dnQtKX+jI>2dwOTd<*u+GdwLNc;?*wF-%6xyV& zy?aJ+-_*jwX1tVvhQ_Gv>k|Yuo_}=ci|=2XW((z%iZ_|8Ho0yI`ub`R0I8D}Bv)MK zgY@Om;F5>dYwNe>|Me=Ma(ro>2WFECh^T-vj%c2>1!hpRcMOYGIZ*XmdV3XMBa=!# zTIp!rBd`=U1A*@fY~O+Yeq<)lYy|{jk+4+=JOQAcqfjUyO(9o+x&$;-;JX-#v;isq zQ)%_Us0*83y(|KRCqPFB1_jXDf$og}(jQQDX{K9*vd&xw%=P(qy$Szx`21uH3hCUJ39;P0ZUU;BruqPHN7tQDl6_= zH$=rm4S2@byFi26AJLl_8v`003m2Co*gFD9U*A$vfO+uX-80**$tu|X_V73b{v5}3 zGw>7;Kt;vHf%ekF-6C-SNCiOVFnaSwQC0QV{9m9K!!R4ae0RG%KSZCLno98SA;>2x zoHxf+{TlzB>&yKDA0SgeAVcd_VQU_dnwq)?bUar#H+XE|;{}n6dIK4b&B8&CIR0S! zGCC&4%)kKEZ+@Ku%a8mG-Lv4v*;e=V8hs#R0%HSMEl=6mSA!Ni4;3Du*XIAy9se_m z3n@0o2t*~qI-f=!p~Ln61G%;p8caZex~(}{=i#y(b^l+=4X{ElNG#}*XS`44DDIQ+ z*?v3M*DrDfmLTkW9}izENC!qaB#R(2;c;N{fusYrvGa3t4QGH~!h^aS*u!o2Yw3_w0INyu%NGs47M3@9ciwS+g^hA+1Nhz4wo_4W3G>zDy0ZoP4_GqFX$ zZO#Li&D-ARrO>6y46s{@gQP^4l&4d}LqYLPCX(6X!TN&#cFX>s_PQJ)pW?AH zizr|iJycCYO?w?qxy*P~H0c8fVEfLo!vRV(y&7jZJw0@?0=@(=A%HyP53x^P9{6Ni zT?)03_)=4w2M70P1|K}TD7_RM9VlCW4Dk-^3J9&hhMWLqJy5yOI-D^HXHnjdLJ)7eD|!-=kfxrKs-4? z(=I_5iQs~`UtG*V3G7f%uda`Ge@skKH0K=TEj4v<+6cXx?6jk{o0z{Y@>UuicFlw`C_4fIIH#{Wg$H}15e`}5#Gxd1>V zI-dK?23*tVnF1JJeZ18>H6_7@4+!cI$#Ux{0SL(Cm5>t(GBjEp^r!djr}0Kt>6a9vT6BE|hL zB>bUnhmdmNY5)!pXzJAc+p2k*X!6Xx#~joQ3|Wg&gk^l6xz__>8ctVIg5jxPkQb*j zmmGr9)OdGpyob;U^e6|AltA>nyf|#AS$LNMN{Ry%YNFoDu_5{Z$n>3|nnfXwrVhWJ|z?*9z>`{hFqxrTVJ1;IN z;pFC?DtseWl6HRBa9LJVbm#6}5PN9Xx)w=NQzipr8%CnU{Gg_)3P_bKnx!iTn-h7e z#sJ%ZbO*x&vC`#nuTSdhRp7?}a~KIKGr&_TRlNAR48WHL#v)LW;Cg=x-!y^8Dr#y1 z9W=UxD z1n^iPwL|9W0euW)a6jJNn~;>rfqe{uEmC%U1OQv03K=C4p8;L&V0EDN#}5i-<}Y`A zz`AA|KY?{$TwDN62_(KJpuCA=aRBBkKr7HX$8Z=)>*`Jy8+8KDQEyH4XHIIu%yUX~ z>;v!gZ|m*d24es-ky21F5qSicA}6;2P`0)<-t&$zM6&+kWc`VlFQ}z;YF+KXln1M> z@2mV+v2u{7i&GZRYG&CSViOqwJ>Ou4X9q`)b=2S69ig+Ms!@nyt z1|rdB0Q&TJP7`_1Ou>r{AdwOh{=I+a_;1TYy_NNKKVM%}Wi(yR_gKahfHe@lDuG1$ z{P|Dt9#~0r9i4Ls$YvH6r>CbQLqoukTV7ZI1sbbnDZP-;WPg9?vGLi=<%I-7tK9N; zf4}~PxRBjkGg{v4&Xk0YQhD@$$IqKfOTTK-QgR?4f_gLfG3OGD9~Kg*YwaB!m^e7P zrKY&Ja`5s8_`drVj;QAmsKAOKF4ouAgQdUD8vOC&8%8;{?qjQS9HK|kt{N2jv_gS!LwGdce3`F51E{o~mC*Mf zoje1~uqhO50pwdIL_ajWHE zOWhC#5HDYn3cGVrQzOcwp-Kx5?u8-`O4AYu0KW&s6eHD{jW0@xxXlR!QQ%@z_1IIt zz>v$!o#%fA!YfRGD!g}?_u%#oL@MA%8yj*k*todPy)!c44+2uREHakkfW$EauatvK5ST&Fzz-uDZf!ZgmgsvrEJh<$7OMty)bH4}SC@(pF%gxF7w9qI(E0&J3AZ8BNL8`&3(=0^y@3bBO*S$e-Ba= ze0=;E!<2tPx-0?>=l62!6k#pK_Qovnff)} z>NDj1@K~lLDARll+2|b0asCQ`dM{U3V(UCRepiMjnHU9mV^fn0NXJ01k0y{;Q7UR_ zc>puED=H;-a&ppT+ARYLRWJjH7?&Vo(IiuRou$nDfQf+ta=OiiEUi>S`hQXXbXs$x zI$tXI;KQ87@Q2aLg?2l`3mib4VWQ`~_gez;3mExlr~*J(84wUENe$lWDiwF5Allg* z!|(3n7aV>uujm(S`8F!*EKj9RG0aN_uKG2Ztv}nGmh*CcoyA2(^#Lu180GImbh>`e z&+n0ST%h z_$KhBL2wVL2I3U54TLMde}^EgCo6wC?&?(9KBvZmH!wJg9I}ggl&|A1y!~J0QcT=1 z7Z(>oLIOV|F_G;2oX#8G*#S5W@@y_-M=0Mm3Lc|~+e*(MsRQqmi<=wNwjgf+)+;GU z#X$h>$CHospXJ)=CA@J+8{dTd%Fc`tZsWAacHmo%sXw{IEY>;0BZ@41T8Fwr)wU|@ z!`06xosy9O8+G^x9r~X><{k!rc=WUSS>E2U_gU$^sO7E1<*m@Ot;bOgXlAK*4eg~r zYG+pL${+hGc-sch?ZHMG8?RmI=_s@gaNr&a`H7B%sTr;X}}BcVuKyRMbT=}g%?16?(XWMzJD76$9CPT zOBz<~%C`!2AlXQJMgPcmXk=vmou<3Hdt6uX%0iMz?w_QFmYwZZtqSX_Z=X^8IQ2Eu zAx~0>LrNcHfX)GQnu?0Jy4|oC@YV~=Px>}Cm&1gj6$&;bX!Lhh_UQior#92IJ{=p7 zqPDgYXI}vK7~Y9d+J2v$Tohiaa_t@T+0gH=+1^&53<35rG#Vf^!HaT0#C7LJ zqoDKeMBpliwa0@B05I*z;r3+_A>!f_ObL{9DCoVwD~VQCSM92_MUoU!ev1c@+{q*T z2AUkKyLX#sjujGxii(Q^kjS^gs`}7RxYE|q5ewNA&?QKLLAg^7{UFdbfG{YjciAV# 
zEh0f5(BSUO0|-tqB_>ZFfd@2U5MH|y+}flE8U;w>@{*GD%*+daT_LzB!^@E-DhdD* zmzeYdcN$*D0JS3YkvzkI{1gbUG?ABQg?CYumwWI0xq4-9<@bE2D#7tXw^on)O@&fn zw4?%#rtm0GF1WS^Bqiyc2G-8MxcL??4G7@rqc5;pZ<>5TT3C1Z;=DRtJ|W=EBWSE; zWnFz+Sorkp^CSQYUS5~sTn$*eqtvO2MhT3H%fk1(4J`|5F+5h9px}khK}t#rz;{s3 zK?Z>V3M{sllJ74DQH4lC9GQTz1HJ$uH@sg4Vk=a>`=8>>4Ml{%fX2Nyj4E#G9AqQo z3-j|R5D9~p=|F+}0+1Ip7C^GCqod=pIsQqn7;Ghh!wBfhvQkn-Wo3|TY(Uv7TKf?? zMkD?GAPrj&=eKEVX#N#Q3`Mk=xp@b?xy!H^Y6a+4?p&NgpGQkw9kMj)_zY4I1_lOH z|5j7gP-u<(n-a<^D*+&Q^{1k{8L*xJw>L(MAnehFwFB9n>*>=`NG=V9%NI!i!8;l- zp!kX9wgB-Mbo20l>h83rY`PhYaRu{JF)aSvoIX#SFmy9KscApG04@4k=sv*oSX-lC zIip&jJ7vrUorgyvp048JzW|2%Bj(_8V5s%Bav?quF0~jwAkSSL$WWq(cWDq)nNRX!$O^FhDo2y#IxBRpPzpJ z@YeIC-qCIxm)UEuK4s2Ei)swVJUq}*!~zklQT{ZyG3A=vLJZoj9{ zX&oVd2MQNBPReB7!B?I}u#m^&FP~Lk8#MlaK1Xuo9VIdR>c;bG9D*_QH@EwpzO~&# zfAcl}`b!@4H}4%u>5I|d+)rJ1z2rnTs4JrXxba-zcG-XaruI_G>u5XvNn4bfre*-E zsN?;w1-Es)IdnE&ITo#zz8kmv_b00YuMGbWZajbPkaB-PFHKRs(<&bi>H0dWl4|1T z)A!e;q&_4oP~|wQj=|-=7T~<(X{$(8RM%=r!M|gODHZdK!r4#>&tIJyS9uq6-z<{8 z62`g80XOxOseI2!`A(Mhw?I9^wRe&fea{A)A1Hfjg~&0~m>niex(Z}yP#=#QJ+CLZ ziT88h1(juCUWjk9<(pPE-%;*!fvmT;cWwUu4#AG1BiWr8XjnRi&CGLWw{Vqte2kja-Zcv;>VFRVbB49s zO-pVbD=~yhnvl2+4al1qlVfT4_Hnj{op%edaMi1)O+TlwPW`I95Y_cRDtqgwETb+? z^dS`lq$DLokOpa_MWjQzQ(8jl21QYn5b2ig7LZ0kI;5o)X{5XF=KE&unwdNQ+;_1= z5#HiC=j{EfJy~J>`tl=+(Yb{}id7p`?ZHx~@lCYmB*QdZrO+cMx|g{*d6e_x0zWHC z*mxo&Y)JTgBFfzpa0$q5vRsYph&y8M;meur>WDmCd`Tl>aieMONAuDygiF=hj<5jn za8SY>-vehnSWDD2HC+L0f=XDl@eGy$_}}u1i>oRtac|s!O{zt}70Mkbfk= z6Lcp&16@8W9I+p?BjvO3s6{+rPn0s7teOWz*Ra+_?qCUAh;X%HA&?;@=65C-*|~Rc zU{r1%3|Q}(9`F-@+{eM;K$`n_Y{WN=azlsg@jF3<>*P52h-iuiyc|? ze>NQIV&$HnTFzJ)j`9B-?$B6hT?$PZ$-Ft4>`h9pcJd>)you;%lqbCY)G{QhyZ_3S zAl~-y{=*8;+woe_im&u-5QQG4_hm>WiGIi-YnM>TcR8K!edtC$f zL|O6^wA#Z>%XMtTELgCBX=#4ix$C{WSeWRRHWs?swe~e1Q;>9guHAkXi+XIkQ!pcV zx<@gi&5W7aZ(QSK0j!d~F$DKO+pn&vDGDGew5(9TL#q$)17OahCB~O)B~3!Y!e9gS z8%Tc)4TPG8#uP9~1_o2m9stA-exfFql|gWef4NC;@dhxIiQ!?Fqb*aC50!wYK+GQ; z!e2VoH8gm=_Ljj8{>L9S2vi9*wLxf|wTlg)nX7~I6ql0!AK37!x}eJ6oT|B^Cpfht zII@S*uN@nY`W%+nHER0YnJlQ(ln^^#s7ZWA&m)NI6JggmbUV9ZyJ?}oFh!OvRFNrB zyt%DkdQ#fh^Z5GPHMxbPHp@I>BjVCWK8kEEQT1$mfl2#wE4PuKYT0|GgRPeY_dD1M z429c*rCPU&xZ>)&Uu2DXb?wy2vgTFtXrz5?{PVyiNDs4*Bn&Sn-;DB+~=@)APo zeU1nJYiNS=8#olxig6&uz@H4eF?2=F?(T4{=>V7Y@F7aF^5v^rCsJy8dq74(z7B96 zV50(46y!LrR}8GUK;{T_(bg<4fZf;t`D0^)fjtonhd^0CC?5h80PrT^4LT^N^N`hF$0pTo!`*Ov8x=F2NV@aVr_GgPd8+7 z-D4>`b5SHHcxaMQQf?9u8u_AI_Qnk=J%;KPPchaG-J1x!$HQhD^GJn$=Pi#Q;k;<8 zlv}qdJb1D5svG*|(%j7H4pfA}Lm@+bYeu0UKabS9faoX_S{R3sF2+fkt6sZ$)w z`}viI-fAfe8TP#~E!EsSqJQ(l!iZ<$IFTwphN1p&P%( z)cYXC!z)Z+#(BwlSj~xtodTtWyLC+w848Ll3u$71|5IuP2CQKWm*;J*~U z;$WAa$E07rqPJMs7!zJ@JMY}cV*dC13DM2^y^4YNInIKiz4!FKSj>XWf(2DV_UOZf za;?B8R%_z9l?OBAS(T2WH`IcorhlE$rDmmNmYV0tA^yjaF5@lvX- z)KTz>H>=8x-WDEj#q0ht`l^?Aj@yspSk&GzruwvThT9X>*2Lv*)+JV$7Ou^smFZff z7PO<62qPg=PEkCFN72{a_=6+7+I;M1friN;Bat6|J>$%IL6Hc5*9vvx z8r4wbm@BUT$c8VMQjGO!q2xC|?kPKkO34kk{pumCsXddV@RdQ+uuEYIcMr?(>1yY` zR-Y-Goyvh3fnHXRF?GxeNQ>i+-t*A>-S%#;<7hBUgNK>9b9UAroW*cWJN822+47%1 zY1yMrkH|hI|E*7b>uL z#q9X_aX@My@@3*51VZit_D)Vt4k*pY$Ufks-|EUzL_D2-H@^gW6Tn9B7^A){1tp5l$UTY*v9}Th=q2z$J*NO&dW6panOeKmFWE50=+|oOqf5x&VcBnfi0v>$6ON7YyQ{wu*&0(3Lo?W0 z7($+NHrReFxQI^mUBb|1uluOgwTKdsm^b0k6cahkgdbb#aS+6^WAjp$`_f#kLe|M7 zp*_eXXUVhB*MAAEIml$GuG?$caCEJ25TfNM;M`G`&d1Qiy`jwDscU`v(m0&Fq4#jM z-#>1^Nt_^BhuS2^*W0E)*e|E}y{o zW!wJcK%$}JX-#}StE%3_;~{xl0psq3e~xZ{7*jRxQf*pkOe>W5w1@TmAh}G|s`QHb zdz|X?%A0KST(~4;2pySpFduqGaj3Lj<@CBGnDKGBA!k#Zbo1}!o{hD3!3Gxd&8FCJ z&VHt%tdG|d+a9=A4-cznDX-fUKfL>lMHTs6TbwmN;_6c-)wRHf);zfR56X)3YN?x( zt{FXob0i;Ox5d 
zY8d)(Jf^p?iLX1!w;Ojq5Wo3_Ip5|irvg9X3CVKi;bHlVXd&;_ewu&F0C)e?^Kq)Ta4yJqm++gT;(sQ6k{pS)?)p&|7iG>XFVWE81-5XeU z-b)CAht|PO-L}7ibAb{7SoTakXlKU>E^&x+0ZPIg8Yl21jJJnTp#^;Z{vDWv5HhYg$fJOh5HgHCE})57hlLD0AVfj}t9=S<0$M|1 z52hFnLkRGj1egn270B)a!AXV^CT1WC0=d6eD-B>y!2ZJzZE^wTUS38dK<5H|B5=`= zX@WF&cG>`B2;9N?As|}&fP<$9xvoNYmiC_;WE=iBeiH%dYy&lLg3OBkm2obj1c6IY5QRC-q+u|s{b$A=zbqb3W z2uB1wrjHV)$m?CT!a^+dcrh!s741~T84ZzAo+sq1VEnr)5_)i+oT{!{l9B(MqiWup zW(bZAmkmP11<#PT_ z1atMhL-y+4=);?i(-HCf>#?Q>8nySTB`|_U^Yn|yyXktOU~$}16eO6~54&7Tlm74NiE&d*`iai7pxN$O{C|a+5);0R z;Zn`7&9D$+9#6RUA+LlAiiGOf9x-aA*bzLWxRHhGcY03RFn42JRIl)U)OI_L3Xb<* zHi50j3rY9%17is4GU`JLU7`^aFPabCCGtcoNz_^i$o=NW{r%E65VxtW^PGy`*17K) zy(Vmo6_^r7#$L(a!+Sn3iyyDsI1xu&2@q(q=xT8QhVv^o;+=|>tK;2e$ zI<8ua@50U;d1_E!RH>j_zdpvZTd+z~f*r-#|9Z|++wul|GuZdB8GIGD5X&>LcWu8u% z=2}Bx`2>SiB-BX~r01#E#s`RH57k)RraHb?e>B$Jw8`^iPaAHLsP%rZvc|+Xo z2sr$Kn**tl2;egp7~$RkqJiQGdcH?=5rCTpOQi#M1CRzZebdv^lOgp0H$mFn8BnJX zT0eqYdCa5Zr*E z?alatBuEb6A49|G{{Yzo-@c(B04RZQ0{L8D4^poI5T-zU9F#J7u*=CdG`gmejmKG6 z^<=AR(*JQM+cYV<^y2;y&mXEfR29lw{E;Ei>#hP%T&U#)ukP)?X6g4LB(W0T%Al2b z_7JI%pJuxZsbwP)1Qn{I%xFm8y;W_a=sz~TeED_{q9wrwchoi{|8>di!}fw?nN${^ zb@Sn4I+ISEu)6rRZlBS9G<#~xF9r-yYIJGLt;ErJDLplHePpB=_y%vCN_f@2BvP$M z^sKb>-FTe#I4!Dbe{XFB8~F7zucxrLOM|_HMj{7IBwB} ztS-OASnA{vR~hxkFFRVMkGPfV(*^!@$X2$-b+^-d`3g>42eVljbg@FQl^mM8xi%PW+5PFGz`=4m@&?ptsL0_H{ z6h^3-cen2JUhWW6MOfq6)9gcZ|J`?AKdDYDNvbf&X5AlcAJ+Ncc4G5KLOZZsS5tL6 z>m|pDQtw<%yOGkm#e7#!)r@9N4Zjjz| zeb+a_nx8AJO36KI}}{TMg1ARnIx7I`b{J+A3y5ObX?M z2@E|rE1mi^`WuzKUSJf$X7H9~{+0$`A>~?vzN&Gn9eM!lFOJs?pDHD!Q0%Kp-=ZGh za~*tUT*psY_?usMmHE<3Y`jxvz1?q=F_lmpy8d8~Acsq+*tXldDQVK&Dp&pv0h?{4szp3N*xB%mg2 zaPJR5Ai#M!H{bP`~nC)92^`N7#N`5fSvgbhe! z_&PVY8VRzFpkt`?ON^T!mTi?1<~RL^I3sBC@xHZ%c8bfO43a*&-=h`lZL3$DuaH zzb19o_i>?W%I<-n}OGEd*r1C?>%t zZT{C^ROBm(@dVqa*SKW@!=LR_f8w^>=O$3kQKhzOd=bbbj?j#CKNKhqaJ}#(K=xh? 
zum8=Z+|JLelKaa4i?8Ng#gbc9Y*w~VX#JA1o0pt@hJ=fWm%BciCSpw@xApLj8guSI z$A%>ZG2x%LVp_Bqgb^lIxhx;-+K%JG_P7^!UWX*axP?`Z7WZP7Fvb)$BrRGMtlJmi zB3;<@0z9GFmfPFQk_g#?g(RKXy8C+>{_4o?&1@~Cc|@CC{q@02-;C(U+V}*DM{1o0 zOEQ&pq(SS0kl*!5h^^_f!rFu6+1HYHf`@m`&0rz}CFKnSD3X z(dXyq0jxzw-bvAkS!RXuPg?gGUtH8`!cB2RS3iEQAu}f6WkfceF_W$z zS8uoE-PU%gDgqvRy`ZT_RD%l7lqM@BI(SC10ob~y$FBd&S)l*hlUtkIE&E@_#@z)} z*j~=yIY#l*H5SG;^tJttSrX8AUNI>?CNl%T&@O7x)Yv||I#;DvOfR%=P)HHJEuku)U=Z=f5>-xYqDwbfT^sb3) zuSVH7@~8o3Bv8YfE6Lk1R6@g0{^Zdcd7prGAyH?S{y*gmHhYRZzLD;QgA0FpO$hUdTLN@BQ*Ig%`k%~}L- zYss@`$x|_W^v*NUb(gstx*ma{;h|Ni!Ov|Sx_P+6F8Ub(4_n4r4}XA1!yZ87pYjm6 zU}YFG1EWih1Xu&V-RI^;ML^VicIKnYj~NK%?llCa<3M!O5P-Qdp%e_>d?0=i7QT|* zLdrX=5K4&eH!w)@F$Y!U-8>DD{yao|3GozLaBz~4fabm24g>@sd?yQvPe`ydH~;O8 z9?;XHDASIITQf8|3d&uG&V&GAlmIAX34@0%-}S&?3^g@1=U)V#%8s1puygV z&HTDR`Py-Myf3@Lcs(IW$?G2lOCQ=?1p1f?&Uq4yx*Tc~^*kRGvwdx^3wrbRC}BpC zCHx&{!EgTL5A5K1vrW%pN=JS8zBt2yY1kc7vCxD4{2D`wz{RCKMWy|3}!Ttq>FOsCoT*j=N?jCy+t z>^kytXKzQd6Ut49kL7&o7C$TfIQ7Ash-a2m|)>S^L?Z&0*s9qW_dLx;xqr&kax$n{s-?A0X)NHXy_k9!jhZ827D5>5} zv)DAxiin)g$oN9_i!FHhK1AP=MQCGW?e{eQ;b@4k@m6%NaWpWEs%H>R@MX^#-74*n zvYSEb@Qo|PExk@BR$EESy@Z!N^`F_F%$%=&H}Xjn+|zh<*2L{L`L9Y?{eG|&8d zJxbM6uQ(T;F^_g-3u!O%@I7wKfatr)Dt+7gWdj$*i35LoSQ?+G_1T>Cwd7U@7N-^| zD|;%08uaoGDtz@g>FShpT~ma{C@sEyeZoap#j)xHjkC|O4h@gLesU)Y$-kh* zbhoEyLUZ7n0Q4C72@@T-GDFDo11XcpPbJY;z8#D7lVXXJSw49llwrP z!PFiYVMXHZ?hatuQjy?oyq6M!5Qq-UC5YBR!lJ`Qa-kzm4#XHRGNAQ>KcP`I0Cjdv z4LJgny{Kqtpw&Evhl0^bcNrP4HH}k4BMEKi)qw}`rJ!pAVHc#}yJR*383eL9aPfQ| zS^*me*i){wa@0MYcyF4T{2?kG04|W2l^E8MfWQF)+L5j8?FS%B1pO`We}IpH;x`C~ zd@$mBsZQY)qx#@MO?mkaJQO_Lqd@12M4_VIAn7}JYnC2ceqtzrFJ|tQ^CBMl&$42N zQ#`(x4r@;Ej8oA#MW`8gZ$IvgLMl9j+6(rzq{WozOqxR7&(Ga9Zod6%jh*?h*sJ11 z7o|Bcge6T!XuDZqyx1SRJmt65iT=B_m)9^DQ@Qs>asgkZ-qCsziLW$TW+p)$fl2M%!#?)6c5R?)sfA!x<3&!0|H}^tf}ZVN1eS7UkxWLA)oVP4mC~4msg)mV>0> z>e)h2FT|IQR#>SZWWsKR5FJ_L%oy8USy2H26Tk*?zIH0=#9n%y7Oz@f66ZC7>t9rG4M z6XDZ(-s>=3s;RzS1O!?ZBTm8sz`H@Ld)TGow`vcjnY0g@IibY|9p#RwLEkRCkN+HE zL%C46QwtTtiQwu*VoQSmO^ozUI)m{ySMe1Rn5BLkEbL4rBh;x0An&M%||EbX$rhG#? z8|VLL*N#{gUFAk->9;ET6uV&B>pKf+{2=5KlBfODx>&~l*s@Wa^%ZT0RnV2ZOGw5~ z)9T_n0em6n;gvcpIpZPi3FyE;MD^DWe}2*`E62z5VKdapHVy3Le4%MMgfmTjIkipc zm%CFGz*+tr`Mgx?o$4q9dazVXcv&3+DnN_{NYDedm3+F^>o^@NLI3UYRM-7AJQ;qbgxnwO_OLX5X2HUIFUL|}BZxcN z$5+mAoaI?b=CmBgO)5L_;TBoQy7SfCX%F;4-p%qZ%NxkKv$9)Y45kQ=3q*Dm?Q4#P zxN!}s%3D6?m&ybZ^wS5ynp>{*mSt&}MZQ-Gu|^>fwRH$AKJ>D{THDu|=5~wJz`pA) zjn=%_frC8fSJmelJYFsMM33pc?H<-0By9gRf&WrU1f?Yg*Igs~6XD7~ITVtozKZVC zUH1sE#BY`t(nMGw!z}QoD zMdoWUo%JV8_&U z1yQ_5aTAh4%~oXa@3f@@r60wAU%3BEetWMBl2o~e6a?0+(x$!ZysDp~ zGA<&EGVk0uh#EnL&=WFaO3<4irR*%bUS{$6bTujAer^O0KRP1s;Ub}Q<|{t35K}*3 zEO_|yUdyh?;+~imyaD&P1T*RtsVy}qp0O0)a4X9HOFF`f_neQZ(7_``fA^{Q+NN)> zEP79`#?$kWF9*W;`3s~K9^YoNv+QOjOva+<`c@V4QmY-{%A&fuT94a)KBHLS+w{6; zL>O4X$KtH#*!|EUNTs~M-JpM`YE(^)mW3H#z;_c9VN4;WZcNR|D!ecJQ<|2TrZ(06 zoCdR(O*HYV?U(LG8zYcxcyG@8#GG7-7+DIHT3jhX*#q3BaUt(@x{@znhzsu za?da~8{0dDB_TxQb+gN*?9srF{2@%Ql>Z^6yaw;UmE`fi`bF8L`$Llb8;|hk*Bf*9pk_Nxn#oC8l~WsCw+)NPj$5rt`@Q3|9%hfmH$P08FnZhc=X{G#(y=I zfivO)T@bW?BMR-|1S=bxR4k3q^$zXV9OCgDZcMd0t`iass&EJYn=Vu0e~Lc;Z;#_A zfPLIHj3vlsR3kCW6#ZVfoC@bI)5j>+sE}x8+hk3Ccxm$m@KScz^N5~zt?j%+=YEr^ zNQ*CA&KxYo_UXF(Gv|c6*>*Qp0Sdk$Ntm#%Y+LqR)3g3hISQ12``u*|ZXZ&Wkf?KFW-Huk?#N}Q>^-%i#l z&Qw|aoT44MtCrbm|F4u14k&@x{j5(|J1gPrGuPgSaEk6{e$?e$HqEMZ`5IZAkBZ1< zK411T(vZxPU^*xpFcRufeH8q?kQ8U)B)509Ddai!hAG`A*_O@4DOolmh0OEheNz7= zCf2R8b_{rS|0ELoH7+jMUgkzfszn%cJNBm&Ts*?`*pFrROd)q)o=9c%7sWxG-QMu? 
z^38Pzqo=22JAAa7mFRCQ?~y`%Olx~YXR*l(qMU4E=O#Di+Iuf$2_w~8h~8Zs4Nw{n z{^zCoy#m4-(!#IIZ|lZgot@+04pdeSg9--xA%ZZ6^jI5G&_T9GLrooa*}1d}k>2pH z{-s^Oj;lZmgN7EU!Z8xtzO>7z7*`4+K2lOrAGsGVw!tY1QSUgoxZw1kUq6ot^ntnu zQruyvDhOKGA(~-kh8h9m5l90WQ`uqG)eVib4=Z1Oz8v{k|IdKXn8^Xel6edUlxVj^ zxIe1+oWkfxAME3!-y&iT#DY$gbOt7Wx8K1*l( zyG*P5H!Db^Zh`SBHIyHDJ=n)$U2Y|lh<03F4A=R8SS^~{i5D&WGLa+x_D27QAWyQ; zT`s;cEj^*BvX`9i`-(9GtR9R6PT|zXKhs$&P+YMf&~8RWxCwTvM!#Q`p@ ze0%aJVvMcvDBWX$iPW73*NUie{3SmeWk(0cl5!4@8w!F?mI_dJ{@_&NB(dL4X1_gM zXdrolF&RJ6*rj|n@#e%u3U!p}$Wm;tNfj5BAZ2++Mqg_v92^iJ8Jz`XQ*~nk3_Da; z2N~yWNL2?ENl#zDn}QmEB$#*$0#oyN2EtCDNJ-greuBSQL7^L%(50m%e^znbxY$?$ z7^=wsvKy=izw8dcaA;;~3iE8PVgSJW!p|z^e=*h9=KvlZ*jqv150Vln>=22ZAwa0= z7kp~ET<)X__ANf9b{>dt#de4Ux)RVuIJ`V!d$*_t*4k@}8>3hha-yHFd3>+EcPArT zHA2!ruyv+gni2sC#L*i0-)~(p%CEnoj()#`XFnMGS6*3~lzZ)XQK;(%=J@f1HJXHL zvWd4wTQ_RVCsZ2^_sGlr>NepH{t#Eu=bYiww95;hg)>U_7d!T*-V1p@s_eb9;bEm7 z9_W13HrT!vH(@_cX;zTo*pF<%vO`1cTNW+*)@D%IwE@X7vtxBghH!cyrQ=Ukw32 za8r}w_%Kno1X~9khjQ6+YuCgZgYb`^*TPDUi!tRoTt0}2t)l{~VKnTXzi?jx^;aNM zhmZ9uC%NuY5QC7srFNnP4}^_3`~Pv>IUBz=^D2CoOEVK+MJ&y~k;JFFR-}rHQ+1HSw1Ih*mwSYHl zmlLcuP^x4fmQgG{W%(p4V5H<7(f@zee>D0*L>0c>59$|4)h3$0Sm!?Sw>rNwIpJp7 zT`{QMSW0X1RO;l*OiV~Z+fQ{Y8vTHqW8V@kB4I~*AS47A9L#K}^4Ek!7zBKZVle0r zCcR+>zI3ldB%GXfz*Pe6S4@0-FlU5(7D$kz&w~hpOp$=GhOrS0->6R+yW+wZ9Kz3~2GM15I}=04`JTwPyxg_~>FMeB{*CX#u&3qex@tR9Jw0$kA4S2`m-zS< zFz$l27I+SkiI%&G{P8sg4kjkQ0batpE!b>X0VyqfV8jRd0SwO&y#QV+-T!I{f0dza z4uFFA5+nkUvm&fh=Xj+iO!YVVGB5lVZKy8R+2h*A0&4R-5iGjIyoP$V-)YJ0E>kwzc3Bj26Do!^cn76y4OBn%lhXz`n+c zAFLup`kNen`A*zg*AQF_c};d>typ=E)JZ1v0N9D>f?b+p-inS*E0{g!fBUstJ6@Ob zD}^?9K@(pP3KCW3DNXTSAhKI}BLr{a?D<+HPGhM>fT@Al&}1#coaeJ?6k3)v7i)ox zTNz2AU#u^Mcgn%`u#<5(y1miwWa~EF)wXLMHOj?d8H^yL(Q;A`HTbcm-2K<9&EuXP zO-~UGV8~c`rpEm}6|7i&p|zir{PmJHLbii+3$%PWDW^=wHbSD^KL)p-k>*E6mr}UT z^yJ6X<uJ>V&7_^!;7|hLKRB}>80HMTM<63NsInuU zF}@3$n?)G8>gafdOagvVF>dG6ssg@ju*5@nge#0u0;8+0w;YTc2Ds(wJcBQRHXY70 zaKX84JpnuUl`*-T7PH-)8)!ds|En=z@jd*kc;wuf@SGqVV|#!99A>sbv`f(2Ko_Cp zA7)BPf>&z}T6QLO_UIxoX}tg`Pt(N-tB{Zv*T~GurDuAMtgaNg$TcXx?!0Pf0Y(S4ST6i2aIXSC$A8a9N^&Z&%iJB= zEqfe2buX=q0*P`?7`BPpvcc=p*x8sIFW|_&ow5>ISrsDH7Q0%Yj`q)GTig>>mAr1n z$8MnHZc^gwIjS)p1~nbAK`aF7-U6BGkh|V+**B}HFtm>0AR9TF<&=s2Vb1E=pSCF8 zq`beh31CO?@5fP^E8HPC5n4rQ_7Hcrzl8xG*Y5IpxuOZK_)gTEmOS`Bg>sD!5kkqq zF@x!8cvvn~(J5Vj;zgB3m~U^G8uXK1c!@nIDfc_Roc(fe-RzpWl>k(WC0^5p~CHSrn;lc8)#+_Iw>SM#fEiTX%` z#&xgka#WjrGE&E9>uMy9r~5rnm$et5E_CzUIn%YYVMl4f&ZhvCaTZ%jV&2Jn)xNfb zFCdSu%I*1z8j}_@m#^9fJ*vgO>`;Og8-}Tl#|=)btG%vKkg=yM#=KWK;KrvJ?xR`z z*D|Iecp`YbaR^Ve`lo8!{i9vgM$}-z{5m-m@98gu9Ef?86X;Q7=}-csFc~Ahsb=Co zI_trk>SnjtT|^%z*5FI>lW&UG$j&N65I`j1h&QVE_2jR*C8p9|HPh0TnR9$`_9}Gv z@is%{-D^BJlnAnuu_d#TFqT5DnR8RPxXU8aTBqt8_#{Vl`@zYOuEOw9o ziFT`YO@vO>f&7Fl=ihkWqUgNLxTFN=;rD%1&$1tC&&k*7+c2|!JecZijMpfZ^!+ z4FFRS13f(=p{pAY|VmuUwntexXQUkY~#tYr0?E%y;*asl*?nf=xJJk zVu^$Btpy8|3h!stVQe6Vuy@QQsfYN5m-280L>S#8CVXi${kcFoTB!!xY2X^dp>iFm zwbk&s^G5dPGo{F2bkjQFXu!=uPo*nkgenn`to-?dSg0i7`z%(@>_YOtuVn1>+v{#l zUkEmSWMKVOlv$oO9b#@M0hKfGPE?yS+*WE=)bxB-3l$|s`l?W4Xk%C18b_jyT#3#7 zG1c!a6v%Tv%~;vAxRL7!i$*!!o9l=<;lsvP^mJ(-lqA-#6Gkva*K-vs&_#6kqv9P< z|F&7fgYVE)c_b4&;ihanmZ6-{BXqJ89zR2L)_ZqOUfNXvOEj7``nwK|`cw9am7F(d zhPZ&0J^m4kS-UL2_hE_Jo3Ao%xPfGHqBh9Awr|gGkaD zy1IStEn7ylZ1`RBi_WqkK>i*oGINBDfJFuBTCN9tjFzXU&3%cw<-s>H>Jstq|Lf{D zLw5;3_dJhyFpLsx+xx6~!k--I{k(>5L@R>sjj^HDbmYfyUIV*->Ob=OH7vxM zNwvIdHN{-Bgk~*a&DftY%i!XO_XE8eGk9chk;J<^7dFDrII>ti2G5=*VEkGU>C1Qf z|H64i^>L_3n6#~F4q*(Qa}trYH&33=45ELmzB_#&ldPuh33LKDj*?f>Egp}HimxEvi#Bn_kB>-n9XPU5lWO@tDL?mWc-KpgGF>td#e%bQS`< 
zIV8@|u~@K+UTV0H_i%Ct2Rxh%WCn#B>|(=ckN(=u1Z98z#q_(A)5gv=lexZ8bmU6( zb?m6r6duB9I?PL=C6LRk)+ML}elaK)OR#_W=V%dgQD&)ucvZ9VtV{2r#tKJT=_!qj zsDh{kMHa(t#sB}BFRoYRFK68hY(Zw8iR{kic(j?10ma8?^0U`qx4B{Jmw~0v9et9# z_Jsn_NC?CF&=P={nsE@}$fMzI~-^r+`FR(pEjgX?lP`RNbuAf{P zJJjPE?MGovnyhKqXM-AG;yCndn}IHv7`aYHX3NAOl>FNy(;6_~H>EOSBinxWQOl2#SOl0mx9= zhH!_Sr%&P(uV$Iz!bC(=RB(VrCnujDZ9he534;kc^*7EZ_Q(N_>E}gLfIu!*R)qt` z4?hM+9fURcA({)cHXal$_x1HB!T5qa(e(A@DFjTWsEB>7lP;2ytIzjz6GviReC)7fK!lG^OCbQHSBJxviWmv zg=dTyao&2?9AxGKW-@!-J~5Sz(CVai6Xg$eAkXgwrKsxF*?k?7xwYRKDTy6%|J$s= z8~J>D!ffIf0PD}$55v~wapSpR$Df zCQE#7nv|zz^G;0S=J`e~TELqy?uP8oFFC&_+7G_3LB|QHw2rP+3h+UQ4F9@#EiR|D zSSSoG$6KD<+tkVGZb8Ec)_8B)Tu7>j6HY$S+tDuVeXgU;z_K#8Htg$DEcz(kWS%AuZLo(eA+kouZ^6^Wq}&^vB+xrnzqyWh;Nq|FuV#Izo#d zddS1`^GR!6sF3VQ+a;psXg`lax-2_?bk zH!}Z&hiL5P&yi-jShmUEtn@u~d-<6`uh#F}3qXQ=&4MpqX5xCUu8T`z5}{-VbMw70 zdjOz(Km?@_%R$9s8DeJ&x(*nH31yIos3<^M_sv@0!Q|a88GexNf&>`$cEH9E7$h9X z3582nUtf$xcVC~Ii%aW$=+oFB@QjugF^1?Aj@j2k!3KtgV4k8Q#_lxdMqv18b?O|< zoK22BJb8G>MF_t2Gh`is1R2)Ck6mZMB*?yl6t0`&E09M9nKqCa3^&kqfHWC=*X9-$ zBQ}*M*)DqKfrp7AU4__AT+L0H+q_WZj$RypHThidt@3D)o8R&4TP}qD=XlR zwuPpR_n%aV?q6?8%*a#R{_?YjsmOxw`afJvL&aer!}z5$FL{5)=rRiKM~Mk^9H1$9 z05%?wTwD>H(3mjiIL2d-tHBq=G?~sl)x!&_AjITj;g9 zFn0-27Is#cQntavO@r{rwcGEbnTwsatugL|4Wo(Ll&p4rbiVc_eVS$Ui=vFxb;m!N z&99i{KBA-2&s+FQaiNhsM`spqOl|s9x3%S)^8Bo^Y3W$3Uq#zPDXDa3dIJ0Kv%s+m#lf@&akmBT_(2Btzy*7uY4oCoYj;d#aa%oZPy6CWrTPz^Io%)Vewhk2X#HZg zUA7fDOPu9&H4wk`r)Q)-`J3kmdnLQL(UJYvyAc8SgDbd=yo@B%E4Y}NBA@Gb`WuHj z9e-c8`z$xr&TlrsJKyq&g%QtTSKR1M{$@Qdd8ac&E)H}5ZQ8B*N@;hvc*4*6Av9Fl0Ol=e)4fCUoJWAuczDo&F$a!4ictS zjr@u;RV%CfJGiTHLw#E^h$e2DonQErFuG>Ar}`8criVN(4y$^-7ndsfDXe_D!@qXs zo|lN7kk|xyBtOsO3dQZ?f9Kb)QGV}@q46h>rO~ok6*Bks-*6TTJ1<>YsY|04NVFl0a9AV?f4UWLD~<>U{ycA#*B=o&!0bs zOa+*HejOb(wcm<69uy^zhty}x64?o6#m)J7shlBbuAr+rwj-Qmiy8klsnb9-$MJQ* z@=ZI#V-Mv9>FaM!g#-?8jdRy`bpLv|ZF-Rs65QE?_b>L)>p;p|eEHMcW0A!9`@U5F z`J4}x5-jl-8JK-9i=UqTUXX+shm|^%Y?^& z`{vGQ<(^Ue>ay``+akY=*=O-D`rW#2EB*F4QS#P`e!;>9m%3=hkZ@$LAUKI%>(4o7vYrZeKQhR3BmQeC2&0^}Wiq z*k-qH1d;3*OZH@13GAvf_ILYB$4%=_S{-7X8IrVLgvcSL*^#wq#lc3dit7En{_kypKFWA zR#jh^j|q*x6RI=F93kJRlIwk|x?|rSaTK7iqN=vgy3I;gG}CjV z1=3`%qZ@DtK$xPSJ0T^dFfT7mL8p)H)@2(7UoNSQd`nyu^a^0jog5mf`24w9hYoV0 zJ{r!D##B*^h!4&9M{Yx~0&n1eqWTd5cJnG|bs8qZCh_`ynjNx#a&hp|o zIP1{d0m2`>(EHc=A|!ycJpw$<|Bj)t+J{gS2^ zT;7W*d^fzpC&lVPN9%!=Hz0PGD3b5{%Pc(fo?=Ky1!;C;<3(pABgDaFSJs-m(MfwV zWD>7uzsdjkpTxuq*7{ylD~CO=WaW~96+Rd4ryDA0TT@}E&I}JtI)Ajaxw*STLNp{w zV+KY5-_=iN5J3JRW!#7CZRM`fk|V>RZ^XxU#8Nxa1t%jD0~lhRgl}7jif%8P2W8-n ztNm%9a3HR`$SHMoAn<(9B)E5dGQ)mCy{Gxl^OgVH6sWH2aNoB=iyIYNB6?wu1x2vAD9`q&E;sf zS|KO^_Ga^h|fpYcPt;ZL0oAt)k3}3|L<>G(83yC4n`OjEH#7)BT zY9=r8FQ5m)w-MwQO8nJq2Bh@rvEXKM&HO61`u}qO7ig`oce*a$lqOvLzuaSGsbWc! GSN{(VMRZI6 diff --git a/hadoop-hdds/docs/content/security/SecuityWithRanger.md b/hadoop-hdds/docs/content/security/SecuityWithRanger.md deleted file mode 100644 index cbbd53ec7c1..00000000000 --- a/hadoop-hdds/docs/content/security/SecuityWithRanger.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Apache Ranger" -date: "2019-April-03" -weight: 5 -summary: Apache Ranger is a framework to enable, monitor and manage comprehensive data security across the Hadoop platform. -icon: user ---- - - - -Apache Ranger™ is a framework to enable, monitor and manage comprehensive data -security across the Hadoop platform. Any version of Apache Ranger which is greater -than 1.20 is aware of Ozone, and can manage an Ozone cluster. - - -To use Apache Ranger, you must have Apache Ranger installed in your Hadoop -Cluster. 
For installation instructions of Apache Ranger, please take a look -at the [Apache Ranger website](https://ranger.apache.org/index.html). - -If you have a working Apache Ranger installation that is aware of Ozone, then -configuring Ozone to work with Apache Ranger is trivial. You have to enable -ACL support and set the ACL authorizer class inside Ozone to the Ranger -authorizer. Please add the following properties to ozone-site.xml. - -Property|Value ---------|------------------------------------------------------------ -ozone.acl.enabled | true -ozone.acl.authorizer.class| org.apache.ranger.authorization.ozone.authorizer.RangerOzoneAuthorizer diff --git a/hadoop-hdds/docs/content/security/SecureOzone.md b/hadoop-hdds/docs/content/security/SecureOzone.md deleted file mode 100644 index d4d836fcf7f..00000000000 --- a/hadoop-hdds/docs/content/security/SecureOzone.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Securing Ozone" -date: "2019-April-03" -summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM. -weight: 1 -icon: tower ---- - - - -# Kerberos - -Ozone depends on [Kerberos](https://web.mit.edu/kerberos/) to make the -clusters secure. Historically, HDFS has supported running in isolated, -secure networks where it is possible to deploy without securing the cluster. - -This release of Ozone follows that model, but will soon move to _secure by -default._ Today, to enable security in an Ozone cluster, we need to set the -configuration **ozone.security.enabled** to _true_ and **hadoop.security.authentication** -to _kerberos_. - -Property|Value -----------------------|--------- -ozone.security.enabled| _true_ -hadoop.security.authentication| _kerberos_ - -# Tokens # - -Ozone uses a notion of tokens to avoid overburdening the Kerberos server. -When you serve thousands of requests per second, involving Kerberos might not -work well. Hence, once authentication is done, Ozone issues delegation -tokens and block tokens to the clients. These tokens allow applications to do -specified operations against the cluster, as if they had Kerberos tickets -with them. Ozone supports the following kinds of tokens. - -### Delegation Token ### -Delegation tokens allow an application to impersonate a user's Kerberos -credentials. This token is based on verification of Kerberos identity and is -issued by the Ozone Manager. Delegation tokens are enabled by default when -security is enabled. - -### Block Token ### - -Block tokens allow a client to read or write a block. This is needed so that -data nodes know that the user/client has permission to read or make -modifications to the block. - -### S3Token ### - -S3 uses a very different shared secret security scheme. Ozone supports the AWS Signature Version 4 protocol, -and from the end user's perspective, Ozone's S3 feels exactly like AWS S3. - -The S3 credential tokens are called S3 tokens in the code. These tokens are -also enabled by default when security is enabled. - - -Each of the service daemons that make up Ozone needs a Kerberos service -principal name and a corresponding [Kerberos key tab](https://web.mit.edu/kerberos/krb5-latest/doc/basic/keytab_def.html) file. - -All these settings should be made in ozone-site.xml.
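For reference, a minimal ozone-site.xml fragment that flips the two switches described above would look roughly like this (a sketch added for illustration, not text from the deleted file):

```xml
<!-- Illustrative sketch: enables Ozone security as described above. -->
<configuration>
  <property>
    <name>ozone.security.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hadoop.security.authentication</name>
    <value>kerberos</value>
  </property>
</configuration>
```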
### Storage Container Manager

SCM requires two Kerberos principals, and the corresponding keytab files
for both of these principals.

Property|Description
--------|--------------------------------------------------------------
hdds.scm.kerberos.principal | The SCM service principal. e.g. scm/_HOST@REALM.COM
hdds.scm.kerberos.keytab.file | The keytab file used by SCM daemon to login as its service principal.
hdds.scm.http.kerberos.principal | SCM http server service principal.
hdds.scm.http.kerberos.keytab | The keytab file used by SCM http server to login as its service principal.
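A hedged ozone-site.xml sketch for the SCM principals above (not part of the original page;
the realm and keytab paths are placeholders). The same pattern applies to the Ozone Manager
and S3 Gateway properties in the next sections.

{{< highlight xml >}}
<property>
  <name>hdds.scm.kerberos.principal</name>
  <value>scm/_HOST@EXAMPLE.COM</value>
</property>
<property>
  <name>hdds.scm.kerberos.keytab.file</name>
  <!-- example path; point this at the keytab exported for the scm principal -->
  <value>/etc/security/keytabs/scm.keytab</value>
</property>
<property>
  <name>hdds.scm.http.kerberos.principal</name>
  <value>HTTP/_HOST@EXAMPLE.COM</value>
</property>
<property>
  <name>hdds.scm.http.kerberos.keytab</name>
  <value>/etc/security/keytabs/scm.http.keytab</value>
</property>
{{< /highlight >}}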
### Ozone Manager

Like SCM, OM also requires two Kerberos principals, and the corresponding
keytab files for both of these principals.

Property|Description
--------|--------------------------------------------------------------
ozone.om.kerberos.principal | The OzoneManager service principal. e.g. om/_HOST@REALM.COM
ozone.om.kerberos.keytab.file | The keytab file used by OM daemon to login as its service principal.
ozone.om.http.kerberos.principal | Ozone Manager http server service principal.
ozone.om.http.kerberos.keytab | The keytab file used by OM http server to login as its service principal.
### S3 Gateway

S3 gateway requires one service principal. Here are the configuration values
needed in ozone-site.xml.

Property|Description
--------|--------------------------------------------------------------
ozone.s3g.authentication.kerberos.principal | S3 Gateway principal. e.g. HTTP/_HOST@EXAMPLE.COM
ozone.s3g.keytab.file | The keytab file used by S3 gateway.
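The page assumes the principals and keytabs already exist; as one hedged example, with an
MIT Kerberos KDC they could be created roughly like this (realm, host names and paths are
illustrative only):

{{< highlight bash >}}
# Create the service principals (example realm EXAMPLE.COM).
kadmin.local -q "addprinc -randkey scm/scm0.example.com@EXAMPLE.COM"
kadmin.local -q "addprinc -randkey om/om0.example.com@EXAMPLE.COM"
kadmin.local -q "addprinc -randkey HTTP/s3g0.example.com@EXAMPLE.COM"

# Export a keytab file for each daemon to log in with.
kadmin.local -q "ktadd -k /etc/security/keytabs/scm.keytab scm/scm0.example.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k /etc/security/keytabs/om.keytab om/om0.example.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k /etc/security/keytabs/s3g.keytab HTTP/s3g0.example.com@EXAMPLE.COM"
{{< /highlight >}}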
diff --git a/hadoop-hdds/docs/content/security/SecuringDatanodes.md b/hadoop-hdds/docs/content/security/SecuringDatanodes.md deleted file mode 100644 index 6b7d82365cb..00000000000 --- a/hadoop-hdds/docs/content/security/SecuringDatanodes.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: "Securing Datanodes" -date: "2019-April-03" -weight: 2 -summary: Explains different modes of securing data nodes. These range from kerberos to auto approval. -icon: th ---- - - - -Datanodes under Hadoop is traditionally secured by creating a Keytab file on -the data nodes. With Ozone, we have moved away to using data node -certificates. That is, Kerberos on data nodes is not needed in case of a -secure Ozone cluster. - -However, we support the legacy Kerberos based Authentication to make it easy -for the current set of users.The HDFS configuration keys are the following -that is setup in hdfs-site.xml. - -Property|Description ---------|-------------- -dfs.datanode.kerberos.principal|The datanode service principal.
e.g. dn/_HOST@REALM.COM -dfs.datanode.keytab.file| The keytab file used by datanode daemon to login as its service principal. -hdds.datanode.http.kerberos.principal| Datanode http server service principal. -hdds.datanode.http.kerberos.keytab| The keytab file used by datanode http server to login as its service principal. - - -## How a data node becomes secure. - -Under Ozone, when a data node boots up and discovers SCM's address, the first -thing that data node does is to create a private key and send a certificate -request to the SCM. - -
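For the legacy Kerberos-based setup described in the table above, a minimal hdfs-site.xml
sketch could look like the following (not part of the original page; the realm and keytab
path are placeholders):

{{< highlight xml >}}
<property>
  <name>dfs.datanode.kerberos.principal</name>
  <value>dn/_HOST@EXAMPLE.COM</value>
</property>
<property>
  <name>dfs.datanode.keytab.file</name>
  <value>/etc/security/keytabs/dn.keytab</value>
</property>
{{< /highlight >}}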

### Certificate Approval via Kerberos (Current Model)

SCM has a built-in CA, and SCM has to approve this request. If the data node
already has a Kerberos keytab, then SCM will trust the Kerberos credentials and
issue a certificate automatically.

### Manual Approval (In Progress)

If these are brand new data nodes and Kerberos keytabs are not present at the
data nodes, then this request for the data node's identity certificate is
queued up for approval by the administrator (this is work in progress, not
committed in Ozone yet). In other words, the web of trust is established by
the administrator of the cluster.

### Automatic Approval (In Progress)

-If you running under an container orchestrator like Kubernetes, we rely on -Kubernetes to create a one-time token that will be given to data node during -boot time to prove the identity of the data node container (This is also work -in progress.) - - -Once a certificate is issued, a data node is secure and Ozone manager can -issue block tokens. If there is no data node certificates or the SCM's root -certificate is not present in the data node, then data node will register -itself and down load the SCM's root certificate as well get the certificates -for itself. diff --git a/hadoop-hdds/docs/content/security/SecuringS3.md b/hadoop-hdds/docs/content/security/SecuringS3.md deleted file mode 100644 index 1cb0c809e61..00000000000 --- a/hadoop-hdds/docs/content/security/SecuringS3.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Securing S3" -date: "2019-April-03" -summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience. -weight: 4 -icon: cloud ---- - - -To access an S3 bucket, users need AWS access key ID and AWS secret. Both of -these are generated by going to AWS website. When you use Ozone's S3 -protocol, you need the same AWS access key and secret. - -Under Ozone, the clients can download the access key directly from Ozone. -The user needs to `kinit` first and once they have authenticated via kerberos - they can download the S3 access key ID and AWS secret. Just like AWS S3, - both of these are secrets that needs to be protected by the client since it - gives full access to the S3 buckets. - - -* S3 clients can get the secret access id and user secret from OzoneManager. - -```bash -ozone s3 getsecret -``` -This command will talk to ozone, validate the user via kerberos and generate -the AWS credentials. The values will be printed out on the screen. You can -set these values up in your _.aws_ file for automatic access while working -against Ozone S3 buckets. - - - - -* Now you can proceed to setup these secrets in aws configs: - -```bash -aws configure set default.s3.signature_version s3v4 -aws configure set aws_access_key_id ${accessId} -aws configure set aws_secret_access_key ${secret} -aws configure set region us-west-1 -``` -Please refer to AWS S3 documentation on how to use S3 via command line or via -S3 API. diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md deleted file mode 100644 index 3e8f2d16819..00000000000 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: "Transparent Data Encryption" -date: "2019-April-03" -summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access. You can enable this per key or per bucket. -weight: 3 -icon: lock ---- - - -Ozone TDE setup process and usage are very similar to HDFS TDE. -The major difference is that Ozone TDE is enabled at Ozone bucket level -when a bucket is created. - -### Setting up the Key Management Server - -To use TDE, clients must setup a Key Management Server and provide that URI to -Ozone/HDFS. Since Ozone and HDFS can use the same Key Management Server, this - configuration can be provided via *hdfs-site.xml*. - -Property| Value ------------------------------------|----------------------------------------- -hadoop.security.key.provider.path | KMS uri.
e.g. kms://http@kms-host:9600/kms - -### Using Transparent Data Encryption -If this is already configured for your cluster, then you can simply proceed -to create the encryption key and enable encrypted buckets. - -To create an encrypted bucket, client need to: - - * Create a bucket encryption key with hadoop key CLI, which is similar to - how you would use HDFS encryption zones. - - ```bash - hadoop key create encKey - ``` - The above command creates an encryption key for the bucket you want to protect. - Once the key is created, you can tell Ozone to use that key when you are - reading and writing data into a bucket. - - * Assign the encryption key to a bucket. - - ```bash - ozone sh bucket create -k encKey /vol/encryptedBucket - ``` - -After this command, all data written to the _encryptedBucket_ will be encrypted -via the encKey and while reading the clients will talk to Key Management -Server and read the key and decrypt it. In other words, the data stored -inside Ozone is always encrypted. The fact that data is encrypted at rest -will be completely transparent to the clients and end users. diff --git a/hadoop-hdds/docs/content/security/SecurityAcls.md b/hadoop-hdds/docs/content/security/SecurityAcls.md deleted file mode 100644 index 31bbb0a95cc..00000000000 --- a/hadoop-hdds/docs/content/security/SecurityAcls.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: "Ozone ACLs" -date: "2019-April-03" -weight: 6 -summary: Native Ozone Authorizer provides Access Control List (ACL) support for Ozone without Ranger integration. -icon: transfer ---- - - -Ozone supports a set of native ACLs. These ACLs can be used independently or -along with Ranger. If Apache Ranger is enabled, then ACL will be checked -first with Ranger and then Ozone's internal ACLs will be evaluated. - -Ozone ACLs are a super set of Posix and S3 ACLs. - -The general format of an ACL is _object_:_who_:_rights_. - -Where an _object_ can be: - -1. **Volume** - An Ozone volume. e.g. _/volume_ -2. **Bucket** - An Ozone bucket. e.g. _/volume/bucket_ -3. **Key** - An object key or an object. e.g. _/volume/bucket/key_ -4. **Prefix** - A path prefix for a specific key. e.g. _/volume/bucket/prefix1/prefix2_ - -Where a _who_ can be: - -1. **User** - A user in the Kerberos domain. User like in Posix world can be -named or unnamed. -2. **Group** - A group in the Kerberos domain. Group also like in Posix world -can -be named or unnamed. -3. **World** - All authenticated users in the Kerberos domain. This maps to -others in the Posix domain. -4. **Anonymous** - Ignore the user field completely. This is an extension to -the Posix semantics, This is needed for S3 protocol, where we express that -we have no way of knowing who the user is or we don't care. - - - - -Where a _right_ can be: - -1. **Create** – This ACL provides a user the ability to create buckets in a -volume and keys in a bucket. Please note: Under Ozone, Only admins can create volumes. -2. **List** – This ACL allows listing of buckets and keys. This ACL is attached - to the volume and buckets which allow listing of the child objects. Please note: The user and admins can list the volumes owned by the user. -3. **Delete** – Allows the user to delete a volume, bucket or key. -4. **Read** – Allows the user to read the metadata of a Volume and Bucket and -data stream and metadata of a key. -5. **Write** - Allows the user to write the metadata of a Volume and Bucket and -allows the user to overwrite an existing ozone key. -6. 
**Read_ACL** – Allows a user to read the ACL on a specific object.
7. **Write_ACL** – Allows a user to write the ACL on a specific object.

### Ozone Native ACL APIs

- -The ACLs can be manipulated by a set of APIs supported by Ozone. The APIs -supported are: - -1. **SetAcl** – This API will take user principal, the name, type -of the ozone object and a list of ACLs. -2. **GetAcl** – This API will take the name and type of the ozone object -and will return a list of ACLs. -3. **AddAcl** - This API will take the name, type of the ozone object, the -ACL, and add it to existing ACL entries of the ozone object. -4. **RemoveAcl** - This API will take the name, type of the -ozone object and the ACL that has to be removed. diff --git a/hadoop-hdds/docs/content/security/_index.md b/hadoop-hdds/docs/content/security/_index.md deleted file mode 100644 index 20967e3343b..00000000000 --- a/hadoop-hdds/docs/content/security/_index.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: Security -name: Security -identifier: SecureOzone -menu: main -weight: 5 ---- - - -{{}} - Ozone is an enterprise class, secure storage system. There are many - optional security features in Ozone. Following pages discuss how - you can leverage the security features of Ozone. -{{}} - - - -Depending on your needs, there are multiple optional steps in securing ozone. diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md deleted file mode 100644 index e81734924fb..00000000000 --- a/hadoop-hdds/docs/content/shell/BucketCommands.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -title: Bucket Commands -summary: Bucket commands help you to manage the life cycle of a volume. -weight: 3 ---- - - -Ozone shell supports the following bucket commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - -### Create - -The `bucket create` command allows users to create a bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -g, \-\-enforcegdpr | Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket. -| Uri | The name of the bucket in **/volume/bucket** format. - - -{{< highlight bash >}} -ozone sh bucket create /hive/jan -{{< /highlight >}} - -The above command will create a bucket called _jan_ in the _hive_ volume. -Since no scheme was specified this command defaults to O3 (RPC) protocol. - -### Delete - -The `bucket delete` command allows users to delete a bucket. If the -bucket is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket - -{{< highlight bash >}} -ozone sh bucket delete /hive/jan -{{< /highlight >}} - -The above command will delete _jan_ bucket if it is empty. - -### Info - -The `bucket info` commands returns the information about the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket. - -{{< highlight bash >}} -ozone sh bucket info /hive/jan -{{< /highlight >}} - -The above command will print out the information about _jan_ bucket. - -### List - -The `bucket list` command allows users to list the buckets in a volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 100 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. 
-| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh bucket list /hive -{{< /highlight >}} - -This command will list all buckets on the volume _hive_. diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md deleted file mode 100644 index 72174c9ae9a..00000000000 --- a/hadoop-hdds/docs/content/shell/Format.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Shell Overview -summary: Explains the command syntax used by shell command. -weight: 1 ---- - - -Ozone shell help can be invoked at _object_ level or at _action_ level. -For example: - -{{< highlight bash >}} -ozone sh volume --help -{{< /highlight >}} - -This will show all possible actions for volumes. - -or it can be invoked to explain a specific action like -{{< highlight bash >}} -ozone sh volume create --help -{{< /highlight >}} -This command will give you command line options of the create command. - -
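The same --help pattern works for the other objects as well; for example (these exact
invocations are not shown in the original page, but follow the syntax it describes):

{{< highlight bash >}}
ozone sh bucket --help
ozone sh key put --help
{{< /highlight >}}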

- - -### General Command Format - -The Ozone shell commands take the following format. - -> _ozone sh object action url_ - -**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is -invoked via ```sh``` command. - -The object can be a volume, bucket or a key. The action is various verbs like -create, list, delete etc. - - -Ozone URL can point to a volume, bucket or keys in the following format: - -_\[scheme\]\[server:port\]/volume/bucket/key_ - - -Where, - -1. **Scheme** - This should be `o3` which is the native RPC protocol to access - Ozone API. The usage of the schema is optional. - -2. **Server:Port** - This is the address of the Ozone Manager. If the port is -omitted the default port from ozone-site.xml will be used. - -Depending on the call, the volume/bucket/key names will be part of the URL. -Please see volume commands, bucket commands, and key commands section for more -detail. diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md deleted file mode 100644 index b4a38c8b1b5..00000000000 --- a/hadoop-hdds/docs/content/shell/KeyCommands.md +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: Key Commands -summary: Key commands help you to manage the life cycle of - Keys / Objects. -weight: 4 ---- - - - -Ozone shell supports the following key commands. - - * [get](#get) - * [put](#put) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [rename](#rename) - - -### Get - -The `key get` command downloads a key from Ozone cluster to local file system. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to download the key to. - - -{{< highlight bash >}} -ozone sh key get /hive/jan/sales.orc sales.orc -{{< /highlight >}} -Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the -local file sales.orc. - -### Put - -The `key put` command uploads a file from the local file system to the specified bucket. - -***Params:*** - - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key in **/volume/bucket/key** format. -| FileName | Local file to upload. -| -r, \-\-replication | Optional, Number of copies, ONE or THREE are the options. Picks up the default from cluster configuration. - -{{< highlight bash >}} -ozone sh key put /hive/jan/corrected-sales.orc sales.orc -{{< /highlight >}} -The above command will put the sales.orc as a new key into _/hive/jan/corrected-sales.orc_. - -### Delete - -The `key delete` command removes the key from the bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key delete /hive/jan/corrected-sales.orc -{{< /highlight >}} - -The above command deletes the key _/hive/jan/corrected-sales.orc_. - - -### Info - -The `key info` commands returns the information about the key. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the key. - -{{< highlight bash >}} -ozone sh key info /hive/jan/sales.orc -{{< /highlight >}} - -The above command will print out the information about _/hive/jan/sales.orc_ -key. - -### List - -The `key list` command allows user to list all keys in a bucket. 
- -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -l, \-\-length | Maximum number of results to return. Default: 1000 -| -p, \-\-prefix | Optional, Only buckets that match this prefix will be returned. -| -s, \-\-start | The listing will start from key after the start key. -| Uri | The name of the _volume_. - -{{< highlight bash >}} -ozone sh key list /hive/jan -{{< /highlight >}} - -This command will list all keys in the bucket _/hive/jan_. - -### Rename - -The `key rename` command changes the name of an existing key in the specified bucket. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the bucket in **/volume/bucket** format. -| FromKey | The existing key to be renamed -| ToKey | The new desired name of the key - -{{< highlight bash >}} -ozone sh key rename /hive/jan sales.orc new_name.orc -{{< /highlight >}} -The above command will rename _sales.orc_ to _new\_name.orc_ in the bucket _/hive/jan_. diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.md b/hadoop-hdds/docs/content/shell/VolumeCommands.md deleted file mode 100644 index 47fb9852b86..00000000000 --- a/hadoop-hdds/docs/content/shell/VolumeCommands.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: Volume Commands -weight: 2 -summary: Volume commands help you to manage the life cycle of a volume. ---- - - -Volume commands generally need administrator privileges. The ozone shell supports the following volume commands. - - * [create](#create) - * [delete](#delete) - * [info](#info) - * [list](#list) - * [update](#update) - -### Create - -The `volume create` command allows an administrator to create a volume and -assign it to a user. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Required, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume create --quota=1TB --user=bilbo /hive -{{< /highlight >}} - -The above command will create a volume called _hive_ on the ozone cluster. This -volume has a quota of 1TB, and the owner is _bilbo_. - -### Delete - -The `volume delete` command allows an administrator to delete a volume. If the -volume is not empty then this command will fail. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume delete /hive -{{< /highlight >}} - -The above command will delete the volume hive, if the volume has no buckets -inside it. - -### Info - -The `volume info` commands returns the information about the volume including -quota and owner information. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| Uri | The name of the volume. - -{{< highlight bash >}} -ozone sh volume info /hive -{{< /highlight >}} - -The above command will print out the information about hive volume. - -### List - -The `volume list` command will list the volumes owned by a user. 
- -{{< highlight bash >}} -ozone sh volume list --user hadoop -{{< /highlight >}} - -The above command will print out all the volumes owned by the user hadoop. - -### Update - -The volume update command allows changing of owner and quota on a given volume. - -***Params:*** - -| Arguments | Comment | -|--------------------------------|-----------------------------------------| -| -q, \-\-quota | Optional, This argument that specifies the maximum size this volume can use in the Ozone cluster. | -| -u, \-\-user | Optional, The name of the user who owns this volume. This user can create, buckets and keys on this volume. | -| Uri | The name of the volume. | - -{{< highlight bash >}} -ozone sh volume update --quota=10TB /hive -{{< /highlight >}} - -The above command updates the volume quota to 10TB. diff --git a/hadoop-hdds/docs/content/shell/_index.md b/hadoop-hdds/docs/content/shell/_index.md deleted file mode 100644 index 3cb1a9f6167..00000000000 --- a/hadoop-hdds/docs/content/shell/_index.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Command Line Interface -menu: - main: - weight: 3 ---- - - - -{{}} - Ozone shell is the primary interface to interact with Ozone. - It provides a command shell interface to work against Ozone. -{{}} diff --git a/hadoop-hdds/docs/content/start/FromSource.md b/hadoop-hdds/docs/content/start/FromSource.md deleted file mode 100644 index 1e920d97cfc..00000000000 --- a/hadoop-hdds/docs/content/start/FromSource.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: From Source -weight: 30 ---- - - -{{< requirements >}} - * Java 1.8 - * Maven - * Protoc (2.5) -{{< /requirements >}} - - - -If you are a Hadoop ninja, and wise in the ways of Apache, you already know -that a real Apache release is a source release. - -If you want to build from sources, Please untar the source tarball and run -the ozone build command. This instruction assumes that you have all the -dependencies to build Hadoop on your build machine. If you need instructions -on how to build Hadoop, please look at the Apache Hadoop Website. - -```bash -mvn -f pom.ozone.xml clean package -DskipTests=true -``` - -This will build an ozone-\.tar.gz in your `hadoop-ozone/dist/target` directory. - -You can copy this tarball and use this instead of binary artifacts that are -provided along with the official release. - -## How to test the build - -You can run the acceptance tests in the hadoop-ozone directory to make sure -that your build is functional. To launch the acceptance tests, please follow - the instructions in the **README.md** in the `smoketest` directory. - -```bash -cd smoketest -./test.sh -``` - - You can also execute only a minimal subset of the tests: - -```bash -cd smoketest -./test.sh --env ozone basic -``` - -Acceptance tests will start a small ozone cluster and verify that ozone shell and ozone file - system is fully functional. diff --git a/hadoop-hdds/docs/content/start/Kubernetes.md b/hadoop-hdds/docs/content/start/Kubernetes.md deleted file mode 100644 index ad855341aa0..00000000000 --- a/hadoop-hdds/docs/content/start/Kubernetes.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Ozone on Kubernetes -weight: 22 ---- - - - -{{< requirements >}} - * Working kubernetes cluster (LoadBalancer, PersistentVolume are not required) - * kubectl -{{< /requirements >}} - - -As the _apache/ozone_ docker images are available from the dockerhub the deployment process is very similar to Minikube deployment. 
The only big difference is that we have dedicated set of k8s files for hosted clusters (for example we can use one datanode per host) -Deploy to kubernetes - -`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. - -To deploy to a hosted cluster use the ozone subdirectory: - -``` -cd kubernetes/examples/ozone -kubectl apply -f . -``` - -And you can check the results with - -``` -kubectl get pod -Access the services -``` - -Now you can access any of the services. By default the services are not published but you can access them with port-foward rules. - -``` -kubectl port-forward s3g-0 9878:9878 -kubectl port-forward scm-0 9876:9876 -``` diff --git a/hadoop-hdds/docs/content/start/Minikube.md b/hadoop-hdds/docs/content/start/Minikube.md deleted file mode 100644 index ebb249d1337..00000000000 --- a/hadoop-hdds/docs/content/start/Minikube.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Minikube & Ozone -weight: 21 ---- - - - -{{< requirements >}} - * Working minikube setup - * kubectl -{{< /requirements >}} - -`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. By default the kubernetes resource files are configured to use `apache/ozone` image from the dockerhub. - -To deploy it to minikube use the minikube configuration set: - -``` -cd kubernetes/examples/minikube -kubectl apply -f . -``` - -And you can check the results with - -``` -kubectl get pod -``` - -Note: the kubernetes/examples/minikube resource set is optimized for minikube usage: - - * You can have multiple datanodes even if you have only one host (in a real production cluster usually you need one datanode per physical host) - * The services are published with node port - -## Access the services - -Now you can access any of the services. For each web endpoint an additional NodeType service is defined in the minikube k8s resource set. NodeType services are available via a generated port of any of the host nodes: - -```bash -kubectl get svc -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -datanode ClusterIP None 27s -kubernetes ClusterIP 10.96.0.1 443/TCP 118m -om ClusterIP None 9874/TCP 27s -om-public NodePort 10.108.48.148 9874:32649/TCP 27s -s3g ClusterIP None 9878/TCP 27s -s3g-public NodePort 10.97.133.137 9878:31880/TCP 27s -scm ClusterIP None 9876/TCP 27s -scm-public NodePort 10.105.231.28 9876:32171/TCP 27s -``` - -Minikube contains a convenience command to access any of the NodePort services: - -``` -minikube service s3g-public -Opening kubernetes service default/s3g-public in default browser... -``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/start/OnPrem.md b/hadoop-hdds/docs/content/start/OnPrem.md deleted file mode 100644 index 3bf40a6a767..00000000000 --- a/hadoop-hdds/docs/content/start/OnPrem.md +++ /dev/null @@ -1,187 +0,0 @@ ---- -title: Ozone On Premise Installation -weight: 20 - ---- - - -If you are feeling adventurous, you can setup ozone in a real cluster. -Setting up a real cluster requires us to understand the components of Ozone. -Ozone is designed to work concurrently with HDFS. However, Ozone is also -capable of running independently. The components of ozone are the same in both approaches. - -## Ozone Components - -1. Ozone Manager - Is the server that is in charge of the namespace of Ozone. Ozone Manager is responsible for all volume, bucket and key operations. -2. Storage Container Manager - Acts as the block manager. 
Ozone Manager -requests blocks from SCM, to which clients can write data. -3. Datanodes - Ozone data node code runs inside the HDFS datanode or in the independent deployment case runs an ozone datanode daemon. - -## Setting up an Ozone only cluster - -* Please untar the ozone-\ to the directory where you are going -to run Ozone from. We need Ozone jars on all machines in the cluster. So you -need to do this on all machines in the cluster. - -* Ozone relies on a configuration file called ```ozone-site.xml```. To -generate a template that you can replace with proper values, please run the -following command. This will generate a template called ```ozone-site.xml``` at -the specified path (directory). - -{{< highlight bash >}} -ozone genconf -{{< /highlight >}} - -Let us look at the settings inside the generated file (ozone-site.xml) and -how they control ozone. Once the right values are defined, this file -needs to be copied to ```ozone directory/etc/hadoop```. - - -* **ozone.enabled** This is the most critical setting for ozone. -Ozone is a work in progress and users have to enable this service explicitly. -By default, Ozone is disabled. Setting this flag to `true` enables ozone in the -HDFS or Ozone cluster. - -Here is an example, - -{{< highlight xml >}} - - ozone.enabled - true - -{{< /highlight >}} - -* **ozone.metadata.dirs** Allows Administrators to specify where the - metadata must reside. Usually you pick your fastest disk (SSD if - you have them on your nodes). OzoneManager, SCM and datanode will write the - metadata to this path. This is a required setting, if this is missing Ozone - will fail to come up. - - Here is an example, - -{{< highlight xml >}} - - ozone.metadata.dirs - /data/disk1/meta - -{{< /highlight >}} - -* **ozone.scm.names** Storage container manager(SCM) is a distributed block - service which is used by ozone. This property allows data nodes to discover - SCM's address. Data nodes send heartbeat to SCM. - Until HA feature is complete, we configure ozone.scm.names to be a - single machine. - - Here is an example, - - {{< highlight xml >}} - - ozone.scm.names - scm.hadoop.apache.org - - {{< /highlight >}} - - * **ozone.scm.datanode.id.dir** Data nodes generate a Unique ID called Datanode - ID. This identity is written to the file datanode.id in a directory specified by this path. *Data nodes - will create this path if it doesn't exist already.* - -Here is an example, -{{< highlight xml >}} - - ozone.scm.datanode.id.dir - /data/disk1/meta/node - -{{< /highlight >}} - -* **ozone.om.address** OM server address. This is used by OzoneClient and -Ozone File System. - -Here is an example, -{{< highlight xml >}} - - ozone.om.address - ozonemanager.hadoop.apache.org - -{{< /highlight >}} - - -## Ozone Settings Summary - -| Setting | Value | Comment | -|--------------------------------|------------------------------|------------------------------------------------------------------| -| ozone.enabled | true | This enables SCM and containers in HDFS cluster. | -| ozone.metadata.dirs | file path | The metadata will be stored here. | -| ozone.scm.names | SCM server name | Hostname:port or IP:port address of SCM. | -| ozone.scm.block.client.address | SCM server name and port | Used by services like OM | -| ozone.scm.client.address | SCM server name and port | Used by client-side | -| ozone.scm.datanode.address | SCM server name and port | Used by datanode to talk to SCM | -| ozone.om.address | OM server name | Used by Ozone handler and Ozone file system. 
| - - -## Startup the cluster - -Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone Manager. - -{{< highlight bash >}} -ozone scm --init -{{< /highlight >}} -This allows SCM to create the cluster Identity and initialize its state. -The ```init``` command is similar to Namenode format. Init command is executed only once, that allows SCM to create all the required on-disk structures to work correctly. -{{< highlight bash >}} -ozone --daemon start scm -{{< /highlight >}} - -Once we know SCM is up and running, we can create an Object Store for our use. This is done by running the following command. - -{{< highlight bash >}} -ozone om --init -{{< /highlight >}} - - -Once Ozone manager is initialized, we are ready to run the name service. - -{{< highlight bash >}} -ozone --daemon start om -{{< /highlight >}} - -At this point Ozone's name services, the Ozone manager, and the block service SCM is both running.\ -**Please note**: If SCM is not running -```om --init``` command will fail. SCM start will fail if on-disk data structures are missing. So please make sure you have done both ```scm --init``` and ```om --init``` commands. - -Now we need to start the data nodes. Please run the following command on each datanode. -{{< highlight bash >}} -ozone --daemon start datanode -{{< /highlight >}} - -At this point SCM, Ozone Manager and data nodes are up and running. - -***Congratulations!, You have set up a functional ozone cluster.*** - -## Shortcut - -If you want to make your life simpler, you can just run -{{< highlight bash >}} -ozone scm --init -ozone om --init -start-ozone.sh -{{< /highlight >}} - -This assumes that you have set up the slaves file correctly and ssh -configuration that allows ssh-ing to all data nodes. This is the same as the -HDFS configuration, so please refer to HDFS documentation on how to set this -up. diff --git a/hadoop-hdds/docs/content/start/RunningViaDocker.md b/hadoop-hdds/docs/content/start/RunningViaDocker.md deleted file mode 100644 index 9e1e3611225..00000000000 --- a/hadoop-hdds/docs/content/start/RunningViaDocker.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Pseudo-cluster -weight: 23 - ---- - - -{{< requirements >}} - * docker and docker-compose -{{< /requirements >}} - -* Download the Ozone binary tarball and untar it. - -* Go to the directory where the docker compose files exist and tell -`docker-compose` to start Ozone in the background. This will start a small -ozone instance on your machine. - -{{< highlight bash >}} -cd compose/ozone/ - -docker-compose up -d -{{< /highlight >}} - -To verify that ozone is working as expected, let us log into a data node and -run _freon_, the load generator for Ozone. The ```exec datanode bash``` command -will open a bash shell on the datanode. - -The `ozone freon` command is executed within the datanode container. You can quit freon via CTRL-C any time. The -```rk``` profile instructs freon to generate random keys. - -{{< highlight bash >}} -docker-compose exec datanode bash -ozone freon rk -{{< /highlight >}} - -You can check out the **OzoneManager UI** at http://localhost:9874/ to see the -activity generated by freon. -While you are there, please don't forget to check out the ozone configuration explorer. 
- -***Congratulations, You have just run your first ozone cluster.*** - -To shutdown the cluster, please run -{{< highlight bash >}} -docker-compose down -{{< /highlight >}} - diff --git a/hadoop-hdds/docs/content/start/StartFromDockerHub.md b/hadoop-hdds/docs/content/start/StartFromDockerHub.md deleted file mode 100644 index e3e7d41cce6..00000000000 --- a/hadoop-hdds/docs/content/start/StartFromDockerHub.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Simple Single Ozone -weight: 10 - ---- - - -{{< requirements >}} - * Working docker setup - * AWS CLI (optional) -{{< /requirements >}} - -# Ozone in a Single Container - -The easiest way to start up an all-in-one ozone container is to use the latest -docker image from docker hub: - -```bash -docker run -p 9878:9878 -p 9876:9876 apache/ozone -``` -This command will pull down the ozone image from docker hub and start all -ozone services in a single container.
-This container will run the required metadata servers (Ozone Manager, Storage -Container Manager) one data node and the S3 compatible REST server -(S3 Gateway). - -# Local multi-container cluster - -If you would like to use a more realistic pseudo-cluster where each components -run in own containers, you can start it with a docker-compose file. - -We have shipped a docker-compose and an enviorment file as part of the -container image that is uploaded to docker hub. - -The following commands can be used to extract these files from the image in the docker hub. -```bash -docker run apache/ozone cat docker-compose.yaml > docker-compose.yaml -docker run apache/ozone cat docker-config > docker-config -``` - - Now you can start the cluster with docker-compose: - -```bash -docker-compose up -d -``` - -If you need multiple datanodes, we can just scale it up: - -```bash - docker-compose scale datanode=3 - ``` -# Running S3 Clients - -Once the cluster is booted up and ready, you can verify its status by -connecting to the SCM's UI at [http://localhost:9876](http://localhost:9876). - -The S3 gateway endpoint will be exposed at port 9878. You can use Ozone's S3 -support as if you are working against the real S3. - - -Here is how you create buckets from command line: - -```bash -aws s3api --endpoint http://localhost:9878/ create-bucket --bucket=bucket1 -``` - -Only notable difference in the above command line is the fact that you have -to tell the _endpoint_ address to the aws s3api command. - -Now let us put a simple file into the S3 Bucket hosted by Ozone. We will -start by creating a temporary file that we can upload to Ozone via S3 support. -```bash -ls -1 > /tmp/testfile - ``` - This command creates a temporary file that - we can upload to Ozone. The next command actually uploads to Ozone's S3 - bucket using the standard aws s3 command line interface. - -```bash -aws s3 --endpoint http://localhost:9878 cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://bucket1/testfile -``` - -We can now verify that file got uploaded by running the list command against -our bucket. - -```bash -aws s3 --endpoint http://localhost:9878 ls s3://bucket1/testfile -``` - - -http://localhost:9878/bucket1?browser diff --git a/hadoop-hdds/docs/content/start/_index.md b/hadoop-hdds/docs/content/start/_index.md deleted file mode 100644 index 5529661b0a2..00000000000 --- a/hadoop-hdds/docs/content/start/_index.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Getting Started -name: Getting Started -identifier: Starting -menu: main -weight: 1 -cards: "false" ---- - - - -{{}} -There are many ways to install and run Ozone. Starting from simple docker -deployments on -local nodes, to full scale multi-node cluster deployment on -Kubernetes or bare-metal. -{{}} - -
**Easy Start**

### Running Ozone from Docker Hub

- -You can try out Ozone using docker hub without downloading the official release. This makes it easy to explore Ozone. -
- {{}} - The simplest and easiest way to start an ozone cluster - to explore what it can do is to start ozone via docker. - {{}} - -
- -
**Recommended**

### Running Ozone from an Official Release

- - Apache Ozone can also be run from the official release packages. Along with the official source releases, we also release a set of convenience binary packages. It is easy to run these binaries in different configurations. -
- {{}} -Ozone is designed to work concurrently with HDFS. The physical cluster instructions explain each component of Ozone and how to deploy with maximum control. - {{}} - - {{}} -Ozone is designed to work well under Kubernetes. These are instructions to deploy Ozone on K8s. Ozone provides a replicated storage solution for K8s based apps. - {{}} - - {{}} -Ozone comes with a standard set of K8s resources. You can deploy them to MiniKube and experiment with the K8s based deployments. - {{}} - - {{}} - We also ship standard docker files with official release. These are part of official release and not depend upon Docker Hub. - {{}} - -
- -
**Hadoop Ninja**

### Building From Sources

- - Instructions to build Ozone from source to create deployment packages. - - {{}} -If you are a Hadoop ninja, and wise in the ways of Apache, you already know that a real Apache release is a source release. We believe that even ninjas need help at times. - {{}} - -
diff --git a/hadoop-hdds/docs/content/start/docker.png b/hadoop-hdds/docs/content/start/docker.png deleted file mode 100644 index 048730b23d0c39799f42de9d38446f5d870a81aa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7002 zcmZ`;Wl$8}*Iv3?X^8~|$z95&LAq;!73rnByGt5LB?JUPKq=|&2I*#9Lh0^Y{`@|^ zAKp20?lb4P^UR!?dq13r(NtF;!l%Ut002Zvin7{IDD~umxY$qUNz}p5C&09pR+9z* zs*?!r&9I(gCb*)u8UWz;3IGU+003^EsE};{z!L-j?3w}qBA)>ODwmvQEwQHyY;zR_ zS-|6emf!Y0^@+iAQGDkP01#08=V*Ye9O@?$$3sa?4rc?Kj+_P@$^=OS0BDtzWTl}t ze-5&J(~NY}A0_Wrypi{Iszn^SrK5Pv%%M_DQJ54UsSp4VQ(DVG>RtH)F~aV#`Ng#- zT=c67-Th=aHxV%tW-oC#5Fd>YGet^qQ0i^eFw}kahu_(Y->8QZ4ra5DUnhA%KA(|YR% znI28kwcMsh`N3K)$1A2yJg|PkPWaWc>k|xv{&WvVY&czJ570@Ybv=s$PCAbNG5+0X z4|$Xs{`mOF07zF(0e``v77E47S~3qB>5WEvCOzy6T%H%{5%umYbN5h~S__H{Mermm^ zL?kh!Y&{|tsf`KHKyd^D0=T~f7zLpNg} zYL>1X_S1+dJ%v3VCIw0dqR-UNO;NlcLeV8r#q}w&>K(s%{1vWnn7I(Y!_#57Me=-v zhW@W(=y3%A7tdi#yEbLy_evIilh*81`vyna8_>eF35&ib-d`5Dl8BLlDU za6Uz9=&q$8&Y3KH4cM6 zBN@QInccGaw_FV*uB4-!?bRc9ZeOMkvB}?S!NJC^zHSE?ymD@nbiYxzBt7Xz@4I*6 zD3eDlVBW*8O}Pz&A6hUv=SHEYMt6!N`@M)ebdIRenbb*Wxh@%$a5(yrQQCteEIL2l zkR^{qT&5~D8)>ibu!^&ird;@sIqbw8QeM~mvW#OZ$6W~FEw0EnIB$Ef?P}IcEX&T& z09j{PK1H!I5YN?>0x%6~~mpJ1!C|*_|pgK#ccI|FX<4_&20C!Vh>G6)<%Jw*b^cU}InPpq1)JV;iTWm^xEJ^-@K8L3JP& zK6MwK*uE51x({v1;f=aeNCZc+EUlKDoJ%Wtq$~fQy;-5$D2~{Y_w-(TEkX!BA>msZ zS&u*Z9m5nF4&CLIGC0RYDZJ!9tNnn!N=A4Xv7p;8!Q|!y^zg$ZW#r!`$l~#`kPz`O@ve_l-Yg0fjw}WAl$0N z`aA!OcZ%8^uq@3kd&eN4uh*^E)tt06Y+X-zTK>S-OnSLk!MeBLlI4ppZ;Q<&kilm) zbl0N&WgJC}zJzGl_8{gm8IV9Mr*j~J*_@mFy3+9JldfZ;y~1_r2Eb*iDDlL4C0VHw zNlTI_Zf8*ru(9X5p+y0K7x2;CSw-aEcfDD5%EcfLll6a|zIbBjbW<-}X{g0R&BYR^ zTu)XYL0n2dK`_#eR&BSniT>hc#UgFzFBloTChEoI35gY4Y1quixN*Y0t}-u}D|;;1 zDnDk^)2zKv3i%K-P9KD=FjC6!(wXsa$%%^lfQ1icdhtb)h5r$K!}D^6Oy%=-E|IUW zm2orgv@E#x!+@X=Q~H6`X#H$cztK2?HR#wqc%Qnh>1X@jayz`P7P=Gy3po= zT8ZRJ#(*?m687AD;q#LRy(DB$Tyr@o@ECTJ~Ya5)=$gc{o(F$(I1O@6-eb!^ufovxT(*NYq~^C+s^OSTnDP)S zp(<@~N+9ZYut4>6W$Qx`zl4`u(7BI6s;zmVj?u6IVm9eeJ!w~`FTP5AcH+wOA8{Wc z3#Pklp2B5?Hc_>VF189*R4ab<6lj|9@uq$h$M#PF+lKu1>15%z-lWocUU zVKClzkjKKK33&IpdZ)ok`vdE|xZ<~tGf&gX;TU~jwe9oj7ki`~ZkSB;_}RoAb`SVn zxl||E&7@@V2g`P;4|l;ktiMDNLuaIw%+K>c7u_({K)JIaf+9}Unxkv?YR}}JyGMJy z&M8jGKGQ4d0a@VKx=Tv*8J@0x6xd39u#8k)60#Q``49Rc;f5ha+&}u9fvLo~JKG1{ zY+BYKz~W9P?)5)cY!cF)#hB&A&>=aI`*fswUfJKz`*nqQn(9 zYxyLbjVv)ML~WkY>ouO3YCVDVQC#n(V^7j|_V`?>VsE4=*K%NC?BW6BH#OL*qVCco zKjgIXD-kN+&g%i_S$mE*#uD<>ATZ>{`on8o7SC_1mX9Wb~TkzZe z@XON?T|%~Swb3-(?u`?A4huw(kcnMpJDj5YS8LR4?Sfh-b8Z#$db%djj|XN_zuA8b zF*3b@MS82gnxit?p{`&di}S9rbc>2BO3qDRCi8gdcrE*Ky{)%ebzeBFbShu$2Hc=N zTqpvF?E~f;Dz{W5@8#B(^KOr8j&61zedl)mBwUE+*wFi3u15Sb9(R?Tm=$>aH8EU? zE$Qv)tVB63b~JkgI&Dvj;?h+I+Q2NFU9VJYiN>9p9OC(-! 
z0x>JnM&zd2R%BjkRou=%|K1_})Q}(0AQq!E9nI;=MQ>TjyPkKs(w}k6B^B(Q6U4BZu09f0RQ4;S|VgCDQzV__PrU9y~LDZ*7lY9fDf4 zC?p0Ui9@sJ8v?3g7etv-HQ8k6)y@11`=(p{=Z$|;=2e%nGy15w4SGLs2~AbIWXgzL zqbh9UD)yYRqN&4`UUs$$eSXHo9Ccv!^ zS?f1==im3+-8-+~so?dY=k`&%jhn+o4h#EjzORPuNOTAVAe z*U{N~9T_7N&gJVsr#5W=;OZ|+G-D2nr`g^XVy%51jpf|qi4P=IZdU=6|Mo~SrcZc% z`pgepr@wQHd2Lp+R41*<16e-xyi5N$i*W~=*6&Wl^eNFxAeO;(tRuXPYSr}2wDA3$ z@!K(73n3ar$4&I8qjGe()w(9+rB-g%9p?Je_+ii6L97*vDx{3=VvigXE%!*L!UsY-!SLKz0@5p0p}Kuna-{po8>aTb#QR;m zMaqy{V#eW%`oN=Gsy%=2pdP$dy7iNpiq(Fc#hdjnvUbxy{g21ua*v)GKOcs(#B4cc zLh955E2il4cYBpfZT8+12n7F2hNSQOf;5Kn<;B(bL?^fu{NPK=k@JlEWd5*1>l@8s zE-9|#lCdjrvHg3?{dS^k1yAP=VLj@5<)E|B56i6De#hP&>;9j(I(KL+HdvDG8xj~N zR}bo5s$_r`76xyH&proUDW_0mBi1$7#65{F7_$X@Pg86MRPF5<=MQJ*=PBX_Tv^D7 zH^mQ^!JTj%H^N)yu)b>MSLZQ)FH?$`QzApnMdtlM#`LhgjMI+ibU$ggM-P8wOXz;E z${p)ODSReX(=3I;9t#`BFCQHNl)T2OGCh7GqDY0{j)BWho71;-kh2zse|;6?9xY%t zr{b3C>+a&>ejT06(P~dm&je3F!66?p%09^H+xe=)-<0TV=bU7fP>tvX4T)P;VE66C9#XIeIf;MmMy`jrnGd)M6VB;o zgSdM|9WGtfoDAjt+8)sdH>xdqAjfUQy_~pGVrQ*VhO7n@+c;b$Yg1z76hk+BUxi*00Z1ZRZHyk5JX~Ip2z%#yrbr3_Le_bL)sv<#S@5*mhg9a+-YQ zzWYmsZ*FP$<$TcOT?+wHhD%R|LomElXOCuf`}kglJFQ9coMq)EeHH zl}BSls(hrTc$uBsmw~X%s3T}07lkb$1~+a{iZj#O^*D@N#M;+2v_U0pFUcJo9g7M( zI(zXp-Y3xAgi_O7oK$o-Kfy)gtBi9`9nt$Aa1+OPlDNEMc0N_Pf7+|ILC`CZtlx9{6QZS?HNwSTPX zCRL)|NLi0%Ns;ENm$zgI@;b0(kGr`B@xC`$h6S3dc#gE7{Mu_R@UGOy;iC9rXQRQo zaU4J{!>W+-amfVwhRztEyY4|`=h%>;(-F;$7)p3+!X0&UzH%*idMDBI8B7OpeK=e3 z{)ZS81Z%I1xDxcID5K2mTUoz9Vs`eN7V~u@D>c>-L3Nuv zw}GV-y87fS3ixR#LV!!;i^*J~H|}Wr-V|=YBCWn>5~0mTG%)qa9m1hb!(Z#6=se3ypAxIW^K*(ybV_ z?V~-p-Rwo-zhvc(h4qhELnKr%QOy~nq!kl!7J6Z}G?|(|$oY}j0X^5Q?;V6K4KpP% zIYc>M7p%H0$a1AT^P=w!%Te`>)d92T(@S+M%juMCzc3*><@M2^_(6j@uOi2+cFFdg zAO_@x+Gb8micdW+cegOEwXS6m6EL`T?hggAwLhY{-??*KAmP>Jdi%-x=Qu5+lCsxeE79W+OcaG6w_{NPZ>vAq$L0DjP%xEX=`u%w+tydQ{lZBKN;%g0WgElUl=T`1EG&;NedWRlmh zJvI4i9hF*Uj27)WnqO9|C1D=_j;C!|LK5VNI%89B%v>;Sm5XmeOCAaBgcO_T+LJX6 zLeyW;s|RH_%j{`^hX(JSp@>Sb7`E+R+-V1lR9wZv+lxT@ve> zbb{5qLjErwv@oPXe6PSqU`R5=m`JQlFF1ce95{t*Sj53dxe>sUy7ySZJMt7{)WP_ zZo&$JS^Cdeg9A3a8(nT-y#b7$aaJnk{^0Fjlp}N`(?ny>2&F#g_w+XN+2v4>WnkEA zJ=Cg+beyqNm!H_2;>A4(9FHVlIk2vc>NTpaVM$l&xH9}w_lWbkRI0nI$(PhQ_gKw| zcH?rP8J5?5h~LOSX>L6C%HOnoZ2J`KY-_ci74aIaKuY^#2aW+&#eBd?1{;GP2zt%r z(prWBbezuGh&>ptShp1Hg&H%(@>yQ*^Anl#pi=@Q@n(XA7}Uh{`WKtp#a^{KpYc~> zG4YE@3Gw4cETYp)h)&ZkBC^j)xi6>5G8FslqGp%!fZCdxX*=wNA^vr~>xuObyyV;$ z?gyJVOexgpP8h>LJ`>qZKtg9_?oxyK9sBd<^@N}B&!b09Yt@Mgi9bgnQ2y<`lejmL z3Gn--5s8mv$&peWxSW34g2qc%Ck1@VrYMRvF{pmYuJ^E$vgQ{O!XI^#YRyCD-}S$# zDF?Vf8Lf@{cC;kZK!A-Ux}mErX(bMrt~e}_&N%4UnBlIcN0sh(%8xX%WNGPG_JWVH zRX{h=$}pYm&7lvBn*%Bpz2{ejdN`WgEGgL9Atg(PzJgm5{9CNV5Z}#Bdmq2Km|6#< z&&*HueSAlsybNGaKt2T-Tfs-^x`?_(9ZHE2)9ntbHNA*w%qA=%$tIhVu21mDT4^Ob zJ}wZdW!?a-ttrRA`BPf)UuA0X;QI1LgC#Wg|)0&6J5{Vn!M9y39j@iEEfq#rY5 z=VBErrQYaPSy^8?3wpDSqnqv*zs3YpxsIyJ+UeF&SxS;dgKJ7vWW9p_d7q1)(e3It zJ#)Ip_V!?Sv5!md+`D4R?^FYq$&pgY_b0WL8XZL>F?7(c3F#)HIWg5fg1()!JEIPo zY_{V~qYDfW8W#!nTG*XeSEd|-u)1+TOGpy&d>xNt%iTNc$x}|>ZPAWz4mxZhLD=jp zVrKC_DHgB|b5-Vyk0pd;DT7w=hrc`aE4{Qr&m%k5GK$=->9kta)(28G7=!wa+)n$> z>F3$bezyqlKZeB>&UQ!Rul|?{9W27Wkq4*S(F#rSm2oPZKU8#*pi#|^N0=RB#K~u= z4Yg_$Ncagqlx_kn_-HEbADn&G`ke_y>Iyl4h9QI66yEP;I;Mo2W``|-r@9|Eq=Lhi z^29t(t#6k}#NJCc0{-T7x-L0V*HU7K7vY+qZAK54%l}nCHD;k%H)@kZOw4R$M_G9Z zmsp5S+~Zk-oqMU@4~1neFbSpi#lL3#ncW2Tq}1ZV8d^B~jMQ7X03;#g$S~At1>U2uHpCZa?C(r4b`1 zH!=SDzK(i;rmLEuu?rierha$6zEAWRD&wP2@(TS_SXv^Hj9(kIht?iBd-VM5U%)TU z7`*V7w`}AX61>M+@i&~Ml)c!b)#2&i=9sX^bZTiGbVt4?tXXY9j`YEthOUeiup>_F zp)kY5WVy~eOpfUL$=Ec{{3vChb>VtinFY6m6Q(`#T6Wz<$mg(W>vTe9I@7~7H{&=b|6)>%%X;3r$u}V$lS?oQoBKA?>Sz!G=X3IcqWijP{!Xa^|(qof|Z?^ 
zKrzs&kgTwrlsWsR*7{A;R|);XNjPa{cnaB97Q%3A4MQ)jqj9Hu{HB^>w_Vh&ulcuX z?bqU`xz4DT$CiI_3+;h-dDwu}DHYy6WL>5bvNhbZ0U1V2^;(`tX&ktNS=v823JYf2 zTiFfvk9S2s9IdNrES3r|Q1o_YIZ0XVE&SH6zS@Ay8%jVfDvKa`C-{><7m`Q+{6Cox z^`_Ry-^2;h)A|Sfdwq4%9Grg)JyYcqBD|8ygzHppjuFEKd3Cy^+02K zX69*0J^_Fa1QzB7332nifr23-e1ali pAx;oT1O%#w5TyMdfupm9t(EWpFBq__VSf?;l;qT9E8d!h{SSJyg$e)w diff --git a/hadoop-hdds/docs/content/start/hadoop.png b/hadoop-hdds/docs/content/start/hadoop.png deleted file mode 100644 index 183867ca0e96e33e9674340416c9d1c110be41d8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4923 zcmZ`-XEYpKv>l8Z#wgJ{(aD5~9(DBIq9hqTjF!=R)F4D}2@x%NqKD`NQAY2+VYDC^ zT`(Esc|YHex9&N2ue;CM|L)o6uKU)=K$D7sg#rKoP-$zyOzu$mE+J$jcYBd;DCZpj zomKQz0D$_Gd)IcvcV{jKEfakJAebKjh=>6I{@zg$zX1S02mr8W3joNy2LKq6AKQ!- z?g}LKx|%S+zyDo@o#h#K3^`KE$_D_rNB6%;0LcBwbVri@k0WTw0^bxQ$I8#;I?)llW$t8KQ-XYlq!TgZ+?Q`xJ?igjV$oN4T%@m$&arMkh@NuB! zE)pWB#95|~x5%3d5dvxuGR403v9=yL+^^*nfWOA&6xVnrf;WP=VVvACf4{B2cu^1< z^fpKU(;?RhFEoy)&B0c+}R;r$Kcv!o#gg&q5O;ou6=?N zaRH0VNKO_V;=95sy=A`J7$Yxsee8k}IcVez^zGoL==1NkNH@2&kmj$WBBEG%*9$DV z?hDt{Z#&8o(2D>9za9|RRJF5w{$x3|B?E?t^gZ}(DksQiXsg)Rn2RH4U-TY5aj0U@ z2mTnW4pc5T&#Yk! zyJc2~u&4v0J9XdE2{`IFIv&44NC-B8eoC z?P4)tKsL(hk1@T5b@ro&^75U#!vgB+?`@aKl4s>U{)?zuiyM znnYX)ZK6;#M_q|7?pSt!DpxX_%CaBhkA0`FGupz1_)rDEKB|7y2)NpUHz9${ib3+` zHW8{IB645(k-nJb%E^Kr>hc51X|A+9WaA#g^24ZJ%^2{m{feCWviCjv zX}(K|_U&Qy%E-#-K7Z=}RdZY0oOG~>Qrz7%u zA52F2s8jlPuOH9NeZ?+v3yQ|P3cL3+iKx39fBV9Q+To|NvGLJ)0*qeD9b(zFT-c=; zgUUi~ZPnXO^Nq5;f>qbXMG|ofkk+)4z(bN~#>VL3U;q)x6*-UxD%HHx_Iy5!BV`{` zq(%zLmDm#IH{{T?vr~fVJKtXKFiALar9AN$;Fvg5z8`Y2NY4-jhIsQiI{nduU%wt2 zS3GN{6DX_!dHZlnp1BWJ1S&WPiLsB(7}j!$i47I2%iu5G)OJM?5UuT1tIXIQ+YSwL zpZ(I-D4>cUjOSM4dO+-d2qSe@2AG-6mh8J$BK8a!7%uW4Zu7ne$@2Uec_QDm`DjJ0 zPQ$U7u!t>GxPigA!5U|b>Z2$XLGf&jo$ZkkN^s(^>ml-EW&DIdZ~DZW1se;?R+FNy zV!d>RJ^Iqx4(awxQR{tqbzeH~bp8kF#@T5Eyc){RNhH4lZvNV_%k;$05DXDyE;Uw$ z-S{$b7Wb%nC%SCmz*?)h4Ge}dRy3k3>7Q`6I<#vc$G1s;40So-e8+Kn6Q zqhxm_aEpM9jYXaNGC{RiLfRw*3{`dSQL^8Si&(F{vod{(vz=#6%`8nVq*l$N`~sSo zjL@gh+_lLf?__rQcMGh2%|%5j!ME;S?RGr?v+|5y;Z)Lhr2GWEIq+hrWJZpJBre}> zjTsW*0?qAdc=yc2)bvy9CFU9FlYiF}f4Ml|B$i5d5qVBblWzl|0$)rox4BgX~f;og*ywut@W&u~cz zbnM+XFfp~;Gkde=niNgi{cFj&?;SI5x&l#ztx?SxuCSH)u#5H1y_cKh*q5825dCRC zE&!XpVfoy_HnkFtd;o{53x3?cNl9`_rG{VfTSyYll|E85G3`{QJC-_@E=-qR1->X4 zdUC0E5W4K9W$i{sF^s&xwLR4!vs&h%(YBgkKoNE;!Su&$n4}GijKBwat&#;(-AZN_ zC<^L^q3tcdn{k-xyi5yg3W$wVm<<%0#4+~XttRpk6aBDjO_{5wJho_M`_dT$eT518 zwS|kMSd-;*rkmhAs&USTGq$i?hd+488w_C>B>KwDdjZR5{ki)MefJvthf z7m%-QbCp5TTM}Y{p9L^s3qAYn16L=!X+kFp97Dry8Of~7qguVVnhgt1lR=mkXDvTdT5&FTs zyx`;Sn5BylUwcf}9JrzqNYkT*Zt+?l9b}HA5BgUGqrS{W`7y+^OJzK)0Hwh zp{p9TNL`_YT?&xc$Be-YBpQq0k;HW=kl_DM|i`D&cL#lEkCU?bZMYJ^rPc|i>4+qAR?LIDP7&4->-?WsGsLxbf z{q?)SM|+qyd$4j^JT1O^v$&(Ti<{0}^%_~M=Au8sBcS@oo&2bVhOyAUyZnX3sph!j zbsv$}7WTOfQPNPYiRYegVUBGFXiZUi`@sd6%c>%vYHuGmm2iKVSV#Ny>yiGA4Hbsa zh*5g{M^d<7)>xfgoMl~MtkO+obQ82!{zxl*`UR`p3p4X=Hy-XRUG3HmKvN5%fn@Z~7D z1IPcxnwU=DLNTYrSUaWzj%zJfU7RLll^zN9DgyLxHcWc{2bA}p%+#v;aa4Z8_L!Gu z5=x{@qbjHhC7?QQ3dzo;DA5>}lJfuZMW8e5rv`I&*x!$&2f7hup1VqmfQ>) ztXx4q?9L>iXDlF8HRc1vD^SGPp}k1{kDaIZH+!-s8*1%_S6a~A8j_z925u`!d~ zZ*Haa7B*s87r}-!?`O?|Y&2q@w<9t>?!{8)tf(0Cxy;Q;l;`Dbb>=tfHdVfLeKDw~ z-K)h{Coe|^<1r(SzV3rqvVt1XyZuki$&Fe7vvZGWDAe&y-~0l*uZrK+JKXdPa}Wp$ zM!dOa*GuT)8pV07+|Xe}=8uh7u*t#XN$CgA?`CoYv^xG`BC+wg!McAxlv}D{5W5>Y zfg9xHmQJPTt`f^CxWdxX z6%%t$zxHUMc7OBJK2ABvyR)O26i(`HJiEdf<#Ip)l#eOX9eEvS#bs(*Ds5`|fb4~b zes$HmnCnooxcb$=Xx_QG^N_y?Hc;HMsb0fqrKufS%Ee==^MmBlpB*LMP(^ZoT~y`# ze=9>_8s4lV{r0~>q|{}lC%>>uxv!2e1fp=P{bXV14l$PyJNtCV+}w0~7=GmW0h`Li 
zB6wHlhutt=3R@)SKc=ecpy39W)>ECs0(r)nt@KPOhK!8;>e>bo`VH}xyl5(_&ZlRh ze0=m}PA8ik3b_Sg493Qh?lP}q*69rVGId|#v;6<2csX`0?^5vJ1spM8}{=%J2Mhq8|s9UAMh2z_}n~%yl=%> zN|h)~dX>^`UfIKAf;tgaSiC_GP#t=n1anl83uOYa?=eRxE7z>H7#wlJmReE95VCJnFSx(U6NA8KT6fN?!dvuy`= zA`^O51mqUbBxFrN&CkpN@s}uP|3QW~smhO~`vy7t4q8x`0knK^RrbEpJ2vzHF0{F) zc#?qO>7C`>9-IsghpZ8NU`Zfcy{U2cTE)XJ2hG#S+*d)Y(%w9h8SkC!1S!fqHmTki zb->@tTd@3RPT$l#vVU{wlqV{jPUC2`=36X4ThToBAWJgT>e;<|ro8{qxTCph*^5cF zx}e5$cu*^_Ql7ehGd&={8@EM8r6(7h`R{s9WW?c7=218&CXnc`t$H7i{_mI7>mb?C z^E99WPQ09s4fMP@X}>|e2az5>h-)l1`sO2$l9_Yn(arfPtn+iNbHPF+w5{G4BrRaT zrI+E_T|hK9r#CueJG(J}M0>16FHG|!I_M?>5m?LN{fv_ORca3EB|pD+BwgQ^0c-vl z;HYlW=um#3Lv9|kGt;P$IR6p$Nmt;f$nPm!%Dgb-@durH8nzx{BD!8dYCSDG(= zY@F37A8mGD6}fihWg_L--|5LdF7|8^#GZ~(@YOoR&d%1YG+VJJ(dzR#I*t}h42wN; zss8HJaMxVGo{IayZKS54+J@ohCF3@aCCsx3s&tBqQ?)1l1^kwvd|CRrZfR$e2(`he z93EE^Kq{D{d&{ZIy_;`TosGErNP6I_Zt3f2=j$Zr;O%q=013z=Suu!|n1qy>1Vj#c q7g8b+h#UkGXy7pUe*_+{91$;r{(k{E9oqd)aCZd^U^S|?QU3#Y;73RR diff --git a/hadoop-hdds/docs/content/start/k8s.png b/hadoop-hdds/docs/content/start/k8s.png deleted file mode 100644 index 5fa2e9a90a547d1904652e4ad7afa2b615ef916c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6270 zcmV-^7=h=BP)004R>004l5008;`004mK004C`008P>0026e000+ooVrmw00006 zVoOIv0RI600RN!9r;`8x010qNS#tmY3ljhU3ljkVnw%H_000McNliru;|3cN9TnM6 z8Cw7V7i~#IK~!ko#hZDQRn?vEKYO2Z?zuzVTUAt11r(DqNJ#-v5D=7*7)4QowlNy* z^m`eMub1icis`&gUN6%;`X!xC>w6~Md08girrQ^jXyO=$PA~+CqKJw?MG%mo7z%2l z>dyC^vw!c8bL-X&Rbp2E);eq5dxm}X_xJnl-?$I_kw`=a9Y%z_=V3$|MdULgvPDFS z<>Pq~`LD*91`)v+8TR}ii_`}VdMFi(phSp7)g<6@;1)lS<%duB)q9Wm3I%hf*fPDr z?y)WPGVoJiBhc12o>GISIrRU3Is--mE&@uSMVO`(R{=MdJh|wdRxke4D>*j4StOkf z07WXPxqM-iYc7g0Z<>>fIO-MPA>bLHRRlo|pX>+0i1$6h8PrmXFjFDxQ0k^aNfy0z zuoQh_TaL|d6*-j-u(d*$-);naBh)4>R=2o(d|8~9nUO*?qTT==65&auI9MuSB9T#m zyiaDlucwBB5C1EaLIZPv>w!<^3TFO}{iVnwFX!00yGSl)aI`|HP(ua-2+w&&h{rW6 zn_d27d7KMpMM^POzYRQW43DAMs}x3r3IorE3FCTVDDYekVlhw(2WSRv0B%U<%y}=r zTeP3pmgnU?MG6H&q%83>nKMPG5hWwUqMGINT&`OdW9b~%Psa5g;F0q2jtE{R1Igt3 zXT+H9JdL_XfrY>)fpw?S!K@c|7uCZr=h(5|!!H@)R``nX+VQ@LOFl$w;=qN#&A>GsJ;Ai!zg5t`cqPxy z110dmwZg$q2YjFnINbyVCNuk?W5lx~ zfe(zJm8wFqOohO&I@uje*|f8up4gV-?N$#AM67VeXOg%G_ah{>g^Lsu?ct( zctM~C0u>DT`H^*m2)&vWQxfJF#=yzAQwgoX2>{LAgN zh`{IX>*CElB^+yf`}(91O6^wll7kShw=qL41i2NIK;a&Q#WknJ&i z_vti`Zp|tY)&Q4`O&9Z(gXjAs9h*a)ICr#4UwALWo-y0G`0qdW(zCb6I{~ggKKP>QWZ*sKPEs ziJ$ve6*pg!;ABt0yT?2>?=JG%o)Ycd{-`0e0(^m70Jaih)^Ijx1-*IUz;U0&v)$P! 
zCZ`R1le*s!j${PS4GfdsYV~js%n1Ay4cA>1r?J-J@$GrKIs+U%c#;JJP4yO^xH!(5 z3t}|YTUge}vT__njiabZYUWOLSaV^F!zX>7+Meg}SMqe63WyBQRRwqk>Lyy;wlq## zm(SC$=25}maSdQ2yxr)+~|8{{$*+%#QOsu#0U$<1Ve6GvU4xg>>mgw#Z2H6i0u$5xR z5sysH)KpjL7SB@~9S2jc2vi_!OCQPtM zmjvx7zIb(#^*2>Bx2Yd|CU1E4-6B7DrkB6|Q5UbhQ>={n>N`cg_M>j@dp6DM`- z1VRARC`I$0V?LRz!5+oB_(kDckM^*u)#H{`2`+7ka&MJp%gzGR8*Hw+C{|hD#gg#C zt^$v2&9QBNiBeH0d2sd8SY^CS!SL+PJe%Ih^RJsU7tfAx!^#9Jnq3@QQJv8Iw=0uO zt+nXr3AkiIl=;&mym#E=f#-%z|RXm`(1{Yb{F}|x)dK?7-hlC!R4A`ojyPOb%v+6=P4Eq5le-M zj{{M+U-_c0#&r}?4M9=ZytBZzy(QKyi}AV3lQh;^xQ^n6OA?i5p4*Y^|bLc1dgoFk30r77AVZmw-V~it}>?N$oxA6Vbz~HbW_5D-Is>`1TXM z9O>`}UF*piHayzPgIltQFGQ^Vuvu9OcL1*sQzoiID!ULPK-1}rN*}~w796IUq zohN%aa>6HSo!(iBfTLmWQIA|fQb6-STondbN=1N0o#`ODtIa1mh9sx#HcJ1h5p2GXSuo!0I=+Lg+=7%n7SO(a2zsCJ?j<1F1945r}HV+igBw z=^zR$Qi=-uH9*k=5o&ldc$CkYFlvW{3!Q)ww4<&iDT^Y za#E0RAnNol0oT#Qop5Ox0p%!eU7g?`KU2%Hc~Pt(rh4Wihd)^!#~W*~q#%!W zW`t_rAHV>|2Bnw`ocrbxkH8ySZXO8CXt25Rf@tO3wzmts_-3IZ&89k=zrLZG4L4WQ z*kF?`33ZO5D%$^oq^p=Li=LyQPM&T$f_lSZUncH8;z?jc$r%y=tsv(MA8B!!To+2eCu{hp z|HyFogwJ2ykYerP7`Cl&BZ_O6#W}CZ;b+fhnNeraP}86MH`H3(xGK)kQ-+&Y#hEqP zsa*4X;mK_|?tLsx^E8`zGa@A8nx=Z2)h#X$Y|4^w)M*(Z5>~W!AMwTWrNOb8rxbsv zFrci>EvGUjdhmpgrN>s%in*F4bKHuyym_F+t|K1C6TbD!9=bAyTUI5)H4e<3>hL!= zSEEo^1BxHfh~m?iCqZOTv{)<&|F${9kAIV)R1kI?^4N8_#Iku&w1$i3x;*qk4$K+m zN@y<*obWi6HSWY}YrZi?X|1JibyWciTRVJxurpB37}7t_go(}E$#z9epWj`iPz>V- zFE8Btt6u);*);iLKRaz}F!(;QG=sqB3Wo1K-OK-dI*nHdGtx{!*u1+`SzhN&cBrqh zD2-tmI2sOi2CB8gw}3@j>jdl*KVrES(Zz-w}^OF%)@Cm8P!78s>pU zA_e4&!olM{z-(X=fDXck&-Hz2dh3u!WE=&L#%img-s!y1(_@GzRmuGUR3JRDEl+EE zf1Kd^!cU&h@~7YHAZx~M{zk>-mgRCN)uo{$M3nb5S%aruj@ zs|LBry~jKrcrHU{S3pNszyr@^*>|j;K{Fa1?)-R)Md!Jssx+AZ(q1@rz98hw8emy4 zF{T*{3;HnaI_8rrN)lLLSxVU$Tr@3h-GOfF42JpK)2h{wj;(f}p}}VHIT0?H6=C7@ z2vh4V5-~lLYd;O;-(KLxMpdLW1Rsz5107EdnLBN=Y>I6U%4JN zuIiDcp{>)Wtvk@EgkE3_uB|lHXhlPf#kX#) zp|RGYzQ(E~LnBdBoy~R2z2jo>@}R|4rna9 z(7*`2dBGRKK~;nyjI71BhK{shf1A(TsZO)jG}wd*P}BaS9)2K<=Ko7Z$V*r-&^9zp zv{}*YR+c~_rdc%GWmGChfdElgaog2NKJuY3fSTRpRB8hd)*RH3T9;)h8fq;XYWr2i z(uzyxyX-tv;z+l__rtnkRE1W5q$&7;u}ogBs8gnW@e+!IgQS+0)$9t4|?;4?p{M%SHP~5KIy!0?#l$ z;vI~H{fswSJ#qz88;|M*Hn6liZDRXR_#^htd;={r9KLyL4HK*TL(|UQfVNJb+LTpE zR@%D*9(*pt-V+`Nx(w~GjoE) z>;{`pUs1*6@^UE@h4!9+hz%>7-AWv>W^uq5?+e&<(8C_02}&vUo$%>Sn^-)mm)gKZ zM>~C$%bF3(-wNn07>;%X1cC6@VUNvk7TA8kLkhyDR>k=*Ym#UMjsvg1SK_q;C1U0B zma71*XslAqp5hD|yRY&SkM_@U-c*N4RT}BbbV@>3G2r+~pLACEY=hYwPoYMzd?L1p2Y_8iab$TPXt zB3lwN88b}!eFDWuUtl&<1Krc=ZJekcHKDK+^!6BzpA4`?h&o}Z+Oia_Cw+eVheGAv zk1U9?YEhJeA2L*zjoCFHijgWujUw=?SMof!BRrnmmaBxf$%N+W3t~|2L@B~X%8HgK zmtW9dM%(;Wfdg$mmKEOT+KMP)Yb=*GI3q*B2t;CI z`B9q6hARk!U%Zs%@JYWi*~9HV55JhJgu_$nY?ik~NtcA?=??2}u4Y!*eiTZ={lCqy z=ctckjeVwya49aC7v-~;SK-*|dEg%FZ`QBxvfyA_kdw-S2FZGvmv{D@G2v{;Z!lYUorJ%;u zL{Qv$eTsS0Lfc^sJi0Z<{m*5w#@VFxj4*Su!-h}QFr&fV4}2MT!`iTZJqWERI_gu_ zswuTP{`{UIL1}zVN+q!4V2S#q=Del|ND~KenjQN~gL;k%IF$=? 
ziBw+f4~-(P09#>2 z6tRS+Ws1%DO%5$HBSa$#t*M05B~N(lr5xXRJWVz;KJ5fB7W~CENj|z*yfgb@u zD7e1HXdZ|$LBTymPyX!gM|<_pf1eF^f{a7mL;wrcUKHo{HC27qwtq4v%=gndL!rDu z!gUm>xF!+P!;-PKu7Dpsm*KIObC{Bh3%mk~0-suy;49asjO(a>2mT7^>bu8QN`;-$ z7#KsB(rj=YHSP8*t1fHp^m*y+;;_c&XB&+y9^vdoyExovG~&~>dLRSu`eLeFl` z@zdXBNZ8}c{Q4TGE9bj>VNDgTqkac$5RtCFh*y=Hp@Sv|900yiUv2HVb6tvQlWn{) zR9uFVu@r8(`*Yxg&%bQU@Y1dV$2)!UMH#GB6559o-2t!dDe~`|G8||N-Kyg#g1&%Y z9N>Ke(Z&z~XjqNOm4vRIfV~|7?WY2YdBgI#QSSa+Et!Jh zw)?s`dfXo|!yA$L0?DN2?>}9`%JZY`z?TSjpI1N*+a%Su6ab1vjO+4CVA{(0(Qn+b zHktU&qrD>+3PvEuQGEWg1k)RAezPObEBlMYEik^&+3xfFNsoero}-Uk4G|R@znHCf z<-He0T>Irv5jDfVR_~KPbR-PZt0(S$CoH~Wn)C>mSC+T5S-gD~# z_W=)Hw>)mHUwP(Ei~{ljT-xGt)rB#P5w`6vhP%NAq_8ndQQNmE!co*j6fsLNaCfKY z3EOuU2?AJqL5vSCaLET}e!nMh{UveMT^u*SPs$j)KJdesHE4k0R;CB|dpDwfcgK}U zR<%UQn=@67g*ikL9Uq#p9SkBo#&+On_6u`ZveJ3NydiSkDH z@Pa6JtV#A6y>FK>jEZqs9ctiGDAPgUYqd#z|DEen3yyXNymzF8GeToh2zO*`ek0F> zgl60RBFEZ&qH6pbDz$s(m4%yKy1Zdu1&IH zo|^;i0e&^s@keEVQfesIiog$mA78sH?%%R1fzRkFbk^Hx2LK$cPN4`b^`}-PxMo@0 z2Yy_hW9c+F#>Rkw0}P-H@DHw|Hh$rXDpoIyh95HgCs8qg)eED1;fg9;M{NZDp*+Wt z=KlT|FqDj;RfM~0s`SpUu1nE8)4>~O4dHC5&*(MJbolSrI=7-;SC6q)Hsgu0*CN>g9$YjU%f8HvkJkIDYiD9ey0~=WCN(wji2C@jnRHeWj-M zfqyl28YWZ`CK-3Mp99Lq2S- z5s|Np$bJzq^2ev3{I0K;-+fjeSpR=($hK6hA$ooQ001R)MObuXVRU6WV{&C-bY%cC zFflnTFg7hRH&ih=Ix;poGB+zQFgh?Wb@#l$0000bbVXQnWMOn=I&E)cX=Zr004R>004l5008;`004mK004C`008P>0026e000+ooVrmw00006 zVoOIv0RI600RN!9r;`8x00(qQO+^Re1{)F`4ay^d4FCWYh)G02RA}DKn|HKbRhhtl z-#+Ku{z_g7$)mSO3nZaLS)hm`D(KRoj5;!Q1qs*!GCBj};4F1i#G!*ItjO347O)Hg zb_nSSF$p0&Qr-)BEw9{q&e`AmvCn<|B`+b&_^ox;Tkqa;_x^g@-xgkGSa5X*R>2rW z0EIaMmjV9>oD6ut7T^a84;tZltDqJ{VAbudFDriXax8lIMO{okx`qNq++&qifs29n z0H*^nup6+zEFcAJ03KBMo@dxX?El8UW2OJqZUOxbN3N{TMT%`m7V1LaES0+ zMkrQvt~fP+7jPWV2mBoPrYe6Hp{gU0uu5QrMZo)jw*u3F9l-a1rD4Rnz(?yE5nE;L zornJaGT@XecM${@)Y)q-M1p`*1pWzlJ8%Th1^fiK-zrZVA&O#n`1UmIosI>6Txogm z;Rq3kEuJwf5N9fv0CRvBfgb@6#FmwwAy#$R+elb**PQ=b2Aq0T2cD-xgxdsZ;Jpdb zvw?P;V_Rx1t389Y3a1`#So{tjKP(aj`J;etU;yAZe~J0gqcKLH8Y1a{lX0f;4&X?j z2Y4u9=2J!pHNhc@Eo<*Q?B!*^X;-!5c~IYUjM}~pXaU-Q?I5eNdPg&Z%A36}$Nfqvj&;2Wy^NrXxrr`QT> z?wGnpOo;)fEpEp%!-T7n5#b!*0}MN^O*qH)UEgC%9J}}5_cotJ^M(j7BpUKDV8Mtj zdV%i(w*h~xBX;ARA-lR1G2l6edPEXJ-wm7!lz?9+jCewXqH5xNEv&v{-yw9b44A*z zN!;2Y!t)}WpAdclkOkHPOI7)SF>JHy2*2=LkGG!Z)tB;s^MFqRuLpujwrm6L1|GoK zJphY8Un7jbm?WfHW{dE)MDxx9YQST_eX9JA2>l8K0yPV(_mWFY$beHmvXhiABPHG> z!W$C8&%?#+WxxXp4;i7Op1d4;xbUg>1|wHK3AhHQ!p&3BIN)jEYdCu^)jz!Qb2XwE zd~YbXm@dLa!25x7lGpx_Xv8BT^bDJZvgVHQQDYnf=6+xog`5S3B>wlnhemAhN8p1Rwrf4Npczr?hcS`Uc=N!2POR1ysUVS%25CDU4#k{HxnB0t)8}JOaF^aEZX0 zx_6B%+WRJqGjN{$5#XaQGx2qSy%HBC9t6%& z5d$DrxcA01t8Qy`)u$)|F97%8wDC=zQFP3LqZr^Bu$DBgT8n@`)+K-MjR9hY6ea^) z3j7rK3UKVpO!!#H;3~_9MD?q{@pa6eda;Xp|1Cvi9ck*KRWGnGiY0HxEHwIm0fa+< z6M=SxXI0_ux|0(GcoohLmjY+=66y&N_q+s{j!UW@!TFvUb*k^VA#h5nYzLkL<{}(A zcE2G8oOV@vLO*kXR$NB5>*JRXQ!8U5e8X zu70P_U@hna)&R2vP5_v{xMMs6(&-`A%*VyiRhgM_^>rttS_H06xO^=z^QB@d0;@M# z{_DYrTfP;sY`r}gb-Ca)aw)JhW#m=wKYOM(yD0)#o_xR16;8vu4Kbh=DzS=(vwzgG z+<#AJ;}1LgzFAYf)d;UR7@`8G)ZnpYG235MIy#ldmc^7R2d6n9kTq~wTP6J2HS3;x z=kX^gxOBY?oE}?Jfp3T2_eV-a&=z1G&?T`v^ZZV2x;N@wxO|{27q&F$vDuU)eZ2!m zP#}(>Ty?oUK(z)@JRG|Y6pX+?WcgEnNq^NdIHRK$o|w%k6f2s445L%#u- z_mN$7`#&02cW*HzZ-*BeDaO|Reou)nZSRur?C#UuVT9qprKJj~0Gb;TVilSSkV(mi zv|>LA2O?FT?l0@jJG%7#j$YYb4Y7(XO{=0R-N1U_FoEL)I5qJDK7c>067z8Je@)+| zFAidvE{W87qnMv`^~>^svc9RMz}byCDfq@E==%wQ53^bv3l*5&ECj*+uT$5?EtQ%+ z(mlwsfs&MCb^XZtOKFP{jc^(5B{(1VJKyLq15_asDAh>LNPf0F{Y-)Im3u~Q2C%If z$~SiP@|XUyzIl2Br{>cVh+vsIh(J?=TT~Pp@=~wvrcAg9?5@Q;+B>L^?v504!zWMb9E1@)qa2l>_yH10NNisM@*A7->OS#IT#++W*QsAh})cu11 
zg=|7JkV}sc=RUv)491qf^p*6NJ%h5X8cO~1$sk#FlfW)up6}^2T#=P9z?tJ=xOm#8 zur-<%?Gur*RUYpx%G$vS=TFP)>zWHPGv%SWZ(3@R4%z%vm}Y=bmDPh){YCcx&y*?> z>DWweuN>Hgun||G9wpFo())K}>PbP;o1Y1+w^n*;t<@<>(g^fKF+c3=moIMb)JJ=Z zTC#SOMC$K(20RZ2O0Z&s9U^?~W=sE|;(0DI$e7;>u(c9$Z%41*vZG7Z3|7QWm3SaF ziZPUdRY0S_DFEr5;9I3$N-j*^U*Q>zZ6=qUXGe`Xtt%tX?NGXUlv35@0II_|06-MO?^alhSOOm!^Fr$^;gCa>75RRi3rb== z#T-E4ka{6Y`=uRqAw7dDjOItNV`UUxsgn&8occpf{rxdtQwW$N<=eiZr!~wLSiAizHo_VYvnrM z@Qc{81~^@uz@M$7H=m>c3OpMEZw3xU$*0@0UT$pzQk99*KLM}bC%>YR!pcVc-eBnS z2_!Q7?9X@N2uii5X?U`U+%pAC*-V~}TF7k7li8X(9^qJA75J`%sVfrf7Knf!TRhLW z0ti?cTLD)-4sD*^o&irO%^N^dqg60yPm^0;!0Y#iUG`YQ1TqHrlqdUaZY(pOk7H0+Bd#!7 zXe}w<2s3bDp-Y{U0OfuWrUO@^%tRpFmIa0K$%ELeGl%r^S#r-5kgA!GaP^gFT1ePh zC7e;i=3_LaWH;mqI#a{HQ3&T$sT@~AY6fq>Bj`*Kbfoe7e2B+uU(lJN@N^TU(*_7< z*Bt#dA+sq@c1zA3x;uz~hZOEe__39wFhZE)i!c{g0Bu0ok#MsAnA_&uxaqUo{(^6p z`dz6R5(()SvUmeN;fxvxWS+}8Ny%u!@gSU8rE*N!Ij&%I#v|c)#~_gmX-YK163qyy z98)HEF-2x;7QZ_+>{k+LdjmfCm5r27C?cUartMC<^in{5-7JxOQI#KxFp#9$T_QXK zTqtm&51fHJ+gsC^)&9P+APOrVUPm~iw$2~$i^0zHjO7Wyw67!&J+)LJW(diQWvjpCVo!^BSp zlQ4i-*cU{}9?`ULQf7s-s?-jx4DSUj|JD-#thr-OT`te!1~=vzTyrx{I6^8IuG2^D zAz(9;6uknbZ18)6A^V#QL}fx^CE7 z$gOU`l)OpTPQQ@F6o<@I0c^#V*-W}Ei&)t!4qm@c zdRqn*3p}Sl_UQR6IOH6s)k-t&6e|n-)TldRdJH&Ig#S<_uL=!+YUap2hBz1B>h%Uc zOYh8#M$SOo7)>bYV0VhbQ_W@D||D3io-fiHVFJ3aGnSitNhU~`dT}> zOkr7bIki2r(jV~8#SK3>r#L;NcxE3cF83cJITx4Hc!M5sV}xd$M=~Wtx-B=(4Ah6( z5fztd0)BTuepMssZ4*mfJ*6_YNcqGf!R`@$LBND&h2I@qncA7z70#)V*_g+7R`Y(~ zCfuR#6$0zMS!XW66WVdD;x_{48eyvu)~7b-Y1)`0Y>B!hlue>?0nmVf@`**l*;S$$ zp+mSg7>#K1*X#2MI#WZ*PrB3XDK{ogdI_H zOe{)jFle(W+bdRHA2&up`J_SAcgROGYTzq=Z)&v4*NLkdf?a9S+cV>8fMLRfpgTo+ zN0!vC^!N-jA>L*zZ!9#bBYxTcg;M+OO{jBuS19@2`zR6Syswa*o}4|j2{WtjY~Ir}#<{zNktg)_G|YOKe5|vwCFtdlMmFJEB7I^giN-_@x~m zjnfW@n<56!=p&q{S81Snbos|-!25y-L)~0e0ula|y3DS>K zMTEhmWIP{ujR>(SzZVsiSSX&=Ukm5dmW!1|z~N(Yu?;ctv=Fb)$CSOjCPh;vKAIj< zJiVWI`lMOr%BU&2B5n!W)7!JCK+01Txe~Y$H*@?U5jNBhM>pSnSeor|Fjj>&*7YCP-Z?x;I;t*ReMjriDD0eO`8h6Rq=3&`<<&-D`Y&b@ zt76=&&h@w{@bQQ@M%cpM&laWvQFFLAlPUu@dLmC)g{B|RqIIUAaI(1Zx*5P1!dSj< zle5<~W2Q&P<&<@&IN<7uRpJ^Z=L?*w@LR0(X4`UMo&>dNDm= zyx(7t$Y2{H*OT-ZYf@jcaI=DKe!K-Kd-d_YyKs{%52$jZF|N*;==RBkN%gK}H#29^ z5)%7=T7-GPs|5~I=#()D=shKE!sJk`YXMxkdwKLT8ds{uYKE-504CqKHKR78LM`r*V1oqH5< zy}~898v3Kg@IKE#!*5%Nr-!84GrRmf{wJ}ral*#vLE!@R?(lBEJ9VvB^fzt3{7V#m zatI>OGoWw5dHg}(I)Rn3byMSeyGO83ROc^lN5q}!M=@97h7@~nS7CmeNkiwXB8}S$ zSkGQ0R_^B@%n?lj72uP;a97O^AEqdrCT<||INHZXP+VZJe4t@4duNvGc%CSI`!;Lt3qA5J#B%fDtU7VW~zG1-* z9h@k$MYsod>icz7Zc6(aO@;U=X+RwoT+?3PutkIiaM{%_RCzB#Hv-nT_*D<<+nK1$ z1@e;2lnT_xG<5Hb;Rk!Kt{k&>>Kmlt(!UP512ph9o_@FXMDgB zd%-pBu0~gkXSe`&U*K$B{*n1Txa%-?S><67chzpHPH!yyFV%Hkd05UK!IV~_YEiyM$F*rIhH##*kD=;uRFfhW%@jw6o03~!qSaf7z zbY(hiZ)9m^c>ppnF*z+THZ3wYR53U@GB-LjH!CnOIxsLT2WG_p0000 - -Audit Parser tool can be used for querying the ozone audit logs. -This tool creates a sqllite database at the specified path. If the database -already exists, it will avoid creating a database. - -The database contains only one table called `audit` defined as: - -CREATE TABLE IF NOT EXISTS audit ( -datetime text, -level varchar(7), -logger varchar(7), -user text, -ip text, -op text, -params text, -result varchar(7), -exception text, -UNIQUE(datetime,level,logger,user,ip,op,params,result)) - -Usage: -{{< highlight bash >}} -ozone auditparser [COMMAND] [PARAM] -{{< /highlight >}} - -To load an audit log to database: -{{< highlight bash >}} -ozone auditparser load -{{< /highlight >}} -Load command creates the audit table described above. 
-
-To run a custom read-only query:
-{{< highlight bash >}}
-ozone auditparser query
-{{< /highlight >}}
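Not part of the original page, but for illustration: the tool writes an ordinary SQLite file, so read-only queries can also be issued programmatically. The sketch below assumes a SQLite JDBC driver such as xerial's sqlite-jdbc is on the classpath; the database path and the result = 'FAILURE' filter are made-up examples against the audit table defined above.

{{< highlight java >}}
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public final class AuditDbQueryExample {
  public static void main(String[] args) throws Exception {
    // Path of the SQLite database previously created by the load command
    // (illustrative default; pass your own path as the first argument).
    String dbPath = args.length > 0 ? args[0] : "audit.db";

    try (Connection conn =
             DriverManager.getConnection("jdbc:sqlite:" + dbPath);
         Statement stmt = conn.createStatement();
         // Read-only aggregation over the `audit` table shown above.
         ResultSet rs = stmt.executeQuery(
             "SELECT user, op, COUNT(*) AS cnt FROM audit"
                 + " WHERE result = 'FAILURE' GROUP BY user, op")) {
      while (rs.next()) {
        System.out.printf("%-20s %-30s %d%n",
            rs.getString("user"), rs.getString("op"), rs.getLong("cnt"));
      }
    }
  }
}
{{< /highlight >}}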
[Deleted static web-UI assets (apparently Bootstrap LESS sources and a minified jQuery bundle) followed here; their diff headers and contents are garbled in this extraction and are omitted.]

-
-# Server framework for HDDS/Ozone
-
-This
project contains generic utilities and resources for all the HDDS/Ozone -server-side components. - -The project is shared between the server/service projects but not with the -client packages. \ No newline at end of file diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml deleted file mode 100644 index 17926897654..00000000000 --- a/hadoop-hdds/framework/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-server-framework - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Server Framework - Apache Hadoop HDDS Server Framework - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - org.mockito - mockito-all - test - - - diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java deleted file mode 100644 index 990d89dc0cb..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java +++ /dev/null @@ -1,258 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.HddsConfServlet; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.http.HttpServer2; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.net.NetUtils; - -import org.eclipse.jetty.webapp.WebAppContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.servlet.http.HttpServlet; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Optional; - -import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; - -/** - * Base class for HTTP server of the Ozone related components. - */ -public abstract class BaseHttpServer { - - private static final Logger LOG = - LoggerFactory.getLogger(BaseHttpServer.class); - protected static final String PROMETHEUS_SINK = "PROMETHEUS_SINK"; - - private HttpServer2 httpServer; - private final Configuration conf; - - private InetSocketAddress httpAddress; - private InetSocketAddress httpsAddress; - - private HttpConfig.Policy policy; - - private String name; - private PrometheusMetricsSink prometheusMetricsSink; - - private boolean prometheusSupport; - - private boolean profilerSupport; - - public BaseHttpServer(Configuration conf, String name) throws IOException { - this.name = name; - this.conf = conf; - policy = DFSUtil.getHttpPolicy(conf); - if (isEnabled()) { - this.httpAddress = getHttpBindAddress(); - this.httpsAddress = getHttpsBindAddress(); - HttpServer2.Builder builder = null; - - // Avoid registering o.a.h.http.PrometheusServlet in HttpServer2. - // TODO: Replace "hadoop.prometheus.endpoint.enabled" with - // CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED when possible. 
- conf.setBoolean("hadoop.prometheus.endpoint.enabled", false); - - builder = DFSUtil.httpServerTemplateForNNAndJN(conf, this.httpAddress, - this.httpsAddress, name, getSpnegoPrincipal(), getKeytabFile()); - - final boolean xFrameEnabled = conf.getBoolean( - DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED, - DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT); - - final String xFrameOptionValue = conf.getTrimmed( - DFSConfigKeys.DFS_XFRAME_OPTION_VALUE, - DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT); - - builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue); - - httpServer = builder.build(); - httpServer.addServlet("conf", "/conf", HddsConfServlet.class); - - httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); - prometheusSupport = - conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true); - - profilerSupport = - conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false); - - if (prometheusSupport) { - prometheusMetricsSink = new PrometheusMetricsSink(); - httpServer.getWebAppContext().getServletContext() - .setAttribute(PROMETHEUS_SINK, prometheusMetricsSink); - httpServer.addServlet("prometheus", "/prom", PrometheusServlet.class); - } - - if (profilerSupport) { - LOG.warn( - "/prof java profiling servlet is activated. Not safe for " - + "production!"); - httpServer.addServlet("profile", "/prof", ProfileServlet.class); - } - } - - } - - /** - * Add a servlet to BaseHttpServer. - * - * @param servletName The name of the servlet - * @param pathSpec The path spec for the servlet - * @param clazz The servlet class - */ - protected void addServlet(String servletName, String pathSpec, - Class clazz) { - httpServer.addServlet(servletName, pathSpec, clazz); - } - - /** - * Returns the WebAppContext associated with this HttpServer. - * - * @return WebAppContext - */ - protected WebAppContext getWebAppContext() { - return httpServer.getWebAppContext(); - } - - protected InetSocketAddress getBindAddress(String bindHostKey, - String addressKey, String bindHostDefault, int bindPortdefault) { - final Optional bindHost = - getHostNameFromConfigKeys(conf, bindHostKey); - - final Optional addressPort = - getPortNumberFromConfigKeys(conf, addressKey); - - final Optional addressHost = - getHostNameFromConfigKeys(conf, addressKey); - - String hostName = bindHost.orElse(addressHost.orElse(bindHostDefault)); - - return NetUtils.createSocketAddr( - hostName + ":" + addressPort.orElse(bindPortdefault)); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the HTTPS web interface. - * - * @return Target InetSocketAddress for the Ozone HTTPS endpoint. - */ - public InetSocketAddress getHttpsBindAddress() { - return getBindAddress(getHttpsBindHostKey(), getHttpsAddressKey(), - getBindHostDefault(), getHttpsBindPortDefault()); - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to the HTTP web interface. - *

- * * @return Target InetSocketAddress for the Ozone HTTP endpoint. - */ - public InetSocketAddress getHttpBindAddress() { - return getBindAddress(getHttpBindHostKey(), getHttpAddressKey(), - getBindHostDefault(), getHttpBindPortDefault()); - - } - - public void start() throws IOException { - if (httpServer != null && isEnabled()) { - httpServer.start(); - if (prometheusSupport) { - DefaultMetricsSystem.instance() - .register("prometheus", "Hadoop metrics prometheus exporter", - prometheusMetricsSink); - } - updateConnectorAddress(); - } - - } - - private boolean isEnabled() { - return conf.getBoolean(getEnabledKey(), true); - } - - public void stop() throws Exception { - if (httpServer != null) { - httpServer.stop(); - } - } - - /** - * Update the configured listen address based on the real port - *

- * (eg. replace :0 with real port) - */ - public void updateConnectorAddress() { - int connIdx = 0; - if (policy.isHttpEnabled()) { - httpAddress = httpServer.getConnectorAddress(connIdx++); - String realAddress = NetUtils.getHostPortString(httpAddress); - conf.set(getHttpAddressKey(), realAddress); - LOG.info( - String.format("HTTP server of %s is listening at http://%s", - name.toUpperCase(), realAddress)); - } - - if (policy.isHttpsEnabled()) { - httpsAddress = httpServer.getConnectorAddress(connIdx); - String realAddress = NetUtils.getHostPortString(httpsAddress); - conf.set(getHttpsAddressKey(), realAddress); - LOG.info( - String.format("HTTP server of %s is listening at https://%s", - name.toUpperCase(), realAddress)); - } - } - - public InetSocketAddress getHttpAddress() { - return httpAddress; - } - - public InetSocketAddress getHttpsAddress() { - return httpsAddress; - } - - protected abstract String getHttpAddressKey(); - - protected abstract String getHttpsAddressKey(); - - protected abstract String getHttpBindHostKey(); - - protected abstract String getHttpsBindHostKey(); - - protected abstract String getBindHostDefault(); - - protected abstract int getHttpBindPortDefault(); - - protected abstract int getHttpsBindPortDefault(); - - protected abstract String getKeytabFile(); - - protected abstract String getSpnegoPrincipal(); - - protected abstract String getEnabledKey(); - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java deleted file mode 100644 index 1869c8b19ec..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/LogStreamServlet.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; - -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.WriterAppender; - -/** - * Servlet to stream the current logs to the response. 
- */ -public class LogStreamServlet extends HttpServlet { - - private static final String PATTERN = "%d [%p|%c|%C{1}] %m%n"; - - @Override - protected void doGet(HttpServletRequest req, HttpServletResponse resp) - throws ServletException, IOException { - - WriterAppender appender = - new WriterAppender(new PatternLayout(PATTERN), resp.getWriter()); - appender.setThreshold(Level.TRACE); - - try { - Logger.getRootLogger().addAppender(appender); - try { - Thread.sleep(Integer.MAX_VALUE); - } catch (InterruptedException e) { - //interrupted - } - } finally { - Logger.getRootLogger().removeAppender(appender); - } - } - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java deleted file mode 100644 index d67a759f8d8..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/OzoneProtocolMessageDispatcher.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.hdds.function.FunctionWithServiceException; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; - -import com.google.protobuf.ProtocolMessageEnum; -import com.google.protobuf.ServiceException; -import io.opentracing.Scope; -import org.slf4j.Logger; - -/** - * Dispatch message after tracing and message logging for insight. - *

- * This is a generic utility to dispatch message in ServerSide translators. - *

- * It logs the message type/content on DEBUG/TRACING log for insight and create - * a new span based on the tracing information. - */ -public class OzoneProtocolMessageDispatcher { - - private String serviceName; - - private final ProtocolMessageMetrics protocolMessageMetrics; - - private Logger logger; - - public OzoneProtocolMessageDispatcher(String serviceName, - ProtocolMessageMetrics protocolMessageMetrics, Logger logger) { - this.serviceName = serviceName; - this.protocolMessageMetrics = protocolMessageMetrics; - this.logger = logger; - } - - public RESPONSE processRequest( - REQUEST request, - FunctionWithServiceException methodCall, - ProtocolMessageEnum type, - String traceId) throws ServiceException { - Scope scope = TracingUtil - .importAndCreateScope(type.toString(), traceId); - try { - if (logger.isTraceEnabled()) { - logger.trace( - "{} {} request is received: {}", - serviceName, - type.toString(), - request.toString().replaceAll("\n", "\\\\n")); - } else if (logger.isDebugEnabled()) { - logger.debug("{} {} request is received", - serviceName, type.toString()); - } - protocolMessageMetrics.increment(type); - - RESPONSE response = methodCall.apply(request); - - if (logger.isTraceEnabled()) { - logger.trace( - "{} {} request is processed. Response: " - + "{}", - serviceName, - type.toString(), - response.toString().replaceAll("\n", "\\\\n")); - } - return response; - - } finally { - scope.close(); - } - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java deleted file mode 100644 index 7cea58236a7..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ProfileServlet.java +++ /dev/null @@ -1,507 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.server; - -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.lang.management.ManagementFactory; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Joiner; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Servlet that runs async-profiler as web-endpoint. - *

- * Source: https://github.com/apache/hive/blob/master/common/src/java/org - * /apache/hive/http/ProfileServlet.java - *

- * Following options from async-profiler can be specified as query paramater. - * // -e event profiling event: cpu|alloc|lock|cache-misses etc. - * // -d duration run profiling for seconds (integer) - * // -i interval sampling interval in nanoseconds (long) - * // -j jstackdepth maximum Java stack depth (integer) - * // -b bufsize frame buffer size (long) - * // -t profile different threads separately - * // -s simple class names instead of FQN - * // -o fmt[,fmt...] output format: - * summary|traces|flat|collapsed|svg|tree|jfr - * // --width px SVG width pixels (integer) - * // --height px SVG frame height pixels (integer) - * // --minwidth px skip frames smaller than px (double) - * // --reverse generate stack-reversed FlameGraph / Call tree - * Example: - * - To collect 30 second CPU profile of current process (returns FlameGraph - * svg) - * curl "http://localhost:10002/prof" - * - To collect 1 minute CPU profile of current process and output in tree - * format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" - * - To collect 30 second heap allocation profile of current process (returns - * FlameGraph svg) - * curl "http://localhost:10002/prof?event=alloc" - * - To collect lock contention profile of current process (returns - * FlameGraph svg) - * curl "http://localhost:10002/prof?event=lock" - * Following event types are supported (default is 'cpu') (NOTE: not all - * OS'es support all events) - * // Perf events: - * // cpu - * // page-faults - * // context-switches - * // cycles - * // instructions - * // cache-references - * // cache-misses - * // branches - * // branch-misses - * // bus-cycles - * // L1-dcache-load-misses - * // LLC-load-misses - * // dTLB-load-misses - * // mem:breakpoint - * // trace:tracepoint - * // Java events: - * // alloc - * // lock - */ -public class ProfileServlet extends HttpServlet { - private static final long serialVersionUID = 1L; - private static final Logger LOG = - LoggerFactory.getLogger(ProfileServlet.class); - private static final String ACCESS_CONTROL_ALLOW_METHODS = - "Access-Control-Allow-Methods"; - private static final String ALLOWED_METHODS = "GET"; - private static final String ACCESS_CONTROL_ALLOW_ORIGIN = - "Access-Control-Allow-Origin"; - private static final String CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"; - private static final String ASYNC_PROFILER_HOME_ENV = "ASYNC_PROFILER_HOME"; - private static final String ASYNC_PROFILER_HOME_SYSTEM_PROPERTY = - "async.profiler.home"; - private static final String PROFILER_SCRIPT = "/profiler.sh"; - private static final int DEFAULT_DURATION_SECONDS = 10; - private static final AtomicInteger ID_GEN = new AtomicInteger(0); - static final Path OUTPUT_DIR = - Paths.get(System.getProperty("java.io.tmpdir"), "prof-output"); - public static final String FILE_PREFIX = "async-prof-pid-"; - - public static final Pattern FILE_NAME_PATTERN = - Pattern.compile(FILE_PREFIX + "[0-9]+-[0-9A-Za-z\\-_]+-[0-9]+\\.[a-z]+"); - - private Lock profilerLock = new ReentrantLock(); - private final Integer pid; - private String asyncProfilerHome; - private transient Process process; - - public ProfileServlet() { - this.asyncProfilerHome = getAsyncProfilerHome(); - this.pid = getPid(); - LOG.info("Servlet process PID: {} asyncProfilerHome: {}", pid, - asyncProfilerHome); - try { - Files.createDirectories(OUTPUT_DIR); - } catch (IOException e) { - LOG.error( - "Can't create the output directory for java profiler: " + OUTPUT_DIR, - e); - } - } - - private Integer getPid() { - // JVM_PID is 
exported by bin/ozone - String pidStr = System.getenv("JVM_PID"); - - // in case if it is not set correctly used fallback from mxbean which is - // implementation specific - if (pidStr == null || pidStr.trim().isEmpty()) { - String name = ManagementFactory.getRuntimeMXBean().getName(); - if (name != null) { - int idx = name.indexOf("@"); - if (idx != -1) { - pidStr = name.substring(0, name.indexOf("@")); - } - } - } - try { - if (pidStr != null) { - return Integer.valueOf(pidStr); - } - } catch (NumberFormatException nfe) { - // ignore - } - return null; - } - - public Process runCmdAsync(List cmd) { - try { - LOG.info("Running command async: " + cmd); - return new ProcessBuilder(cmd).inheritIO().start(); - } catch (IOException ex) { - throw new IllegalStateException(ex); - } - } - - @VisibleForTesting - protected static String generateFileName(Integer pid, Output output, - Event event) { - return FILE_PREFIX + pid + "-" + - event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() - + "." + - output.name().toLowerCase(); - } - - @VisibleForTesting - protected static String validateFileName(String filename) { - if (!FILE_NAME_PATTERN.matcher(filename).matches()) { - throw new IllegalArgumentException( - "Invalid file name parameter " + filename + " doesn't match pattern " - + FILE_NAME_PATTERN); - - } - return filename; - } - - @Override - protected void doGet(final HttpServletRequest req, - final HttpServletResponse resp) throws IOException { - // make sure async profiler home is set - if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { - resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - setResponseHeader(resp); - resp.getWriter().write("ASYNC_PROFILER_HOME env is not set."); - return; - } - - //download the finished file - if (req.getParameter("file") != null) { - doGetDownload(req.getParameter("file"), req, resp); - return; - } - // if pid is explicitly specified, use it else default to current process - Integer processId = getInteger(req, "pid", pid); - - // if pid is not specified in query param and if current process pid - // cannot be determined - if (processId == null) { - resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - setResponseHeader(resp); - resp.getWriter().write( - "'pid' query parameter unspecified or unable to determine PID of " - + "current process."); - return; - } - - final int duration = - getInteger(req, "duration", DEFAULT_DURATION_SECONDS); - final Output output = getOutput(req); - final Event event = getEvent(req); - final Long interval = getLong(req, "interval"); - final Integer jstackDepth = getInteger(req, "jstackdepth", null); - final Long bufsize = getLong(req, "bufsize"); - final boolean thread = req.getParameterMap().containsKey("thread"); - final boolean simple = req.getParameterMap().containsKey("simple"); - final Integer width = getInteger(req, "width", null); - final Integer height = getInteger(req, "height", null); - final Double minwidth = getMinWidth(req); - final boolean reverse = req.getParameterMap().containsKey("reverse"); - - if (process == null || !process.isAlive()) { - try { - int lockTimeoutSecs = 3; - if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { - try { - //Should be in sync with FILE_NAME_PATTERN - File outputFile = - OUTPUT_DIR.resolve( - ProfileServlet.generateFileName(processId, output, event)) - .toFile(); - List cmd = new ArrayList<>(); - cmd.add(asyncProfilerHome + PROFILER_SCRIPT); - cmd.add("-e"); - cmd.add(event.getInternalName()); - cmd.add("-d"); - cmd.add("" + 
duration); - cmd.add("-o"); - cmd.add(output.name().toLowerCase()); - cmd.add("-f"); - cmd.add(outputFile.getAbsolutePath()); - if (interval != null) { - cmd.add("-i"); - cmd.add(interval.toString()); - } - if (jstackDepth != null) { - cmd.add("-j"); - cmd.add(jstackDepth.toString()); - } - if (bufsize != null) { - cmd.add("-b"); - cmd.add(bufsize.toString()); - } - if (thread) { - cmd.add("-t"); - } - if (simple) { - cmd.add("-s"); - } - if (width != null) { - cmd.add("--width"); - cmd.add(width.toString()); - } - if (height != null) { - cmd.add("--height"); - cmd.add(height.toString()); - } - if (minwidth != null) { - cmd.add("--minwidth"); - cmd.add(minwidth.toString()); - } - if (reverse) { - cmd.add("--reverse"); - } - cmd.add(processId.toString()); - process = runCmdAsync(cmd); - - // set response and set refresh header to output location - setResponseHeader(resp); - resp.setStatus(HttpServletResponse.SC_ACCEPTED); - String relativeUrl = "/prof?file=" + outputFile.getName(); - resp.getWriter().write( - "Started [" + event.getInternalName() - + "] profiling. This page will automatically redirect to " - + - relativeUrl + " after " + duration - + " seconds.\n\ncommand:\n" + Joiner.on(" ").join(cmd)); - resp.getWriter().write( - "\n\n\nPlease make sure that you enabled the profiling on " - + "kernel level:\n" - + "echo 1 > /proc/sys/kernel/perf_event_paranoid\n" - + "echo 0 > /proc/sys/kernel/kptr_restrict\n\n" - + "See https://github" - + ".com/jvm-profiling-tools/async-profiler#basic-usage" - + " for more details."); - // to avoid auto-refresh by ProfileOutputServlet, refreshDelay - // can be specified via url param - int refreshDelay = getInteger(req, "refreshDelay", 0); - - // instead of sending redirect, set auto-refresh so that browsers - // will refresh with redirected url - resp.setHeader("Refresh", - (duration + refreshDelay) + ";" + relativeUrl); - resp.getWriter().flush(); - } finally { - profilerLock.unlock(); - } - } else { - setResponseHeader(resp); - resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter().write( - "Unable to acquire lock. Another instance of profiler might be " - + "running."); - LOG.warn( - "Unable to acquire lock in {} seconds. Another instance of " - + "profiler might be running.", - lockTimeoutSecs); - } - } catch (InterruptedException e) { - LOG.warn("Interrupted while acquiring profile lock.", e); - resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - } - } else { - setResponseHeader(resp); - resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter() - .write("Another instance of profiler is already running."); - } - } - - protected void doGetDownload(String fileName, final HttpServletRequest req, - final HttpServletResponse resp) - throws IOException { - - String safeFileName = validateFileName(fileName); - File requestedFile = - ProfileServlet.OUTPUT_DIR - .resolve(safeFileName) - .toAbsolutePath().toFile(); - // async-profiler version 1.4 writes 'Started [cpu] profiling' to output - // file when profiler is running which - // gets replaced by final output. If final output is not ready yet, the - // file size will be <100 bytes (in all modes). - if (requestedFile.length() < 100) { - LOG.info("{} is incomplete. 
Sending auto-refresh header..", - requestedFile); - resp.setHeader("Refresh", - "2," + req.getRequestURI() + "?file=" + safeFileName); - resp.getWriter().write( - "This page will auto-refresh every 2 second until output file is " - + "ready.."); - } else { - if (safeFileName.endsWith(".svg")) { - resp.setContentType("image/svg+xml"); - } else if (safeFileName.endsWith(".tree")) { - resp.setContentType("text/html"); - } - try (InputStream input = new FileInputStream(requestedFile)) { - IOUtils.copy(input, resp.getOutputStream()); - } - } - } - - private Integer getInteger(final HttpServletRequest req, - final String param, - final Integer defaultValue) { - final String value = req.getParameter(param); - if (value != null) { - try { - return Integer.valueOf(value); - } catch (NumberFormatException e) { - return defaultValue; - } - } - return defaultValue; - } - - private Long getLong(final HttpServletRequest req, final String param) { - final String value = req.getParameter(param); - if (value != null) { - try { - return Long.valueOf(value); - } catch (NumberFormatException e) { - return null; - } - } - return null; - } - - private Double getMinWidth(final HttpServletRequest req) { - final String value = req.getParameter("minwidth"); - if (value != null) { - try { - return Double.valueOf(value); - } catch (NumberFormatException e) { - return null; - } - } - return null; - } - - private Event getEvent(final HttpServletRequest req) { - final String eventArg = req.getParameter("event"); - if (eventArg != null) { - Event event = Event.fromInternalName(eventArg); - return event == null ? Event.CPU : event; - } - return Event.CPU; - } - - private Output getOutput(final HttpServletRequest req) { - final String outputArg = req.getParameter("output"); - if (req.getParameter("output") != null) { - try { - return Output.valueOf(outputArg.trim().toUpperCase()); - } catch (IllegalArgumentException e) { - return Output.SVG; - } - } - return Output.SVG; - } - - private void setResponseHeader(final HttpServletResponse response) { - response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS); - response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*"); - response.setContentType(CONTENT_TYPE_TEXT); - } - - static String getAsyncProfilerHome() { - String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV); - // if ENV is not set, see if -Dasync.profiler - // .home=/path/to/async/profiler/home is set - if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { - asyncProfilerHome = - System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY); - } - - return asyncProfilerHome; - } - - enum Event { - CPU("cpu"), - ALLOC("alloc"), - LOCK("lock"), - PAGE_FAULTS("page-faults"), - CONTEXT_SWITCHES("context-switches"), - CYCLES("cycles"), - INSTRUCTIONS("instructions"), - CACHE_REFERENCES("cache-references"), - CACHE_MISSES("cache-misses"), - BRANCHES("branches"), - BRANCH_MISSES("branch-misses"), - BUS_CYCLES("bus-cycles"), - L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), - LLC_LOAD_MISSES("LLC-load-misses"), - DTLB_LOAD_MISSES("dTLB-load-misses"), - MEM_BREAKPOINT("mem-breakpoint"), - TRACE_TRACEPOINT("trace-tracepoint"); - - private String internalName; - - Event(final String internalName) { - this.internalName = internalName; - } - - public String getInternalName() { - return internalName; - } - - public static Event fromInternalName(final String name) { - for (Event event : values()) { - if (event.getInternalName().equalsIgnoreCase(name)) { - return event; - } - } - - return null; - } - } - - 
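Not part of the original file, but as an illustration of the enum above: a minimal sketch, assuming it lives in the same org.apache.hadoop.hdds.server package (the nested enum is package-private) and that the class name is hypothetical. It only mirrors the fallback-to-CPU behaviour that getEvent() implements.

{{< highlight java >}}
package org.apache.hadoop.hdds.server;

/**
 * Illustrative only: resolves an "event" query-parameter value the same way
 * ProfileServlet.getEvent() does, defaulting to CPU profiling when the name
 * is unknown.
 */
public final class ProfilerEventExample {
  public static void main(String[] args) {
    String requested = args.length > 0 ? args[0] : "alloc";
    ProfileServlet.Event event =
        ProfileServlet.Event.fromInternalName(requested);
    if (event == null) {
      // Unknown event names fall back to plain CPU profiling.
      event = ProfileServlet.Event.CPU;
    }
    // The internal name is what the servlet passes to profiler.sh via "-e".
    System.out.println(event.getInternalName());
  }
}
{{< /highlight >}}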
enum Output { - SUMMARY, - TRACES, - FLAT, - COLLAPSED, - SVG, - TREE, - JFR - } - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java deleted file mode 100644 index f37d32340fa..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/PrometheusMetricsSink.java +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import static org.apache.hadoop.hdds.utils.RocksDBStoreMBean.ROCKSDB_CONTEXT_PREFIX; - -import java.io.IOException; -import java.io.Writer; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.regex.Pattern; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.metrics2.AbstractMetric; -import org.apache.hadoop.metrics2.MetricType; -import org.apache.hadoop.metrics2.MetricsRecord; -import org.apache.hadoop.metrics2.MetricsSink; -import org.apache.hadoop.metrics2.MetricsTag; - -import org.apache.commons.configuration2.SubsetConfiguration; - -/** - * Metrics sink for prometheus exporter. - *

- * Stores the metric data in-memory and return with it on request. - */ -public class PrometheusMetricsSink implements MetricsSink { - - /** - * Cached output lines for each metrics. - */ - private final Map metricLines = new ConcurrentHashMap<>(); - - private static final Pattern SPLIT_PATTERN = - Pattern.compile("(? - * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ipc.RPC; -import org.apache.http.client.methods.HttpRequestBase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.net.InetSocketAddress; -import java.util.Collection; - -/** - * Generic utilities for all HDDS/Ozone servers. - */ -public final class ServerUtils { - - private static final Logger LOG = LoggerFactory.getLogger( - ServerUtils.class); - - private ServerUtils() { - } - - /** - * Checks that a given value is with a range. - * - * For example, sanitizeUserArgs(17, 3, 5, 10) - * ensures that 17 is greater/equal than 3 * 5 and less/equal to 3 * 10. - * - * @param key - config key of the value - * @param valueTocheck - value to check - * @param baseKey - config key of the baseValue - * @param baseValue - the base value that is being used. - * @param minFactor - range min - a 2 here makes us ensure that value - * valueTocheck is at least twice the baseValue. - * @param maxFactor - range max - * @return long - */ - public static long sanitizeUserArgs(String key, long valueTocheck, - String baseKey, long baseValue, long minFactor, long maxFactor) { - long minLimit = baseValue * minFactor; - long maxLimit = baseValue * maxFactor; - if (valueTocheck < minLimit) { - LOG.warn( - "{} value = {} is smaller than min = {} based on" - + " the key value of {}, reset to the min value {}.", - key, valueTocheck, minLimit, baseKey, minLimit); - valueTocheck = minLimit; - } else if (valueTocheck > maxLimit) { - LOG.warn( - "{} value = {} is larger than max = {} based on" - + " the key value of {}, reset to the max value {}.", - key, valueTocheck, maxLimit, baseKey, maxLimit); - valueTocheck = maxLimit; - } - - return valueTocheck; - } - - - /** - * After starting an RPC server, updates configuration with the actual - * listening address of that server. The listening address may be different - * from the configured address if, for example, the configured address uses - * port 0 to request use of an ephemeral port. - * - * @param conf configuration to update - * @param rpcAddressKey configuration key for RPC server address - * @param addr configured address - * @param rpcServer started RPC server. - */ - public static InetSocketAddress updateRPCListenAddress( - OzoneConfiguration conf, String rpcAddressKey, - InetSocketAddress addr, RPC.Server rpcServer) { - return updateListenAddress(conf, rpcAddressKey, addr, - rpcServer.getListenerAddress()); - } - - - /** - * After starting an server, updates configuration with the actual - * listening address of that server. The listening address may be different - * from the configured address if, for example, the configured address uses - * port 0 to request use of an ephemeral port. - * - * @param conf configuration to update - * @param addressKey configuration key for RPC server address - * @param addr configured address - * @param listenAddr the real listening address. 
- */ - public static InetSocketAddress updateListenAddress(OzoneConfiguration conf, - String addressKey, InetSocketAddress addr, InetSocketAddress listenAddr) { - InetSocketAddress updatedAddr = new InetSocketAddress(addr.getHostString(), - listenAddr.getPort()); - conf.set(addressKey, - addr.getHostString() + ":" + listenAddr.getPort()); - return updatedAddr; - } - - - /** - * Releases a http connection if the request is not null. - * @param request - */ - public static void releaseConnection(HttpRequestBase request) { - if (request != null) { - request.releaseConnection(); - } - } - - /** - * Get the location where SCM should store its metadata directories. - * Fall back to OZONE_METADATA_DIRS if not defined. - * - * @param conf - * @return - */ - public static File getScmDbDir(Configuration conf) { - File metadataDir = getDirectoryFromConfig(conf, - ScmConfigKeys.OZONE_SCM_DB_DIRS, "SCM"); - if (metadataDir != null) { - return metadataDir; - } - - LOG.warn("{} is not configured. We recommend adding this setting. " + - "Falling back to {} instead.", - ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS); - return getOzoneMetaDirPath(conf); - } - - /** - * Utility method to get value of a given key that corresponds to a DB - * directory. - * @param conf configuration bag - * @param key Key to test - * @param componentName Which component's key is this - * @return File created from the value of the key in conf. - */ - public static File getDirectoryFromConfig(Configuration conf, - String key, - String componentName) { - final Collection metadirs = conf.getTrimmedStringCollection(key); - - if (metadirs.size() > 1) { - throw new IllegalArgumentException( - "Bad config setting " + key + - ". " + componentName + - " does not support multiple metadata dirs currently"); - } - - if (metadirs.size() == 1) { - final File dbDirPath = new File(metadirs.iterator().next()); - if (!dbDirPath.exists() && !dbDirPath.mkdirs()) { - throw new IllegalArgumentException("Unable to create directory " + - dbDirPath + " specified in configuration setting " + - key); - } - return dbDirPath; - } - - return null; - } - - /** - * Checks and creates Ozone Metadir Path if it does not exist. - * - * @param conf - Configuration - * @return File MetaDir - * @throws IllegalArgumentException if the configuration setting is not set - */ - public static File getOzoneMetaDirPath(Configuration conf) { - File dirPath = getDirectoryFromConfig(conf, - HddsConfigKeys.OZONE_METADATA_DIRS, "Ozone"); - if (dirPath == null) { - throw new IllegalArgumentException( - HddsConfigKeys.OZONE_METADATA_DIRS + " must be defined."); - } - return dirPath; - } - - public static void setOzoneMetaDirPath(OzoneConfiguration conf, - String path) { - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, path); - } - - /** - * Returns with the service specific metadata directory. - *

- * If the directory is missing the method tries to create it. - * - * @param conf The ozone configuration object - * @param key The configuration key which specify the directory. - * @return The path of the directory. - */ - public static File getDBPath(Configuration conf, String key) { - final File dbDirPath = - getDirectoryFromConfig(conf, key, "OM"); - if (dbDirPath != null) { - return dbDirPath; - } - - LOG.warn("{} is not configured. We recommend adding this setting. " - + "Falling back to {} instead.", key, - HddsConfigKeys.OZONE_METADATA_DIRS); - return ServerUtils.getOzoneMetaDirPath(conf); - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java deleted file mode 100644 index bcd75f3f215..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfo.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.server; - -/** - * Common runtime information for any service components. - * - * Note: it's intentional to not use MXBean or MBean as a suffix of the name. - * - * Most of the services extends the ServiceRuntimeInfoImpl class and also - * implements a specific MXBean interface which extends this interface. - * - * This inheritance from multiple path could confuse the jmx system and - * some jmx properties could be disappeared. - * - * The solution is to always extend this interface and use the jmx naming - * convention in the new interface.. - */ -public interface ServiceRuntimeInfo { - - /** - * Gets the version of Hadoop. - * - * @return the version - */ - String getVersion(); - - /** - * Get the version of software running on the Namenode. - * - * @return a string representing the version - */ - String getSoftwareVersion(); - - /** - * Get the compilation information which contains date, user and branch. - * - * @return the compilation information, as a JSON string. - */ - String getCompileInfo(); - - /** - * Gets the NN start time in milliseconds. - * - * @return the NN start time in msec - */ - long getStartedTimeInMillis(); - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java deleted file mode 100644 index 2dffc6f902f..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServiceRuntimeInfoImpl.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.hdds.utils.VersionInfo; - -/** - * Helper base class to report the standard version and runtime information. - * - */ -public class ServiceRuntimeInfoImpl implements ServiceRuntimeInfo { - - private long startedTimeInMillis; - private final VersionInfo versionInfo; - - protected ServiceRuntimeInfoImpl(VersionInfo versionInfo) { - this.versionInfo = versionInfo; - } - - @Override - public String getVersion() { - return versionInfo.getVersion() + ", r" + versionInfo.getRevision(); - } - - @Override - public String getSoftwareVersion() { - return versionInfo.getVersion(); - } - - @Override - public String getCompileInfo() { - return versionInfo.getDate() + " by " + versionInfo.getUser() + " from " - + versionInfo.getBranch(); - } - - @Override - public long getStartedTimeInMillis() { - return startedTimeInMillis; - } - - public void setStartTime() { - startedTimeInMillis = System.currentTimeMillis(); - } - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java deleted file mode 100644 index 810c8b3437a..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/Event.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Identifier of an async event. - * - * @param THe message payload type of this event. - */ -public interface Event { - - /** - * The type of the event payload. Payload contains all the required data - * to process the event. - * - */ - Class getPayloadType(); - - /** - * The human readable name of the event. - * - * Used for display in thread names - * and monitoring. 
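The ServiceRuntimeInfo javadoc above describes a specific inheritance pattern: each service declares its own MXBean interface that extends ServiceRuntimeInfo (so JMX naming stays consistent), and the service class reuses ServiceRuntimeInfoImpl for the common attributes. A minimal illustrative sketch of that pattern; MyServiceMXBean, MyService and getActiveConnections are hypothetical names, not part of the deleted code:

    import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
    import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
    import org.apache.hadoop.hdds.utils.VersionInfo;

    // Service-specific MXBean: only adds attributes, inherits the common ones.
    interface MyServiceMXBean extends ServiceRuntimeInfo {
      int getActiveConnections();
    }

    // The service extends the helper base class and implements only its own MXBean.
    class MyService extends ServiceRuntimeInfoImpl implements MyServiceMXBean {
      MyService(VersionInfo versionInfo) {
        super(versionInfo);
      }

      @Override
      public int getActiveConnections() {
        return 0; // placeholder value for the sketch
      }
    }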
- * - */ - String getName(); -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java deleted file mode 100644 index 42578394162..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutor.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Executors define how an EventHandler should be called. - *

- * Executors are used only by the EventQueue and they provide the thread separation - * between the caller and the EventHandler. - *

- * Executors should guarantee that only one thread is executing one - * EventHandler at the same time. - * - * @param <PAYLOAD> the payload type of the event. - */ -public interface EventExecutor<PAYLOAD> extends AutoCloseable { - - /** - * Process an event payload. - * - * @param handler the handler to process the payload - * @param eventPayload to be processed. - * @param publisher to send response/other message forward to the chain. - */ - void onMessage(EventHandler<PAYLOAD> handler, - PAYLOAD eventPayload, - EventPublisher - publisher); - - /** - * Return the number of the failed events. - */ - long failedEvents(); - - - /** - * Return the number of the processed events. - */ - long successfulEvents(); - - /** - * Return the number of the not-yet processed events. - */ - long queuedEvents(); - - /** - * The human readable name for the event executor. - *

- * Used in monitoring and logging. - * - */ - String getName(); -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java deleted file mode 100644 index f40fc9e4f15..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventHandler.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Processor to react on an event. - * - * EventExecutors should guarantee that the implementations are called only - * from one thread. - * - * @param - */ -@FunctionalInterface -public interface EventHandler { - - void onMessage(PAYLOAD payload, EventPublisher publisher); - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java deleted file mode 100644 index a47fb5721e2..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventPublisher.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Client interface to send a new event. - */ -public interface EventPublisher { - - > void - fireEvent(EVENT_TYPE event, PAYLOAD payload); - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java deleted file mode 100644 index cd09da66650..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java +++ /dev/null @@ -1,262 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; - -import com.google.common.base.Preconditions; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * Simple async event processing utility. - *

- * Event queue handles a collection of event handlers and routes the incoming - * events to one (or more) event handler. - */ -public class EventQueue implements EventPublisher, AutoCloseable { - - private static final Logger LOG = - LoggerFactory.getLogger(EventQueue.class); - - private static final String EXECUTOR_NAME_SEPARATOR = "For"; - - private final Map>> executors = - new HashMap<>(); - - private final AtomicLong queuedCount = new AtomicLong(0); - - private final AtomicLong eventCount = new AtomicLong(0); - - private boolean isRunning = true; - - private static final Gson TRACING_SERIALIZER = new GsonBuilder().create(); - - public > void addHandler( - EVENT_TYPE event, EventHandler handler) { - this.addHandler(event, handler, generateHandlerName(handler)); - } - - /** - * Add new handler to the event queue. - *

- * By default a separated single thread executor will be dedicated to - * deliver the events to the registered event handler. - * - * @param event Triggering event. - * @param handler Handler of event (will be called from a separated - * thread) - * @param handlerName The name of handler (should be unique together with - * the event name) - * @param The type of the event payload. - * @param The type of the event identifier. - */ - public > void addHandler( - EVENT_TYPE event, EventHandler handler, String handlerName) { - validateEvent(event); - Preconditions.checkNotNull(handler, "Handler name should not be null."); - String executorName = - StringUtils.camelize(event.getName()) + EXECUTOR_NAME_SEPARATOR - + handlerName; - this.addHandler(event, new SingleThreadExecutor<>(executorName), handler); - } - - private > void validateEvent(EVENT_TYPE event) { - Preconditions - .checkArgument(!event.getName().contains(EXECUTOR_NAME_SEPARATOR), - "Event name should not contain " + EXECUTOR_NAME_SEPARATOR - + " string."); - - } - - private String generateHandlerName(EventHandler handler) { - if (!"".equals(handler.getClass().getSimpleName())) { - return handler.getClass().getSimpleName(); - } else { - return handler.getClass().getName(); - } - } - - /** - * Add event handler with custom executor. - * - * @param event Triggering event. - * @param executor The executor imlementation to deliver events from a - * separated threads. Please keep in your mind that - * registering metrics is the responsibility of the - * caller. - * @param handler Handler of event (will be called from a separated - * thread) - * @param The type of the event payload. - * @param The type of the event identifier. - */ - public > void addHandler( - EVENT_TYPE event, EventExecutor executor, - EventHandler handler) { - if (!isRunning) { - LOG.warn("Not adding handler for {}, EventQueue is not running", event); - return; - } - validateEvent(event); - executors.putIfAbsent(event, new HashMap<>()); - executors.get(event).putIfAbsent(executor, new ArrayList<>()); - - executors.get(event).get(executor).add(handler); - } - - /** - * Route an event with payload to the right listener(s). - * - * @param event The event identifier - * @param payload The payload of the event. - * @throws IllegalArgumentException If there is no EventHandler for - * the specific event. - */ - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - - if (!isRunning) { - LOG.warn("Processing of {} is skipped, EventQueue is not running", event); - return; - } - - Map> eventExecutorListMap = - this.executors.get(event); - - eventCount.incrementAndGet(); - if (eventExecutorListMap != null) { - - for (Map.Entry> executorAndHandlers : - eventExecutorListMap.entrySet()) { - - for (EventHandler handler : executorAndHandlers.getValue()) { - queuedCount.incrementAndGet(); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Delivering event {} to executor/handler {}: {}", - event.getName(), - executorAndHandlers.getKey().getName(), - TRACING_SERIALIZER.toJson(payload).replaceAll("\n", "\\\\n")); - } else if (LOG.isDebugEnabled()) { - LOG.debug("Delivering event {} to executor/handler {}: {}", - event.getName(), - executorAndHandlers.getKey().getName(), - payload.getClass().getSimpleName()); - } - executorAndHandlers.getKey() - .onMessage(handler, payload, this); - - } - } - - } else { - LOG.warn("No event handler registered for event " + event); - } - - } - - /** - * This is just for unit testing, don't use it for production code. - *

- * It waits for all messages to be processed. If one event handler invokes - * another one, the latter should also finish. - *

- * Long counter overflow is not handled, therefore it's safe only for unit - * testing. - *
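Taken together, Event, EventHandler, EventQueue and TypedEvent (further down in this patch) form a small in-process pub/sub framework. A minimal usage sketch under that reading; the event name and payload type are made up for illustration, and generic parameters are written out even though the diff rendering above has lost the angle brackets:

    import org.apache.hadoop.hdds.server.events.EventQueue;
    import org.apache.hadoop.hdds.server.events.TypedEvent;

    public class EventQueueUsageSketch {
      public static void main(String[] args) {
        // An event type carrying a String payload; the name must not contain
        // the reserved "For" separator used for executor names.
        TypedEvent<String> sampleEvent = new TypedEvent<>(String.class, "Sample");

        EventQueue queue = new EventQueue();

        // Each handler registered this way gets its own single-threaded executor.
        queue.addHandler(sampleEvent,
            (payload, publisher) -> System.out.println("handled: " + payload));

        // fireEvent only enqueues the payload; delivery happens on the executor thread.
        queue.fireEvent(sampleEvent, "hello");

        // Test-only helper documented above: block until all queued events are handled.
        queue.processAll(1000);
        queue.close();
      }
    }

A custom executor (for example a SingleThreadExecutor with an explicit name) can instead be passed to the three-argument addHandler overload, in which case registering its metrics is the caller's responsibility, as the javadoc above notes.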

- * This method is just eventually consistent. In some cases it could return - * even if there are new messages in some of the handler. But in a simple - * case (one message) it will return only if the message is processed and - * all the dependent messages (messages which are sent by current handlers) - * are processed. - * - * @param timeout Timeout in seconds to wait for the processing. - */ - @VisibleForTesting - public void processAll(long timeout) { - long currentTime = Time.now(); - while (true) { - - if (!isRunning) { - LOG.warn("Processing of event skipped. EventQueue is not running"); - return; - } - - long processed = 0; - - Stream allExecutor = this.executors.values().stream() - .flatMap(handlerMap -> handlerMap.keySet().stream()); - - boolean allIdle = - allExecutor.allMatch(executor -> executor.queuedEvents() == executor - .successfulEvents() + executor.failedEvents()); - - if (allIdle) { - return; - } - - try { - Thread.sleep(100); - } catch (InterruptedException e) { - LOG.warn("Interrupted exception while sleeping.", e); - // We ignore this exception for time being. Review? should we - // propogate it back to caller? - } - - if (Time.now() > currentTime + timeout) { - throw new AssertionError( - "Messages are not processed in the given timeframe. Queued: " - + queuedCount.get() + " Processed: " + processed); - } - } - } - - @Override - public void close() { - - isRunning = false; - - Set allExecutors = this.executors.values().stream() - .flatMap(handlerMap -> handlerMap.keySet().stream()) - .collect(Collectors.toSet()); - - allExecutors.forEach(executor -> { - try { - executor.close(); - } catch (Exception ex) { - LOG.error("Can't close the executor " + executor.getName(), ex); - } - }); - } - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java deleted file mode 100644 index 301c71e8775..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcher.java +++ /dev/null @@ -1,218 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.lease.Lease; -import org.apache.hadoop.ozone.lease.LeaseAlreadyExistException; -import org.apache.hadoop.ozone.lease.LeaseExpiredException; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.ozone.lease.LeaseNotFoundException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.collections.map.HashedMap; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Event watcher the (re)send a message after timeout. - *

- * Event watcher will send the tracked payload/event after a timeout period - * unless a confirmation from the original event (completion event) is arrived. - * - * @param The type of the events which are tracked. - * @param The type of event which could cancel the - * tracking. - */ -@SuppressWarnings("CheckStyle") -public abstract class EventWatcher { - - private static final Logger LOG = LoggerFactory.getLogger(EventWatcher.class); - - private final Event startEvent; - - private final Event completionEvent; - - private final LeaseManager leaseManager; - - private final EventWatcherMetrics metrics; - - private final String name; - - private final Map trackedEventsByID = - new ConcurrentHashMap<>(); - - private final Set trackedEvents = new HashSet<>(); - - private final Map startTrackingTimes = new HashedMap(); - - public EventWatcher(String name, Event startEvent, - Event completionEvent, - LeaseManager leaseManager) { - this.startEvent = startEvent; - this.completionEvent = completionEvent; - this.leaseManager = leaseManager; - this.metrics = new EventWatcherMetrics(); - Preconditions.checkNotNull(name); - if (name.equals("")) { - name = getClass().getSimpleName(); - } - if (name.equals("")) { - //for anonymous inner classes - name = getClass().getName(); - } - this.name = name; - } - - public EventWatcher(Event startEvent, - Event completionEvent, - LeaseManager leaseManager) { - this("", startEvent, completionEvent, leaseManager); - } - - public void start(EventQueue queue) { - - queue.addHandler(startEvent, this::handleStartEvent); - - queue.addHandler(completionEvent, (completionPayload, publisher) -> { - try { - handleCompletion(completionPayload, publisher); - } catch (LeaseNotFoundException e) { - //It's already done. Too late, we already retried it. - //Not a real problem. - LOG.warn("Completion event without active lease. Id={}", - completionPayload.getId()); - } - }); - - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.register(name, "EventWatcher metrics", metrics); - } - - private synchronized void handleStartEvent(TIMEOUT_PAYLOAD payload, - EventPublisher publisher) { - metrics.incrementTrackedEvents(); - long identifier = payload.getId(); - startTrackingTimes.put(identifier, System.currentTimeMillis()); - - trackedEventsByID.put(identifier, payload); - trackedEvents.add(payload); - try { - Lease lease = leaseManager.acquire(identifier); - try { - lease.registerCallBack(() -> { - handleTimeout(publisher, identifier); - return null; - }); - - } catch (LeaseExpiredException e) { - handleTimeout(publisher, identifier); - } - } catch (LeaseAlreadyExistException e) { - //No problem at all. But timer is not reset. 
- } - } - - protected synchronized void handleCompletion(COMPLETION_PAYLOAD - completionPayload, EventPublisher publisher) throws - LeaseNotFoundException { - long id = completionPayload.getId(); - leaseManager.release(id); - TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(id); - if (trackedEvents.remove(payload)) { - metrics.incrementCompletedEvents(); - long originalTime = startTrackingTimes.remove(id); - metrics.updateFinishingTime(System.currentTimeMillis() - originalTime); - onFinished(publisher, payload); - } - } - - private synchronized void handleTimeout(EventPublisher publisher, - long identifier) { - metrics.incrementTimedOutEvents(); - TIMEOUT_PAYLOAD payload = trackedEventsByID.remove(identifier); - trackedEvents.remove(payload); - startTrackingTimes.remove(payload.getId()); - onTimeout(publisher, payload); - } - - - /** - * Check if a specific payload is in-progress. - */ - public synchronized boolean contains(TIMEOUT_PAYLOAD payload) { - return trackedEvents.contains(payload); - } - - public synchronized boolean remove(TIMEOUT_PAYLOAD payload) { - try { - leaseManager.release(payload.getId()); - } catch (LeaseNotFoundException e) { - LOG.warn("Completion event without active lease. Id={}", - payload.getId()); - } - trackedEventsByID.remove(payload.getId()); - return trackedEvents.remove(payload); - - } - - protected abstract void onTimeout( - EventPublisher publisher, TIMEOUT_PAYLOAD payload); - - protected abstract void onFinished( - EventPublisher publisher, TIMEOUT_PAYLOAD payload); - - public List getTimeoutEvents( - Predicate predicate) { - return trackedEventsByID.values().stream().filter(predicate) - .collect(Collectors.toList()); - } - - @VisibleForTesting - protected EventWatcherMetrics getMetrics() { - return metrics; - } - - /** - * Returns a tracked event to which the specified id is - * mapped, or {@code null} if there is no mapping for the id. - */ - public TIMEOUT_PAYLOAD getTrackedEventbyId(long id) { - return trackedEventsByID.get(id); - } - - public Map getTrackedEventsByID() { - return trackedEventsByID; - } - - public Set getTrackedEvents() { - return trackedEvents; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java deleted file mode 100644 index 1db81a98890..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
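EventWatcher above is abstract: concrete watchers supply onTimeout and onFinished, and the lease acquired in handleStartEvent drives the retry-on-timeout behaviour its javadoc describes. A small illustrative subclass under those assumptions; CommandPayload and RetryingCommandWatcher are hypothetical names, and generic parameters are written out although the diff rendering has dropped them:

    import org.apache.hadoop.hdds.server.events.Event;
    import org.apache.hadoop.hdds.server.events.EventPublisher;
    import org.apache.hadoop.hdds.server.events.EventWatcher;
    import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
    import org.apache.hadoop.ozone.lease.LeaseManager;

    // Minimal payload: the framework only needs the long id used as the lease key.
    class CommandPayload implements IdentifiableEventPayload {
      private final long id;
      CommandPayload(long id) {
        this.id = id;
      }
      @Override
      public long getId() {
        return id;
      }
    }

    // Watcher that re-fires the start event when no completion event arrives in time.
    class RetryingCommandWatcher extends EventWatcher<CommandPayload, CommandPayload> {

      private final Event<CommandPayload> startEvent;

      RetryingCommandWatcher(Event<CommandPayload> startEvent,
          Event<CommandPayload> completionEvent,
          LeaseManager<Long> leaseManager) {
        super(startEvent, completionEvent, leaseManager);
        this.startEvent = startEvent;
      }

      @Override
      protected void onTimeout(EventPublisher publisher, CommandPayload payload) {
        // Lease expired without a completion event: send the command again.
        publisher.fireEvent(startEvent, payload);
      }

      @Override
      protected void onFinished(EventPublisher publisher, CommandPayload payload) {
        // Completion arrived within the lease period; nothing to re-send.
      }
    }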

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Metrics for any event watcher. - */ -public class EventWatcherMetrics { - - @Metric() - private MutableCounterLong trackedEvents; - - @Metric() - private MutableCounterLong timedOutEvents; - - @Metric() - private MutableCounterLong completedEvents; - - @Metric() - private MutableRate completionTime; - - public void incrementTrackedEvents() { - trackedEvents.incr(); - } - - public void incrementTimedOutEvents() { - timedOutEvents.incr(); - } - - public void incrementCompletedEvents() { - completedEvents.incr(); - } - - @VisibleForTesting - public void updateFinishingTime(long duration) { - completionTime.add(duration); - } - - @VisibleForTesting - public MutableCounterLong getTrackedEvents() { - return trackedEvents; - } - - @VisibleForTesting - public MutableCounterLong getTimedOutEvents() { - return timedOutEvents; - } - - @VisibleForTesting - public MutableCounterLong getCompletedEvents() { - return completedEvents; - } - - @VisibleForTesting - public MutableRate getCompletionTime() { - return completionTime; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java deleted file mode 100644 index 3faa8e70d1b..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/IdentifiableEventPayload.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Event with an additional unique identifier. - * - */ -public interface IdentifiableEventPayload { - - long getId(); - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java deleted file mode 100644 index 3253f2d5db2..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/SingleThreadExecutor.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Simple EventExecutor to call all the event handler one-by-one. - * - * @param - */ -@Metrics(context = "EventQueue") -public class SingleThreadExecutor implements EventExecutor { - - public static final String THREAD_NAME_PREFIX = "EventQueue"; - - private static final Logger LOG = - LoggerFactory.getLogger(SingleThreadExecutor.class); - - private final String name; - - private final ThreadPoolExecutor executor; - - @Metric - private MutableCounterLong queued; - - @Metric - private MutableCounterLong done; - - @Metric - private MutableCounterLong failed; - - /** - * Create SingleThreadExecutor. - * - * @param name Unique name used in monitoring and metrics. - */ - public SingleThreadExecutor(String name) { - this.name = name; - DefaultMetricsSystem.instance() - .register("EventQueue" + name, "Event Executor metrics ", this); - - LinkedBlockingQueue workQueue = new LinkedBlockingQueue<>(); - executor = - new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, workQueue, - runnable -> { - Thread thread = new Thread(runnable); - thread.setName(THREAD_NAME_PREFIX + "-" + name); - return thread; - }); - - } - - @Override - public void onMessage(EventHandler handler, T message, EventPublisher - publisher) { - queued.incr(); - executor.execute(() -> { - try { - handler.onMessage(message, publisher); - done.incr(); - } catch (Exception ex) { - LOG.error("Error on execution message {}", message, ex); - failed.incr(); - } - }); - } - - @Override - public long failedEvents() { - return failed.value(); - } - - @Override - public long successfulEvents() { - return done.value(); - } - - @Override - public long queuedEvents() { - return queued.value(); - } - - @Override - public void close() { - executor.shutdown(); - } - - @Override - public String getName() { - return name; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java deleted file mode 100644 index 27bba3ab6b4..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/TypedEvent.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -/** - * Basic event implementation to implement custom events. - * - * @param - */ -public class TypedEvent implements Event { - - private final Class payloadType; - - private final String name; - - public TypedEvent(Class payloadType, String name) { - this.payloadType = payloadType; - this.name = name; - } - - public TypedEvent(Class payloadType) { - this.payloadType = payloadType; - this.name = payloadType.getSimpleName(); - } - - @Override - public Class getPayloadType() { - return payloadType; - } - - @Override - public String getName() { - return name; - } - - @Override - public String toString() { - return "TypedEvent{" + - "payloadType=" + payloadType.getSimpleName() + - ", name='" + name + '\'' + - '}'; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java deleted file mode 100644 index 89999ee6d8a..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.server.events; - -/** - * Simple event queue implementation for hdds/ozone components. - */ \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java deleted file mode 100644 index 35ad5e7f496..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.server; - -/** - * Common server side utilities for all the hdds/ozone server components. - */ \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js deleted file mode 100644 index 3b671671de2..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -(function () { - "use strict"; - - var data = {ozone: {enabled: false}}; - - dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn')); - - function loadDatanodeInfo() { - $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo', function(resp) { - data.dn = workaround(resp.beans[0]); - data.dn.HostName = resp.beans[0]['DatanodeHostname']; - render(); - }).fail(show_err_msg); - } - - function loadOzoneScmInfo() { - $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager', function (resp) { - if (resp.beans.length > 0) { - data.ozone.SCMServers = resp.beans[0].SCMServers; - data.ozone.enabled = true; - render(); - } - }).fail(show_err_msg); - } - - function loadOzoneStorageInfo() { - $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager', function (resp) { - if (resp.beans.length > 0) { - data.ozone.LocationReport = resp.beans[0].LocationReport; - data.ozone.enabled = true; - render(); - } - }).fail(show_err_msg); - } - - function workaround(dn) { - function node_map_to_array(nodes) { - var res = []; - for (var n in nodes) { - var p = nodes[n]; - p.name = n; - res.push(p); - } - return res; - } - - dn.VolumeInfo = node_map_to_array(JSON.parse(dn.VolumeInfo)); - dn.BPServiceActorInfo = JSON.parse(dn.BPServiceActorInfo); - - return dn; - } - - function render() { - var base = dust.makeBase({ - 'helper_relative_time' : function (chunk, ctx, bodies, params) { - var value = dust.helpers.tap(params.value, chunk, ctx); - return chunk.write(moment().subtract(Number(value), 'seconds').fromNow(true)); - } - }); - dust.render('dn', base.push(data), function(err, out) { - $('#tab-overview').html(out); - $('#tab-overview').addClass('active'); - }); - } - - function show_err_msg() { - $('#alert-panel-body').html("Failed to load datanode information"); - $('#alert-panel').show(); - } - - loadDatanodeInfo(); - loadOzoneScmInfo(); - loadOzoneStorageInfo(); - -})(); diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js deleted file mode 100644 index c4bf158ece9..00000000000 --- 
a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js +++ /dev/null @@ -1,332 +0,0 @@ -/* - AngularJS v1.6.4 - (c) 2010-2017 Google, Inc. http://angularjs.org - License: MIT -*/ -(function(x){'use strict';function L(a,b){b=b||Error;return function(){var d=arguments[0],c;c="["+(a?a+":":"")+d+"] http://errors.angularjs.org/1.6.4/"+(a?a+"/":"")+d;for(d=1;dc)return"...";var d=b.$$hashKey,f;if(H(a)){f=0;for(var g=a.length;f").append(a).html();try{return a[0].nodeType===Ia?Q(d):d.match(/^(<[^>]+>)/)[1].replace(/^<([\w-]+)/,function(a,b){return"<"+Q(b)})}catch(c){return Q(d)}}function Qc(a){try{return decodeURIComponent(a)}catch(b){}}function Rc(a){var b={};q((a||"").split("&"),function(a){var c,e,f;a&&(e=a=a.replace(/\+/g,"%20"),c=a.indexOf("="),-1!==c&&(e=a.substring(0,c),f=a.substring(c+1)),e=Qc(e),u(e)&&(f= -u(f)?Qc(f):!0,ua.call(b,e)?H(b[e])?b[e].push(f):b[e]=[b[e],f]:b[e]=f))});return b}function Zb(a){var b=[];q(a,function(a,c){H(a)?q(a,function(a){b.push($(c,!0)+(!0===a?"":"="+$(a,!0)))}):b.push($(c,!0)+(!0===a?"":"="+$(a,!0)))});return b.length?b.join("&"):""}function db(a){return $(a,!0).replace(/%26/gi,"&").replace(/%3D/gi,"=").replace(/%2B/gi,"+")}function $(a,b){return encodeURIComponent(a).replace(/%40/gi,"@").replace(/%3A/gi,":").replace(/%24/g,"$").replace(/%2C/gi,",").replace(/%3B/gi,";").replace(/%20/g, -b?"%20":"+")}function te(a,b){var d,c,e=Ja.length;for(c=0;c protocol indicates an extension, document.location.href does not match."))} -function Sc(a,b,d){C(d)||(d={});d=S({strictDi:!1},d);var c=function(){a=B(a);if(a.injector()){var c=a[0]===x.document?"document":xa(a);throw Fa("btstrpd",c.replace(//,">"));}b=b||[];b.unshift(["$provide",function(b){b.value("$rootElement",a)}]);d.debugInfoEnabled&&b.push(["$compileProvider",function(a){a.debugInfoEnabled(!0)}]);b.unshift("ng");c=eb(b,d.strictDi);c.invoke(["$rootScope","$rootElement","$compile","$injector",function(a,b,c,d){a.$apply(function(){b.data("$injector", -d);c(b)(a)})}]);return c},e=/^NG_ENABLE_DEBUG_INFO!/,f=/^NG_DEFER_BOOTSTRAP!/;x&&e.test(x.name)&&(d.debugInfoEnabled=!0,x.name=x.name.replace(e,""));if(x&&!f.test(x.name))return c();x.name=x.name.replace(f,"");ea.resumeBootstrap=function(a){q(a,function(a){b.push(a)});return c()};D(ea.resumeDeferredBootstrap)&&ea.resumeDeferredBootstrap()}function we(){x.name="NG_ENABLE_DEBUG_INFO!"+x.name;x.location.reload()}function xe(a){a=ea.element(a).injector();if(!a)throw Fa("test");return a.get("$$testability")} -function Tc(a,b){b=b||"_";return a.replace(ye,function(a,c){return(c?b:"")+a.toLowerCase()})}function ze(){var a;if(!Uc){var b=rb();(na=w(b)?x.jQuery:b?x[b]:void 0)&&na.fn.on?(B=na,S(na.fn,{scope:Na.scope,isolateScope:Na.isolateScope,controller:Na.controller,injector:Na.injector,inheritedData:Na.inheritedData}),a=na.cleanData,na.cleanData=function(b){for(var c,e=0,f;null!=(f=b[e]);e++)(c=na._data(f,"events"))&&c.$destroy&&na(f).triggerHandler("$destroy");a(b)}):B=W;ea.element=B;Uc=!0}}function fb(a, -b,d){if(!a)throw Fa("areq",b||"?",d||"required");return a}function sb(a,b,d){d&&H(a)&&(a=a[a.length-1]);fb(D(a),b,"not a function, got "+(a&&"object"===typeof a?a.constructor.name||"Object":typeof a));return a}function Ka(a,b){if("hasOwnProperty"===a)throw Fa("badname",b);}function Vc(a,b,d){if(!b)return a;b=b.split(".");for(var c,e=a,f=b.length,g=0;g")+c[2];for(c=c[0];c--;)d=d.lastChild;f=ab(f,d.childNodes); -d=e.firstChild;d.textContent=""}else f.push(b.createTextNode(a));e.textContent="";e.innerHTML="";q(f,function(a){e.appendChild(a)});return 
e}function W(a){if(a instanceof W)return a;var b;F(a)&&(a=T(a),b=!0);if(!(this instanceof W)){if(b&&"<"!==a.charAt(0))throw dc("nosel");return new W(a)}if(b){b=x.document;var d;a=(d=dg.exec(a))?[b.createElement(d[1])]:(d=dd(a,b))?d.childNodes:[];ec(this,a)}else D(a)?ed(a):ec(this,a)}function fc(a){return a.cloneNode(!0)}function xb(a,b){!b&&bc(a)&&B.cleanData([a]); -a.querySelectorAll&&B.cleanData(a.querySelectorAll("*"))}function fd(a,b,d,c){if(u(c))throw dc("offargs");var e=(c=yb(a))&&c.events,f=c&&c.handle;if(f)if(b){var g=function(b){var c=e[b];u(d)&&$a(c||[],d);u(d)&&c&&0l&&this.remove(p.key);return b}},get:function(a){if(l";b=ta.firstChild.attributes;var d=b[0];b.removeNamedItem(d.name);d.value=c;a.attributes.setNamedItem(d)}function La(a, -b){try{a.addClass(b)}catch(c){}}function ca(a,b,c,d,e){a instanceof B||(a=B(a));var f=Ma(a,b,a,c,d,e);ca.$$addScopeClass(a);var g=null;return function(b,c,d){if(!a)throw fa("multilink");fb(b,"scope");e&&e.needsNewScope&&(b=b.$parent.$new());d=d||{};var h=d.parentBoundTranscludeFn,k=d.transcludeControllers;d=d.futureParentElement;h&&h.$$boundTransclude&&(h=h.$$boundTransclude);g||(g=(d=d&&d[0])?"foreignobject"!==wa(d)&&ma.call(d).match(/SVG/)?"svg":"html":"html");d="html"!==g?B(ha(g,B("

").append(a).html())): -c?Na.clone.call(a):a;if(k)for(var l in k)d.data("$"+l+"Controller",k[l].instance);ca.$$addScopeInfo(d,b);c&&c(d,b);f&&f(b,d,d,h);c||(a=f=null);return d}}function Ma(a,b,c,d,e,f){function g(a,c,d,e){var f,k,l,m,n,p,r;if(K)for(r=Array(c.length),m=0;my.priority)break;if(x=y.scope)y.templateUrl||(C(x)?($("new/isolated scope",E||K,y,v),E=y):$("new/isolated scope",E,y,v)),K=K||y;P=y.name;if(!u&&(y.replace&&(y.templateUrl||y.template)||y.transclude&&!y.$$tlb)){for(x=z+1;u=a[x++];)if(u.transclude&&!u.$$tlb||u.replace&&(u.templateUrl||u.template)){La=!0;break}u=!0}!y.templateUrl&& -y.controller&&(G=G||V(),$("'"+P+"' controller",G[P],y,v),G[P]=y);if(x=y.transclude)if(J=!0,y.$$tlb||($("transclusion",t,y,v),t=y),"element"===x)X=!0,p=y.priority,N=v,v=d.$$element=B(ca.$$createComment(P,d[P])),b=v[0],ka(f,va.call(N,0),b),N[0].$$parentNode=N[0].parentNode,A=kc(La,N,e,p,g&&g.name,{nonTlbTranscludeDirective:t});else{var ja=V();if(C(x)){N=[];var Q=V(),jb=V();q(x,function(a,b){var c="?"===a.charAt(0);a=c?a.substring(1):a;Q[a]=b;ja[b]=null;jb[b]=c});q(v.contents(),function(a){var b=Q[Ba(wa(a))]; -b?(jb[b]=!0,ja[b]=ja[b]||[],ja[b].push(a)):N.push(a)});q(jb,function(a,b){if(!a)throw fa("reqslot",b);});for(var ic in ja)ja[ic]&&(ja[ic]=kc(La,ja[ic],e))}else N=B(fc(b)).contents();v.empty();A=kc(La,N,e,void 0,void 0,{needsNewScope:y.$$isolateScope||y.$$newScope});A.$$slots=ja}if(y.template)if(O=!0,$("template",I,y,v),I=y,x=D(y.template)?y.template(v,d):y.template,x=Ea(x),y.replace){g=y;N=cc.test(x)?pd(ha(y.templateNamespace,T(x))):[];b=N[0];if(1!==N.length||1!==b.nodeType)throw fa("tplrt",P,""); -ka(f,v,b);F={$attr:{}};x=jc(b,[],F);var Y=a.splice(z+1,a.length-(z+1));(E||K)&&aa(x,E,K);a=a.concat(x).concat(Y);da(d,F);F=a.length}else v.html(x);if(y.templateUrl)O=!0,$("template",I,y,v),I=y,y.replace&&(g=y),n=ga(a.splice(z,a.length-z),v,d,f,J&&A,h,k,{controllerDirectives:G,newScopeDirective:K!==y&&K,newIsolateScopeDirective:E,templateDirective:I,nonTlbTranscludeDirective:t}),F=a.length;else if(y.compile)try{R=y.compile(v,d,A);var Z=y.$$originalDirective||y;D(R)?m(null,bb(Z,R),Ma,L):R&&m(bb(Z,R.pre), -bb(Z,R.post),Ma,L)}catch(ea){c(ea,xa(v))}y.terminal&&(n.terminal=!0,p=Math.max(p,y.priority))}n.scope=K&&!0===K.scope;n.transcludeOnThisElement=J;n.templateOnThisElement=O;n.transclude=A;l.hasElementTranscludeDirective=X;return n}function U(a,b,c,d){var e;if(F(b)){var f=b.match(l);b=b.substring(f[0].length);var g=f[1]||f[3],f="?"===f[2];"^^"===g?c=c.parent():e=(e=d&&d[b])&&e.instance;if(!e){var h="$"+b+"Controller";e=g?c.inheritedData(h):c.data(h)}if(!e&&!f)throw fa("ctreq",b,a);}else if(H(b))for(e= -[],g=0,f=b.length;gc.priority)&&-1!==c.restrict.indexOf(e)){k&&(c=Vb(c,{$$start:k,$$end:l}));if(!c.$$bindings){var K=m=c,r=c.name,t={isolateScope:null,bindToController:null};C(K.scope)&&(!0===K.bindToController?(t.bindToController=d(K.scope,r,!0),t.isolateScope={}):t.isolateScope=d(K.scope,r,!1));C(K.bindToController)&&(t.bindToController=d(K.bindToController,r,!0));if(t.bindToController&&!K.controller)throw fa("noctrl", -r);m=m.$$bindings=t;C(m.isolateScope)&&(c.$$isolateBindings=m.isolateScope)}b.push(c);m=c}}return m}function Z(b){if(f.hasOwnProperty(b))for(var c=a.get(b+"Directive"),d=0,e=c.length;d"+b+"";return c.childNodes[0].childNodes;default:return b}}function oa(a,b){if("srcdoc"===b)return y.HTML;var c=wa(a);if("src"===b||"ngSrc"===b){if(-1===["img","video","audio","source","track"].indexOf(c))return y.RESOURCE_URL}else if("xlinkHref"===b||"form"===c&&"action"===b||"link"===c&&"href"===b)return 
y.RESOURCE_URL}function pa(a, -c,d,e,f){var g=oa(a,e),h=k[e]||f,l=b(d,!f,g,h);if(l){if("multiple"===e&&"select"===wa(a))throw fa("selmulti",xa(a));if(m.test(e))throw fa("nodomevents");c.push({priority:100,compile:function(){return{pre:function(a,c,f){c=f.$$observers||(f.$$observers=V());var k=f[e];k!==d&&(l=k&&b(k,!0,g,h),d=k);l&&(f[e]=l(a),(c[e]||(c[e]=[])).$$inter=!0,(f.$$observers&&f.$$observers[e].$$scope||a).$watch(l,function(a,b){"class"===e&&a!==b?f.$updateClass(a,b):f.$set(e,a)}))}}}})}}function ka(a,b,c){var d=b[0],e= -b.length,f=d.parentNode,g,h;if(a)for(g=0,h=a.length;g=b)return a;for(;b--;){var d=a[b];(8===d.nodeType||d.nodeType===Ia&&""===d.nodeValue.trim())&&sg.call(a,b,1)}return a}function qg(a,b){if(b&&F(b))return b;if(F(a)){var d=sd.exec(a);if(d)return d[3]}}function wf(){var a={},b=!1;this.has=function(b){return a.hasOwnProperty(b)};this.register=function(b,c){Ka(b,"controller");C(b)? -S(a,b):a[b]=c};this.allowGlobals=function(){b=!0};this.$get=["$injector","$window",function(d,c){function e(a,b,c,d){if(!a||!C(a.$scope))throw L("$controller")("noscp",d,b);a.$scope[b]=c}return function(f,g,h,k){var l,m,n;h=!0===h;k&&F(k)&&(n=k);if(F(f)){k=f.match(sd);if(!k)throw td("ctrlfmt",f);m=k[1];n=n||k[3];f=a.hasOwnProperty(m)?a[m]:Vc(g.$scope,m,!0)||(b?Vc(c,m,!0):void 0);if(!f)throw td("ctrlreg",m);sb(f,m,!0)}if(h)return h=(H(f)?f[f.length-1]:f).prototype,l=Object.create(h||null),n&&e(g,n, -l,m||f.name),S(function(){var a=d.invoke(f,l,g,m);a!==l&&(C(a)||D(a))&&(l=a,n&&e(g,n,l,m||f.name));return l},{instance:l,identifier:n});l=d.instantiate(f,g,m);n&&e(g,n,l,m||f.name);return l}}]}function xf(){this.$get=["$window",function(a){return B(a.document)}]}function yf(){this.$get=["$document","$rootScope",function(a,b){function d(){e=c.hidden}var c=a[0],e=c&&c.hidden;a.on("visibilitychange",d);b.$on("$destroy",function(){a.off("visibilitychange",d)});return function(){return e}}]}function zf(){this.$get= -["$log",function(a){return function(b,d){a.error.apply(a,arguments)}}]}function mc(a){return C(a)?ga(a)?a.toISOString():cb(a):a}function Ef(){this.$get=function(){return function(a){if(!a)return"";var b=[];Kc(a,function(a,c){null===a||w(a)||(H(a)?q(a,function(a){b.push($(c)+"="+$(mc(a)))}):b.push($(c)+"="+$(mc(a))))});return b.join("&")}}}function Ff(){this.$get=function(){return function(a){function b(a,e,f){null===a||w(a)||(H(a)?q(a,function(a,c){b(a,e+"["+(C(a)?c:"")+"]")}):C(a)&&!ga(a)?Kc(a,function(a, -c){b(a,e+(f?"":"[")+c+(f?"":"]"))}):d.push($(e)+"="+$(mc(a))))}if(!a)return"";var d=[];b(a,"",!0);return d.join("&")}}}function nc(a,b){if(F(a)){var d=a.replace(tg,"").trim();if(d){var c=b("Content-Type");(c=c&&0===c.indexOf(ud))||(c=(c=d.match(ug))&&vg[c[0]].test(d));if(c)try{a=Oc(d)}catch(e){throw oc("baddata",a,e);}}}return a}function vd(a){var b=V(),d;F(a)?q(a.split("\n"),function(a){d=a.indexOf(":");var e=Q(T(a.substr(0,d)));a=T(a.substr(d+1));e&&(b[e]=b[e]?b[e]+", "+a:a)}):C(a)&&q(a,function(a, -d){var f=Q(d),g=T(a);f&&(b[f]=b[f]?b[f]+", "+g:g)});return b}function wd(a){var b;return function(d){b||(b=vd(a));return d?(d=b[Q(d)],void 0===d&&(d=null),d):b}}function xd(a,b,d,c){if(D(c))return c(a,b,d);q(c,function(c){a=c(a,b,d)});return a}function Df(){var a=this.defaults={transformResponse:[nc],transformRequest:[function(a){return C(a)&&"[object File]"!==ma.call(a)&&"[object Blob]"!==ma.call(a)&&"[object FormData]"!==ma.call(a)?cb(a):a}],headers:{common:{Accept:"application/json, text/plain, */*"}, 
-post:pa(pc),put:pa(pc),patch:pa(pc)},xsrfCookieName:"XSRF-TOKEN",xsrfHeaderName:"X-XSRF-TOKEN",paramSerializer:"$httpParamSerializer",jsonpCallbackParam:"callback"},b=!1;this.useApplyAsync=function(a){return u(a)?(b=!!a,this):b};var d=this.interceptors=[];this.$get=["$browser","$httpBackend","$$cookieReader","$cacheFactory","$rootScope","$q","$injector","$sce",function(c,e,f,g,h,k,l,m){function n(b){function d(a,b){for(var c=0,e=b.length;ca?b:k.reject(b)}if(!C(b))throw L("$http")("badreq",b);if(!F(m.valueOf(b.url)))throw L("$http")("badreq",b.url);var g=S({method:"get",transformRequest:a.transformRequest,transformResponse:a.transformResponse,paramSerializer:a.paramSerializer,jsonpCallbackParam:a.jsonpCallbackParam},b);g.headers= -function(b){var c=a.headers,d=S({},b.headers),f,g,h,c=S({},c.common,c[Q(b.method)]);a:for(f in c){g=Q(f);for(h in d)if(Q(h)===g)continue a;d[f]=c[f]}return e(d,pa(b))}(b);g.method=ub(g.method);g.paramSerializer=F(g.paramSerializer)?l.get(g.paramSerializer):g.paramSerializer;c.$$incOutstandingRequestCount();var h=[],n=[];b=k.resolve(g);q(t,function(a){(a.request||a.requestError)&&h.unshift(a.request,a.requestError);(a.response||a.responseError)&&n.push(a.response,a.responseError)});b=d(b,h);b=b.then(function(b){var c= -b.headers,d=xd(b.data,wd(c),void 0,b.transformRequest);w(d)&&q(c,function(a,b){"content-type"===Q(b)&&delete c[b]});w(b.withCredentials)&&!w(a.withCredentials)&&(b.withCredentials=a.withCredentials);return p(b,d).then(f,f)});b=d(b,n);return b=b.finally(function(){c.$$completeOutstandingRequest(z)})}function p(c,d){function g(a){if(a){var c={};q(a,function(a,d){c[d]=function(c){function d(){a(c)}b?h.$applyAsync(d):h.$$phase?d():h.$apply(d)}});return c}}function l(a,c,d,e){function f(){p(c,a,d,e)}O&& -(200<=a&&300>a?O.put(R,[a,c,vd(d),e]):O.remove(R));b?h.$applyAsync(f):(f(),h.$$phase||h.$apply())}function p(a,b,d,e){b=-1<=b?b:0;(200<=b&&300>b?G.resolve:G.reject)({data:a,status:b,headers:wd(d),config:c,statusText:e})}function K(a){p(a.data,a.status,pa(a.headers()),a.statusText)}function t(){var a=n.pendingRequests.indexOf(c);-1!==a&&n.pendingRequests.splice(a,1)}var G=k.defer(),y=G.promise,O,X,P=c.headers,s="jsonp"===Q(c.method),R=c.url;s?R=m.getTrustedResourceUrl(R):F(R)||(R=m.valueOf(R));R=r(R, -c.paramSerializer(c.params));s&&(R=J(R,c.jsonpCallbackParam));n.pendingRequests.push(c);y.then(t,t);!c.cache&&!a.cache||!1===c.cache||"GET"!==c.method&&"JSONP"!==c.method||(O=C(c.cache)?c.cache:C(a.cache)?a.cache:v);O&&(X=O.get(R),u(X)?X&&D(X.then)?X.then(K,K):H(X)?p(X[1],X[0],pa(X[2]),X[3]):p(X,200,{},"OK"):O.put(R,y));w(X)&&((X=yd(c.url)?f()[c.xsrfCookieName||a.xsrfCookieName]:void 0)&&(P[c.xsrfHeaderName||a.xsrfHeaderName]=X),e(c.method,R,d,l,P,c.timeout,c.withCredentials,c.responseType,g(c.eventHandlers), -g(c.uploadEventHandlers)));return y}function r(a,b){0=l&&(q.resolve(t),v(A.$$intervalId),delete g[A.$$intervalId]);M||a.$apply()},k);g[A.$$intervalId]=q;return A}var g={};f.cancel=function(a){return a&&a.$$intervalId in g?(g[a.$$intervalId].promise.catch(z),g[a.$$intervalId].reject("canceled"),b.clearInterval(a.$$intervalId),delete g[a.$$intervalId],!0):!1};return f}]}function qc(a){a=a.split("/");for(var b=a.length;b--;)a[b]= -db(a[b]);return a.join("/")}function zd(a,b){var d=Ca(a);b.$$protocol=d.protocol;b.$$host=d.hostname;b.$$port=Z(d.port)||xg[d.protocol]||null}function Ad(a,b){if(yg.test(a))throw kb("badpath",a);var d="/"!==a.charAt(0);d&&(a="/"+a);var 
c=Ca(a);b.$$path=decodeURIComponent(d&&"/"===c.pathname.charAt(0)?c.pathname.substring(1):c.pathname);b.$$search=Rc(c.search);b.$$hash=decodeURIComponent(c.hash);b.$$path&&"/"!==b.$$path.charAt(0)&&(b.$$path="/"+b.$$path)}function rc(a,b){return a.slice(0,b.length)=== -b}function ka(a,b){if(rc(b,a))return b.substr(a.length)}function Aa(a){var b=a.indexOf("#");return-1===b?a:a.substr(0,b)}function lb(a){return a.replace(/(#.+)|#$/,"$1")}function sc(a,b,d){this.$$html5=!0;d=d||"";zd(a,this);this.$$parse=function(a){var d=ka(b,a);if(!F(d))throw kb("ipthprfx",a,b);Ad(d,this);this.$$path||(this.$$path="/");this.$$compose()};this.$$compose=function(){var a=Zb(this.$$search),d=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(a?"?"+a:"")+d;this.$$absUrl=b+ -this.$$url.substr(1);this.$$urlUpdatedByLocation=!0};this.$$parseLinkUrl=function(c,e){if(e&&"#"===e[0])return this.hash(e.slice(1)),!0;var f,g;u(f=ka(a,c))?(g=f,g=d&&u(f=ka(d,f))?b+(ka("/",f)||f):a+g):u(f=ka(b,c))?g=b+f:b===c+"/"&&(g=b);g&&this.$$parse(g);return!!g}}function tc(a,b,d){zd(a,this);this.$$parse=function(c){var e=ka(a,c)||ka(b,c),f;w(e)||"#"!==e.charAt(0)?this.$$html5?f=e:(f="",w(e)&&(a=c,this.replace())):(f=ka(d,e),w(f)&&(f=e));Ad(f,this);c=this.$$path;var e=a,g=/^\/[A-Z]:(\/.*)/;rc(f, -e)&&(f=f.replace(e,""));g.exec(f)||(c=(f=g.exec(c))?f[1]:c);this.$$path=c;this.$$compose()};this.$$compose=function(){var b=Zb(this.$$search),e=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(b?"?"+b:"")+e;this.$$absUrl=a+(this.$$url?d+this.$$url:"");this.$$urlUpdatedByLocation=!0};this.$$parseLinkUrl=function(b,d){return Aa(a)===Aa(b)?(this.$$parse(b),!0):!1}}function Bd(a,b,d){this.$$html5=!0;tc.apply(this,arguments);this.$$parseLinkUrl=function(c,e){if(e&&"#"===e[0])return this.hash(e.slice(1)), -!0;var f,g;a===Aa(c)?f=c:(g=ka(b,c))?f=a+d+g:b===c+"/"&&(f=b);f&&this.$$parse(f);return!!f};this.$$compose=function(){var b=Zb(this.$$search),e=this.$$hash?"#"+db(this.$$hash):"";this.$$url=qc(this.$$path)+(b?"?"+b:"")+e;this.$$absUrl=a+d+this.$$url;this.$$urlUpdatedByLocation=!0}}function Jb(a){return function(){return this[a]}}function Cd(a,b){return function(d){if(w(d))return this[a];this[a]=b(d);this.$$compose();return this}}function Jf(){var a="!",b={enabled:!1,requireBase:!0,rewriteLinks:!0}; -this.hashPrefix=function(b){return u(b)?(a=b,this):a};this.html5Mode=function(a){if(Ha(a))return b.enabled=a,this;if(C(a)){Ha(a.enabled)&&(b.enabled=a.enabled);Ha(a.requireBase)&&(b.requireBase=a.requireBase);if(Ha(a.rewriteLinks)||F(a.rewriteLinks))b.rewriteLinks=a.rewriteLinks;return this}return b};this.$get=["$rootScope","$browser","$sniffer","$rootElement","$window",function(d,c,e,f,g){function h(a,b,d){var e=l.url(),f=l.$$state;try{c.url(a,b,d),l.$$state=c.state()}catch(g){throw l.url(e),l.$$state= -f,g;}}function k(a,b){d.$broadcast("$locationChangeSuccess",l.absUrl(),a,l.$$state,b)}var l,m;m=c.baseHref();var n=c.url(),p;if(b.enabled){if(!m&&b.requireBase)throw kb("nobase");p=n.substring(0,n.indexOf("/",n.indexOf("//")+2))+(m||"/");m=e.history?sc:Bd}else p=Aa(n),m=tc;var r=p.substr(0,Aa(p).lastIndexOf("/")+1);l=new m(p,r,"#"+a);l.$$parseLinkUrl(n,n);l.$$state=c.state();var J=/^\s*(javascript|mailto):/i;f.on("click",function(a){var e=b.rewriteLinks;if(e&&!a.ctrlKey&&!a.metaKey&&!a.shiftKey&& -2!==a.which&&2!==a.button){for(var h=B(a.target);"a"!==wa(h[0]);)if(h[0]===f[0]||!(h=h.parent())[0])return;if(!F(e)||!w(h.attr(e))){var e=h.prop("href"),k=h.attr("href")||h.attr("xlink:href");C(e)&&"[object 
SVGAnimatedString]"===e.toString()&&(e=Ca(e.animVal).href);J.test(e)||!e||h.attr("target")||a.isDefaultPrevented()||!l.$$parseLinkUrl(e,k)||(a.preventDefault(),l.absUrl()!==c.url()&&(d.$apply(),g.angular["ff-684208-preventDefault"]=!0))}}});lb(l.absUrl())!==lb(n)&&c.url(l.absUrl(),!0);var v=!0; -c.onUrlChange(function(a,b){rc(a,r)?(d.$evalAsync(function(){var c=l.absUrl(),e=l.$$state,f;a=lb(a);l.$$parse(a);l.$$state=b;f=d.$broadcast("$locationChangeStart",a,c,b,e).defaultPrevented;l.absUrl()===a&&(f?(l.$$parse(c),l.$$state=e,h(c,!1,e)):(v=!1,k(c,e)))}),d.$$phase||d.$digest()):g.location.href=a});d.$watch(function(){if(v||l.$$urlUpdatedByLocation){l.$$urlUpdatedByLocation=!1;var a=lb(c.url()),b=lb(l.absUrl()),f=c.state(),g=l.$$replace,m=a!==b||l.$$html5&&e.history&&f!==l.$$state;if(v||m)v= -!1,d.$evalAsync(function(){var b=l.absUrl(),c=d.$broadcast("$locationChangeStart",b,a,l.$$state,f).defaultPrevented;l.absUrl()===b&&(c?(l.$$parse(a),l.$$state=f):(m&&h(b,g,f===l.$$state?null:l.$$state),k(a,f)))})}l.$$replace=!1});return l}]}function Kf(){var a=!0,b=this;this.debugEnabled=function(b){return u(b)?(a=b,this):a};this.$get=["$window",function(d){function c(a){a instanceof Error&&(a.stack&&f?a=a.message&&-1===a.stack.indexOf(a.message)?"Error: "+a.message+"\n"+a.stack:a.stack:a.sourceURL&& -(a=a.message+"\n"+a.sourceURL+":"+a.line));return a}function e(a){var b=d.console||{},e=b[a]||b.log||z;a=!1;try{a=!!e.apply}catch(f){}return a?function(){var a=[];q(arguments,function(b){a.push(c(b))});return e.apply(b,a)}:function(a,b){e(a,null==b?"":b)}}var f=za||/\bEdge\//.test(d.navigator&&d.navigator.userAgent);return{log:e("log"),info:e("info"),warn:e("warn"),error:e("error"),debug:function(){var c=e("debug");return function(){a&&c.apply(b,arguments)}}()}}]}function zg(a){return a+""}function Ag(a, -b){return"undefined"!==typeof a?a:b}function Dd(a,b){return"undefined"===typeof a?b:"undefined"===typeof b?a:a+b}function U(a,b){var d,c,e;switch(a.type){case s.Program:d=!0;q(a.body,function(a){U(a.expression,b);d=d&&a.expression.constant});a.constant=d;break;case s.Literal:a.constant=!0;a.toWatch=[];break;case s.UnaryExpression:U(a.argument,b);a.constant=a.argument.constant;a.toWatch=a.argument.toWatch;break;case s.BinaryExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant; -a.toWatch=a.left.toWatch.concat(a.right.toWatch);break;case s.LogicalExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant;a.toWatch=a.constant?[]:[a];break;case s.ConditionalExpression:U(a.test,b);U(a.alternate,b);U(a.consequent,b);a.constant=a.test.constant&&a.alternate.constant&&a.consequent.constant;a.toWatch=a.constant?[]:[a];break;case s.Identifier:a.constant=!1;a.toWatch=[a];break;case s.MemberExpression:U(a.object,b);a.computed&&U(a.property,b);a.constant=a.object.constant&& -(!a.computed||a.property.constant);a.toWatch=[a];break;case s.CallExpression:d=e=a.filter?!b(a.callee.name).$stateful:!1;c=[];q(a.arguments,function(a){U(a,b);d=d&&a.constant;a.constant||c.push.apply(c,a.toWatch)});a.constant=d;a.toWatch=e?c:[a];break;case s.AssignmentExpression:U(a.left,b);U(a.right,b);a.constant=a.left.constant&&a.right.constant;a.toWatch=[a];break;case s.ArrayExpression:d=!0;c=[];q(a.elements,function(a){U(a,b);d=d&&a.constant;a.constant||c.push.apply(c,a.toWatch)});a.constant= -d;a.toWatch=c;break;case 
s.ObjectExpression:d=!0;c=[];q(a.properties,function(a){U(a.value,b);d=d&&a.value.constant&&!a.computed;a.value.constant||c.push.apply(c,a.value.toWatch);a.computed&&(U(a.key,b),a.key.constant||c.push.apply(c,a.key.toWatch))});a.constant=d;a.toWatch=c;break;case s.ThisExpression:a.constant=!1;a.toWatch=[];break;case s.LocalsExpression:a.constant=!1,a.toWatch=[]}}function Ed(a){if(1===a.length){a=a[0].expression;var b=a.toWatch;return 1!==b.length?b:b[0]!==a?b:void 0}}function Fd(a){return a.type=== -s.Identifier||a.type===s.MemberExpression}function Gd(a){if(1===a.body.length&&Fd(a.body[0].expression))return{type:s.AssignmentExpression,left:a.body[0].expression,right:{type:s.NGValueParameter},operator:"="}}function Hd(a){this.$filter=a}function Id(a){this.$filter=a}function uc(a,b,d){this.ast=new s(a,d);this.astCompiler=d.csp?new Id(b):new Hd(b)}function vc(a){return D(a.valueOf)?a.valueOf():Bg.call(a)}function Lf(){var a=V(),b={"true":!0,"false":!1,"null":null,undefined:void 0},d,c;this.addLiteral= -function(a,c){b[a]=c};this.setIdentifierFns=function(a,b){d=a;c=b;return this};this.$get=["$filter",function(e){function f(a,b,c){return null==a||null==b?a===b:"object"!==typeof a||(a=vc(a),"object"!==typeof a||c)?a===b||a!==a&&b!==b:!1}function g(a,b,c,d,e){var g=d.inputs,h;if(1===g.length){var k=f,g=g[0];return a.$watch(function(a){var b=g(a);f(b,k,d.literal)||(h=d(a,void 0,void 0,[b]),k=b&&vc(b));return h},b,c,e)}for(var l=[],m=[],n=0,E=g.length;n=c.$$state.status&&e&&e.length&&a(function(){for(var a,c,f=0,g=e.length;fa)for(b in l++,f)ua.call(e,b)||(t--,delete f[b])}else f!==e&&(f=e,l++);return l}}c.$stateful=!0;var d=this,e,f,h,k=1t&&(w=4-t,u[w]||(u[w]=[]),u[w].push({msg:D(a.exp)?"fn: "+(a.exp.name||a.exp.toString()):a.exp,newVal:g,oldVal:k}));else if(a===c){r=!1;break a}}catch(B){f(B)}if(!(p=q.$$watchersCount&&q.$$childHead||q!==this&&q.$$nextSibling))for(;q!==this&&!(p=q.$$nextSibling);)q=q.$parent}while(q=p);if((r||s.length)&&!t--)throw M.$$phase= -null,d("infdig",b,u);}while(r||s.length);for(M.$$phase=null;Iza)throw ta("iequirks");var c=pa(oa);c.isEnabled=function(){return a};c.trustAs=d.trustAs;c.getTrusted=d.getTrusted;c.valueOf=d.valueOf;a||(c.trustAs=c.getTrusted=function(a,b){return b},c.valueOf=Ya);c.parseAs=function(a,d){var e=b(d);return e.literal&&e.constant?e:b(d,function(b){return c.getTrusted(a,b)})};var e=c.parseAs, -f=c.getTrusted,g=c.trustAs;q(oa,function(a,b){var d=Q(b);c[("parse_as_"+d).replace(xc,gb)]=function(b){return e(a,b)};c[("get_trusted_"+d).replace(xc,gb)]=function(b){return f(a,b)};c[("trust_as_"+d).replace(xc,gb)]=function(b){return g(a,b)}});return c}]}function Rf(){this.$get=["$window","$document",function(a,b){var d={},c=!((!a.nw||!a.nw.process)&&a.chrome&&(a.chrome.app&&a.chrome.app.runtime||!a.chrome.app&&a.chrome.runtime&&a.chrome.runtime.id))&&a.history&&a.history.pushState,e=Z((/android (\d+)/.exec(Q((a.navigator|| -{}).userAgent))||[])[1]),f=/Boxee/i.test((a.navigator||{}).userAgent),g=b[0]||{},h=g.body&&g.body.style,k=!1,l=!1;h&&(k=!!("transition"in h||"webkitTransition"in h),l=!!("animation"in h||"webkitAnimation"in h));return{history:!(!c||4>e||f),hasEvent:function(a){if("input"===a&&za)return!1;if(w(d[a])){var b=g.createElement("div");d[a]="on"+a in b}return d[a]},csp:Ga(),transitions:k,animations:l,android:e}}]}function Tf(){var a;this.httpOptions=function(b){return b?(a=b,this):a};this.$get=["$exceptionHandler", -"$templateCache","$http","$q","$sce",function(b,d,c,e,f){function 
g(h,k){g.totalPendingRequests++;if(!F(h)||w(d.get(h)))h=f.getTrustedResourceUrl(h);var l=c.defaults&&c.defaults.transformResponse;H(l)?l=l.filter(function(a){return a!==nc}):l===nc&&(l=null);return c.get(h,S({cache:d,transformResponse:l},a)).finally(function(){g.totalPendingRequests--}).then(function(a){d.put(h,a.data);return a.data},function(a){k||(a=Dg("tpload",h,a.status,a.statusText),b(a));return e.reject(a)})}g.totalPendingRequests= -0;return g}]}function Uf(){this.$get=["$rootScope","$browser","$location",function(a,b,d){return{findBindings:function(a,b,d){a=a.getElementsByClassName("ng-binding");var g=[];q(a,function(a){var c=ea.element(a).data("$binding");c&&q(c,function(c){d?(new RegExp("(^|\\s)"+Kd(b)+"(\\s|\\||$)")).test(c)&&g.push(a):-1!==c.indexOf(b)&&g.push(a)})});return g},findModels:function(a,b,d){for(var g=["ng-","data-ng-","ng\\:"],h=0;hc&&(c=e),c+=+a.slice(e+1),a=a.substring(0,e)):0>c&&(c=a.length);for(e=0;a.charAt(e)===zc;e++); -if(e===(g=a.length))d=[0],c=1;else{for(g--;a.charAt(g)===zc;)g--;c-=e;d=[];for(f=0;e<=g;e++,f++)d[f]=+a.charAt(e)}c>Ud&&(d=d.splice(0,Ud-1),b=c-1,c=1);return{d:d,e:b,i:c}}function Lg(a,b,d,c){var e=a.d,f=e.length-a.i;b=w(b)?Math.min(Math.max(d,f),c):+b;d=b+a.i;c=e[d];if(0d-1){for(c=0;c>d;c--)e.unshift(0),a.i++;e.unshift(1);a.i++}else e[d- -1]++;for(;fh;)k.unshift(0),h++;0=b.lgSize&&h.unshift(k.splice(-b.lgSize,k.length).join(""));k.length> -b.gSize;)h.unshift(k.splice(-b.gSize,k.length).join(""));k.length&&h.unshift(k.join(""));k=h.join(d);f.length&&(k+=c+f.join(""));e&&(k+="e+"+e)}return 0>a&&!g?b.negPre+k+b.negSuf:b.posPre+k+b.posSuf}function Kb(a,b,d,c){var e="";if(0>a||c&&0>=a)c?a=-a+1:(a=-a,e="-");for(a=""+a;a.length-d)f+=d;0===f&&-12===d&&(f=12);return Kb(f,b,c,e)}}function mb(a,b,d){return function(c,e){var f= -c["get"+a](),g=ub((d?"STANDALONE":"")+(b?"SHORT":"")+a);return e[g][f]}}function Vd(a){var b=(new Date(a,0,1)).getDay();return new Date(a,0,(4>=b?5:12)-b)}function Wd(a){return function(b){var d=Vd(b.getFullYear());b=+new Date(b.getFullYear(),b.getMonth(),b.getDate()+(4-b.getDay()))-+d;b=1+Math.round(b/6048E5);return Kb(b,a)}}function Ac(a,b){return 0>=a.getFullYear()?b.ERAS[0]:b.ERAS[1]}function Pd(a){function b(a){var b;if(b=a.match(d)){a=new Date(0);var f=0,g=0,h=b[8]?a.setUTCFullYear:a.setFullYear, -k=b[8]?a.setUTCHours:a.setHours;b[9]&&(f=Z(b[9]+b[10]),g=Z(b[9]+b[11]));h.call(a,Z(b[1]),Z(b[2])-1,Z(b[3]));f=Z(b[4]||0)-f;g=Z(b[5]||0)-g;h=Z(b[6]||0);b=Math.round(1E3*parseFloat("0."+(b[7]||0)));k.call(a,f,g,h,b)}return a}var d=/^(\d{4})-?(\d\d)-?(\d\d)(?:T(\d\d)(?::?(\d\d)(?::?(\d\d)(?:\.(\d+))?)?)?(Z|([+-])(\d\d):?(\d\d))?)?$/;return function(c,d,f){var g="",h=[],k,l;d=d||"mediumDate";d=a.DATETIME_FORMATS[d]||d;F(c)&&(c=Mg.test(c)?Z(c):b(c));ba(c)&&(c=new Date(c));if(!ga(c)||!isFinite(c.getTime()))return c; -for(;d;)(l=Ng.exec(d))?(h=ab(h,l,1),d=h.pop()):(h.push(d),d=null);var m=c.getTimezoneOffset();f&&(m=Pc(f,m),c=Yb(c,f,!0));q(h,function(b){k=Og[b];g+=k?k(c,a.DATETIME_FORMATS,m):"''"===b?"'":b.replace(/(^'|'$)/g,"").replace(/''/g,"'")});return g}}function Fg(){return function(a,b){w(b)&&(b=2);return cb(a,b)}}function Gg(){return function(a,b,d){b=Infinity===Math.abs(Number(b))?Number(b):Z(b);if(da(b))return a;ba(a)&&(a=a.toString());if(!qa(a))return a;d=!d||isNaN(d)?0:Z(d);d=0>d?Math.max(0,a.length+ -d):d;return 0<=b?Bc(a,d,d+b):0===d?Bc(a,b,a.length):Bc(a,Math.max(0,d+b),d)}}function Bc(a,b,d){return F(a)?a.slice(b,d):va.call(a,b,d)}function Rd(a){function b(b){return b.map(function(b){var c=1,d=Ya;if(D(b))d=b;else 
if(F(b)){if("+"===b.charAt(0)||"-"===b.charAt(0))c="-"===b.charAt(0)?-1:1,b=b.substring(1);if(""!==b&&(d=a(b),d.constant))var e=d(),d=function(a){return a[e]}}return{get:d,descending:c}})}function d(a){switch(typeof a){case "number":case "boolean":case "string":return!0;default:return!1}} -function c(a,b){var c=0,d=a.type,k=b.type;if(d===k){var k=a.value,l=b.value;"string"===d?(k=k.toLowerCase(),l=l.toLowerCase()):"object"===d&&(C(k)&&(k=a.index),C(l)&&(l=b.index));k!==l&&(c=kb||37<=b&&40>=b||m(a,this,this.value)});if(e.hasEvent("paste"))b.on("paste cut", -m)}b.on("change",l);if(ae[g]&&c.$$hasNativeValidators&&g===d.type)b.on("keydown wheel mousedown",function(a){if(!k){var b=this.validity,c=b.badInput,d=b.typeMismatch;k=f.defer(function(){k=null;b.badInput===c&&b.typeMismatch===d||l(a)})}});c.$render=function(){var a=c.$isEmpty(c.$viewValue)?"":c.$viewValue;b.val()!==a&&b.val(a)}}function Nb(a,b){return function(d,c){var e,f;if(ga(d))return d;if(F(d)){'"'===d.charAt(0)&&'"'===d.charAt(d.length-1)&&(d=d.substring(1,d.length-1));if(Pg.test(d))return new Date(d); -a.lastIndex=0;if(e=a.exec(d))return e.shift(),f=c?{yyyy:c.getFullYear(),MM:c.getMonth()+1,dd:c.getDate(),HH:c.getHours(),mm:c.getMinutes(),ss:c.getSeconds(),sss:c.getMilliseconds()/1E3}:{yyyy:1970,MM:1,dd:1,HH:0,mm:0,ss:0,sss:0},q(e,function(a,c){c=v};g.$observe("min",function(a){v=p(a);h.$validate()})}if(u(g.max)||g.ngMax){var t; -h.$validators.max=function(a){return!n(a)||w(t)||d(a)<=t};g.$observe("max",function(a){t=p(a);h.$validate()})}}}function Dc(a,b,d,c){(c.$$hasNativeValidators=C(b[0].validity))&&c.$parsers.push(function(a){var c=b.prop("validity")||{};return c.badInput||c.typeMismatch?void 0:a})}function be(a){a.$$parserName="number";a.$parsers.push(function(b){if(a.$isEmpty(b))return null;if(Qg.test(b))return parseFloat(b)});a.$formatters.push(function(b){if(!a.$isEmpty(b)){if(!ba(b))throw pb("numfmt",b);b=b.toString()}return b})} -function Sa(a){u(a)&&!ba(a)&&(a=parseFloat(a));return da(a)?void 0:a}function Ec(a){var b=a.toString(),d=b.indexOf(".");return-1===d?-1a&&(a=/e-(\d+)$/.exec(b))?Number(a[1]):0:b.length-d-1}function ce(a,b,d){a=Number(a);var c=(a|0)!==a,e=(b|0)!==b,f=(d|0)!==d;if(c||e||f){var g=c?Ec(a):0,h=e?Ec(b):0,k=f?Ec(d):0,g=Math.max(g,h,k),g=Math.pow(10,g);a*=g;b*=g;d*=g;c&&(a=Math.round(a));e&&(b=Math.round(b));f&&(d=Math.round(d))}return 0===(a-b)%d}function de(a,b,d,c,e){if(u(c)){a=a(c);if(!a.constant)throw pb("constexpr", -d,c);return a(b)}return e}function Fc(a,b){function d(a,b){if(!a||!a.length)return[];if(!b||!b.length)return a;var c=[],d=0;a:for(;d(?:<\/\1>|)$/, -cc=/<|&#?\w+;/,bg=/<([\w:-]+)/,cg=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:-]+)[^>]*)\/>/gi,ha={option:[1,'"],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};ha.optgroup=ha.option;ha.tbody=ha.tfoot=ha.colgroup=ha.caption=ha.thead;ha.th=ha.td;var jg=x.Node.prototype.contains||function(a){return!!(this.compareDocumentPosition(a)& -16)},Na=W.prototype={ready:ed,toString:function(){var a=[];q(this,function(b){a.push(""+b)});return"["+a.join(", ")+"]"},eq:function(a){return 0<=a?B(this[a]):B(this[this.length+a])},length:0,push:Tg,sort:[].sort,splice:[].splice},Fb={};q("multiple selected checked disabled readOnly required open".split(" "),function(a){Fb[Q(a)]=a});var jd={};q("input select option textarea button form details".split(" "),function(a){jd[a]=!0});var rd={ngMinlength:"minlength",ngMaxlength:"maxlength",ngMin:"min",ngMax:"max", -ngPattern:"pattern",ngStep:"step"};q({data:hc,removeData:gc,hasData:function(a){for(var b in hb[a.ng339])return!0;return!1},cleanData:function(a){for(var b=0,d=a.length;b/,mg=/^[^(]*\(\s*([^)]*)\)/m,Wg=/,/,Xg=/^\s*(_?)(\S+?)\1\s*$/,kg=/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg,ya=L("$injector");eb.$$annotate=function(a,b,d){var c;if("function"===typeof a){if(!(c=a.$inject)){c=[];if(a.length){if(b)throw F(d)&&d||(d=a.name||ng(a)),ya("strictdi",d);b=ld(a);q(b[1].split(Wg),function(a){a.replace(Xg,function(a,b,d){c.push(d)})})}a.$inject=c}}else H(a)?(b=a.length-1,sb(a[b],"fn"),c=a.slice(0,b)):sb(a,"fn", -!0);return c};var fe=L("$animate"),qf=function(){this.$get=z},rf=function(){var a=new Gb,b=[];this.$get=["$$AnimateRunner","$rootScope",function(d,c){function e(a,b,c){var d=!1;b&&(b=F(b)?b.split(" "):H(b)?b:[],q(b,function(b){b&&(d=!0,a[b]=c)}));return d}function f(){q(b,function(b){var c=a.get(b);if(c){var d=og(b.attr("class")),e="",f="";q(c,function(a,b){a!==!!d[b]&&(a?e+=(e.length?" ":"")+b:f+=(f.length?" ":"")+b)});q(b,function(a){e&&Cb(a,e);f&&Bb(a,f)});a.delete(b)}});b.length=0}return{enabled:z, -on:z,off:z,pin:z,push:function(g,h,k,l){l&&l();k=k||{};k.from&&g.css(k.from);k.to&&g.css(k.to);if(k.addClass||k.removeClass)if(h=k.addClass,l=k.removeClass,k=a.get(g)||{},h=e(k,h,!0),l=e(k,l,!1),h||l)a.set(g,k),b.push(g),1===b.length&&c.$$postDigest(f);g=new d;g.complete();return g}}}]},of=["$provide",function(a){var b=this,d=null;this.$$registeredAnimations=Object.create(null);this.register=function(c,d){if(c&&"."!==c.charAt(0))throw fe("notcsel",c);var f=c+"-animation";b.$$registeredAnimations[c.substr(1)]= -f;a.factory(f,d)};this.classNameFilter=function(a){if(1===arguments.length&&(d=a instanceof RegExp?a:null)&&/[(\s|\/)]ng-animate[(\s|\/)]/.test(d.toString()))throw d=null,fe("nongcls","ng-animate");return d};this.$get=["$$animateQueue",function(a){function b(a,c,d){if(d){var e;a:{for(e=0;e <= >= && || ! = |".split(" "),function(a){Qb[a]=!0});var $g={n:"\n",f:"\f",r:"\r",t:"\t",v:"\v","'":"'",'"':'"'},wc=function(a){this.options=a};wc.prototype={constructor:wc,lex:function(a){this.text=a;this.index=0;for(this.tokens=[];this.index=a&&"string"===typeof a},isWhitespace:function(a){return" "===a||"\r"===a||"\t"===a||"\n"===a||"\v"===a||"\u00a0"===a},isIdentifierStart:function(a){return this.options.isIdentifierStart?this.options.isIdentifierStart(a,this.codePointAt(a)):this.isValidIdentifierStart(a)},isValidIdentifierStart:function(a){return"a"<=a&&"z">=a||"A"<=a&&"Z">=a||"_"===a||"$"===a},isIdentifierContinue:function(a){return this.options.isIdentifierContinue? 
-this.options.isIdentifierContinue(a,this.codePointAt(a)):this.isValidIdentifierContinue(a)},isValidIdentifierContinue:function(a,b){return this.isValidIdentifierStart(a,b)||this.isNumber(a)},codePointAt:function(a){return 1===a.length?a.charCodeAt(0):(a.charCodeAt(0)<<10)+a.charCodeAt(1)-56613888},peekMultichar:function(){var a=this.text.charAt(this.index),b=this.peek();if(!b)return a;var d=a.charCodeAt(0),c=b.charCodeAt(0);return 55296<=d&&56319>=d&&56320<=c&&57343>=c?a+b:a},isExpOperator:function(a){return"-"=== -a||"+"===a||this.isNumber(a)},throwError:function(a,b,d){d=d||this.index;b=u(b)?"s "+b+"-"+this.index+" ["+this.text.substring(b,d)+"]":" "+d;throw Ua("lexerr",a,b,this.text);},readNumber:function(){for(var a="",b=this.index;this.index","<=",">=");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.additive()};return a},additive:function(){for(var a=this.multiplicative(),b;b=this.expect("+","-");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.multiplicative()};return a},multiplicative:function(){for(var a=this.unary(),b;b=this.expect("*","/","%");)a={type:s.BinaryExpression,operator:b.text,left:a,right:this.unary()};return a}, -unary:function(){var a;return(a=this.expect("+","-","!"))?{type:s.UnaryExpression,operator:a.text,prefix:!0,argument:this.unary()}:this.primary()},primary:function(){var a;this.expect("(")?(a=this.filterChain(),this.consume(")")):this.expect("[")?a=this.arrayDeclaration():this.expect("{")?a=this.object():this.selfReferential.hasOwnProperty(this.peek().text)?a=ra(this.selfReferential[this.consume().text]):this.options.literals.hasOwnProperty(this.peek().text)?a={type:s.Literal,value:this.options.literals[this.consume().text]}: -this.peek().identifier?a=this.identifier():this.peek().constant?a=this.constant():this.throwError("not a primary expression",this.peek());for(var b;b=this.expect("(","[",".");)"("===b.text?(a={type:s.CallExpression,callee:a,arguments:this.parseArguments()},this.consume(")")):"["===b.text?(a={type:s.MemberExpression,object:a,property:this.expression(),computed:!0},this.consume("]")):"."===b.text?a={type:s.MemberExpression,object:a,property:this.identifier(),computed:!1}:this.throwError("IMPOSSIBLE"); -return a},filter:function(a){a=[a];for(var b={type:s.CallExpression,callee:this.identifier(),arguments:a,filter:!0};this.expect(":");)a.push(this.expression());return b},parseArguments:function(){var a=[];if(")"!==this.peekToken().text){do a.push(this.filterChain());while(this.expect(","))}return a},identifier:function(){var a=this.consume();a.identifier||this.throwError("is not a valid identifier",a);return{type:s.Identifier,name:a.text}},constant:function(){return{type:s.Literal,value:this.consume().value}}, -arrayDeclaration:function(){var a=[];if("]"!==this.peekToken().text){do{if(this.peek("]"))break;a.push(this.expression())}while(this.expect(","))}this.consume("]");return{type:s.ArrayExpression,elements:a}},object:function(){var a=[],b;if("}"!==this.peekToken().text){do{if(this.peek("}"))break;b={type:s.Property,kind:"init"};this.peek().constant?(b.key=this.constant(),b.computed=!1,this.consume(":"),b.value=this.expression()):this.peek().identifier?(b.key=this.identifier(),b.computed=!1,this.peek(":")? 
-(this.consume(":"),b.value=this.expression()):b.value=b.key):this.peek("[")?(this.consume("["),b.key=this.expression(),this.consume("]"),b.computed=!0,this.consume(":"),b.value=this.expression()):this.throwError("invalid key",this.peek());a.push(b)}while(this.expect(","))}this.consume("}");return{type:s.ObjectExpression,properties:a}},throwError:function(a,b){throw Ua("syntax",b.text,a,b.index+1,this.text,this.text.substring(b.index));},consume:function(a){if(0===this.tokens.length)throw Ua("ueoe", -this.text);var b=this.expect(a);b||this.throwError("is unexpected, expecting ["+a+"]",this.peek());return b},peekToken:function(){if(0===this.tokens.length)throw Ua("ueoe",this.text);return this.tokens[0]},peek:function(a,b,d,c){return this.peekAhead(0,a,b,d,c)},peekAhead:function(a,b,d,c,e){if(this.tokens.length>a){a=this.tokens[a];var f=a.text;if(f===b||f===d||f===c||f===e||!(b||d||c||e))return a}return!1},expect:function(a,b,d,c){return(a=this.peek(a,b,d,c))?(this.tokens.shift(),a):!1},selfReferential:{"this":{type:s.ThisExpression}, -$locals:{type:s.LocalsExpression}}};Hd.prototype={compile:function(a){var b=this;this.state={nextId:0,filters:{},fn:{vars:[],body:[],own:{}},assign:{vars:[],body:[],own:{}},inputs:[]};U(a,b.$filter);var d="",c;this.stage="assign";if(c=Gd(a))this.state.computing="assign",d=this.nextId(),this.recurse(c,d),this.return_(d),d="fn.assign="+this.generateFunction("assign","s,v,l");c=Ed(a.body);b.stage="inputs";q(c,function(a,c){var d="fn"+c;b.state[d]={vars:[],body:[],own:{}};b.state.computing=d;var h=b.nextId(); -b.recurse(a,h);b.return_(h);b.state.inputs.push(d);a.watchId=c});this.state.computing="fn";this.stage="main";this.recurse(a);a='"'+this.USE+" "+this.STRICT+'";\n'+this.filterPrefix()+"var fn="+this.generateFunction("fn","s,l,a,i")+d+this.watchFns()+"return fn;";a=(new Function("$filter","getStringValue","ifDefined","plus",a))(this.$filter,zg,Ag,Dd);this.state=this.stage=void 0;return a},USE:"use",STRICT:"strict",watchFns:function(){var a=[],b=this.state.inputs,d=this;q(b,function(b){a.push("var "+ -b+"="+d.generateFunction(b,"s"))});b.length&&a.push("fn.inputs=["+b.join(",")+"];");return a.join("")},generateFunction:function(a,b){return"function("+b+"){"+this.varsPrefix(a)+this.body(a)+"};"},filterPrefix:function(){var a=[],b=this;q(this.state.filters,function(d,c){a.push(d+"=$filter("+b.escape(c)+")")});return a.length?"var "+a.join(",")+";":""},varsPrefix:function(a){return this.state[a].vars.length?"var "+this.state[a].vars.join(",")+";":""},body:function(a){return this.state[a].body.join("")}, -recurse:function(a,b,d,c,e,f){var g,h,k=this,l,m,n;c=c||z;if(!f&&u(a.watchId))b=b||this.nextId(),this.if_("i",this.lazyAssign(b,this.computedMember("i",a.watchId)),this.lazyRecurse(a,b,d,c,e,!0));else switch(a.type){case s.Program:q(a.body,function(b,c){k.recurse(b.expression,void 0,void 0,function(a){h=a});c!==a.body.length-1?k.current().body.push(h,";"):k.return_(h)});break;case s.Literal:m=this.escape(a.value);this.assign(b,m);c(b||m);break;case s.UnaryExpression:this.recurse(a.argument,void 0, -void 0,function(a){h=a});m=a.operator+"("+this.ifDefined(h,0)+")";this.assign(b,m);c(m);break;case s.BinaryExpression:this.recurse(a.left,void 0,void 0,function(a){g=a});this.recurse(a.right,void 0,void 0,function(a){h=a});m="+"===a.operator?this.plus(g,h):"-"===a.operator?this.ifDefined(g,0)+a.operator+this.ifDefined(h,0):"("+g+")"+a.operator+"("+h+")";this.assign(b,m);c(m);break;case 
s.LogicalExpression:b=b||this.nextId();k.recurse(a.left,b);k.if_("&&"===a.operator?b:k.not(b),k.lazyRecurse(a.right, -b));c(b);break;case s.ConditionalExpression:b=b||this.nextId();k.recurse(a.test,b);k.if_(b,k.lazyRecurse(a.alternate,b),k.lazyRecurse(a.consequent,b));c(b);break;case s.Identifier:b=b||this.nextId();d&&(d.context="inputs"===k.stage?"s":this.assign(this.nextId(),this.getHasOwnProperty("l",a.name)+"?l:s"),d.computed=!1,d.name=a.name);k.if_("inputs"===k.stage||k.not(k.getHasOwnProperty("l",a.name)),function(){k.if_("inputs"===k.stage||"s",function(){e&&1!==e&&k.if_(k.isNull(k.nonComputedMember("s",a.name)), -k.lazyAssign(k.nonComputedMember("s",a.name),"{}"));k.assign(b,k.nonComputedMember("s",a.name))})},b&&k.lazyAssign(b,k.nonComputedMember("l",a.name)));c(b);break;case s.MemberExpression:g=d&&(d.context=this.nextId())||this.nextId();b=b||this.nextId();k.recurse(a.object,g,void 0,function(){k.if_(k.notNull(g),function(){a.computed?(h=k.nextId(),k.recurse(a.property,h),k.getStringValue(h),e&&1!==e&&k.if_(k.not(k.computedMember(g,h)),k.lazyAssign(k.computedMember(g,h),"{}")),m=k.computedMember(g,h),k.assign(b, -m),d&&(d.computed=!0,d.name=h)):(e&&1!==e&&k.if_(k.isNull(k.nonComputedMember(g,a.property.name)),k.lazyAssign(k.nonComputedMember(g,a.property.name),"{}")),m=k.nonComputedMember(g,a.property.name),k.assign(b,m),d&&(d.computed=!1,d.name=a.property.name))},function(){k.assign(b,"undefined")});c(b)},!!e);break;case s.CallExpression:b=b||this.nextId();a.filter?(h=k.filter(a.callee.name),l=[],q(a.arguments,function(a){var b=k.nextId();k.recurse(a,b);l.push(b)}),m=h+"("+l.join(",")+")",k.assign(b,m),c(b)): -(h=k.nextId(),g={},l=[],k.recurse(a.callee,h,g,function(){k.if_(k.notNull(h),function(){q(a.arguments,function(b){k.recurse(b,a.constant?void 0:k.nextId(),void 0,function(a){l.push(a)})});m=g.name?k.member(g.context,g.name,g.computed)+"("+l.join(",")+")":h+"("+l.join(",")+")";k.assign(b,m)},function(){k.assign(b,"undefined")});c(b)}));break;case s.AssignmentExpression:h=this.nextId();g={};this.recurse(a.left,void 0,g,function(){k.if_(k.notNull(g.context),function(){k.recurse(a.right,h);m=k.member(g.context, -g.name,g.computed)+a.operator+h;k.assign(b,m);c(b||m)})},1);break;case s.ArrayExpression:l=[];q(a.elements,function(b){k.recurse(b,a.constant?void 0:k.nextId(),void 0,function(a){l.push(a)})});m="["+l.join(",")+"]";this.assign(b,m);c(b||m);break;case s.ObjectExpression:l=[];n=!1;q(a.properties,function(a){a.computed&&(n=!0)});n?(b=b||this.nextId(),this.assign(b,"{}"),q(a.properties,function(a){a.computed?(g=k.nextId(),k.recurse(a.key,g)):g=a.key.type===s.Identifier?a.key.name:""+a.key.value;h=k.nextId(); -k.recurse(a.value,h);k.assign(k.member(b,g,a.computed),h)})):(q(a.properties,function(b){k.recurse(b.value,a.constant?void 0:k.nextId(),void 0,function(a){l.push(k.escape(b.key.type===s.Identifier?b.key.name:""+b.key.value)+":"+a)})}),m="{"+l.join(",")+"}",this.assign(b,m));c(b||m);break;case s.ThisExpression:this.assign(b,"s");c(b||"s");break;case s.LocalsExpression:this.assign(b,"l");c(b||"l");break;case s.NGValueParameter:this.assign(b,"v"),c(b||"v")}},getHasOwnProperty:function(a,b){var d=a+"."+ -b,c=this.current().own;c.hasOwnProperty(d)||(c[d]=this.nextId(!1,a+"&&("+this.escape(b)+" in "+a+")"));return c[d]},assign:function(a,b){if(a)return this.current().body.push(a,"=",b,";"),a},filter:function(a){this.state.filters.hasOwnProperty(a)||(this.state.filters[a]=this.nextId(!0));return 
this.state.filters[a]},ifDefined:function(a,b){return"ifDefined("+a+","+this.escape(b)+")"},plus:function(a,b){return"plus("+a+","+b+")"},return_:function(a){this.current().body.push("return ",a,";")},if_:function(a, -b,d){if(!0===a)b();else{var c=this.current().body;c.push("if(",a,"){");b();c.push("}");d&&(c.push("else{"),d(),c.push("}"))}},not:function(a){return"!("+a+")"},isNull:function(a){return a+"==null"},notNull:function(a){return a+"!=null"},nonComputedMember:function(a,b){var d=/[^$_a-zA-Z0-9]/g;return/^[$_a-zA-Z][$_a-zA-Z0-9]*$/.test(b)?a+"."+b:a+'["'+b.replace(d,this.stringEscapeFn)+'"]'},computedMember:function(a,b){return a+"["+b+"]"},member:function(a,b,d){return d?this.computedMember(a,b):this.nonComputedMember(a, -b)},getStringValue:function(a){this.assign(a,"getStringValue("+a+")")},lazyRecurse:function(a,b,d,c,e,f){var g=this;return function(){g.recurse(a,b,d,c,e,f)}},lazyAssign:function(a,b){var d=this;return function(){d.assign(a,b)}},stringEscapeRegex:/[^ a-zA-Z0-9]/g,stringEscapeFn:function(a){return"\\u"+("0000"+a.charCodeAt(0).toString(16)).slice(-4)},escape:function(a){if(F(a))return"'"+a.replace(this.stringEscapeRegex,this.stringEscapeFn)+"'";if(ba(a))return a.toString();if(!0===a)return"true";if(!1=== -a)return"false";if(null===a)return"null";if("undefined"===typeof a)return"undefined";throw Ua("esc");},nextId:function(a,b){var d="v"+this.state.nextId++;a||this.current().vars.push(d+(b?"="+b:""));return d},current:function(){return this.state[this.state.computing]}};Id.prototype={compile:function(a){var b=this;U(a,b.$filter);var d,c;if(d=Gd(a))c=this.recurse(d);d=Ed(a.body);var e;d&&(e=[],q(d,function(a,c){var d=b.recurse(a);a.input=d;e.push(d);a.watchId=c}));var f=[];q(a.body,function(a){f.push(b.recurse(a.expression))}); -a=0===a.body.length?z:1===a.body.length?f[0]:function(a,b){var c;q(f,function(d){c=d(a,b)});return c};c&&(a.assign=function(a,b,d){return c(a,d,b)});e&&(a.inputs=e);return a},recurse:function(a,b,d){var c,e,f=this,g;if(a.input)return this.inputs(a.input,a.watchId);switch(a.type){case s.Literal:return this.value(a.value,b);case s.UnaryExpression:return e=this.recurse(a.argument),this["unary"+a.operator](e,b);case s.BinaryExpression:return c=this.recurse(a.left),e=this.recurse(a.right),this["binary"+ -a.operator](c,e,b);case s.LogicalExpression:return c=this.recurse(a.left),e=this.recurse(a.right),this["binary"+a.operator](c,e,b);case s.ConditionalExpression:return this["ternary?:"](this.recurse(a.test),this.recurse(a.alternate),this.recurse(a.consequent),b);case s.Identifier:return f.identifier(a.name,b,d);case s.MemberExpression:return c=this.recurse(a.object,!1,!!d),a.computed||(e=a.property.name),a.computed&&(e=this.recurse(a.property)),a.computed?this.computedMember(c,e,b,d):this.nonComputedMember(c, -e,b,d);case s.CallExpression:return g=[],q(a.arguments,function(a){g.push(f.recurse(a))}),a.filter&&(e=this.$filter(a.callee.name)),a.filter||(e=this.recurse(a.callee,!0)),a.filter?function(a,c,d,f){for(var n=[],p=0;p":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)>b(c,e,f,g);return d?{value:c}:c}},"binary<=":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)<=b(c,e,f,g);return d?{value:c}:c}},"binary>=":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)>=b(c,e,f,g);return d?{value:c}: -c}},"binary&&":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)&&b(c,e,f,g);return d?{value:c}:c}},"binary||":function(a,b,d){return function(c,e,f,g){c=a(c,e,f,g)||b(c,e,f,g);return d?{value:c}:c}},"ternary?:":function(a,b,d,c){return 
function(e,f,g,h){e=a(e,f,g,h)?b(e,f,g,h):d(e,f,g,h);return c?{value:e}:e}},value:function(a,b){return function(){return b?{context:void 0,name:void 0,value:a}:a}},identifier:function(a,b,d){return function(c,e,f,g){c=e&&a in e?e:c;d&&1!==d&&c&&null==c[a]&&(c[a]= -{});e=c?c[a]:void 0;return b?{context:c,name:a,value:e}:e}},computedMember:function(a,b,d,c){return function(e,f,g,h){var k=a(e,f,g,h),l,m;null!=k&&(l=b(e,f,g,h),l+="",c&&1!==c&&k&&!k[l]&&(k[l]={}),m=k[l]);return d?{context:k,name:l,value:m}:m}},nonComputedMember:function(a,b,d,c){return function(e,f,g,h){e=a(e,f,g,h);c&&1!==c&&e&&null==e[b]&&(e[b]={});f=null!=e?e[b]:void 0;return d?{context:e,name:b,value:f}:f}},inputs:function(a,b){return function(d,c,e,f){return f?f[b]:a(d,c,e)}}};uc.prototype= -{constructor:uc,parse:function(a){a=this.ast.ast(a);var b=this.astCompiler.compile(a);b.literal=0===a.body.length||1===a.body.length&&(a.body[0].expression.type===s.Literal||a.body[0].expression.type===s.ArrayExpression||a.body[0].expression.type===s.ObjectExpression);b.constant=a.constant;return b}};var ta=L("$sce"),oa={HTML:"html",CSS:"css",URL:"url",RESOURCE_URL:"resourceUrl",JS:"js"},xc=/_([a-z])/g,Dg=L("$compile"),aa=x.document.createElement("a"),Md=Ca(x.location.href);Nd.$inject=["$document"]; -cd.$inject=["$provide"];var Ud=22,Td=".",zc="0";Od.$inject=["$locale"];Qd.$inject=["$locale"];var Og={yyyy:Y("FullYear",4,0,!1,!0),yy:Y("FullYear",2,0,!0,!0),y:Y("FullYear",1,0,!1,!0),MMMM:mb("Month"),MMM:mb("Month",!0),MM:Y("Month",2,1),M:Y("Month",1,1),LLLL:mb("Month",!1,!0),dd:Y("Date",2),d:Y("Date",1),HH:Y("Hours",2),H:Y("Hours",1),hh:Y("Hours",2,-12),h:Y("Hours",1,-12),mm:Y("Minutes",2),m:Y("Minutes",1),ss:Y("Seconds",2),s:Y("Seconds",1),sss:Y("Milliseconds",3),EEEE:mb("Day"),EEE:mb("Day",!0), -a:function(a,b){return 12>a.getHours()?b.AMPMS[0]:b.AMPMS[1]},Z:function(a,b,d){a=-1*d;return a=(0<=a?"+":"")+(Kb(Math[0=a.getFullYear()?b.ERANAMES[0]:b.ERANAMES[1]}},Ng=/((?:[^yMLdHhmsaZEwG']+)|(?:'(?:[^']|'')*')|(?:E+|y+|M+|L+|d+|H+|h+|m+|s+|a|Z|G+|w+))([\s\S]*)/,Mg=/^-?\d+$/;Pd.$inject=["$locale"];var Hg=la(Q),Ig=la(ub);Rd.$inject=["$parse"];var Fe=la({restrict:"E",compile:function(a, -b){if(!b.href&&!b.xlinkHref)return function(a,b){if("a"===b[0].nodeName.toLowerCase()){var e="[object SVGAnimatedString]"===ma.call(b.prop("href"))?"xlink:href":"href";b.on("click",function(a){b.attr(e)||a.preventDefault()})}}}}),vb={};q(Fb,function(a,b){function d(a,d,e){a.$watch(e[c],function(a){e.$set(b,!!a)})}if("multiple"!==a){var c=Ba("ng-"+b),e=d;"checked"===a&&(e=function(a,b,e){e.ngModel!==e[c]&&d(a,b,e)});vb[c]=function(){return{restrict:"A",priority:100,link:e}}}});q(rd,function(a,b){vb[b]= -function(){return{priority:100,link:function(a,c,e){if("ngPattern"===b&&"/"===e.ngPattern.charAt(0)&&(c=e.ngPattern.match(Sg))){e.$set("ngPattern",new RegExp(c[1],c[2]));return}a.$watch(e[b],function(a){e.$set(b,a)})}}}});q(["src","srcset","href"],function(a){var b=Ba("ng-"+a);vb[b]=function(){return{priority:99,link:function(d,c,e){var f=a,g=a;"href"===a&&"[object SVGAnimatedString]"===ma.call(c.prop("href"))&&(g="xlinkHref",e.$attr[g]="xlink:href",f=null);e.$observe(b,function(b){b?(e.$set(g,b), -za&&f&&c.prop(f,e[g])):"href"===a&&e.$set(g,null)})}}}});var 
Mb={$addControl:z,$$renameControl:function(a,b){a.$name=b},$removeControl:z,$setValidity:z,$setDirty:z,$setPristine:z,$setSubmitted:z};Lb.$inject=["$element","$attrs","$scope","$animate","$interpolate"];Lb.prototype={$rollbackViewValue:function(){q(this.$$controls,function(a){a.$rollbackViewValue()})},$commitViewValue:function(){q(this.$$controls,function(a){a.$commitViewValue()})},$addControl:function(a){Ka(a.$name,"input");this.$$controls.push(a); -a.$name&&(this[a.$name]=a);a.$$parentForm=this},$$renameControl:function(a,b){var d=a.$name;this[d]===a&&delete this[d];this[b]=a;a.$name=b},$removeControl:function(a){a.$name&&this[a.$name]===a&&delete this[a.$name];q(this.$pending,function(b,d){this.$setValidity(d,null,a)},this);q(this.$error,function(b,d){this.$setValidity(d,null,a)},this);q(this.$$success,function(b,d){this.$setValidity(d,null,a)},this);$a(this.$$controls,a);a.$$parentForm=Mb},$setDirty:function(){this.$$animate.removeClass(this.$$element, -Va);this.$$animate.addClass(this.$$element,Rb);this.$dirty=!0;this.$pristine=!1;this.$$parentForm.$setDirty()},$setPristine:function(){this.$$animate.setClass(this.$$element,Va,Rb+" ng-submitted");this.$dirty=!1;this.$pristine=!0;this.$submitted=!1;q(this.$$controls,function(a){a.$setPristine()})},$setUntouched:function(){q(this.$$controls,function(a){a.$setUntouched()})},$setSubmitted:function(){this.$$animate.addClass(this.$$element,"ng-submitted");this.$submitted=!0;this.$$parentForm.$setSubmitted()}}; -Zd({clazz:Lb,set:function(a,b,d){var c=a[b];c?-1===c.indexOf(d)&&c.push(d):a[b]=[d]},unset:function(a,b,d){var c=a[b];c&&($a(c,d),0===c.length&&delete a[b])}});var ge=function(a){return["$timeout","$parse",function(b,d){function c(a){return""===a?d('this[""]').assign:d(a).assign||z}return{name:"form",restrict:a?"EAC":"E",require:["form","^^?form"],controller:Lb,compile:function(d,f){d.addClass(Va).addClass(nb);var g=f.name?"name":a&&f.ngForm?"ngForm":!1;return{pre:function(a,d,e,f){var n=f[0];if(!("action"in -e)){var p=function(b){a.$apply(function(){n.$commitViewValue();n.$setSubmitted()});b.preventDefault()};d[0].addEventListener("submit",p);d.on("$destroy",function(){b(function(){d[0].removeEventListener("submit",p)},0,!1)})}(f[1]||n.$$parentForm).$addControl(n);var r=g?c(n.$name):z;g&&(r(a,n),e.$observe(g,function(b){n.$name!==b&&(r(a,void 0),n.$$parentForm.$$renameControl(n,b),r=c(n.$name),r(a,n))}));d.on("$destroy",function(){n.$$parentForm.$removeControl(n);r(a,void 0);S(n,Mb)})}}}}}]},Ge=ge(), -Se=ge(!0),Pg=/^\d{4,}-[01]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d+(?:[+-][0-2]\d:[0-5]\d|Z)$/,ah=/^[a-z][a-z\d.+-]*:\/*(?:[^:@]+(?::[^@]+)?@)?(?:[^\s:/?#]+|\[[a-f\d:]+])(?::\d+)?(?:\/[^?#]*)?(?:\?[^#]*)?(?:#.*)?$/i,bh=/^(?=.{1,254}$)(?=.{1,64}@)[-!#$%&'*+/0-9=?A-Z^_`a-z{|}~]+(\.[-!#$%&'*+/0-9=?A-Z^_`a-z{|}~]+)*@[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?(\.[A-Za-z0-9]([A-Za-z0-9-]{0,61}[A-Za-z0-9])?)*$/,Qg=/^\s*(-|\+)?(\d+|(\d*(\.\d*)))([eE][+-]?\d+)?\s*$/,he=/^(\d{4,})-(\d{2})-(\d{2})$/,ie=/^(\d{4,})-(\d\d)-(\d\d)T(\d\d):(\d\d)(?::(\d\d)(\.\d{1,3})?)?$/, -Hc=/^(\d{4,})-W(\d\d)$/,je=/^(\d{4,})-(\d\d)$/,ke=/^(\d\d):(\d\d)(?::(\d\d)(\.\d{1,3})?)?$/,ae=V();q(["date","datetime-local","month","time","week"],function(a){ae[a]=!0});var le={text:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c)},date:ob("date",he,Nb(he,["yyyy","MM","dd"]),"yyyy-MM-dd"),"datetime-local":ob("datetimelocal",ie,Nb(ie,"yyyy MM dd HH mm ss sss".split(" 
")),"yyyy-MM-ddTHH:mm:ss.sss"),time:ob("time",ke,Nb(ke,["HH","mm","ss","sss"]),"HH:mm:ss.sss"),week:ob("week",Hc,function(a,b){if(ga(a))return a; -if(F(a)){Hc.lastIndex=0;var d=Hc.exec(a);if(d){var c=+d[1],e=+d[2],f=d=0,g=0,h=0,k=Vd(c),e=7*(e-1);b&&(d=b.getHours(),f=b.getMinutes(),g=b.getSeconds(),h=b.getMilliseconds());return new Date(c,0,k.getDate()+e,d,f,g,h)}}return NaN},"yyyy-Www"),month:ob("month",je,Nb(je,["yyyy","MM"]),"yyyy-MM"),number:function(a,b,d,c,e,f){Dc(a,b,d,c);be(c);Ra(a,b,d,c,e,f);var g,h;if(u(d.min)||d.ngMin)c.$validators.min=function(a){return c.$isEmpty(a)||w(g)||a>=g},d.$observe("min",function(a){g=Sa(a);c.$validate()}); -if(u(d.max)||d.ngMax)c.$validators.max=function(a){return c.$isEmpty(a)||w(h)||a<=h},d.$observe("max",function(a){h=Sa(a);c.$validate()});if(u(d.step)||d.ngStep){var k;c.$validators.step=function(a,b){return c.$isEmpty(b)||w(k)||ce(b,g||0,k)};d.$observe("step",function(a){k=Sa(a);c.$validate()})}},url:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c);c.$$parserName="url";c.$validators.url=function(a,b){var d=a||b;return c.$isEmpty(d)||ah.test(d)}},email:function(a,b,d,c,e,f){Ra(a,b,d,c,e,f);Cc(c);c.$$parserName= -"email";c.$validators.email=function(a,b){var d=a||b;return c.$isEmpty(d)||bh.test(d)}},radio:function(a,b,d,c){var e=!d.ngTrim||"false"!==T(d.ngTrim);w(d.name)&&b.attr("name",++qb);b.on("click",function(a){var g;b[0].checked&&(g=d.value,e&&(g=T(g)),c.$setViewValue(g,a&&a.type))});c.$render=function(){var a=d.value;e&&(a=T(a));b[0].checked=a===c.$viewValue};d.$observe("value",c.$render)},range:function(a,b,d,c,e,f){function g(a,c){b.attr(a,d[a]);d.$observe(a,c)}function h(a){n=Sa(a);da(c.$modelValue)|| -(m?(a=b.val(),n>a&&(a=n,b.val(a)),c.$setViewValue(a)):c.$validate())}function k(a){p=Sa(a);da(c.$modelValue)||(m?(a=b.val(),p=n},g("min",h));e&&(c.$validators.max=m?function(){return!0}:function(a,b){return c.$isEmpty(b)||w(p)||b<=p},g("max",k));f&&(c.$validators.step=m?function(){return!q.stepMismatch}:function(a,b){return c.$isEmpty(b)||w(r)||ce(b,n||0,r)},g("step",l))},checkbox:function(a,b,d,c,e,f,g,h){var k=de(h,a,"ngTrueValue",d.ngTrueValue,!0),l=de(h,a,"ngFalseValue", -d.ngFalseValue,!1);b.on("click",function(a){c.$setViewValue(b[0].checked,a&&a.type)});c.$render=function(){b[0].checked=c.$viewValue};c.$isEmpty=function(a){return!1===a};c.$formatters.push(function(a){return sa(a,k)});c.$parsers.push(function(a){return a?k:l})},hidden:z,button:z,submit:z,reset:z,file:z},Xc=["$browser","$sniffer","$filter","$parse",function(a,b,d,c){return{restrict:"E",require:["?ngModel"],link:{pre:function(e,f,g,h){h[0]&&(le[Q(g.type)]||le.text)(e,f,g,h[0],b,a,d,c)}}}}],ch=/^(true|false|\d+)$/, -kf=function(){function a(a,d,c){var e=u(c)?c:9===za?"":null;a.prop("value",e);d.$set("value",c)}return{restrict:"A",priority:100,compile:function(b,d){return ch.test(d.ngValue)?function(b,d,f){b=b.$eval(f.ngValue);a(d,f,b)}:function(b,d,f){b.$watch(f.ngValue,function(b){a(d,f,b)})}}}},Ke=["$compile",function(a){return{restrict:"AC",compile:function(b){a.$$addBindingClass(b);return function(b,c,e){a.$$addBindingInfo(c,e.ngBind);c=c[0];b.$watch(e.ngBind,function(a){c.textContent=$b(a)})}}}}],Me=["$interpolate", -"$compile",function(a,b){return{compile:function(d){b.$$addBindingClass(d);return function(c,d,f){c=a(d.attr(f.$attr.ngBindTemplate));b.$$addBindingInfo(d,c.expressions);d=d[0];f.$observe("ngBindTemplate",function(a){d.textContent=w(a)?"":a})}}}}],Le=["$sce","$parse","$compile",function(a,b,d){return{restrict:"A",compile:function(c,e){var 
f=b(e.ngBindHtml),g=b(e.ngBindHtml,function(b){return a.valueOf(b)});d.$$addBindingClass(c);return function(b,c,e){d.$$addBindingInfo(c,e.ngBindHtml);b.$watch(g, -function(){var d=f(b);c.html(a.getTrustedHtml(d)||"")})}}}}],jf=la({restrict:"A",require:"ngModel",link:function(a,b,d,c){c.$viewChangeListeners.push(function(){a.$eval(d.ngChange)})}}),Ne=Fc("",!0),Pe=Fc("Odd",0),Oe=Fc("Even",1),Qe=Qa({compile:function(a,b){b.$set("ngCloak",void 0);a.removeClass("ng-cloak")}}),Re=[function(){return{restrict:"A",scope:!0,controller:"@",priority:500}}],bd={},dh={blur:!0,focus:!0};q("click dblclick mousedown mouseup mouseover mouseout mousemove mouseenter mouseleave keydown keyup keypress submit focus blur copy cut paste".split(" "), -function(a){var b=Ba("ng-"+a);bd[b]=["$parse","$rootScope",function(d,c){return{restrict:"A",compile:function(e,f){var g=d(f[b]);return function(b,d){d.on(a,function(d){var e=function(){g(b,{$event:d})};dh[a]&&c.$$phase?b.$evalAsync(e):b.$apply(e)})}}}}]});var Ue=["$animate","$compile",function(a,b){return{multiElement:!0,transclude:"element",priority:600,terminal:!0,restrict:"A",$$tlb:!0,link:function(d,c,e,f,g){var h,k,l;d.$watch(e.ngIf,function(d){d?k||g(function(d,f){k=f;d[d.length++]=b.$$createComment("end ngIf", -e.ngIf);h={clone:d};a.enter(d,c.parent(),c)}):(l&&(l.remove(),l=null),k&&(k.$destroy(),k=null),h&&(l=tb(h.clone),a.leave(l).done(function(a){!1!==a&&(l=null)}),h=null))})}}}],Ve=["$templateRequest","$anchorScroll","$animate",function(a,b,d){return{restrict:"ECA",priority:400,terminal:!0,transclude:"element",controller:ea.noop,compile:function(c,e){var f=e.ngInclude||e.src,g=e.onload||"",h=e.autoscroll;return function(c,e,m,n,p){var r=0,q,s,t,w=function(){s&&(s.remove(),s=null);q&&(q.$destroy(),q= -null);t&&(d.leave(t).done(function(a){!1!==a&&(s=null)}),s=t,t=null)};c.$watch(f,function(f){var m=function(a){!1===a||!u(h)||h&&!c.$eval(h)||b()},s=++r;f?(a(f,!0).then(function(a){if(!c.$$destroyed&&s===r){var b=c.$new();n.template=a;a=p(b,function(a){w();d.enter(a,null,e).done(m)});q=b;t=a;q.$emit("$includeContentLoaded",f);c.$eval(g)}},function(){c.$$destroyed||s!==r||(w(),c.$emit("$includeContentError",f))}),c.$emit("$includeContentRequested",f)):(w(),n.template=null)})}}}}],mf=["$compile",function(a){return{restrict:"ECA", -priority:-400,require:"ngInclude",link:function(b,d,c,e){ma.call(d[0]).match(/SVG/)?(d.empty(),a(dd(e.template,x.document).childNodes)(b,function(a){d.append(a)},{futureParentElement:d})):(d.html(e.template),a(d.contents())(b))}}}],We=Qa({priority:450,compile:function(){return{pre:function(a,b,d){a.$eval(d.ngInit)}}}}),hf=function(){return{restrict:"A",priority:100,require:"ngModel",link:function(a,b,d,c){var e=d.ngList||", ",f="false"!==d.ngTrim,g=f?T(e):e;c.$parsers.push(function(a){if(!w(a)){var b= -[];a&&q(a.split(g),function(a){a&&b.push(f?T(a):a)});return b}});c.$formatters.push(function(a){if(H(a))return a.join(e)});c.$isEmpty=function(a){return!a||!a.length}}}},nb="ng-valid",Yd="ng-invalid",Va="ng-pristine",Rb="ng-dirty",pb=L("ngModel");Ob.$inject="$scope $exceptionHandler $attrs $element $parse $animate $timeout $q $interpolate".split(" ");Ob.prototype={$$initGetterSetters:function(){if(this.$options.getOption("getterSetter")){var a=this.$$parse(this.$$attr.ngModel+"()"),b=this.$$parse(this.$$attr.ngModel+ -"($$$p)");this.$$ngModelGet=function(b){var c=this.$$parsedNgModel(b);D(c)&&(c=a(b));return c};this.$$ngModelSet=function(a,c){D(this.$$parsedNgModel(a))?b(a,{$$$p:c}):this.$$parsedNgModelAssign(a,c)}}else 
if(!this.$$parsedNgModel.assign)throw pb("nonassign",this.$$attr.ngModel,xa(this.$$element));},$render:z,$isEmpty:function(a){return w(a)||""===a||null===a||a!==a},$$updateEmptyClasses:function(a){this.$isEmpty(a)?(this.$$animate.removeClass(this.$$element,"ng-not-empty"),this.$$animate.addClass(this.$$element, -"ng-empty")):(this.$$animate.removeClass(this.$$element,"ng-empty"),this.$$animate.addClass(this.$$element,"ng-not-empty"))},$setPristine:function(){this.$dirty=!1;this.$pristine=!0;this.$$animate.removeClass(this.$$element,Rb);this.$$animate.addClass(this.$$element,Va)},$setDirty:function(){this.$dirty=!0;this.$pristine=!1;this.$$animate.removeClass(this.$$element,Va);this.$$animate.addClass(this.$$element,Rb);this.$$parentForm.$setDirty()},$setUntouched:function(){this.$touched=!1;this.$untouched= -!0;this.$$animate.setClass(this.$$element,"ng-untouched","ng-touched")},$setTouched:function(){this.$touched=!0;this.$untouched=!1;this.$$animate.setClass(this.$$element,"ng-touched","ng-untouched")},$rollbackViewValue:function(){this.$$timeout.cancel(this.$$pendingDebounce);this.$viewValue=this.$$lastCommittedViewValue;this.$render()},$validate:function(){if(!da(this.$modelValue)){var a=this.$$lastCommittedViewValue,b=this.$$rawModelValue,d=this.$valid,c=this.$modelValue,e=this.$options.getOption("allowInvalid"), -f=this;this.$$runValidators(b,a,function(a){e||d===a||(f.$modelValue=a?b:void 0,f.$modelValue!==c&&f.$$writeModelToScope())})}},$$runValidators:function(a,b,d){function c(){var c=!0;q(k.$validators,function(d,e){var g=Boolean(d(a,b));c=c&&g;f(e,g)});return c?!0:(q(k.$asyncValidators,function(a,b){f(b,null)}),!1)}function e(){var c=[],d=!0;q(k.$asyncValidators,function(e,g){var k=e(a,b);if(!k||!D(k.then))throw pb("nopromise",k);f(g,void 0);c.push(k.then(function(){f(g,!0)},function(){d=!1;f(g,!1)}))}); -c.length?k.$$q.all(c).then(function(){g(d)},z):g(!0)}function f(a,b){h===k.$$currentValidationRunId&&k.$setValidity(a,b)}function g(a){h===k.$$currentValidationRunId&&d(a)}this.$$currentValidationRunId++;var h=this.$$currentValidationRunId,k=this;(function(){var a=k.$$parserName||"parse";if(w(k.$$parserValid))f(a,null);else return k.$$parserValid||(q(k.$validators,function(a,b){f(b,null)}),q(k.$asyncValidators,function(a,b){f(b,null)})),f(a,k.$$parserValid),k.$$parserValid;return!0})()?c()?e():g(!1): -g(!1)},$commitViewValue:function(){var a=this.$viewValue;this.$$timeout.cancel(this.$$pendingDebounce);if(this.$$lastCommittedViewValue!==a||""===a&&this.$$hasNativeValidators)this.$$updateEmptyClasses(a),this.$$lastCommittedViewValue=a,this.$pristine&&this.$setDirty(),this.$$parseAndValidate()},$$parseAndValidate:function(){var a=this.$$lastCommittedViewValue,b=this;if(this.$$parserValid=w(a)?void 0:!0)for(var d=0;de||c.$isEmpty(b)|| -b.length<=e}}}}},$c=function(){return{restrict:"A",require:"?ngModel",link:function(a,b,d,c){if(c){var e=0;d.$observe("minlength",function(a){e=Z(a)||0;c.$validate()});c.$validators.minlength=function(a,b){return c.$isEmpty(b)||b.length>=e}}}}};x.angular.bootstrap?x.console&&console.log("WARNING: Tried to load angular more than once."):(ze(),Ce(ea),ea.module("ngLocale",[],["$provide",function(a){function b(a){a+="";var b=a.indexOf(".");return-1==b?0:a.length-b-1}a.value("$locale",{DATETIME_FORMATS:{AMPMS:["AM", -"PM"],DAY:"Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),ERANAMES:["Before Christ","Anno Domini"],ERAS:["BC","AD"],FIRSTDAYOFWEEK:6,MONTH:"January February March April May June July August September October November 
December".split(" "),SHORTDAY:"Sun Mon Tue Wed Thu Fri Sat".split(" "),SHORTMONTH:"Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(" "),STANDALONEMONTH:"January February March April May June July August September October November December".split(" "),WEEKENDRANGE:[5, -6],fullDate:"EEEE, MMMM d, y",longDate:"MMMM d, y",medium:"MMM d, y h:mm:ss a",mediumDate:"MMM d, y",mediumTime:"h:mm:ss a","short":"M/d/yy h:mm a",shortDate:"M/d/yy",shortTime:"h:mm a"},NUMBER_FORMATS:{CURRENCY_SYM:"$",DECIMAL_SEP:".",GROUP_SEP:",",PATTERNS:[{gSize:3,lgSize:3,maxFrac:3,minFrac:0,minInt:1,negPre:"-",negSuf:"",posPre:"",posSuf:""},{gSize:3,lgSize:3,maxFrac:2,minFrac:2,minInt:1,negPre:"-\u00a4",negSuf:"",posPre:"\u00a4",posSuf:""}]},id:"en-us",localeID:"en_US",pluralCat:function(a, -c){var e=a|0,f=c;void 0===f&&(f=Math.min(b(a),3));Math.pow(10,f);return 1==e&&0==f?"one":"other"}})}]),B(function(){ue(x.document,Sc)}))})(window);!window.angular.$$csp().noInlineStyle&&window.angular.element(document.head).prepend(''); -//# sourceMappingURL=angular.min.js.map diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js deleted file mode 100644 index 4aced5761e8..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(window){"use strict";var nv=window.nv;"undefined"!=typeof exports&&(nv=require("nvd3")),angular.module("nvd3",[]).directive("nvd3",["nvd3Utils",function(nvd3Utils){return{restrict:"AE",scope:{data:"=",options:"=",api:"=?",events:"=?",config:"=?",onReady:"&?"},link:function(scope,element,attrs){function configure(chart,options,chartType){chart&&options&&angular.forEach(chart,function(value,key){"_"===key[0]||("dispatch"===key?(void 0!==options[key]&&null!==options[key]||scope._config.extended&&(options[key]={}),configureEvents(value,options[key])):"tooltip"===key?(void 0!==options[key]&&null!==options[key]||scope._config.extended&&(options[key]={}),configure(chart[key],options[key],chartType)):"contentGenerator"===key?options[key]&&chart[key](options[key]):-1===["axis","clearHighlights","defined","highlightPoint","nvPointerEventsClass","options","rangeBand","rangeBands","scatter","open","close","node"].indexOf(key)&&(void 0===options[key]||null===options[key]?scope._config.extended&&(options[key]=value()):chart[key](options[key])))})}function configureEvents(dispatch,options){dispatch&&options&&angular.forEach(dispatch,function(value,key){void 0===options[key]||null===options[key]?scope._config.extended&&(options[key]=value.on):dispatch.on(key+"._",options[key])})}function configureWrapper(name){var _=nvd3Utils.deepExtend(defaultWrapper(name),scope.options[name]||{});scope._config.extended&&(scope.options[name]=_);var wrapElement=angular.element("
").html(_.html||"").addClass(name).addClass(_.className).removeAttr("style").css(_.css);_.html||wrapElement.text(_.text),_.enable&&("title"===name?element.prepend(wrapElement):"subtitle"===name?angular.element(element[0].querySelector(".title")).after(wrapElement):"caption"===name&&element.append(wrapElement))}function configureStyles(){var _=nvd3Utils.deepExtend(defaultStyles(),scope.options.styles||{});scope._config.extended&&(scope.options.styles=_),angular.forEach(_.classes,function(value,key){value?element.addClass(key):element.removeClass(key)}),element.removeAttr("style").css(_.css)}function defaultWrapper(_){switch(_){case"title":return{enable:!1,text:"Write Your Title",className:"h4",css:{width:scope.options.chart.width+"px",textAlign:"center"}};case"subtitle":return{enable:!1,text:"Write Your Subtitle",css:{width:scope.options.chart.width+"px",textAlign:"center"}};case"caption":return{enable:!1,text:"Figure 1. Write Your Caption text.",css:{width:scope.options.chart.width+"px",textAlign:"center"}}}}function defaultStyles(){return{classes:{"with-3d-shadow":!0,"with-transitions":!0,gallery:!1},css:{}}}function dataWatchFn(newData,oldData){newData!==oldData&&(scope._config.disabled||(scope._config.refreshDataOnly?scope.api.update():scope.api.refresh()))}var defaultConfig={extended:!1,visible:!0,disabled:!1,refreshDataOnly:!0,deepWatchOptions:!0,deepWatchData:!0,deepWatchDataDepth:2,debounce:10,debounceImmediate:!0};scope.isReady=!1,scope._config=angular.extend(defaultConfig,scope.config),scope.api={refresh:function(){scope.api.updateWithOptions(),scope.isReady=!0},refreshWithTimeout:function(t){setTimeout(function(){scope.api.refresh()},t)},update:function(){scope.chart&&scope.svg?"sunburstChart"===scope.options.chart.type?scope.svg.datum(angular.copy(scope.data)).call(scope.chart):scope.svg.datum(scope.data).call(scope.chart):scope.api.refresh()},updateWithTimeout:function(t){setTimeout(function(){scope.api.update()},t)},updateWithOptions:function(options){if(arguments.length){if(scope.options=options,scope._config.deepWatchOptions&&!scope._config.disabled)return}else options=scope.options;scope.api.clearElement(),angular.isDefined(options)!==!1&&scope._config.visible&&(scope.chart=nv.models[options.chart.type](),scope.chart.id=Math.random().toString(36).substr(2,15),angular.forEach(scope.chart,function(value,key){"_"===key[0]||["clearHighlights","highlightPoint","id","options","resizeHandler","state","open","close","tooltipContent"].indexOf(key)>=0||("dispatch"===key?(void 0!==options.chart[key]&&null!==options.chart[key]||scope._config.extended&&(options.chart[key]={}),configureEvents(scope.chart[key],options.chart[key])):["bars","bars1","bars2","boxplot","bullet","controls","discretebar","distX","distY","focus","interactiveLayer","legend","lines","lines1","lines2","multibar","pie","scatter","scatters1","scatters2","sparkline","stack1","stack2","sunburst","tooltip","x2Axis","xAxis","y1Axis","y2Axis","y3Axis","y4Axis","yAxis","yAxis1","yAxis2"].indexOf(key)>=0||"stacked"===key&&"stackedAreaChart"===options.chart.type?(void 
0!==options.chart[key]&&null!==options.chart[key]||scope._config.extended&&(options.chart[key]={}),configure(scope.chart[key],options.chart[key],options.chart.type)):"focusHeight"===key&&"lineChart"===options.chart.type||"focusHeight"===key&&"lineWithFocusChart"===options.chart.type||("xTickFormat"!==key&&"yTickFormat"!==key||"lineWithFocusChart"!==options.chart.type)&&("tooltips"===key&&"boxPlotChart"===options.chart.type||("tooltipXContent"!==key&&"tooltipYContent"!==key||"scatterChart"!==options.chart.type)&&("x"!==key&&"y"!==key||"forceDirectedGraph"!==options.chart.type)&&(void 0===options.chart[key]||null===options.chart[key]?scope._config.extended&&("barColor"===key?options.chart[key]=value()():options.chart[key]=value()):scope.chart[key](options.chart[key]))))}),scope.api.updateWithData(),(options.title||scope._config.extended)&&configureWrapper("title"),(options.subtitle||scope._config.extended)&&configureWrapper("subtitle"),(options.caption||scope._config.extended)&&configureWrapper("caption"),(options.styles||scope._config.extended)&&configureStyles(),nv.addGraph(function(){return scope.chart?(scope.chart.resizeHandler&&scope.chart.resizeHandler.clear(),scope.chart.resizeHandler=nv.utils.windowResize(function(){scope.chart&&scope.chart.update&&scope.chart.update()}),void 0!==options.chart.zoom&&["scatterChart","lineChart","candlestickBarChart","cumulativeLineChart","historicalBarChart","ohlcBarChart","stackedAreaChart"].indexOf(options.chart.type)>-1&&nvd3Utils.zoom(scope,options),scope.chart):void 0},options.chart.callback))},updateWithData:function(data){if(arguments.length){if(scope.data=data,scope._config.deepWatchData&&!scope._config.disabled)return}else data="sunburstChart"===scope.options.chart.type?angular.copy(scope.data):scope.data;if(data){d3.select(element[0]).select("svg").remove();var h,w;scope.svg=d3.select(element[0]).insert("svg",".caption"),(h=scope.options.chart.height)&&(isNaN(+h)||(h+="px"),scope.svg.attr("height",h).style({height:h})),(w=scope.options.chart.width)?(isNaN(+w)||(w+="px"),scope.svg.attr("width",w).style({width:w})):scope.svg.attr("width","100%").style({width:"100%"}),scope.svg.datum(data).call(scope.chart),scope.chart&&scope.chart.zoomRender&&scope.chart.zoomRender()}},clearElement:function(){if(element.find(".title").remove(),element.find(".subtitle").remove(),element.find(".caption").remove(),element.empty(),scope.chart&&scope.chart.tooltip&&scope.chart.tooltip.id&&d3.select("#"+scope.chart.tooltip.id()).remove(),nv.graphs&&scope.chart)for(var i=nv.graphs.length-1;i>=0;i--)nv.graphs[i]&&nv.graphs[i].id===scope.chart.id&&nv.graphs.splice(i,1);nv.tooltip&&nv.tooltip.cleanup&&nv.tooltip.cleanup(),scope.chart&&scope.chart.resizeHandler&&scope.chart.resizeHandler.clear(),scope.chart=null},getScope:function(){return scope},getElement:function(){return 
element}},scope._config.deepWatchOptions&&scope.$watch("options",nvd3Utils.debounce(function(newOptions){scope._config.disabled||scope.api.refresh()},scope._config.debounce,scope._config.debounceImmediate),!0),scope._config.deepWatchData&&(1===scope._config.deepWatchDataDepth?scope.$watchCollection("data",dataWatchFn):scope.$watch("data",dataWatchFn,2===scope._config.deepWatchDataDepth)),scope.$watch("config",function(newConfig,oldConfig){newConfig!==oldConfig&&(scope._config=angular.extend(defaultConfig,newConfig),scope.api.refresh())},!0),scope._config.deepWatchOptions||scope._config.deepWatchData||scope.api.refresh(),angular.forEach(scope.events,function(eventHandler,event){scope.$on(event,function(e,args){return eventHandler(e,scope,args)})}),element.on("$destroy",function(){scope.api.clearElement()}),scope.$watch("isReady",function(isReady){isReady&&scope.onReady&&"function"==typeof scope.onReady()&&scope.onReady()(scope,element)})}}}]).factory("nvd3Utils",function(){return{debounce:function(func,wait,immediate){var timeout;return function(){var context=this,args=arguments,later=function(){timeout=null,immediate||func.apply(context,args)},callNow=immediate&&!timeout;clearTimeout(timeout),timeout=setTimeout(later,wait),callNow&&func.apply(context,args)}},deepExtend:function(dst){var me=this;return angular.forEach(arguments,function(obj){obj!==dst&&angular.forEach(obj,function(value,key){dst[key]&&dst[key].constructor&&dst[key].constructor===Object?me.deepExtend(dst[key],value):dst[key]=value})}),dst},zoom:function(scope,options){var zoom=options.chart.zoom,enabled="undefined"==typeof zoom.enabled||null===zoom.enabled?!0:zoom.enabled;if(enabled){var fixDomain,d3zoom,zoomed,unzoomed,zoomend,xScale=scope.chart.xAxis.scale(),yScale=scope.chart.yAxis.scale(),xDomain=scope.chart.xDomain||xScale.domain,yDomain=scope.chart.yDomain||yScale.domain,x_boundary=xScale.domain().slice(),y_boundary=yScale.domain().slice(),scale=zoom.scale||1,translate=zoom.translate||[0,0],scaleExtent=zoom.scaleExtent||[1,10],useFixedDomain=zoom.useFixedDomain||!1,useNiceScale=zoom.useNiceScale||!1,horizontalOff=zoom.horizontalOff||!1,verticalOff=zoom.verticalOff||!1,unzoomEventType=zoom.unzoomEventType||"dblclick.zoom";useNiceScale&&(xScale.nice(),yScale.nice()),fixDomain=function(domain,boundary){return domain[0]=Math.min(Math.max(domain[0],boundary[0]),boundary[1]-boundary[1]/scaleExtent[1]),domain[1]=Math.max(boundary[0]+boundary[1]/scaleExtent[1],Math.min(domain[1],boundary[1])),domain},zoomed=function(){if(void 0!==zoom.zoomed){var domains=zoom.zoomed(xScale.domain(),yScale.domain());horizontalOff||xDomain([domains.x1,domains.x2]),verticalOff||yDomain([domains.y1,domains.y2])}else horizontalOff||xDomain(useFixedDomain?fixDomain(xScale.domain(),x_boundary):xScale.domain()),verticalOff||yDomain(useFixedDomain?fixDomain(yScale.domain(),y_boundary):yScale.domain());scope.chart&&scope.chart.update()},unzoomed=function(){if(void 0!==zoom.unzoomed){var domains=zoom.unzoomed(xScale.domain(),yScale.domain());horizontalOff||xDomain([domains.x1,domains.x2]),verticalOff||yDomain([domains.y1,domains.y2])}else horizontalOff||xDomain(x_boundary),verticalOff||yDomain(y_boundary);d3zoom.scale(scale).translate(translate),scope.chart&&scope.chart.update()},zoomend=function(){void 
0!==zoom.zoomend&&zoom.zoomend()},d3zoom=d3.behavior.zoom().x(xScale).y(yScale).scaleExtent(scaleExtent).on("zoom",zoomed).on("zoomend",zoomend),scope.svg&&(scope.svg.call(d3zoom),d3zoom.scale(scale).translate(translate).event(scope.svg),"none"!==unzoomEventType&&scope.svg.on(unzoomEventType,unzoomed)),scope.chart&&(scope.chart.zoomRender=function(){d3zoom.scale(scale).translate(translate),xScale=scope.chart.xAxis.scale(),yScale=scope.chart.yAxis.scale(),xDomain=scope.chart.xDomain||xScale.domain,yDomain=scope.chart.yDomain||yScale.domain,x_boundary=xScale.domain().slice(),y_boundary=yScale.domain().slice(),d3zoom.x(xScale).y(yScale),scope.svg.call(d3zoom),"none"!==unzoomEventType&&scope.svg.on(unzoomEventType,unzoomed)})}}}})}(window); \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js deleted file mode 100644 index 3f985d1422b..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js +++ /dev/null @@ -1,17 +0,0 @@ -/* - AngularJS v1.6.4 - (c) 2010-2017 Google, Inc. http://angularjs.org - License: MIT -*/ -(function(J,d){'use strict';function A(d){k&&d.get("$route")}function B(t,u,g){return{restrict:"ECA",terminal:!0,priority:400,transclude:"element",link:function(a,f,b,c,m){function v(){l&&(g.cancel(l),l=null);n&&(n.$destroy(),n=null);p&&(l=g.leave(p),l.done(function(a){!1!==a&&(l=null)}),p=null)}function E(){var b=t.current&&t.current.locals;if(d.isDefined(b&&b.$template)){var b=a.$new(),c=t.current;p=m(b,function(b){g.enter(b,null,p||f).done(function(b){!1===b||!d.isDefined(w)||w&&!a.$eval(w)||u()}); -v()});n=c.scope=b;n.$emit("$viewContentLoaded");n.$eval(k)}else v()}var n,p,l,w=b.autoscroll,k=b.onload||"";a.$on("$routeChangeSuccess",E);E()}}}function C(d,k,g){return{restrict:"ECA",priority:-400,link:function(a,f){var b=g.current,c=b.locals;f.html(c.$template);var m=d(f.contents());if(b.controller){c.$scope=a;var v=k(b.controller,c);b.controllerAs&&(a[b.controllerAs]=v);f.data("$ngControllerController",v);f.children().data("$ngControllerController",v)}a[b.resolveAs||"$resolve"]=c;m(a)}}}var x, -y,F,G,z=d.module("ngRoute",[]).info({angularVersion:"1.6.4"}).provider("$route",function(){function t(a,f){return d.extend(Object.create(a),f)}function u(a,d){var b=d.caseInsensitiveMatch,c={originalPath:a,regexp:a},g=c.keys=[];a=a.replace(/([().])/g,"\\$1").replace(/(\/)?:(\w+)(\*\?|[?*])?/g,function(a,b,d,c){a="?"===c||"*?"===c?"?":null;c="*"===c||"*?"===c?"*":null;g.push({name:d,optional:!!a});b=b||"";return""+(a?"":b)+"(?:"+(a?b:"")+(c&&"(.+?)"||"([^/]+)")+(a||"")+")"+(a||"")}).replace(/([/$*])/g, -"\\$1");c.regexp=new RegExp("^"+a+"$",b?"i":"");return c}x=d.isArray;y=d.isObject;F=d.isDefined;G=d.noop;var g={};this.when=function(a,f){var b;b=void 0;if(x(f)){b=b||[];for(var c=0,m=f.length;c div { - display: none; -} -.datepicker.days div.datepicker-days { - display: block; -} -.datepicker.months div.datepicker-months { - display: block; -} -.datepicker.years div.datepicker-years { - display: block; -} -.datepicker table { - margin: 0; -} -.datepicker td, -.datepicker th { - text-align: center; - width: 20px; - height: 20px; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - border-radius: 4px; - border: none; -} -.table-striped .datepicker table tr td, -.table-striped .datepicker table tr th { - background-color: transparent; -} -.datepicker table tr td.day:hover { - background: 
#eeeeee; - cursor: pointer; -} -.datepicker table tr td.old, -.datepicker table tr td.new { - color: #999999; -} -.datepicker table tr td.disabled, -.datepicker table tr td.disabled:hover { - background: none; - color: #999999; - cursor: default; -} -.datepicker table tr td.today, -.datepicker table tr td.today:hover, -.datepicker table tr td.today.disabled, -.datepicker table tr td.today.disabled:hover { - background-color: #fde19a; - background-image: -moz-linear-gradient(top, #fdd49a, #fdf59a); - background-image: -ms-linear-gradient(top, #fdd49a, #fdf59a); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#fdd49a), to(#fdf59a)); - background-image: -webkit-linear-gradient(top, #fdd49a, #fdf59a); - background-image: -o-linear-gradient(top, #fdd49a, #fdf59a); - background-image: linear-gradient(top, #fdd49a, #fdf59a); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fdd49a', endColorstr='#fdf59a', GradientType=0); - border-color: #fdf59a #fdf59a #fbed50; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - color: #000; -} -.datepicker table tr td.today:hover, -.datepicker table tr td.today:hover:hover, -.datepicker table tr td.today.disabled:hover, -.datepicker table tr td.today.disabled:hover:hover, -.datepicker table tr td.today:active, -.datepicker table tr td.today:hover:active, -.datepicker table tr td.today.disabled:active, -.datepicker table tr td.today.disabled:hover:active, -.datepicker table tr td.today.active, -.datepicker table tr td.today:hover.active, -.datepicker table tr td.today.disabled.active, -.datepicker table tr td.today.disabled:hover.active, -.datepicker table tr td.today.disabled, -.datepicker table tr td.today:hover.disabled, -.datepicker table tr td.today.disabled.disabled, -.datepicker table tr td.today.disabled:hover.disabled, -.datepicker table tr td.today[disabled], -.datepicker table tr td.today:hover[disabled], -.datepicker table tr td.today.disabled[disabled], -.datepicker table tr td.today.disabled:hover[disabled] { - background-color: #fdf59a; -} -.datepicker table tr td.today:active, -.datepicker table tr td.today:hover:active, -.datepicker table tr td.today.disabled:active, -.datepicker table tr td.today.disabled:hover:active, -.datepicker table tr td.today.active, -.datepicker table tr td.today:hover.active, -.datepicker table tr td.today.disabled.active, -.datepicker table tr td.today.disabled:hover.active { - background-color: #fbf069 \9; -} -.datepicker table tr td.today:hover:hover { - color: #000; -} -.datepicker table tr td.today.active:hover { - color: #fff; -} -.datepicker table tr td.range, -.datepicker table tr td.range:hover, -.datepicker table tr td.range.disabled, -.datepicker table tr td.range.disabled:hover { - background: #eeeeee; - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; -} -.datepicker table tr td.range.today, -.datepicker table tr td.range.today:hover, -.datepicker table tr td.range.today.disabled, -.datepicker table tr td.range.today.disabled:hover { - background-color: #f3d17a; - background-image: -moz-linear-gradient(top, #f3c17a, #f3e97a); - background-image: -ms-linear-gradient(top, #f3c17a, #f3e97a); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f3c17a), to(#f3e97a)); - background-image: -webkit-linear-gradient(top, #f3c17a, #f3e97a); - background-image: -o-linear-gradient(top, #f3c17a, #f3e97a); - background-image: 
linear-gradient(top, #f3c17a, #f3e97a); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#f3c17a', endColorstr='#f3e97a', GradientType=0); - border-color: #f3e97a #f3e97a #edde34; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - -webkit-border-radius: 0; - -moz-border-radius: 0; - border-radius: 0; -} -.datepicker table tr td.range.today:hover, -.datepicker table tr td.range.today:hover:hover, -.datepicker table tr td.range.today.disabled:hover, -.datepicker table tr td.range.today.disabled:hover:hover, -.datepicker table tr td.range.today:active, -.datepicker table tr td.range.today:hover:active, -.datepicker table tr td.range.today.disabled:active, -.datepicker table tr td.range.today.disabled:hover:active, -.datepicker table tr td.range.today.active, -.datepicker table tr td.range.today:hover.active, -.datepicker table tr td.range.today.disabled.active, -.datepicker table tr td.range.today.disabled:hover.active, -.datepicker table tr td.range.today.disabled, -.datepicker table tr td.range.today:hover.disabled, -.datepicker table tr td.range.today.disabled.disabled, -.datepicker table tr td.range.today.disabled:hover.disabled, -.datepicker table tr td.range.today[disabled], -.datepicker table tr td.range.today:hover[disabled], -.datepicker table tr td.range.today.disabled[disabled], -.datepicker table tr td.range.today.disabled:hover[disabled] { - background-color: #f3e97a; -} -.datepicker table tr td.range.today:active, -.datepicker table tr td.range.today:hover:active, -.datepicker table tr td.range.today.disabled:active, -.datepicker table tr td.range.today.disabled:hover:active, -.datepicker table tr td.range.today.active, -.datepicker table tr td.range.today:hover.active, -.datepicker table tr td.range.today.disabled.active, -.datepicker table tr td.range.today.disabled:hover.active { - background-color: #efe24b \9; -} -.datepicker table tr td.selected, -.datepicker table tr td.selected:hover, -.datepicker table tr td.selected.disabled, -.datepicker table tr td.selected.disabled:hover { - background-color: #9e9e9e; - background-image: -moz-linear-gradient(top, #b3b3b3, #808080); - background-image: -ms-linear-gradient(top, #b3b3b3, #808080); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#b3b3b3), to(#808080)); - background-image: -webkit-linear-gradient(top, #b3b3b3, #808080); - background-image: -o-linear-gradient(top, #b3b3b3, #808080); - background-image: linear-gradient(top, #b3b3b3, #808080); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#b3b3b3', endColorstr='#808080', GradientType=0); - border-color: #808080 #808080 #595959; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - color: #fff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} -.datepicker table tr td.selected:hover, -.datepicker table tr td.selected:hover:hover, -.datepicker table tr td.selected.disabled:hover, -.datepicker table tr td.selected.disabled:hover:hover, -.datepicker table tr td.selected:active, -.datepicker table tr td.selected:hover:active, -.datepicker table tr td.selected.disabled:active, -.datepicker table tr td.selected.disabled:hover:active, -.datepicker table tr td.selected.active, -.datepicker table tr td.selected:hover.active, -.datepicker table tr td.selected.disabled.active, -.datepicker 
table tr td.selected.disabled:hover.active, -.datepicker table tr td.selected.disabled, -.datepicker table tr td.selected:hover.disabled, -.datepicker table tr td.selected.disabled.disabled, -.datepicker table tr td.selected.disabled:hover.disabled, -.datepicker table tr td.selected[disabled], -.datepicker table tr td.selected:hover[disabled], -.datepicker table tr td.selected.disabled[disabled], -.datepicker table tr td.selected.disabled:hover[disabled] { - background-color: #808080; -} -.datepicker table tr td.selected:active, -.datepicker table tr td.selected:hover:active, -.datepicker table tr td.selected.disabled:active, -.datepicker table tr td.selected.disabled:hover:active, -.datepicker table tr td.selected.active, -.datepicker table tr td.selected:hover.active, -.datepicker table tr td.selected.disabled.active, -.datepicker table tr td.selected.disabled:hover.active { - background-color: #666666 \9; -} -.datepicker table tr td.active, -.datepicker table tr td.active:hover, -.datepicker table tr td.active.disabled, -.datepicker table tr td.active.disabled:hover { - background-color: #006dcc; - background-image: -moz-linear-gradient(top, #0088cc, #0044cc); - background-image: -ms-linear-gradient(top, #0088cc, #0044cc); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc)); - background-image: -webkit-linear-gradient(top, #0088cc, #0044cc); - background-image: -o-linear-gradient(top, #0088cc, #0044cc); - background-image: linear-gradient(top, #0088cc, #0044cc); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0088cc', endColorstr='#0044cc', GradientType=0); - border-color: #0044cc #0044cc #002a80; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - color: #fff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} -.datepicker table tr td.active:hover, -.datepicker table tr td.active:hover:hover, -.datepicker table tr td.active.disabled:hover, -.datepicker table tr td.active.disabled:hover:hover, -.datepicker table tr td.active:active, -.datepicker table tr td.active:hover:active, -.datepicker table tr td.active.disabled:active, -.datepicker table tr td.active.disabled:hover:active, -.datepicker table tr td.active.active, -.datepicker table tr td.active:hover.active, -.datepicker table tr td.active.disabled.active, -.datepicker table tr td.active.disabled:hover.active, -.datepicker table tr td.active.disabled, -.datepicker table tr td.active:hover.disabled, -.datepicker table tr td.active.disabled.disabled, -.datepicker table tr td.active.disabled:hover.disabled, -.datepicker table tr td.active[disabled], -.datepicker table tr td.active:hover[disabled], -.datepicker table tr td.active.disabled[disabled], -.datepicker table tr td.active.disabled:hover[disabled] { - background-color: #0044cc; -} -.datepicker table tr td.active:active, -.datepicker table tr td.active:hover:active, -.datepicker table tr td.active.disabled:active, -.datepicker table tr td.active.disabled:hover:active, -.datepicker table tr td.active.active, -.datepicker table tr td.active:hover.active, -.datepicker table tr td.active.disabled.active, -.datepicker table tr td.active.disabled:hover.active { - background-color: #003399 \9; -} -.datepicker table tr td span { - display: block; - width: 23%; - height: 54px; - line-height: 54px; - float: left; - margin: 1%; - cursor: pointer; - -webkit-border-radius: 4px; - -moz-border-radius: 4px; - 
border-radius: 4px; -} -.datepicker table tr td span:hover { - background: #eeeeee; -} -.datepicker table tr td span.disabled, -.datepicker table tr td span.disabled:hover { - background: none; - color: #999999; - cursor: default; -} -.datepicker table tr td span.active, -.datepicker table tr td span.active:hover, -.datepicker table tr td span.active.disabled, -.datepicker table tr td span.active.disabled:hover { - background-color: #006dcc; - background-image: -moz-linear-gradient(top, #0088cc, #0044cc); - background-image: -ms-linear-gradient(top, #0088cc, #0044cc); - background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#0088cc), to(#0044cc)); - background-image: -webkit-linear-gradient(top, #0088cc, #0044cc); - background-image: -o-linear-gradient(top, #0088cc, #0044cc); - background-image: linear-gradient(top, #0088cc, #0044cc); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#0088cc', endColorstr='#0044cc', GradientType=0); - border-color: #0044cc #0044cc #002a80; - border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); - filter: progid:DXImageTransform.Microsoft.gradient(enabled=false); - color: #fff; - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} -.datepicker table tr td span.active:hover, -.datepicker table tr td span.active:hover:hover, -.datepicker table tr td span.active.disabled:hover, -.datepicker table tr td span.active.disabled:hover:hover, -.datepicker table tr td span.active:active, -.datepicker table tr td span.active:hover:active, -.datepicker table tr td span.active.disabled:active, -.datepicker table tr td span.active.disabled:hover:active, -.datepicker table tr td span.active.active, -.datepicker table tr td span.active:hover.active, -.datepicker table tr td span.active.disabled.active, -.datepicker table tr td span.active.disabled:hover.active, -.datepicker table tr td span.active.disabled, -.datepicker table tr td span.active:hover.disabled, -.datepicker table tr td span.active.disabled.disabled, -.datepicker table tr td span.active.disabled:hover.disabled, -.datepicker table tr td span.active[disabled], -.datepicker table tr td span.active:hover[disabled], -.datepicker table tr td span.active.disabled[disabled], -.datepicker table tr td span.active.disabled:hover[disabled] { - background-color: #0044cc; -} -.datepicker table tr td span.active:active, -.datepicker table tr td span.active:hover:active, -.datepicker table tr td span.active.disabled:active, -.datepicker table tr td span.active.disabled:hover:active, -.datepicker table tr td span.active.active, -.datepicker table tr td span.active:hover.active, -.datepicker table tr td span.active.disabled.active, -.datepicker table tr td span.active.disabled:hover.active { - background-color: #003399 \9; -} -.datepicker table tr td span.old, -.datepicker table tr td span.new { - color: #999999; -} -.datepicker th.datepicker-switch { - width: 145px; -} -.datepicker thead tr:first-child th, -.datepicker tfoot tr th { - cursor: pointer; -} -.datepicker thead tr:first-child th:hover, -.datepicker tfoot tr th:hover { - background: #eeeeee; -} -.datepicker .cw { - font-size: 10px; - width: 12px; - padding: 0 2px 0 5px; - vertical-align: middle; -} -.datepicker thead tr:first-child th.cw { - cursor: default; - background-color: transparent; -} -.input-append.date .add-on i, -.input-prepend.date .add-on i { - display: block; - cursor: pointer; - width: 16px; - height: 16px; -} -.input-daterange input { - text-align: center; -} -.input-daterange 
input:first-child { - -webkit-border-radius: 3px 0 0 3px; - -moz-border-radius: 3px 0 0 3px; - border-radius: 3px 0 0 3px; -} -.input-daterange input:last-child { - -webkit-border-radius: 0 3px 3px 0; - -moz-border-radius: 0 3px 3px 0; - border-radius: 0 3px 3px 0; -} -.input-daterange .add-on { - display: inline-block; - width: auto; - min-width: 16px; - height: 18px; - padding: 4px 5px; - font-weight: normal; - line-height: 18px; - text-align: center; - text-shadow: 0 1px 0 #ffffff; - vertical-align: middle; - background-color: #eeeeee; - border: 1px solid #ccc; - margin-left: -5px; - margin-right: -5px; -} diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css deleted file mode 100644 index ea33f76a772..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css +++ /dev/null @@ -1,587 +0,0 @@ -/*! - * Bootstrap v3.4.1 (https://getbootstrap.com/) - * Copyright 2011-2019 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */ -.btn-default, -.btn-primary, -.btn-success, -.btn-info, -.btn-warning, -.btn-danger { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075); -} -.btn-default:active, -.btn-primary:active, -.btn-success:active, -.btn-info:active, -.btn-warning:active, -.btn-danger:active, -.btn-default.active, -.btn-primary.active, -.btn-success.active, -.btn-info.active, -.btn-warning.active, -.btn-danger.active { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} -.btn-default.disabled, -.btn-primary.disabled, -.btn-success.disabled, -.btn-info.disabled, -.btn-warning.disabled, -.btn-danger.disabled, -.btn-default[disabled], -.btn-primary[disabled], -.btn-success[disabled], -.btn-info[disabled], -.btn-warning[disabled], -.btn-danger[disabled], -fieldset[disabled] .btn-default, -fieldset[disabled] .btn-primary, -fieldset[disabled] .btn-success, -fieldset[disabled] .btn-info, -fieldset[disabled] .btn-warning, -fieldset[disabled] .btn-danger { - -webkit-box-shadow: none; - box-shadow: none; -} -.btn-default .badge, -.btn-primary .badge, -.btn-success .badge, -.btn-info .badge, -.btn-warning .badge, -.btn-danger .badge { - text-shadow: none; -} -.btn:active, -.btn.active { - background-image: none; -} -.btn-default { - background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%); - background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0)); - background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #dbdbdb; - text-shadow: 0 1px 0 #fff; - border-color: #ccc; -} -.btn-default:hover, -.btn-default:focus { - background-color: #e0e0e0; - background-position: 0 -15px; -} -.btn-default:active, -.btn-default.active { - background-color: #e0e0e0; - border-color: #dbdbdb; -} -.btn-default.disabled, -.btn-default[disabled], -fieldset[disabled] .btn-default, 
-.btn-default.disabled:hover, -.btn-default[disabled]:hover, -fieldset[disabled] .btn-default:hover, -.btn-default.disabled:focus, -.btn-default[disabled]:focus, -fieldset[disabled] .btn-default:focus, -.btn-default.disabled.focus, -.btn-default[disabled].focus, -fieldset[disabled] .btn-default.focus, -.btn-default.disabled:active, -.btn-default[disabled]:active, -fieldset[disabled] .btn-default:active, -.btn-default.disabled.active, -.btn-default[disabled].active, -fieldset[disabled] .btn-default.active { - background-color: #e0e0e0; - background-image: none; -} -.btn-primary { - background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88)); - background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #245580; -} -.btn-primary:hover, -.btn-primary:focus { - background-color: #265a88; - background-position: 0 -15px; -} -.btn-primary:active, -.btn-primary.active { - background-color: #265a88; - border-color: #245580; -} -.btn-primary.disabled, -.btn-primary[disabled], -fieldset[disabled] .btn-primary, -.btn-primary.disabled:hover, -.btn-primary[disabled]:hover, -fieldset[disabled] .btn-primary:hover, -.btn-primary.disabled:focus, -.btn-primary[disabled]:focus, -fieldset[disabled] .btn-primary:focus, -.btn-primary.disabled.focus, -.btn-primary[disabled].focus, -fieldset[disabled] .btn-primary.focus, -.btn-primary.disabled:active, -.btn-primary[disabled]:active, -fieldset[disabled] .btn-primary:active, -.btn-primary.disabled.active, -.btn-primary[disabled].active, -fieldset[disabled] .btn-primary.active { - background-color: #265a88; - background-image: none; -} -.btn-success { - background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%); - background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641)); - background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #3e8f3e; -} -.btn-success:hover, -.btn-success:focus { - background-color: #419641; - background-position: 0 -15px; -} -.btn-success:active, -.btn-success.active { - background-color: #419641; - border-color: #3e8f3e; -} -.btn-success.disabled, -.btn-success[disabled], -fieldset[disabled] .btn-success, -.btn-success.disabled:hover, -.btn-success[disabled]:hover, -fieldset[disabled] .btn-success:hover, -.btn-success.disabled:focus, -.btn-success[disabled]:focus, -fieldset[disabled] .btn-success:focus, -.btn-success.disabled.focus, -.btn-success[disabled].focus, -fieldset[disabled] .btn-success.focus, -.btn-success.disabled:active, -.btn-success[disabled]:active, -fieldset[disabled] .btn-success:active, -.btn-success.disabled.active, -.btn-success[disabled].active, -fieldset[disabled] .btn-success.active { - background-color: #419641; - background-image: none; -} -.btn-info { - background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); 
- background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2)); - background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #28a4c9; -} -.btn-info:hover, -.btn-info:focus { - background-color: #2aabd2; - background-position: 0 -15px; -} -.btn-info:active, -.btn-info.active { - background-color: #2aabd2; - border-color: #28a4c9; -} -.btn-info.disabled, -.btn-info[disabled], -fieldset[disabled] .btn-info, -.btn-info.disabled:hover, -.btn-info[disabled]:hover, -fieldset[disabled] .btn-info:hover, -.btn-info.disabled:focus, -.btn-info[disabled]:focus, -fieldset[disabled] .btn-info:focus, -.btn-info.disabled.focus, -.btn-info[disabled].focus, -fieldset[disabled] .btn-info.focus, -.btn-info.disabled:active, -.btn-info[disabled]:active, -fieldset[disabled] .btn-info:active, -.btn-info.disabled.active, -.btn-info[disabled].active, -fieldset[disabled] .btn-info.active { - background-color: #2aabd2; - background-image: none; -} -.btn-warning { - background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); - background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316)); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #e38d13; -} -.btn-warning:hover, -.btn-warning:focus { - background-color: #eb9316; - background-position: 0 -15px; -} -.btn-warning:active, -.btn-warning.active { - background-color: #eb9316; - border-color: #e38d13; -} -.btn-warning.disabled, -.btn-warning[disabled], -fieldset[disabled] .btn-warning, -.btn-warning.disabled:hover, -.btn-warning[disabled]:hover, -fieldset[disabled] .btn-warning:hover, -.btn-warning.disabled:focus, -.btn-warning[disabled]:focus, -fieldset[disabled] .btn-warning:focus, -.btn-warning.disabled.focus, -.btn-warning[disabled].focus, -fieldset[disabled] .btn-warning.focus, -.btn-warning.disabled:active, -.btn-warning[disabled]:active, -fieldset[disabled] .btn-warning:active, -.btn-warning.disabled.active, -.btn-warning[disabled].active, -fieldset[disabled] .btn-warning.active { - background-color: #eb9316; - background-image: none; -} -.btn-danger { - background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%); - background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a)); - background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0); - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - background-repeat: repeat-x; - border-color: #b92c28; -} -.btn-danger:hover, -.btn-danger:focus { - background-color: #c12e2a; - background-position: 0 -15px; -} -.btn-danger:active, -.btn-danger.active { - background-color: #c12e2a; - border-color: #b92c28; -} -.btn-danger.disabled, 
-.btn-danger[disabled], -fieldset[disabled] .btn-danger, -.btn-danger.disabled:hover, -.btn-danger[disabled]:hover, -fieldset[disabled] .btn-danger:hover, -.btn-danger.disabled:focus, -.btn-danger[disabled]:focus, -fieldset[disabled] .btn-danger:focus, -.btn-danger.disabled.focus, -.btn-danger[disabled].focus, -fieldset[disabled] .btn-danger.focus, -.btn-danger.disabled:active, -.btn-danger[disabled]:active, -fieldset[disabled] .btn-danger:active, -.btn-danger.disabled.active, -.btn-danger[disabled].active, -fieldset[disabled] .btn-danger.active { - background-color: #c12e2a; - background-image: none; -} -.thumbnail, -.img-thumbnail { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus { - background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8)); - background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); - background-repeat: repeat-x; - background-color: #e8e8e8; -} -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); - background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); - background-repeat: repeat-x; - background-color: #2e6da4; -} -.navbar-default { - background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); - background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#f8f8f8)); - background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075); -} -.navbar-default .navbar-nav > .open > a, -.navbar-default .navbar-nav > .active > a { - background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%); - background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2)); - background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0); - background-repeat: repeat-x; - -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075); -} -.navbar-brand, -.navbar-nav > li > a { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25); -} -.navbar-inverse { - background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%); - 
background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222)); - background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0); - background-repeat: repeat-x; - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); - border-radius: 4px; -} -.navbar-inverse .navbar-nav > .open > a, -.navbar-inverse .navbar-nav > .active > a { - background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%); - background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f)); - background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0); - background-repeat: repeat-x; - -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25); - box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25); -} -.navbar-inverse .navbar-brand, -.navbar-inverse .navbar-nav > li > a { - text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); -} -.navbar-static-top, -.navbar-fixed-top, -.navbar-fixed-bottom { - border-radius: 0; -} -@media (max-width: 767px) { - .navbar .navbar-nav .open .dropdown-menu > .active > a, - .navbar .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #fff; - background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); - background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); - background-repeat: repeat-x; - } -} -.alert { - text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2); - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05); -} -.alert-success { - background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); - background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc)); - background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0); - background-repeat: repeat-x; - border-color: #b2dba1; -} -.alert-info { - background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%); - background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0)); - background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0); - background-repeat: repeat-x; - border-color: #9acfea; -} -.alert-warning { - background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); - background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%); - background-image: -webkit-gradient(linear, 
left top, left bottom, from(#fcf8e3), to(#f8efc0)); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0); - background-repeat: repeat-x; - border-color: #f5e79e; -} -.alert-danger { - background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); - background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3)); - background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0); - background-repeat: repeat-x; - border-color: #dca7a7; -} -.progress { - background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); - background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5)); - background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar { - background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090)); - background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar-success { - background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44)); - background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar-info { - background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5)); - background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar-warning { - background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f)); - background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar-danger { - background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), 
to(#c9302c)); - background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0); - background-repeat: repeat-x; -} -.progress-bar-striped { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} -.list-group { - border-radius: 4px; - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075); -} -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - text-shadow: 0 -1px 0 #286090; - background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a)); - background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0); - background-repeat: repeat-x; - border-color: #2b669a; -} -.list-group-item.active .badge, -.list-group-item.active:hover .badge, -.list-group-item.active:focus .badge { - text-shadow: none; -} -.panel { - -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05); -} -.panel-default > .panel-heading { - background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8)); - background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0); - background-repeat: repeat-x; -} -.panel-primary > .panel-heading { - background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4)); - background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0); - background-repeat: repeat-x; -} -.panel-success > .panel-heading { - background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); - background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6)); - background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0); - background-repeat: repeat-x; -} -.panel-info > .panel-heading { - background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); - background-image: 
-o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3)); - background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0); - background-repeat: repeat-x; -} -.panel-warning > .panel-heading { - background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); - background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc)); - background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0); - background-repeat: repeat-x; -} -.panel-danger > .panel-heading { - background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%); - background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc)); - background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0); - background-repeat: repeat-x; -} -.well { - background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); - background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%); - background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5)); - background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0); - background-repeat: repeat-x; - border-color: #dcdcdc; - -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1); -} -/*# sourceMappingURL=bootstrap-theme.css.map */ \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map deleted file mode 100644 index 949d09738fd..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.css.map +++ /dev/null @@ -1 +0,0 @@ 
-{"version":3,"sources":["bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAAA;;;;GAIG;ACiBH;;;;;;EAME,yCAAA;EC2CA,4FAAA;EACQ,oFAAA;CFzDT;ACkBC;;;;;;;;;;;;ECsCA,yDAAA;EACQ,iDAAA;CF1CT;ACQC;;;;;;;;;;;;;;;;;;ECiCA,yBAAA;EACQ,iBAAA;CFrBT;AC7BD;;;;;;EAuBI,kBAAA;CDcH;AC2BC;;EAEE,uBAAA;CDzBH;AC8BD;EEvEI,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;EAyCA,0BAAA;EACA,mBAAA;CDtBD;AClBC;;EAEE,0BAAA;EACA,6BAAA;CDoBH;ACjBC;;EAEE,0BAAA;EACA,sBAAA;CDmBH;ACbG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2BL;ACPD;EE5EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4DD;AC1DC;;EAEE,0BAAA;EACA,6BAAA;CD4DH;ACzDC;;EAEE,0BAAA;EACA,sBAAA;CD2DH;ACrDG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmEL;AC9CD;EE7EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CDoGD;AClGC;;EAEE,0BAAA;EACA,6BAAA;CDoGH;ACjGC;;EAEE,0BAAA;EACA,sBAAA;CDmGH;AC7FG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2GL;ACrFD;EE9EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4ID;AC1IC;;EAEE,0BAAA;EACA,6BAAA;CD4IH;ACzIC;;EAEE,0BAAA;EACA,sBAAA;CD2IH;ACrIG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmJL;AC5HD;EE/EI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CDoLD;AClLC;;EAEE,0BAAA;EACA,6BAAA;CDoLH;ACjLC;;EAEE,0BAAA;EACA,sBAAA;CDmLH;AC7KG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CD2LL;ACnKD;EEhFI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EClBF,oEAAA;EH8CA,4BAAA;EACA,sBAAA;CD4ND;AC1NC;;EAEE,0BAAA;EACA,6BAAA;CD4NH;ACzNC;;EAEE,0BAAA;EACA,sBAAA;CD2NH;ACrNG;;;;;;;;;;;;;;;;;;EAME,0BAAA;EACA,uBAAA;CDmOL;ACpMD;;ECtCE,mDAAA;EACQ,2CAAA;CF8OT;AC/LD;;EEjGI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFgGF,0BAAA;CDqMD;ACnMD;;;EEtGI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFsGF,0BAAA;CDyMD;AChMD;EEnHI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ECnBF,oEAAA;EHqIA,mBAAA;ECrEA,4FAAA;EACQ,oFAAA;CF4QT;AC3MD;;EEnHI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ED6CF,yDAAA;EACQ,iDAAA;CFsRT;ACxMD;;EAEE,+CAAA;CD0MD;ACtMD;EEtII,sEAAA;EACA,iEAAA;EACA,2FAAA;EAAA,oEAAA;EACA,uHAAA;EACA,4BAAA;ECnBF,oEAAA;EHwJA,mBAAA;CD4MD;AC/MD;;EEtII,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;ED6CF,wDAAA;EACQ,gDAAA;CF6ST;ACzND;;EAYI,0CAAA;CDiNH;AC5MD;;;EAGE,iBAAA;CD8MD;AC1MD;EAEI;;;IAGE,YAAA;IEnKF,yEAAA;IACA,oEAAA;IACA,8FAAA;IAAA,uEAAA;IACA,uHAAA;IACA,4BAAA;GH+WD;CACF;ACrMD;EACE,8CAAA;EC/HA,2FAAA;EACQ,mFAAA;CFuUT;AC7LD;EE5LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDyMD;ACpMD;EE7LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDiND;AC3MD;EE9LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDyND;AClND;EE/LI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFoLF,sBAAA;CDiOD;AClND;EEvMI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH4ZH;AC/MD;EEjNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHmaH;ACrND;EElNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH0aH;AC3ND;EEnNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHibH;ACjOD;EEpNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHwbH;ACvOD;EErNI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH+bH;AC1OD;EExLI,8MAAA;EACA,yMAAA;EACA,sMAAA;CHqaH;ACtOD;EACE,mBAAA;EClLA,mDAAA;EAC
Q,2CAAA;CF2ZT;ACvOD;;;EAGE,8BAAA;EEzOE,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EFuOF,sBAAA;CD6OD;AClPD;;;EAQI,kBAAA;CD+OH;ACrOD;ECvME,kDAAA;EACQ,0CAAA;CF+aT;AC/ND;EElQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHoeH;ACrOD;EEnQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CH2eH;AC3OD;EEpQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHkfH;ACjPD;EErQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHyfH;ACvPD;EEtQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHggBH;AC7PD;EEvQI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;CHugBH;AC7PD;EE9QI,yEAAA;EACA,oEAAA;EACA,8FAAA;EAAA,uEAAA;EACA,uHAAA;EACA,4BAAA;EF4QF,sBAAA;EC/NA,0FAAA;EACQ,kFAAA;CFmeT","file":"bootstrap-theme.css","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n text-shadow: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n}\n.btn-default {\n background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #dbdbdb;\n text-shadow: 0 1px 0 #fff;\n border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n background-color: #e0e0e0;\n background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n background-color: #e0e0e0;\n border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] 
.btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n background-color: #e0e0e0;\n background-image: none;\n}\n.btn-primary {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n background-color: #265a88;\n background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n background-color: #265a88;\n border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n background-color: #265a88;\n background-image: none;\n}\n.btn-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n background-color: #419641;\n background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n background-color: #419641;\n border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n background-color: #419641;\n background-image: none;\n}\n.btn-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n background-color: #2aabd2;\n 
background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n background-color: #2aabd2;\n border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n background-color: #2aabd2;\n background-image: none;\n}\n.btn-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n background-color: #eb9316;\n background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n background-color: #eb9316;\n border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n background-color: #eb9316;\n background-image: none;\n}\n.btn-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n background-color: #c12e2a;\n background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n background-color: #c12e2a;\n border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n background-color: #c12e2a;\n background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 
0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n background-color: #2e6da4;\n}\n.navbar-default {\n background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 
0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n@media (max-width: 767px) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n }\n}\n.alert {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n background-repeat: repeat-x;\n border-color: #b2dba1;\n}\n.alert-info {\n background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #9acfea;\n}\n.alert-warning {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #f5e79e;\n}\n.alert-danger {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dca7a7;\n}\n.progress {\n background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: 
-o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 #286090;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n background-repeat: repeat-x;\n border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n text-shadow: none;\n}\n.panel {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 
100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n background-repeat: repeat-x;\n}\n.well {\n background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dcdcdc;\n -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","// stylelint-disable selector-no-qualifying-type, selector-max-compound-selectors\n\n/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n .box-shadow(@shadow);\n\n // Reset the shadow\n &:active,\n &.active {\n .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n .box-shadow(none);\n }\n\n .badge {\n text-shadow: 
none;\n }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n background-repeat: repeat-x;\n border-color: darken(@btn-color, 14%);\n\n &:hover,\n &:focus {\n background-color: darken(@btn-color, 12%);\n background-position: 0 -15px;\n }\n\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n border-color: darken(@btn-color, 14%);\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n &,\n &:hover,\n &:focus,\n &.focus,\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n background-image: none;\n }\n }\n}\n\n// Common styles\n.btn {\n // Remove the gradient for the pressed/active state\n &:active,\n &.active {\n background-image: none;\n }\n}\n\n// Apply the mixin to the buttons\n.btn-default {\n .btn-styles(@btn-default-bg);\n text-shadow: 0 1px 0 #fff;\n border-color: #ccc;\n}\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n border-radius: @navbar-border-radius;\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n .box-shadow(@shadow);\n\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .075));\n }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n border-radius: @navbar-border-radius;\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .25));\n }\n\n .navbar-brand,\n .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n 
}\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a {\n &,\n &:hover,\n &:focus {\n color: #fff;\n #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n }\n }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success { .alert-styles(@alert-success-bg); }\n.alert-info { .alert-styles(@alert-info-bg); }\n.alert-warning { .alert-styles(@alert-warning-bg); }\n.alert-danger { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n border-radius: @border-radius-base;\n .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n border-color: darken(@list-group-active-border, 7.5%);\n\n .badge {\n text-shadow: none;\n }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n .box-shadow(0 1px 2px rgba(0, 0, 0, .05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading { 
.panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n border-color: darken(@well-bg, 10%);\n @shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n .box-shadow(@shadow);\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n -webkit-animation: @animation;\n -o-animation: @animation;\n animation: @animation;\n}\n.animation-name(@name) {\n -webkit-animation-name: @name;\n animation-name: @name;\n}\n.animation-duration(@duration) {\n -webkit-animation-duration: @duration;\n animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n -webkit-animation-timing-function: @timing-function;\n animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n -webkit-animation-delay: @delay;\n animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n -webkit-animation-iteration-count: @iteration-count;\n animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n -webkit-animation-direction: @direction;\n animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n -webkit-animation-fill-mode: @fill-mode;\n animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n -webkit-backface-visibility: @visibility;\n -moz-backface-visibility: @visibility;\n backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n -webkit-box-sizing: @boxmodel;\n -moz-box-sizing: @boxmodel;\n box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n -webkit-column-count: @column-count;\n -moz-column-count: @column-count;\n column-count: @column-count;\n -webkit-column-gap: @column-gap;\n -moz-column-gap: @column-gap;\n column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n -webkit-hyphens: @mode;\n -moz-hyphens: @mode;\n -ms-hyphens: @mode; // IE10+\n -o-hyphens: @mode;\n hyphens: @mode;\n word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n // Firefox\n &::-moz-placeholder {\n color: @color;\n opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n }\n &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n &::-webkit-input-placeholder { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n -webkit-transform: scale(@ratio);\n -ms-transform: scale(@ratio); // IE9 only\n -o-transform: scale(@ratio);\n 
transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n -webkit-transform: scale(@ratioX, @ratioY);\n -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n -o-transform: scale(@ratioX, @ratioY);\n transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n -webkit-transform: scaleX(@ratio);\n -ms-transform: scaleX(@ratio); // IE9 only\n -o-transform: scaleX(@ratio);\n transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n -webkit-transform: scaleY(@ratio);\n -ms-transform: scaleY(@ratio); // IE9 only\n -o-transform: scaleY(@ratio);\n transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n -webkit-transform: skewX(@x) skewY(@y);\n -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n -o-transform: skewX(@x) skewY(@y);\n transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n -webkit-transform: translate(@x, @y);\n -ms-transform: translate(@x, @y); // IE9 only\n -o-transform: translate(@x, @y);\n transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n -webkit-transform: translate3d(@x, @y, @z);\n transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n -webkit-transform: rotate(@degrees);\n -ms-transform: rotate(@degrees); // IE9 only\n -o-transform: rotate(@degrees);\n transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n -webkit-transform: rotateX(@degrees);\n -ms-transform: rotateX(@degrees); // IE9 only\n -o-transform: rotateX(@degrees);\n transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n -webkit-transform: rotateY(@degrees);\n -ms-transform: rotateY(@degrees); // IE9 only\n -o-transform: rotateY(@degrees);\n transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n -webkit-perspective: @perspective;\n -moz-perspective: @perspective;\n perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n -webkit-perspective-origin: @perspective;\n -moz-perspective-origin: @perspective;\n perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n -webkit-transform-origin: @origin;\n -moz-transform-origin: @origin;\n -ms-transform-origin: @origin; // IE9 only\n transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n -webkit-transition: @transition;\n -o-transition: @transition;\n transition: @transition;\n}\n.transition-property(@transition-property) {\n -webkit-transition-property: @transition-property;\n transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n -webkit-transition-delay: @transition-delay;\n transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n -webkit-transition-duration: @transition-duration;\n transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n -webkit-transition-timing-function: @timing-function;\n transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n -webkit-transition: -webkit-transform @transition;\n -moz-transition: -moz-transform @transition;\n -o-transition: -o-transform @transition;\n transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n -webkit-user-select: @select;\n -moz-user-select: @select;\n -ms-user-select: @select; // IE10+\n user-select: @select;\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n // Horizontal gradient, from left to right\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n 
.horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n background-repeat: repeat-x;\n }\n\n // Vertical gradient, from top to bottom\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n background-repeat: repeat-x;\n }\n\n .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n background-repeat: repeat-x;\n }\n .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n background-repeat: no-repeat;\n }\n .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n background-repeat: no-repeat;\n }\n .radial(@inner-color: #555; @outer-color: #333) {\n background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n background-image: radial-gradient(circle, @inner-color, 
@outer-color);\n background-repeat: no-repeat;\n }\n .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css deleted file mode 100644 index 2a69f48c7f5..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Bootstrap v3.4.1 (https://getbootstrap.com/) - * Copyright 2011-2019 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */.btn-danger,.btn-default,.btn-info,.btn-primary,.btn-success,.btn-warning{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-danger.active,.btn-danger:active,.btn-default.active,.btn-default:active,.btn-info.active,.btn-info:active,.btn-primary.active,.btn-primary:active,.btn-success.active,.btn-success:active,.btn-warning.active,.btn-warning:active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-danger.disabled,.btn-danger[disabled],.btn-default.disabled,.btn-default[disabled],.btn-info.disabled,.btn-info[disabled],.btn-primary.disabled,.btn-primary[disabled],.btn-success.disabled,.btn-success[disabled],.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-danger,fieldset[disabled] .btn-default,fieldset[disabled] .btn-info,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-success,fieldset[disabled] .btn-warning{-webkit-box-shadow:none;box-shadow:none}.btn-danger .badge,.btn-default .badge,.btn-info .badge,.btn-primary .badge,.btn-success .badge,.btn-warning .badge{text-shadow:none}.btn.active,.btn:active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-o-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#e0e0e0));background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:focus,.btn-default:hover{background-color:#e0e0e0;background-position:0 
-15px}.btn-default.active,.btn-default:active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-default.disabled,.btn-default.disabled.active,.btn-default.disabled.focus,.btn-default.disabled:active,.btn-default.disabled:focus,.btn-default.disabled:hover,.btn-default[disabled],.btn-default[disabled].active,.btn-default[disabled].focus,.btn-default[disabled]:active,.btn-default[disabled]:focus,.btn-default[disabled]:hover,fieldset[disabled] .btn-default,fieldset[disabled] .btn-default.active,fieldset[disabled] .btn-default.focus,fieldset[disabled] .btn-default:active,fieldset[disabled] .btn-default:focus,fieldset[disabled] .btn-default:hover{background-color:#e0e0e0;background-image:none}.btn-primary{background-image:-webkit-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-o-linear-gradient(top,#337ab7 0,#265a88 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#265a88));background-image:linear-gradient(to bottom,#337ab7 0,#265a88 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#245580}.btn-primary:focus,.btn-primary:hover{background-color:#265a88;background-position:0 -15px}.btn-primary.active,.btn-primary:active{background-color:#265a88;border-color:#245580}.btn-primary.disabled,.btn-primary.disabled.active,.btn-primary.disabled.focus,.btn-primary.disabled:active,.btn-primary.disabled:focus,.btn-primary.disabled:hover,.btn-primary[disabled],.btn-primary[disabled].active,.btn-primary[disabled].focus,.btn-primary[disabled]:active,.btn-primary[disabled]:focus,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary,fieldset[disabled] .btn-primary.active,fieldset[disabled] .btn-primary.focus,fieldset[disabled] .btn-primary:active,fieldset[disabled] .btn-primary:focus,fieldset[disabled] .btn-primary:hover{background-color:#265a88;background-image:none}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#419641));background-image:linear-gradient(to bottom,#5cb85c 0,#419641 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:focus,.btn-success:hover{background-color:#419641;background-position:0 -15px}.btn-success.active,.btn-success:active{background-color:#419641;border-color:#3e8f3e}.btn-success.disabled,.btn-success.disabled.active,.btn-success.disabled.focus,.btn-success.disabled:active,.btn-success.disabled:focus,.btn-success.disabled:hover,.btn-success[disabled],.btn-success[disabled].active,.btn-success[disabled].focus,.btn-success[disabled]:active,.btn-success[disabled]:focus,.btn-success[disabled]:hover,fieldset[disabled] .btn-success,fieldset[disabled] .btn-success.active,fieldset[disabled] .btn-success.focus,fieldset[disabled] .btn-success:active,fieldset[disabled] .btn-success:focus,fieldset[disabled] .btn-success:hover{background-color:#419641;background-image:none}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:-webkit-gradient(linear,left top,left 
bottom,from(#5bc0de),to(#2aabd2));background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:focus,.btn-info:hover{background-color:#2aabd2;background-position:0 -15px}.btn-info.active,.btn-info:active{background-color:#2aabd2;border-color:#28a4c9}.btn-info.disabled,.btn-info.disabled.active,.btn-info.disabled.focus,.btn-info.disabled:active,.btn-info.disabled:focus,.btn-info.disabled:hover,.btn-info[disabled],.btn-info[disabled].active,.btn-info[disabled].focus,.btn-info[disabled]:active,.btn-info[disabled]:focus,.btn-info[disabled]:hover,fieldset[disabled] .btn-info,fieldset[disabled] .btn-info.active,fieldset[disabled] .btn-info.focus,fieldset[disabled] .btn-info:active,fieldset[disabled] .btn-info:focus,fieldset[disabled] .btn-info:hover{background-color:#2aabd2;background-image:none}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#eb9316));background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:focus,.btn-warning:hover{background-color:#eb9316;background-position:0 -15px}.btn-warning.active,.btn-warning:active{background-color:#eb9316;border-color:#e38d13}.btn-warning.disabled,.btn-warning.disabled.active,.btn-warning.disabled.focus,.btn-warning.disabled:active,.btn-warning.disabled:focus,.btn-warning.disabled:hover,.btn-warning[disabled],.btn-warning[disabled].active,.btn-warning[disabled].focus,.btn-warning[disabled]:active,.btn-warning[disabled]:focus,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning,fieldset[disabled] .btn-warning.active,fieldset[disabled] .btn-warning.focus,fieldset[disabled] .btn-warning:active,fieldset[disabled] .btn-warning:focus,fieldset[disabled] .btn-warning:hover{background-color:#eb9316;background-image:none}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c12e2a));background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:focus,.btn-danger:hover{background-color:#c12e2a;background-position:0 -15px}.btn-danger.active,.btn-danger:active{background-color:#c12e2a;border-color:#b92c28}.btn-danger.disabled,.btn-danger.disabled.active,.btn-danger.disabled.focus,.btn-danger.disabled:active,.btn-danger.disabled:focus,.btn-danger.disabled:hover,.btn-danger[disabled],.btn-danger[disabled].active,.btn-danger[disabled].focus,.btn-danger[disabled]:active,.btn-danger[disabled]:focus,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger,fieldset[disabled] .btn-danger.active,fieldset[disabled] .btn-danger.focus,fieldset[disabled] .btn-danger:active,fieldset[disabled] 
.btn-danger:focus,fieldset[disabled] .btn-danger:hover{background-color:#c12e2a;background-image:none}.img-thumbnail,.thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:focus,.dropdown-menu>li>a:hover{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x;background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:focus,.dropdown-menu>.active>a:hover{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x;background-color:#2e6da4}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-o-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fff),to(#f8f8f8));background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-o-linear-gradient(top,#dbdbdb 0,#e2e2e2 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dbdbdb),to(#e2e2e2));background-image:linear-gradient(to bottom,#dbdbdb 0,#e2e2e2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-o-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#3c3c3c),to(#222));background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.open>a{background-image:-webkit-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-o-linear-gradient(top,#080808 0,#0f0f0f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#080808),to(#0f0f0f));background-image:linear-gradient(to bottom,#080808 0,#0f0f0f 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);background-repeat:repeat-x;-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-fixed-bottom,.navbar-fixed-top,.navbar-static-top{border-radius:0}@media (max-width:767px){.navbar .navbar-nav .open .dropdown-menu>.active>a,.navbar .navbar-nav .open .dropdown-menu>.active>a:focus,.navbar .navbar-nav .open .dropdown-menu>.active>a:hover{color:#fff;background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#c8e5bc));background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);background-repeat:repeat-x;border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#b9def0));background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);background-repeat:repeat-x;border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#f8efc0));background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);background-repeat:repeat-x;border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-o-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#e7c3c3));background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);background-repeat:repeat-x;border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#ebebeb),to(#f5f5f5));background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', 
GradientType=0);background-repeat:repeat-x}.progress-bar{background-image:-webkit-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-o-linear-gradient(top,#337ab7 0,#286090 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#286090));background-image:linear-gradient(to bottom,#337ab7 0,#286090 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);background-repeat:repeat-x}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-o-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5cb85c),to(#449d44));background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);background-repeat:repeat-x}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-o-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#5bc0de),to(#31b0d5));background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);background-repeat:repeat-x}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-o-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f0ad4e),to(#ec971f));background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);background-repeat:repeat-x}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-o-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9534f),to(#c9302c));background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);background-repeat:repeat-x}.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:focus,.list-group-item.active:hover{text-shadow:0 -1px 0 #286090;background-image:-webkit-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2b669a 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2b669a));background-image:linear-gradient(to bottom,#337ab7 0,#2b669a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', 
GradientType=0);background-repeat:repeat-x;border-color:#2b669a}.list-group-item.active .badge,.list-group-item.active:focus .badge,.list-group-item.active:hover .badge{text-shadow:none}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-o-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f5f5f5),to(#e8e8e8));background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-repeat:repeat-x}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-o-linear-gradient(top,#337ab7 0,#2e6da4 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#337ab7),to(#2e6da4));background-image:linear-gradient(to bottom,#337ab7 0,#2e6da4 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);background-repeat:repeat-x}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-o-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#dff0d8),to(#d0e9c6));background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);background-repeat:repeat-x}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-o-linear-gradient(top,#d9edf7 0,#c4e3f3 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#d9edf7),to(#c4e3f3));background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);background-repeat:repeat-x}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-o-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#fcf8e3),to(#faf2cc));background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);background-repeat:repeat-x}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-o-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#f2dede),to(#ebcccc));background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);background-repeat:repeat-x}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-o-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:-webkit-gradient(linear,left top,left bottom,from(#e8e8e8),to(#f5f5f5));background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);background-repeat:repeat-x;border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px 
rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)} -/*# sourceMappingURL=bootstrap-theme.min.css.map */ \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map deleted file mode 100644 index 5d75106e042..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap-theme.min.css.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"sources":["bootstrap-theme.css","dist/css/bootstrap-theme.css","less/theme.less","less/mixins/vendor-prefixes.less","less/mixins/gradients.less","less/mixins/reset-filter.less"],"names":[],"mappings":"AAAA;;;;ACUA,YCWA,aDbA,UAFA,aACA,aAEA,aCkBE,YAAA,EAAA,KAAA,EAAA,eC2CA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBF7CV,mBANA,mBACA,oBCWE,oBDRF,iBANA,iBAIA,oBANA,oBAOA,oBANA,oBAQA,oBANA,oBEmDE,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBFpCV,qBAMA,sBCJE,sBDDF,uBAHA,mBAMA,oBARA,sBAMA,uBALA,sBAMA,uBAJA,sBAMA,uBAOA,+BALA,gCAGA,6BAFA,gCACA,gCAEA,gCEwBE,mBAAA,KACQ,WAAA,KFfV,mBCnCA,oBDiCA,iBAFA,oBACA,oBAEA,oBCXI,YAAA,KDgBJ,YCyBE,YAEE,iBAAA,KAKJ,aEvEI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QAyCA,YAAA,EAAA,IAAA,EAAA,KACA,aAAA,KDnBF,mBCrBE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDuBJ,oBCpBE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBD8BJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCdM,iBAAA,QACA,iBAAA,KAoBN,aE5EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDgEF,mBC9DE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDgEJ,oBC7DE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDuEJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCCvDM,iBAAA,QACA,iBAAA,KAqBN,aE7EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDyGF,mBCvGE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MDyGJ,oBCtGE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDgHJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCChGM,iBAAA,QACA,iBAAA,KAsBN,UE9EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDkJF,gBChJE,gBAEE,iBAAA,QACA,oBAAA,EAAA,MDkJJ,iBC/IE,iBAEE,iBAAA,QACA,aAAA,QAMA,mBDyJJ,0BANA,yBAGA,0BANA,yBAHA,yBAFA,oBAeA,2BANA,0BAGA,2BANA,0BAHA,0BAFA,6BAeA,oCANA,mCAGA,oCANA,mCAHA,mCCzIM,iBAAA,QACA,iBAAA,KAuBN,aE/EI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QD2LF,mBCzLE,mBAEE,iBAAA,QACA,oBAAA,EAAA,MD2LJ,oBCxLE,oBAEE,iBAAA,QACA,aAAA,QAMA,sBDkMJ,6BANA,4BAGA,6BANA,4BAHA,4BAFA,uBAeA,8BANA,6BAGA,8BANA,6BAHA,6BAFA,gCAeA,uCANA,sCAGA,uCANA,sCAHA,sCClLM,iBAAA,QACA,iBAAA,KAwBN,YEhFI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GClBF,OAAA,0DH8CA,kBAAA,SACA,aAAA,QDoOF,kBClOE,kBAEE,iBAAA,QACA,oBAAA,EAAA,MDoOJ,mBCjOE,mBAEE,iBAAA,QACA,aAAA,QAMA,qBD2OJ,4BANA,2BAGA,4BANA,2BAHA,2BAFA,sBAeA,6BANA,4BAGA,6BANA,4BAHA,4BAFA,+BAeA,sCANA,qCAGA,sCANA,qCAHA,qCC3NM,iBAAA,QACA,iBAAA,KD2ON,eC5MA,WCtCE,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBFsPV,0BCvMA,0BEjGI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgGF,iBAAA,QAEF,yBD6MA,+BADA,+BGlTI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFsGF,iBAAA,QASF,gBEnHI,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CA
CA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHqIA,cAAA,ICrEA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,iBFuRV,sCCtNA,oCEnHI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,iBD8EV,cDoNA,iBClNE,YAAA,EAAA,IAAA,EAAA,sBAIF,gBEtII,iBAAA,iDACA,iBAAA,4CACA,iBAAA,qEAAA,iBAAA,+CACA,OAAA,+GACA,kBAAA,SCnBF,OAAA,0DHwJA,cAAA,IDyNF,sCC5NA,oCEtII,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SD6CF,mBAAA,MAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,gBDoFV,8BDuOA,iCC3NI,YAAA,EAAA,KAAA,EAAA,gBDgOJ,qBADA,kBC1NA,mBAGE,cAAA,EAIF,yBAEI,mDDwNF,yDADA,yDCpNI,MAAA,KEnKF,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,UF2KJ,OACE,YAAA,EAAA,IAAA,EAAA,qBC/HA,mBAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,MAAA,EAAA,IAAA,EAAA,qBAAA,CAAA,EAAA,IAAA,IAAA,gBD0IV,eE5LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAKF,YE7LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAMF,eE9LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAOF,cE/LI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFoLF,aAAA,QAeF,UEvMI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF6MJ,cEjNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF8MJ,sBElNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+MJ,mBEnNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgNJ,sBEpNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiNJ,qBErNI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFqNJ,sBExLI,iBAAA,yKACA,iBAAA,oKACA,iBAAA,iKF+LJ,YACE,cAAA,IClLA,mBAAA,EAAA,IAAA,IAAA,iBACQ,WAAA,EAAA,IAAA,IAAA,iBDoLV,wBDiQA,8BADA,8BC7PE,YAAA,EAAA,KAAA,EAAA,QEzOE,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFuOF,aAAA,QALF,+BD6QA,qCADA,qCCpQI,YAAA,KAUJ,OCvME,mBAAA,EAAA,IAAA,IAAA,gBACQ,WAAA,EAAA,IAAA,IAAA,gBDgNV,8BElQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF+PJ,8BEnQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFgQJ,8BEpQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFiQJ,2BErQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFkQJ,8BEtQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SFmQJ,6BEvQI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF0QJ,ME9QI,iBAAA,oDACA,iBAAA,+CACA,iBAAA,wEAAA,iBAAA,kDACA,OAAA,+GACA,kBAAA,SF4QF,aAAA,QC/NA,mBAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA,qBACQ,WAAA,MAAA,EAAA,IAAA,IAAA,eAAA,CAAA,EAAA,IAAA,EAAA","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 
0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n text-shadow: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n}\n.btn-default {\n background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #dbdbdb;\n text-shadow: 0 1px 0 #fff;\n border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n background-color: #e0e0e0;\n background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n background-color: #e0e0e0;\n border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n background-color: #e0e0e0;\n background-image: none;\n}\n.btn-primary {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n background-color: #265a88;\n background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n background-color: #265a88;\n border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n background-color: #265a88;\n background-image: 
none;\n}\n.btn-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n background-color: #419641;\n background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n background-color: #419641;\n border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n background-color: #419641;\n background-image: none;\n}\n.btn-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n background-color: #2aabd2;\n background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n background-color: #2aabd2;\n border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n background-color: #2aabd2;\n background-image: none;\n}\n.btn-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n background-color: #eb9316;\n background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n background-color: #eb9316;\n border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] 
.btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n background-color: #eb9316;\n background-image: none;\n}\n.btn-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n background-color: #c12e2a;\n background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n background-color: #c12e2a;\n border-color: #b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n background-color: #c12e2a;\n background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n background-color: #2e6da4;\n}\n.navbar-default {\n background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 
255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n@media (max-width: 767px) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n }\n}\n.alert {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n background-repeat: repeat-x;\n border-color: #b2dba1;\n}\n.alert-info {\n background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n filter: 
progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #9acfea;\n}\n.alert-warning {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #f5e79e;\n}\n.alert-danger {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dca7a7;\n}\n.progress {\n background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 
255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 #286090;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n background-repeat: repeat-x;\n border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n text-shadow: none;\n}\n.panel {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, 
#ebcccc 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n background-repeat: repeat-x;\n}\n.well {\n background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dcdcdc;\n -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-default.disabled,\n.btn-primary.disabled,\n.btn-success.disabled,\n.btn-info.disabled,\n.btn-warning.disabled,\n.btn-danger.disabled,\n.btn-default[disabled],\n.btn-primary[disabled],\n.btn-success[disabled],\n.btn-info[disabled],\n.btn-warning[disabled],\n.btn-danger[disabled],\nfieldset[disabled] .btn-default,\nfieldset[disabled] .btn-primary,\nfieldset[disabled] .btn-success,\nfieldset[disabled] .btn-info,\nfieldset[disabled] .btn-warning,\nfieldset[disabled] .btn-danger {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-default .badge,\n.btn-primary .badge,\n.btn-success .badge,\n.btn-info .badge,\n.btn-warning .badge,\n.btn-danger .badge {\n text-shadow: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n}\n.btn-default {\n background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: -o-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#fff), to(#e0e0e0));\n background-image: linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #dbdbdb;\n text-shadow: 0 1px 0 #fff;\n border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n background-color: #e0e0e0;\n background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n background-color: #e0e0e0;\n border-color: #dbdbdb;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] 
.btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n background-color: #e0e0e0;\n background-image: none;\n}\n.btn-primary {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #265a88 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#265a88));\n background-image: linear-gradient(to bottom, #337ab7 0%, #265a88 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff265a88', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #245580;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n background-color: #265a88;\n background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n background-color: #265a88;\n border-color: #245580;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n background-color: #265a88;\n background-image: none;\n}\n.btn-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: -o-linear-gradient(top, #5cb85c 0%, #419641 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#419641));\n background-image: linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n background-color: #419641;\n background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n background-color: #419641;\n border-color: #3e8f3e;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n background-color: #419641;\n background-image: none;\n}\n.btn-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, 
#2aabd2 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#2aabd2));\n background-image: linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n background-color: #2aabd2;\n background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n background-color: #2aabd2;\n border-color: #28a4c9;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n background-color: #2aabd2;\n background-image: none;\n}\n.btn-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#eb9316));\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n background-color: #eb9316;\n background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n background-color: #eb9316;\n border-color: #e38d13;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n background-color: #eb9316;\n background-image: none;\n}\n.btn-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c12e2a));\n background-image: linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n background-repeat: repeat-x;\n border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n background-color: #c12e2a;\n background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n background-color: #c12e2a;\n border-color: 
#b92c28;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n background-color: #c12e2a;\n background-image: none;\n}\n.thumbnail,\n.img-thumbnail {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n background-color: #e8e8e8;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n background-color: #2e6da4;\n}\n.navbar-default {\n background-image: -webkit-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: -o-linear-gradient(top, #ffffff 0%, #f8f8f8 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#f8f8f8));\n background-image: linear-gradient(to bottom, #ffffff 0%, #f8f8f8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.15), 0 1px 5px rgba(0, 0, 0, 0.075);\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: -o-linear-gradient(top, #dbdbdb 0%, #e2e2e2 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#dbdbdb), to(#e2e2e2));\n background-image: linear-gradient(to bottom, #dbdbdb 0%, #e2e2e2 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdbdbdb', endColorstr='#ffe2e2e2', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.25);\n}\n.navbar-inverse {\n background-image: 
-webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: -o-linear-gradient(top, #3c3c3c 0%, #222 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#3c3c3c), to(#222));\n background-image: linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n background-repeat: repeat-x;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n border-radius: 4px;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .active > a {\n background-image: -webkit-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: -o-linear-gradient(top, #080808 0%, #0f0f0f 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#080808), to(#0f0f0f));\n background-image: linear-gradient(to bottom, #080808 0%, #0f0f0f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff080808', endColorstr='#ff0f0f0f', GradientType=0);\n background-repeat: repeat-x;\n -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n box-shadow: inset 0 3px 9px rgba(0, 0, 0, 0.25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n@media (max-width: 767px) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n }\n}\n.alert {\n text-shadow: 0 1px 0 rgba(255, 255, 255, 0.2);\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25), 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.alert-success {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#c8e5bc));\n background-image: linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n background-repeat: repeat-x;\n border-color: #b2dba1;\n}\n.alert-info {\n background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#b9def0));\n background-image: linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #9acfea;\n}\n.alert-warning {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #f8efc0 
100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#f8efc0));\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n background-repeat: repeat-x;\n border-color: #f5e79e;\n}\n.alert-danger {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#e7c3c3));\n background-image: linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dca7a7;\n}\n.progress {\n background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#ebebeb), to(#f5f5f5));\n background-image: linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #286090 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#286090));\n background-image: linear-gradient(to bottom, #337ab7 0%, #286090 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff286090', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-success {\n background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: -o-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#5cb85c), to(#449d44));\n background-image: linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-info {\n background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: -o-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#5bc0de), to(#31b0d5));\n background-image: linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-warning {\n background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: -o-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f0ad4e), to(#ec971f));\n background-image: linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-danger {\n background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: -o-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n background-image: 
-webkit-gradient(linear, left top, left bottom, from(#d9534f), to(#c9302c));\n background-image: linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n background-repeat: repeat-x;\n}\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.list-group {\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 #286090;\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2b669a 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2b669a));\n background-image: linear-gradient(to bottom, #337ab7 0%, #2b669a 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2b669a', GradientType=0);\n background-repeat: repeat-x;\n border-color: #2b669a;\n}\n.list-group-item.active .badge,\n.list-group-item.active:hover .badge,\n.list-group-item.active:focus .badge {\n text-shadow: none;\n}\n.panel {\n -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);\n}\n.panel-default > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -o-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f5f5f5), to(#e8e8e8));\n background-image: linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n background-image: -webkit-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -o-linear-gradient(top, #337ab7 0%, #2e6da4 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#337ab7), to(#2e6da4));\n background-image: linear-gradient(to bottom, #337ab7 0%, #2e6da4 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff337ab7', endColorstr='#ff2e6da4', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: -o-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#dff0d8), to(#d0e9c6));\n background-image: linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n background-image: 
-webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: -o-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#d9edf7), to(#c4e3f3));\n background-image: linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: -o-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#fcf8e3), to(#faf2cc));\n background-image: linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n background-image: -o-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#f2dede), to(#ebcccc));\n background-image: linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n background-repeat: repeat-x;\n}\n.well {\n background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: -o-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n background-image: -webkit-gradient(linear, left top, left bottom, from(#e8e8e8), to(#f5f5f5));\n background-image: linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n background-repeat: repeat-x;\n border-color: #dcdcdc;\n -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.05), 0 1px 0 rgba(255, 255, 255, 0.1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */","// stylelint-disable selector-no-qualifying-type, selector-max-compound-selectors\n\n/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n .box-shadow(@shadow);\n\n // Reset the shadow\n &:active,\n &.active {\n .box-shadow(inset 0 3px 5px rgba(0, 0, 0, .125));\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n .box-shadow(none);\n }\n\n .badge {\n text-shadow: none;\n }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners; see https://github.com/twbs/bootstrap/issues/10620\n background-repeat: repeat-x;\n border-color: darken(@btn-color, 
14%);\n\n &:hover,\n &:focus {\n background-color: darken(@btn-color, 12%);\n background-position: 0 -15px;\n }\n\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n border-color: darken(@btn-color, 14%);\n }\n\n &.disabled,\n &[disabled],\n fieldset[disabled] & {\n &,\n &:hover,\n &:focus,\n &.focus,\n &:active,\n &.active {\n background-color: darken(@btn-color, 12%);\n background-image: none;\n }\n }\n}\n\n// Common styles\n.btn {\n // Remove the gradient for the pressed/active state\n &:active,\n &.active {\n background-image: none;\n }\n}\n\n// Apply the mixin to the buttons\n.btn-default {\n .btn-styles(@btn-default-bg);\n text-shadow: 0 1px 0 #fff;\n border-color: #ccc;\n}\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger { .btn-styles(@btn-danger-bg); }\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n border-radius: @navbar-border-radius;\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n .box-shadow(@shadow);\n\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: darken(@navbar-default-link-active-bg, 5%); @end-color: darken(@navbar-default-link-active-bg, 2%));\n .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .075));\n }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered; see https://github.com/twbs/bootstrap/issues/10257\n border-radius: @navbar-border-radius;\n .navbar-nav > .open > a,\n .navbar-nav > .active > a {\n #gradient > .vertical(@start-color: @navbar-inverse-link-active-bg; @end-color: lighten(@navbar-inverse-link-active-bg, 2.5%));\n .box-shadow(inset 0 3px 9px rgba(0, 0, 0, .25));\n }\n\n .navbar-brand,\n .navbar-nav > li > a {\n text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n border-radius: 0;\n}\n\n// Fix active state of dropdown items in collapsed mode\n@media (max-width: @grid-float-breakpoint-max) {\n .navbar .navbar-nav .open .dropdown-menu > .active > a {\n &,\n &:hover,\n &:focus {\n color: #fff;\n #gradient > 
.vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n }\n }\n}\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n @shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success { .alert-styles(@alert-success-bg); }\n.alert-info { .alert-styles(@alert-info-bg); }\n.alert-warning { .alert-styles(@alert-warning-bg); }\n.alert-danger { .alert-styles(@alert-danger-bg); }\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger { .progress-bar-styles(@progress-bar-danger-bg); }\n\n// Reset the striped class because our mixins don't do multiple gradients and\n// the above custom styles override the new `.progress-bar-striped` in v3.2.0.\n.progress-bar-striped {\n #gradient > .striped();\n}\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n border-radius: @border-radius-base;\n .box-shadow(0 1px 2px rgba(0, 0, 0, .075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n border-color: darken(@list-group-active-border, 7.5%);\n\n .badge {\n text-shadow: none;\n }\n}\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n .box-shadow(0 1px 2px rgba(0, 0, 0, .05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// Apply the mixin to the panel headings only\n.panel-default > .panel-heading { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n border-color: darken(@well-bg, 10%);\n @shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n .box-shadow(@shadow);\n}\n","// stylelint-disable 
indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n -webkit-animation: @animation;\n -o-animation: @animation;\n animation: @animation;\n}\n.animation-name(@name) {\n -webkit-animation-name: @name;\n animation-name: @name;\n}\n.animation-duration(@duration) {\n -webkit-animation-duration: @duration;\n animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n -webkit-animation-timing-function: @timing-function;\n animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n -webkit-animation-delay: @delay;\n animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n -webkit-animation-iteration-count: @iteration-count;\n animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n -webkit-animation-direction: @direction;\n animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n -webkit-animation-fill-mode: @fill-mode;\n animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n -webkit-backface-visibility: @visibility;\n -moz-backface-visibility: @visibility;\n backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n -webkit-box-sizing: @boxmodel;\n -moz-box-sizing: @boxmodel;\n box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n -webkit-column-count: @column-count;\n -moz-column-count: @column-count;\n column-count: @column-count;\n -webkit-column-gap: @column-gap;\n -moz-column-gap: @column-gap;\n column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n -webkit-hyphens: @mode;\n -moz-hyphens: @mode;\n -ms-hyphens: @mode; // IE10+\n -o-hyphens: @mode;\n hyphens: @mode;\n word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n // Firefox\n &::-moz-placeholder {\n color: @color;\n opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n }\n &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n &::-webkit-input-placeholder { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n -webkit-transform: scale(@ratio);\n -ms-transform: scale(@ratio); // IE9 only\n -o-transform: scale(@ratio);\n transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n -webkit-transform: scale(@ratioX, @ratioY);\n -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n -o-transform: scale(@ratioX, @ratioY);\n transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n -webkit-transform: scaleX(@ratio);\n -ms-transform: scaleX(@ratio); // IE9 only\n -o-transform: scaleX(@ratio);\n transform: 
scaleX(@ratio);\n}\n.scaleY(@ratio) {\n -webkit-transform: scaleY(@ratio);\n -ms-transform: scaleY(@ratio); // IE9 only\n -o-transform: scaleY(@ratio);\n transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n -webkit-transform: skewX(@x) skewY(@y);\n -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n -o-transform: skewX(@x) skewY(@y);\n transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n -webkit-transform: translate(@x, @y);\n -ms-transform: translate(@x, @y); // IE9 only\n -o-transform: translate(@x, @y);\n transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n -webkit-transform: translate3d(@x, @y, @z);\n transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n -webkit-transform: rotate(@degrees);\n -ms-transform: rotate(@degrees); // IE9 only\n -o-transform: rotate(@degrees);\n transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n -webkit-transform: rotateX(@degrees);\n -ms-transform: rotateX(@degrees); // IE9 only\n -o-transform: rotateX(@degrees);\n transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n -webkit-transform: rotateY(@degrees);\n -ms-transform: rotateY(@degrees); // IE9 only\n -o-transform: rotateY(@degrees);\n transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n -webkit-perspective: @perspective;\n -moz-perspective: @perspective;\n perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n -webkit-perspective-origin: @perspective;\n -moz-perspective-origin: @perspective;\n perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n -webkit-transform-origin: @origin;\n -moz-transform-origin: @origin;\n -ms-transform-origin: @origin; // IE9 only\n transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n -webkit-transition: @transition;\n -o-transition: @transition;\n transition: @transition;\n}\n.transition-property(@transition-property) {\n -webkit-transition-property: @transition-property;\n transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n -webkit-transition-delay: @transition-delay;\n transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n -webkit-transition-duration: @transition-duration;\n transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n -webkit-transition-timing-function: @timing-function;\n transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n -webkit-transition: -webkit-transform @transition;\n -moz-transition: -moz-transform @transition;\n -o-transition: -o-transform @transition;\n transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n -webkit-user-select: @select;\n -moz-user-select: @select;\n -ms-user-select: @select; // IE10+\n user-select: @select;\n}\n","// stylelint-disable value-no-vendor-prefix, selector-max-id\n\n#gradient {\n\n // Horizontal gradient, from left to right\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(left, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to right, @start-color 
@start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down\n background-repeat: repeat-x;\n }\n\n // Vertical gradient, from top to bottom\n //\n // Creates two color stops, start and end, by specifying a color and position for each color stop.\n // Color stops are not available in IE9 and below.\n .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(top, @start-color @start-percent, @end-color @end-percent); // Opera 12\n background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down\n background-repeat: repeat-x;\n }\n\n .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n background-image: -o-linear-gradient(@deg, @start-color, @end-color); // Opera 12\n background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n background-repeat: repeat-x;\n }\n .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n background-repeat: no-repeat;\n }\n .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: -o-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\", argb(@start-color), argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n background-repeat: no-repeat;\n }\n .radial(@inner-color: #555; @outer-color: #333) {\n background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n background-image: radial-gradient(circle, @inner-color, @outer-color);\n background-repeat: no-repeat;\n }\n .striped(@color: rgba(255, 255, 255, .15); @angle: 45deg) {\n background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, 
transparent);\n background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n }\n}\n","// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n\n.reset-filter() {\n filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n"]} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css deleted file mode 100644 index fcab41554ad..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css +++ /dev/null @@ -1,6834 +0,0 @@ -/*! - * Bootstrap v3.4.1 (https://getbootstrap.com/) - * Copyright 2011-2019 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */ -/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ -html { - font-family: sans-serif; - -ms-text-size-adjust: 100%; - -webkit-text-size-adjust: 100%; -} -body { - margin: 0; -} -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -menu, -nav, -section, -summary { - display: block; -} -audio, -canvas, -progress, -video { - display: inline-block; - vertical-align: baseline; -} -audio:not([controls]) { - display: none; - height: 0; -} -[hidden], -template { - display: none; -} -a { - background-color: transparent; -} -a:active, -a:hover { - outline: 0; -} -abbr[title] { - border-bottom: none; - text-decoration: underline; - -webkit-text-decoration: underline dotted; - -moz-text-decoration: underline dotted; - text-decoration: underline dotted; -} -b, -strong { - font-weight: bold; -} -dfn { - font-style: italic; -} -h1 { - font-size: 2em; - margin: 0.67em 0; -} -mark { - background: #ff0; - color: #000; -} -small { - font-size: 80%; -} -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} -sup { - top: -0.5em; -} -sub { - bottom: -0.25em; -} -img { - border: 0; -} -svg:not(:root) { - overflow: hidden; -} -figure { - margin: 1em 40px; -} -hr { - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; - height: 0; -} -pre { - overflow: auto; -} -code, -kbd, -pre, -samp { - font-family: monospace, monospace; - font-size: 1em; -} -button, -input, -optgroup, -select, -textarea { - color: inherit; - font: inherit; - margin: 0; -} -button { - overflow: visible; -} -button, -select { - text-transform: none; -} -button, -html input[type="button"], -input[type="reset"], -input[type="submit"] { - -webkit-appearance: button; - cursor: pointer; -} -button[disabled], -html input[disabled] { - cursor: default; -} -button::-moz-focus-inner, -input::-moz-focus-inner { - border: 0; - padding: 0; -} -input { - line-height: normal; -} -input[type="checkbox"], -input[type="radio"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - padding: 0; -} -input[type="number"]::-webkit-inner-spin-button, -input[type="number"]::-webkit-outer-spin-button { - height: auto; -} -input[type="search"] { - -webkit-appearance: textfield; - -webkit-box-sizing: content-box; - -moz-box-sizing: content-box; - box-sizing: content-box; -} -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - 
-webkit-appearance: none; -} -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} -legend { - border: 0; - padding: 0; -} -textarea { - overflow: auto; -} -optgroup { - font-weight: bold; -} -table { - border-collapse: collapse; - border-spacing: 0; -} -td, -th { - padding: 0; -} -/*! Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */ -@media print { - *, - *:before, - *:after { - color: #000 !important; - text-shadow: none !important; - background: transparent !important; - -webkit-box-shadow: none !important; - box-shadow: none !important; - } - a, - a:visited { - text-decoration: underline; - } - a[href]:after { - content: " (" attr(href) ")"; - } - abbr[title]:after { - content: " (" attr(title) ")"; - } - a[href^="#"]:after, - a[href^="javascript:"]:after { - content: ""; - } - pre, - blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - thead { - display: table-header-group; - } - tr, - img { - page-break-inside: avoid; - } - img { - max-width: 100% !important; - } - p, - h2, - h3 { - orphans: 3; - widows: 3; - } - h2, - h3 { - page-break-after: avoid; - } - .navbar { - display: none; - } - .btn > .caret, - .dropup > .btn > .caret { - border-top-color: #000 !important; - } - .label { - border: 1px solid #000; - } - .table { - border-collapse: collapse !important; - } - .table td, - .table th { - background-color: #fff !important; - } - .table-bordered th, - .table-bordered td { - border: 1px solid #ddd !important; - } -} -@font-face { - font-family: "Glyphicons Halflings"; - src: url("../fonts/glyphicons-halflings-regular.eot"); - src: url("../fonts/glyphicons-halflings-regular.eot?#iefix") format("embedded-opentype"), url("../fonts/glyphicons-halflings-regular.woff2") format("woff2"), url("../fonts/glyphicons-halflings-regular.woff") format("woff"), url("../fonts/glyphicons-halflings-regular.ttf") format("truetype"), url("../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular") format("svg"); -} -.glyphicon { - position: relative; - top: 1px; - display: inline-block; - font-family: "Glyphicons Halflings"; - font-style: normal; - font-weight: 400; - line-height: 1; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} -.glyphicon-asterisk:before { - content: "\002a"; -} -.glyphicon-plus:before { - content: "\002b"; -} -.glyphicon-euro:before, -.glyphicon-eur:before { - content: "\20ac"; -} -.glyphicon-minus:before { - content: "\2212"; -} -.glyphicon-cloud:before { - content: "\2601"; -} -.glyphicon-envelope:before { - content: "\2709"; -} -.glyphicon-pencil:before { - content: "\270f"; -} -.glyphicon-glass:before { - content: "\e001"; -} -.glyphicon-music:before { - content: "\e002"; -} -.glyphicon-search:before { - content: "\e003"; -} -.glyphicon-heart:before { - content: "\e005"; -} -.glyphicon-star:before { - content: "\e006"; -} -.glyphicon-star-empty:before { - content: "\e007"; -} -.glyphicon-user:before { - content: "\e008"; -} -.glyphicon-film:before { - content: "\e009"; -} -.glyphicon-th-large:before { - content: "\e010"; -} -.glyphicon-th:before { - content: "\e011"; -} -.glyphicon-th-list:before { - content: "\e012"; -} -.glyphicon-ok:before { - content: "\e013"; -} -.glyphicon-remove:before { - content: "\e014"; -} -.glyphicon-zoom-in:before { - content: "\e015"; -} -.glyphicon-zoom-out:before { - content: "\e016"; -} -.glyphicon-off:before { - content: "\e017"; -} -.glyphicon-signal:before { - content: "\e018"; -} -.glyphicon-cog:before 
{ - content: "\e019"; -} -.glyphicon-trash:before { - content: "\e020"; -} -.glyphicon-home:before { - content: "\e021"; -} -.glyphicon-file:before { - content: "\e022"; -} -.glyphicon-time:before { - content: "\e023"; -} -.glyphicon-road:before { - content: "\e024"; -} -.glyphicon-download-alt:before { - content: "\e025"; -} -.glyphicon-download:before { - content: "\e026"; -} -.glyphicon-upload:before { - content: "\e027"; -} -.glyphicon-inbox:before { - content: "\e028"; -} -.glyphicon-play-circle:before { - content: "\e029"; -} -.glyphicon-repeat:before { - content: "\e030"; -} -.glyphicon-refresh:before { - content: "\e031"; -} -.glyphicon-list-alt:before { - content: "\e032"; -} -.glyphicon-lock:before { - content: "\e033"; -} -.glyphicon-flag:before { - content: "\e034"; -} -.glyphicon-headphones:before { - content: "\e035"; -} -.glyphicon-volume-off:before { - content: "\e036"; -} -.glyphicon-volume-down:before { - content: "\e037"; -} -.glyphicon-volume-up:before { - content: "\e038"; -} -.glyphicon-qrcode:before { - content: "\e039"; -} -.glyphicon-barcode:before { - content: "\e040"; -} -.glyphicon-tag:before { - content: "\e041"; -} -.glyphicon-tags:before { - content: "\e042"; -} -.glyphicon-book:before { - content: "\e043"; -} -.glyphicon-bookmark:before { - content: "\e044"; -} -.glyphicon-print:before { - content: "\e045"; -} -.glyphicon-camera:before { - content: "\e046"; -} -.glyphicon-font:before { - content: "\e047"; -} -.glyphicon-bold:before { - content: "\e048"; -} -.glyphicon-italic:before { - content: "\e049"; -} -.glyphicon-text-height:before { - content: "\e050"; -} -.glyphicon-text-width:before { - content: "\e051"; -} -.glyphicon-align-left:before { - content: "\e052"; -} -.glyphicon-align-center:before { - content: "\e053"; -} -.glyphicon-align-right:before { - content: "\e054"; -} -.glyphicon-align-justify:before { - content: "\e055"; -} -.glyphicon-list:before { - content: "\e056"; -} -.glyphicon-indent-left:before { - content: "\e057"; -} -.glyphicon-indent-right:before { - content: "\e058"; -} -.glyphicon-facetime-video:before { - content: "\e059"; -} -.glyphicon-picture:before { - content: "\e060"; -} -.glyphicon-map-marker:before { - content: "\e062"; -} -.glyphicon-adjust:before { - content: "\e063"; -} -.glyphicon-tint:before { - content: "\e064"; -} -.glyphicon-edit:before { - content: "\e065"; -} -.glyphicon-share:before { - content: "\e066"; -} -.glyphicon-check:before { - content: "\e067"; -} -.glyphicon-move:before { - content: "\e068"; -} -.glyphicon-step-backward:before { - content: "\e069"; -} -.glyphicon-fast-backward:before { - content: "\e070"; -} -.glyphicon-backward:before { - content: "\e071"; -} -.glyphicon-play:before { - content: "\e072"; -} -.glyphicon-pause:before { - content: "\e073"; -} -.glyphicon-stop:before { - content: "\e074"; -} -.glyphicon-forward:before { - content: "\e075"; -} -.glyphicon-fast-forward:before { - content: "\e076"; -} -.glyphicon-step-forward:before { - content: "\e077"; -} -.glyphicon-eject:before { - content: "\e078"; -} -.glyphicon-chevron-left:before { - content: "\e079"; -} -.glyphicon-chevron-right:before { - content: "\e080"; -} -.glyphicon-plus-sign:before { - content: "\e081"; -} -.glyphicon-minus-sign:before { - content: "\e082"; -} -.glyphicon-remove-sign:before { - content: "\e083"; -} -.glyphicon-ok-sign:before { - content: "\e084"; -} -.glyphicon-question-sign:before { - content: "\e085"; -} -.glyphicon-info-sign:before { - content: "\e086"; -} -.glyphicon-screenshot:before { - content: 
"\e087"; -} -.glyphicon-remove-circle:before { - content: "\e088"; -} -.glyphicon-ok-circle:before { - content: "\e089"; -} -.glyphicon-ban-circle:before { - content: "\e090"; -} -.glyphicon-arrow-left:before { - content: "\e091"; -} -.glyphicon-arrow-right:before { - content: "\e092"; -} -.glyphicon-arrow-up:before { - content: "\e093"; -} -.glyphicon-arrow-down:before { - content: "\e094"; -} -.glyphicon-share-alt:before { - content: "\e095"; -} -.glyphicon-resize-full:before { - content: "\e096"; -} -.glyphicon-resize-small:before { - content: "\e097"; -} -.glyphicon-exclamation-sign:before { - content: "\e101"; -} -.glyphicon-gift:before { - content: "\e102"; -} -.glyphicon-leaf:before { - content: "\e103"; -} -.glyphicon-fire:before { - content: "\e104"; -} -.glyphicon-eye-open:before { - content: "\e105"; -} -.glyphicon-eye-close:before { - content: "\e106"; -} -.glyphicon-warning-sign:before { - content: "\e107"; -} -.glyphicon-plane:before { - content: "\e108"; -} -.glyphicon-calendar:before { - content: "\e109"; -} -.glyphicon-random:before { - content: "\e110"; -} -.glyphicon-comment:before { - content: "\e111"; -} -.glyphicon-magnet:before { - content: "\e112"; -} -.glyphicon-chevron-up:before { - content: "\e113"; -} -.glyphicon-chevron-down:before { - content: "\e114"; -} -.glyphicon-retweet:before { - content: "\e115"; -} -.glyphicon-shopping-cart:before { - content: "\e116"; -} -.glyphicon-folder-close:before { - content: "\e117"; -} -.glyphicon-folder-open:before { - content: "\e118"; -} -.glyphicon-resize-vertical:before { - content: "\e119"; -} -.glyphicon-resize-horizontal:before { - content: "\e120"; -} -.glyphicon-hdd:before { - content: "\e121"; -} -.glyphicon-bullhorn:before { - content: "\e122"; -} -.glyphicon-bell:before { - content: "\e123"; -} -.glyphicon-certificate:before { - content: "\e124"; -} -.glyphicon-thumbs-up:before { - content: "\e125"; -} -.glyphicon-thumbs-down:before { - content: "\e126"; -} -.glyphicon-hand-right:before { - content: "\e127"; -} -.glyphicon-hand-left:before { - content: "\e128"; -} -.glyphicon-hand-up:before { - content: "\e129"; -} -.glyphicon-hand-down:before { - content: "\e130"; -} -.glyphicon-circle-arrow-right:before { - content: "\e131"; -} -.glyphicon-circle-arrow-left:before { - content: "\e132"; -} -.glyphicon-circle-arrow-up:before { - content: "\e133"; -} -.glyphicon-circle-arrow-down:before { - content: "\e134"; -} -.glyphicon-globe:before { - content: "\e135"; -} -.glyphicon-wrench:before { - content: "\e136"; -} -.glyphicon-tasks:before { - content: "\e137"; -} -.glyphicon-filter:before { - content: "\e138"; -} -.glyphicon-briefcase:before { - content: "\e139"; -} -.glyphicon-fullscreen:before { - content: "\e140"; -} -.glyphicon-dashboard:before { - content: "\e141"; -} -.glyphicon-paperclip:before { - content: "\e142"; -} -.glyphicon-heart-empty:before { - content: "\e143"; -} -.glyphicon-link:before { - content: "\e144"; -} -.glyphicon-phone:before { - content: "\e145"; -} -.glyphicon-pushpin:before { - content: "\e146"; -} -.glyphicon-usd:before { - content: "\e148"; -} -.glyphicon-gbp:before { - content: "\e149"; -} -.glyphicon-sort:before { - content: "\e150"; -} -.glyphicon-sort-by-alphabet:before { - content: "\e151"; -} -.glyphicon-sort-by-alphabet-alt:before { - content: "\e152"; -} -.glyphicon-sort-by-order:before { - content: "\e153"; -} -.glyphicon-sort-by-order-alt:before { - content: "\e154"; -} -.glyphicon-sort-by-attributes:before { - content: "\e155"; -} -.glyphicon-sort-by-attributes-alt:before { - 
content: "\e156"; -} -.glyphicon-unchecked:before { - content: "\e157"; -} -.glyphicon-expand:before { - content: "\e158"; -} -.glyphicon-collapse-down:before { - content: "\e159"; -} -.glyphicon-collapse-up:before { - content: "\e160"; -} -.glyphicon-log-in:before { - content: "\e161"; -} -.glyphicon-flash:before { - content: "\e162"; -} -.glyphicon-log-out:before { - content: "\e163"; -} -.glyphicon-new-window:before { - content: "\e164"; -} -.glyphicon-record:before { - content: "\e165"; -} -.glyphicon-save:before { - content: "\e166"; -} -.glyphicon-open:before { - content: "\e167"; -} -.glyphicon-saved:before { - content: "\e168"; -} -.glyphicon-import:before { - content: "\e169"; -} -.glyphicon-export:before { - content: "\e170"; -} -.glyphicon-send:before { - content: "\e171"; -} -.glyphicon-floppy-disk:before { - content: "\e172"; -} -.glyphicon-floppy-saved:before { - content: "\e173"; -} -.glyphicon-floppy-remove:before { - content: "\e174"; -} -.glyphicon-floppy-save:before { - content: "\e175"; -} -.glyphicon-floppy-open:before { - content: "\e176"; -} -.glyphicon-credit-card:before { - content: "\e177"; -} -.glyphicon-transfer:before { - content: "\e178"; -} -.glyphicon-cutlery:before { - content: "\e179"; -} -.glyphicon-header:before { - content: "\e180"; -} -.glyphicon-compressed:before { - content: "\e181"; -} -.glyphicon-earphone:before { - content: "\e182"; -} -.glyphicon-phone-alt:before { - content: "\e183"; -} -.glyphicon-tower:before { - content: "\e184"; -} -.glyphicon-stats:before { - content: "\e185"; -} -.glyphicon-sd-video:before { - content: "\e186"; -} -.glyphicon-hd-video:before { - content: "\e187"; -} -.glyphicon-subtitles:before { - content: "\e188"; -} -.glyphicon-sound-stereo:before { - content: "\e189"; -} -.glyphicon-sound-dolby:before { - content: "\e190"; -} -.glyphicon-sound-5-1:before { - content: "\e191"; -} -.glyphicon-sound-6-1:before { - content: "\e192"; -} -.glyphicon-sound-7-1:before { - content: "\e193"; -} -.glyphicon-copyright-mark:before { - content: "\e194"; -} -.glyphicon-registration-mark:before { - content: "\e195"; -} -.glyphicon-cloud-download:before { - content: "\e197"; -} -.glyphicon-cloud-upload:before { - content: "\e198"; -} -.glyphicon-tree-conifer:before { - content: "\e199"; -} -.glyphicon-tree-deciduous:before { - content: "\e200"; -} -.glyphicon-cd:before { - content: "\e201"; -} -.glyphicon-save-file:before { - content: "\e202"; -} -.glyphicon-open-file:before { - content: "\e203"; -} -.glyphicon-level-up:before { - content: "\e204"; -} -.glyphicon-copy:before { - content: "\e205"; -} -.glyphicon-paste:before { - content: "\e206"; -} -.glyphicon-alert:before { - content: "\e209"; -} -.glyphicon-equalizer:before { - content: "\e210"; -} -.glyphicon-king:before { - content: "\e211"; -} -.glyphicon-queen:before { - content: "\e212"; -} -.glyphicon-pawn:before { - content: "\e213"; -} -.glyphicon-bishop:before { - content: "\e214"; -} -.glyphicon-knight:before { - content: "\e215"; -} -.glyphicon-baby-formula:before { - content: "\e216"; -} -.glyphicon-tent:before { - content: "\26fa"; -} -.glyphicon-blackboard:before { - content: "\e218"; -} -.glyphicon-bed:before { - content: "\e219"; -} -.glyphicon-apple:before { - content: "\f8ff"; -} -.glyphicon-erase:before { - content: "\e221"; -} -.glyphicon-hourglass:before { - content: "\231b"; -} -.glyphicon-lamp:before { - content: "\e223"; -} -.glyphicon-duplicate:before { - content: "\e224"; -} -.glyphicon-piggy-bank:before { - content: "\e225"; -} -.glyphicon-scissors:before { 
- content: "\e226"; -} -.glyphicon-bitcoin:before { - content: "\e227"; -} -.glyphicon-btc:before { - content: "\e227"; -} -.glyphicon-xbt:before { - content: "\e227"; -} -.glyphicon-yen:before { - content: "\00a5"; -} -.glyphicon-jpy:before { - content: "\00a5"; -} -.glyphicon-ruble:before { - content: "\20bd"; -} -.glyphicon-rub:before { - content: "\20bd"; -} -.glyphicon-scale:before { - content: "\e230"; -} -.glyphicon-ice-lolly:before { - content: "\e231"; -} -.glyphicon-ice-lolly-tasted:before { - content: "\e232"; -} -.glyphicon-education:before { - content: "\e233"; -} -.glyphicon-option-horizontal:before { - content: "\e234"; -} -.glyphicon-option-vertical:before { - content: "\e235"; -} -.glyphicon-menu-hamburger:before { - content: "\e236"; -} -.glyphicon-modal-window:before { - content: "\e237"; -} -.glyphicon-oil:before { - content: "\e238"; -} -.glyphicon-grain:before { - content: "\e239"; -} -.glyphicon-sunglasses:before { - content: "\e240"; -} -.glyphicon-text-size:before { - content: "\e241"; -} -.glyphicon-text-color:before { - content: "\e242"; -} -.glyphicon-text-background:before { - content: "\e243"; -} -.glyphicon-object-align-top:before { - content: "\e244"; -} -.glyphicon-object-align-bottom:before { - content: "\e245"; -} -.glyphicon-object-align-horizontal:before { - content: "\e246"; -} -.glyphicon-object-align-left:before { - content: "\e247"; -} -.glyphicon-object-align-vertical:before { - content: "\e248"; -} -.glyphicon-object-align-right:before { - content: "\e249"; -} -.glyphicon-triangle-right:before { - content: "\e250"; -} -.glyphicon-triangle-left:before { - content: "\e251"; -} -.glyphicon-triangle-bottom:before { - content: "\e252"; -} -.glyphicon-triangle-top:before { - content: "\e253"; -} -.glyphicon-console:before { - content: "\e254"; -} -.glyphicon-superscript:before { - content: "\e255"; -} -.glyphicon-subscript:before { - content: "\e256"; -} -.glyphicon-menu-left:before { - content: "\e257"; -} -.glyphicon-menu-right:before { - content: "\e258"; -} -.glyphicon-menu-down:before { - content: "\e259"; -} -.glyphicon-menu-up:before { - content: "\e260"; -} -* { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -*:before, -*:after { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -html { - font-size: 10px; - -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -} -body { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 14px; - line-height: 1.42857143; - color: #333333; - background-color: #fff; -} -input, -button, -select, -textarea { - font-family: inherit; - font-size: inherit; - line-height: inherit; -} -a { - color: #337ab7; - text-decoration: none; -} -a:hover, -a:focus { - color: #23527c; - text-decoration: underline; -} -a:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -figure { - margin: 0; -} -img { - vertical-align: middle; -} -.img-responsive, -.thumbnail > img, -.thumbnail a > img, -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - display: block; - max-width: 100%; - height: auto; -} -.img-rounded { - border-radius: 6px; -} -.img-thumbnail { - padding: 4px; - line-height: 1.42857143; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 4px; - -webkit-transition: all 0.2s ease-in-out; - -o-transition: all 0.2s ease-in-out; - transition: all 0.2s ease-in-out; - display: inline-block; - max-width: 100%; - height: auto; -} -.img-circle { - border-radius: 50%; 
-} -hr { - margin-top: 20px; - margin-bottom: 20px; - border: 0; - border-top: 1px solid #eeeeee; -} -.sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - border: 0; -} -.sr-only-focusable:active, -.sr-only-focusable:focus { - position: static; - width: auto; - height: auto; - margin: 0; - overflow: visible; - clip: auto; -} -[role="button"] { - cursor: pointer; -} -h1, -h2, -h3, -h4, -h5, -h6, -.h1, -.h2, -.h3, -.h4, -.h5, -.h6 { - font-family: inherit; - font-weight: 500; - line-height: 1.1; - color: inherit; -} -h1 small, -h2 small, -h3 small, -h4 small, -h5 small, -h6 small, -.h1 small, -.h2 small, -.h3 small, -.h4 small, -.h5 small, -.h6 small, -h1 .small, -h2 .small, -h3 .small, -h4 .small, -h5 .small, -h6 .small, -.h1 .small, -.h2 .small, -.h3 .small, -.h4 .small, -.h5 .small, -.h6 .small { - font-weight: 400; - line-height: 1; - color: #777777; -} -h1, -.h1, -h2, -.h2, -h3, -.h3 { - margin-top: 20px; - margin-bottom: 10px; -} -h1 small, -.h1 small, -h2 small, -.h2 small, -h3 small, -.h3 small, -h1 .small, -.h1 .small, -h2 .small, -.h2 .small, -h3 .small, -.h3 .small { - font-size: 65%; -} -h4, -.h4, -h5, -.h5, -h6, -.h6 { - margin-top: 10px; - margin-bottom: 10px; -} -h4 small, -.h4 small, -h5 small, -.h5 small, -h6 small, -.h6 small, -h4 .small, -.h4 .small, -h5 .small, -.h5 .small, -h6 .small, -.h6 .small { - font-size: 75%; -} -h1, -.h1 { - font-size: 36px; -} -h2, -.h2 { - font-size: 30px; -} -h3, -.h3 { - font-size: 24px; -} -h4, -.h4 { - font-size: 18px; -} -h5, -.h5 { - font-size: 14px; -} -h6, -.h6 { - font-size: 12px; -} -p { - margin: 0 0 10px; -} -.lead { - margin-bottom: 20px; - font-size: 16px; - font-weight: 300; - line-height: 1.4; -} -@media (min-width: 768px) { - .lead { - font-size: 21px; - } -} -small, -.small { - font-size: 85%; -} -mark, -.mark { - padding: 0.2em; - background-color: #fcf8e3; -} -.text-left { - text-align: left; -} -.text-right { - text-align: right; -} -.text-center { - text-align: center; -} -.text-justify { - text-align: justify; -} -.text-nowrap { - white-space: nowrap; -} -.text-lowercase { - text-transform: lowercase; -} -.text-uppercase { - text-transform: uppercase; -} -.text-capitalize { - text-transform: capitalize; -} -.text-muted { - color: #777777; -} -.text-primary { - color: #337ab7; -} -a.text-primary:hover, -a.text-primary:focus { - color: #286090; -} -.text-success { - color: #3c763d; -} -a.text-success:hover, -a.text-success:focus { - color: #2b542c; -} -.text-info { - color: #31708f; -} -a.text-info:hover, -a.text-info:focus { - color: #245269; -} -.text-warning { - color: #8a6d3b; -} -a.text-warning:hover, -a.text-warning:focus { - color: #66512c; -} -.text-danger { - color: #a94442; -} -a.text-danger:hover, -a.text-danger:focus { - color: #843534; -} -.bg-primary { - color: #fff; - background-color: #337ab7; -} -a.bg-primary:hover, -a.bg-primary:focus { - background-color: #286090; -} -.bg-success { - background-color: #dff0d8; -} -a.bg-success:hover, -a.bg-success:focus { - background-color: #c1e2b3; -} -.bg-info { - background-color: #d9edf7; -} -a.bg-info:hover, -a.bg-info:focus { - background-color: #afd9ee; -} -.bg-warning { - background-color: #fcf8e3; -} -a.bg-warning:hover, -a.bg-warning:focus { - background-color: #f7ecb5; -} -.bg-danger { - background-color: #f2dede; -} -a.bg-danger:hover, -a.bg-danger:focus { - background-color: #e4b9b9; -} -.page-header { - padding-bottom: 9px; - margin: 40px 0 20px; - border-bottom: 1px 
solid #eeeeee; -} -ul, -ol { - margin-top: 0; - margin-bottom: 10px; -} -ul ul, -ol ul, -ul ol, -ol ol { - margin-bottom: 0; -} -.list-unstyled { - padding-left: 0; - list-style: none; -} -.list-inline { - padding-left: 0; - list-style: none; - margin-left: -5px; -} -.list-inline > li { - display: inline-block; - padding-right: 5px; - padding-left: 5px; -} -dl { - margin-top: 0; - margin-bottom: 20px; -} -dt, -dd { - line-height: 1.42857143; -} -dt { - font-weight: 700; -} -dd { - margin-left: 0; -} -@media (min-width: 768px) { - .dl-horizontal dt { - float: left; - width: 160px; - clear: left; - text-align: right; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - } - .dl-horizontal dd { - margin-left: 180px; - } -} -abbr[title], -abbr[data-original-title] { - cursor: help; -} -.initialism { - font-size: 90%; - text-transform: uppercase; -} -blockquote { - padding: 10px 20px; - margin: 0 0 20px; - font-size: 17.5px; - border-left: 5px solid #eeeeee; -} -blockquote p:last-child, -blockquote ul:last-child, -blockquote ol:last-child { - margin-bottom: 0; -} -blockquote footer, -blockquote small, -blockquote .small { - display: block; - font-size: 80%; - line-height: 1.42857143; - color: #777777; -} -blockquote footer:before, -blockquote small:before, -blockquote .small:before { - content: "\2014 \00A0"; -} -.blockquote-reverse, -blockquote.pull-right { - padding-right: 15px; - padding-left: 0; - text-align: right; - border-right: 5px solid #eeeeee; - border-left: 0; -} -.blockquote-reverse footer:before, -blockquote.pull-right footer:before, -.blockquote-reverse small:before, -blockquote.pull-right small:before, -.blockquote-reverse .small:before, -blockquote.pull-right .small:before { - content: ""; -} -.blockquote-reverse footer:after, -blockquote.pull-right footer:after, -.blockquote-reverse small:after, -blockquote.pull-right small:after, -.blockquote-reverse .small:after, -blockquote.pull-right .small:after { - content: "\00A0 \2014"; -} -address { - margin-bottom: 20px; - font-style: normal; - line-height: 1.42857143; -} -code, -kbd, -pre, -samp { - font-family: Menlo, Monaco, Consolas, "Courier New", monospace; -} -code { - padding: 2px 4px; - font-size: 90%; - color: #c7254e; - background-color: #f9f2f4; - border-radius: 4px; -} -kbd { - padding: 2px 4px; - font-size: 90%; - color: #fff; - background-color: #333; - border-radius: 3px; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25); -} -kbd kbd { - padding: 0; - font-size: 100%; - font-weight: 700; - -webkit-box-shadow: none; - box-shadow: none; -} -pre { - display: block; - padding: 9.5px; - margin: 0 0 10px; - font-size: 13px; - line-height: 1.42857143; - color: #333333; - word-break: break-all; - word-wrap: break-word; - background-color: #f5f5f5; - border: 1px solid #ccc; - border-radius: 4px; -} -pre code { - padding: 0; - font-size: inherit; - color: inherit; - white-space: pre-wrap; - background-color: transparent; - border-radius: 0; -} -.pre-scrollable { - max-height: 340px; - overflow-y: scroll; -} -.container { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} -@media (min-width: 768px) { - .container { - width: 750px; - } -} -@media (min-width: 992px) { - .container { - width: 970px; - } -} -@media (min-width: 1200px) { - .container { - width: 1170px; - } -} -.container-fluid { - padding-right: 15px; - padding-left: 15px; - margin-right: auto; - margin-left: auto; -} -.row { - margin-right: -15px; - 
margin-left: -15px; -} -.row-no-gutters { - margin-right: 0; - margin-left: 0; -} -.row-no-gutters [class*="col-"] { - padding-right: 0; - padding-left: 0; -} -.col-xs-1, -.col-sm-1, -.col-md-1, -.col-lg-1, -.col-xs-2, -.col-sm-2, -.col-md-2, -.col-lg-2, -.col-xs-3, -.col-sm-3, -.col-md-3, -.col-lg-3, -.col-xs-4, -.col-sm-4, -.col-md-4, -.col-lg-4, -.col-xs-5, -.col-sm-5, -.col-md-5, -.col-lg-5, -.col-xs-6, -.col-sm-6, -.col-md-6, -.col-lg-6, -.col-xs-7, -.col-sm-7, -.col-md-7, -.col-lg-7, -.col-xs-8, -.col-sm-8, -.col-md-8, -.col-lg-8, -.col-xs-9, -.col-sm-9, -.col-md-9, -.col-lg-9, -.col-xs-10, -.col-sm-10, -.col-md-10, -.col-lg-10, -.col-xs-11, -.col-sm-11, -.col-md-11, -.col-lg-11, -.col-xs-12, -.col-sm-12, -.col-md-12, -.col-lg-12 { - position: relative; - min-height: 1px; - padding-right: 15px; - padding-left: 15px; -} -.col-xs-1, -.col-xs-2, -.col-xs-3, -.col-xs-4, -.col-xs-5, -.col-xs-6, -.col-xs-7, -.col-xs-8, -.col-xs-9, -.col-xs-10, -.col-xs-11, -.col-xs-12 { - float: left; -} -.col-xs-12 { - width: 100%; -} -.col-xs-11 { - width: 91.66666667%; -} -.col-xs-10 { - width: 83.33333333%; -} -.col-xs-9 { - width: 75%; -} -.col-xs-8 { - width: 66.66666667%; -} -.col-xs-7 { - width: 58.33333333%; -} -.col-xs-6 { - width: 50%; -} -.col-xs-5 { - width: 41.66666667%; -} -.col-xs-4 { - width: 33.33333333%; -} -.col-xs-3 { - width: 25%; -} -.col-xs-2 { - width: 16.66666667%; -} -.col-xs-1 { - width: 8.33333333%; -} -.col-xs-pull-12 { - right: 100%; -} -.col-xs-pull-11 { - right: 91.66666667%; -} -.col-xs-pull-10 { - right: 83.33333333%; -} -.col-xs-pull-9 { - right: 75%; -} -.col-xs-pull-8 { - right: 66.66666667%; -} -.col-xs-pull-7 { - right: 58.33333333%; -} -.col-xs-pull-6 { - right: 50%; -} -.col-xs-pull-5 { - right: 41.66666667%; -} -.col-xs-pull-4 { - right: 33.33333333%; -} -.col-xs-pull-3 { - right: 25%; -} -.col-xs-pull-2 { - right: 16.66666667%; -} -.col-xs-pull-1 { - right: 8.33333333%; -} -.col-xs-pull-0 { - right: auto; -} -.col-xs-push-12 { - left: 100%; -} -.col-xs-push-11 { - left: 91.66666667%; -} -.col-xs-push-10 { - left: 83.33333333%; -} -.col-xs-push-9 { - left: 75%; -} -.col-xs-push-8 { - left: 66.66666667%; -} -.col-xs-push-7 { - left: 58.33333333%; -} -.col-xs-push-6 { - left: 50%; -} -.col-xs-push-5 { - left: 41.66666667%; -} -.col-xs-push-4 { - left: 33.33333333%; -} -.col-xs-push-3 { - left: 25%; -} -.col-xs-push-2 { - left: 16.66666667%; -} -.col-xs-push-1 { - left: 8.33333333%; -} -.col-xs-push-0 { - left: auto; -} -.col-xs-offset-12 { - margin-left: 100%; -} -.col-xs-offset-11 { - margin-left: 91.66666667%; -} -.col-xs-offset-10 { - margin-left: 83.33333333%; -} -.col-xs-offset-9 { - margin-left: 75%; -} -.col-xs-offset-8 { - margin-left: 66.66666667%; -} -.col-xs-offset-7 { - margin-left: 58.33333333%; -} -.col-xs-offset-6 { - margin-left: 50%; -} -.col-xs-offset-5 { - margin-left: 41.66666667%; -} -.col-xs-offset-4 { - margin-left: 33.33333333%; -} -.col-xs-offset-3 { - margin-left: 25%; -} -.col-xs-offset-2 { - margin-left: 16.66666667%; -} -.col-xs-offset-1 { - margin-left: 8.33333333%; -} -.col-xs-offset-0 { - margin-left: 0%; -} -@media (min-width: 768px) { - .col-sm-1, - .col-sm-2, - .col-sm-3, - .col-sm-4, - .col-sm-5, - .col-sm-6, - .col-sm-7, - .col-sm-8, - .col-sm-9, - .col-sm-10, - .col-sm-11, - .col-sm-12 { - float: left; - } - .col-sm-12 { - width: 100%; - } - .col-sm-11 { - width: 91.66666667%; - } - .col-sm-10 { - width: 83.33333333%; - } - .col-sm-9 { - width: 75%; - } - .col-sm-8 { - width: 66.66666667%; - } - .col-sm-7 { - width: 
58.33333333%; - } - .col-sm-6 { - width: 50%; - } - .col-sm-5 { - width: 41.66666667%; - } - .col-sm-4 { - width: 33.33333333%; - } - .col-sm-3 { - width: 25%; - } - .col-sm-2 { - width: 16.66666667%; - } - .col-sm-1 { - width: 8.33333333%; - } - .col-sm-pull-12 { - right: 100%; - } - .col-sm-pull-11 { - right: 91.66666667%; - } - .col-sm-pull-10 { - right: 83.33333333%; - } - .col-sm-pull-9 { - right: 75%; - } - .col-sm-pull-8 { - right: 66.66666667%; - } - .col-sm-pull-7 { - right: 58.33333333%; - } - .col-sm-pull-6 { - right: 50%; - } - .col-sm-pull-5 { - right: 41.66666667%; - } - .col-sm-pull-4 { - right: 33.33333333%; - } - .col-sm-pull-3 { - right: 25%; - } - .col-sm-pull-2 { - right: 16.66666667%; - } - .col-sm-pull-1 { - right: 8.33333333%; - } - .col-sm-pull-0 { - right: auto; - } - .col-sm-push-12 { - left: 100%; - } - .col-sm-push-11 { - left: 91.66666667%; - } - .col-sm-push-10 { - left: 83.33333333%; - } - .col-sm-push-9 { - left: 75%; - } - .col-sm-push-8 { - left: 66.66666667%; - } - .col-sm-push-7 { - left: 58.33333333%; - } - .col-sm-push-6 { - left: 50%; - } - .col-sm-push-5 { - left: 41.66666667%; - } - .col-sm-push-4 { - left: 33.33333333%; - } - .col-sm-push-3 { - left: 25%; - } - .col-sm-push-2 { - left: 16.66666667%; - } - .col-sm-push-1 { - left: 8.33333333%; - } - .col-sm-push-0 { - left: auto; - } - .col-sm-offset-12 { - margin-left: 100%; - } - .col-sm-offset-11 { - margin-left: 91.66666667%; - } - .col-sm-offset-10 { - margin-left: 83.33333333%; - } - .col-sm-offset-9 { - margin-left: 75%; - } - .col-sm-offset-8 { - margin-left: 66.66666667%; - } - .col-sm-offset-7 { - margin-left: 58.33333333%; - } - .col-sm-offset-6 { - margin-left: 50%; - } - .col-sm-offset-5 { - margin-left: 41.66666667%; - } - .col-sm-offset-4 { - margin-left: 33.33333333%; - } - .col-sm-offset-3 { - margin-left: 25%; - } - .col-sm-offset-2 { - margin-left: 16.66666667%; - } - .col-sm-offset-1 { - margin-left: 8.33333333%; - } - .col-sm-offset-0 { - margin-left: 0%; - } -} -@media (min-width: 992px) { - .col-md-1, - .col-md-2, - .col-md-3, - .col-md-4, - .col-md-5, - .col-md-6, - .col-md-7, - .col-md-8, - .col-md-9, - .col-md-10, - .col-md-11, - .col-md-12 { - float: left; - } - .col-md-12 { - width: 100%; - } - .col-md-11 { - width: 91.66666667%; - } - .col-md-10 { - width: 83.33333333%; - } - .col-md-9 { - width: 75%; - } - .col-md-8 { - width: 66.66666667%; - } - .col-md-7 { - width: 58.33333333%; - } - .col-md-6 { - width: 50%; - } - .col-md-5 { - width: 41.66666667%; - } - .col-md-4 { - width: 33.33333333%; - } - .col-md-3 { - width: 25%; - } - .col-md-2 { - width: 16.66666667%; - } - .col-md-1 { - width: 8.33333333%; - } - .col-md-pull-12 { - right: 100%; - } - .col-md-pull-11 { - right: 91.66666667%; - } - .col-md-pull-10 { - right: 83.33333333%; - } - .col-md-pull-9 { - right: 75%; - } - .col-md-pull-8 { - right: 66.66666667%; - } - .col-md-pull-7 { - right: 58.33333333%; - } - .col-md-pull-6 { - right: 50%; - } - .col-md-pull-5 { - right: 41.66666667%; - } - .col-md-pull-4 { - right: 33.33333333%; - } - .col-md-pull-3 { - right: 25%; - } - .col-md-pull-2 { - right: 16.66666667%; - } - .col-md-pull-1 { - right: 8.33333333%; - } - .col-md-pull-0 { - right: auto; - } - .col-md-push-12 { - left: 100%; - } - .col-md-push-11 { - left: 91.66666667%; - } - .col-md-push-10 { - left: 83.33333333%; - } - .col-md-push-9 { - left: 75%; - } - .col-md-push-8 { - left: 66.66666667%; - } - .col-md-push-7 { - left: 58.33333333%; - } - .col-md-push-6 { - left: 50%; - } - .col-md-push-5 { - left: 
41.66666667%; - } - .col-md-push-4 { - left: 33.33333333%; - } - .col-md-push-3 { - left: 25%; - } - .col-md-push-2 { - left: 16.66666667%; - } - .col-md-push-1 { - left: 8.33333333%; - } - .col-md-push-0 { - left: auto; - } - .col-md-offset-12 { - margin-left: 100%; - } - .col-md-offset-11 { - margin-left: 91.66666667%; - } - .col-md-offset-10 { - margin-left: 83.33333333%; - } - .col-md-offset-9 { - margin-left: 75%; - } - .col-md-offset-8 { - margin-left: 66.66666667%; - } - .col-md-offset-7 { - margin-left: 58.33333333%; - } - .col-md-offset-6 { - margin-left: 50%; - } - .col-md-offset-5 { - margin-left: 41.66666667%; - } - .col-md-offset-4 { - margin-left: 33.33333333%; - } - .col-md-offset-3 { - margin-left: 25%; - } - .col-md-offset-2 { - margin-left: 16.66666667%; - } - .col-md-offset-1 { - margin-left: 8.33333333%; - } - .col-md-offset-0 { - margin-left: 0%; - } -} -@media (min-width: 1200px) { - .col-lg-1, - .col-lg-2, - .col-lg-3, - .col-lg-4, - .col-lg-5, - .col-lg-6, - .col-lg-7, - .col-lg-8, - .col-lg-9, - .col-lg-10, - .col-lg-11, - .col-lg-12 { - float: left; - } - .col-lg-12 { - width: 100%; - } - .col-lg-11 { - width: 91.66666667%; - } - .col-lg-10 { - width: 83.33333333%; - } - .col-lg-9 { - width: 75%; - } - .col-lg-8 { - width: 66.66666667%; - } - .col-lg-7 { - width: 58.33333333%; - } - .col-lg-6 { - width: 50%; - } - .col-lg-5 { - width: 41.66666667%; - } - .col-lg-4 { - width: 33.33333333%; - } - .col-lg-3 { - width: 25%; - } - .col-lg-2 { - width: 16.66666667%; - } - .col-lg-1 { - width: 8.33333333%; - } - .col-lg-pull-12 { - right: 100%; - } - .col-lg-pull-11 { - right: 91.66666667%; - } - .col-lg-pull-10 { - right: 83.33333333%; - } - .col-lg-pull-9 { - right: 75%; - } - .col-lg-pull-8 { - right: 66.66666667%; - } - .col-lg-pull-7 { - right: 58.33333333%; - } - .col-lg-pull-6 { - right: 50%; - } - .col-lg-pull-5 { - right: 41.66666667%; - } - .col-lg-pull-4 { - right: 33.33333333%; - } - .col-lg-pull-3 { - right: 25%; - } - .col-lg-pull-2 { - right: 16.66666667%; - } - .col-lg-pull-1 { - right: 8.33333333%; - } - .col-lg-pull-0 { - right: auto; - } - .col-lg-push-12 { - left: 100%; - } - .col-lg-push-11 { - left: 91.66666667%; - } - .col-lg-push-10 { - left: 83.33333333%; - } - .col-lg-push-9 { - left: 75%; - } - .col-lg-push-8 { - left: 66.66666667%; - } - .col-lg-push-7 { - left: 58.33333333%; - } - .col-lg-push-6 { - left: 50%; - } - .col-lg-push-5 { - left: 41.66666667%; - } - .col-lg-push-4 { - left: 33.33333333%; - } - .col-lg-push-3 { - left: 25%; - } - .col-lg-push-2 { - left: 16.66666667%; - } - .col-lg-push-1 { - left: 8.33333333%; - } - .col-lg-push-0 { - left: auto; - } - .col-lg-offset-12 { - margin-left: 100%; - } - .col-lg-offset-11 { - margin-left: 91.66666667%; - } - .col-lg-offset-10 { - margin-left: 83.33333333%; - } - .col-lg-offset-9 { - margin-left: 75%; - } - .col-lg-offset-8 { - margin-left: 66.66666667%; - } - .col-lg-offset-7 { - margin-left: 58.33333333%; - } - .col-lg-offset-6 { - margin-left: 50%; - } - .col-lg-offset-5 { - margin-left: 41.66666667%; - } - .col-lg-offset-4 { - margin-left: 33.33333333%; - } - .col-lg-offset-3 { - margin-left: 25%; - } - .col-lg-offset-2 { - margin-left: 16.66666667%; - } - .col-lg-offset-1 { - margin-left: 8.33333333%; - } - .col-lg-offset-0 { - margin-left: 0%; - } -} -table { - background-color: transparent; -} -table col[class*="col-"] { - position: static; - display: table-column; - float: none; -} -table td[class*="col-"], -table th[class*="col-"] { - position: static; - display: table-cell; - 
float: none; -} -caption { - padding-top: 8px; - padding-bottom: 8px; - color: #777777; - text-align: left; -} -th { - text-align: left; -} -.table { - width: 100%; - max-width: 100%; - margin-bottom: 20px; -} -.table > thead > tr > th, -.table > tbody > tr > th, -.table > tfoot > tr > th, -.table > thead > tr > td, -.table > tbody > tr > td, -.table > tfoot > tr > td { - padding: 8px; - line-height: 1.42857143; - vertical-align: top; - border-top: 1px solid #ddd; -} -.table > thead > tr > th { - vertical-align: bottom; - border-bottom: 2px solid #ddd; -} -.table > caption + thead > tr:first-child > th, -.table > colgroup + thead > tr:first-child > th, -.table > thead:first-child > tr:first-child > th, -.table > caption + thead > tr:first-child > td, -.table > colgroup + thead > tr:first-child > td, -.table > thead:first-child > tr:first-child > td { - border-top: 0; -} -.table > tbody + tbody { - border-top: 2px solid #ddd; -} -.table .table { - background-color: #fff; -} -.table-condensed > thead > tr > th, -.table-condensed > tbody > tr > th, -.table-condensed > tfoot > tr > th, -.table-condensed > thead > tr > td, -.table-condensed > tbody > tr > td, -.table-condensed > tfoot > tr > td { - padding: 5px; -} -.table-bordered { - border: 1px solid #ddd; -} -.table-bordered > thead > tr > th, -.table-bordered > tbody > tr > th, -.table-bordered > tfoot > tr > th, -.table-bordered > thead > tr > td, -.table-bordered > tbody > tr > td, -.table-bordered > tfoot > tr > td { - border: 1px solid #ddd; -} -.table-bordered > thead > tr > th, -.table-bordered > thead > tr > td { - border-bottom-width: 2px; -} -.table-striped > tbody > tr:nth-of-type(odd) { - background-color: #f9f9f9; -} -.table-hover > tbody > tr:hover { - background-color: #f5f5f5; -} -.table > thead > tr > td.active, -.table > tbody > tr > td.active, -.table > tfoot > tr > td.active, -.table > thead > tr > th.active, -.table > tbody > tr > th.active, -.table > tfoot > tr > th.active, -.table > thead > tr.active > td, -.table > tbody > tr.active > td, -.table > tfoot > tr.active > td, -.table > thead > tr.active > th, -.table > tbody > tr.active > th, -.table > tfoot > tr.active > th { - background-color: #f5f5f5; -} -.table-hover > tbody > tr > td.active:hover, -.table-hover > tbody > tr > th.active:hover, -.table-hover > tbody > tr.active:hover > td, -.table-hover > tbody > tr:hover > .active, -.table-hover > tbody > tr.active:hover > th { - background-color: #e8e8e8; -} -.table > thead > tr > td.success, -.table > tbody > tr > td.success, -.table > tfoot > tr > td.success, -.table > thead > tr > th.success, -.table > tbody > tr > th.success, -.table > tfoot > tr > th.success, -.table > thead > tr.success > td, -.table > tbody > tr.success > td, -.table > tfoot > tr.success > td, -.table > thead > tr.success > th, -.table > tbody > tr.success > th, -.table > tfoot > tr.success > th { - background-color: #dff0d8; -} -.table-hover > tbody > tr > td.success:hover, -.table-hover > tbody > tr > th.success:hover, -.table-hover > tbody > tr.success:hover > td, -.table-hover > tbody > tr:hover > .success, -.table-hover > tbody > tr.success:hover > th { - background-color: #d0e9c6; -} -.table > thead > tr > td.info, -.table > tbody > tr > td.info, -.table > tfoot > tr > td.info, -.table > thead > tr > th.info, -.table > tbody > tr > th.info, -.table > tfoot > tr > th.info, -.table > thead > tr.info > td, -.table > tbody > tr.info > td, -.table > tfoot > tr.info > td, -.table > thead > tr.info > th, -.table > tbody > tr.info > th, 
-.table > tfoot > tr.info > th { - background-color: #d9edf7; -} -.table-hover > tbody > tr > td.info:hover, -.table-hover > tbody > tr > th.info:hover, -.table-hover > tbody > tr.info:hover > td, -.table-hover > tbody > tr:hover > .info, -.table-hover > tbody > tr.info:hover > th { - background-color: #c4e3f3; -} -.table > thead > tr > td.warning, -.table > tbody > tr > td.warning, -.table > tfoot > tr > td.warning, -.table > thead > tr > th.warning, -.table > tbody > tr > th.warning, -.table > tfoot > tr > th.warning, -.table > thead > tr.warning > td, -.table > tbody > tr.warning > td, -.table > tfoot > tr.warning > td, -.table > thead > tr.warning > th, -.table > tbody > tr.warning > th, -.table > tfoot > tr.warning > th { - background-color: #fcf8e3; -} -.table-hover > tbody > tr > td.warning:hover, -.table-hover > tbody > tr > th.warning:hover, -.table-hover > tbody > tr.warning:hover > td, -.table-hover > tbody > tr:hover > .warning, -.table-hover > tbody > tr.warning:hover > th { - background-color: #faf2cc; -} -.table > thead > tr > td.danger, -.table > tbody > tr > td.danger, -.table > tfoot > tr > td.danger, -.table > thead > tr > th.danger, -.table > tbody > tr > th.danger, -.table > tfoot > tr > th.danger, -.table > thead > tr.danger > td, -.table > tbody > tr.danger > td, -.table > tfoot > tr.danger > td, -.table > thead > tr.danger > th, -.table > tbody > tr.danger > th, -.table > tfoot > tr.danger > th { - background-color: #f2dede; -} -.table-hover > tbody > tr > td.danger:hover, -.table-hover > tbody > tr > th.danger:hover, -.table-hover > tbody > tr.danger:hover > td, -.table-hover > tbody > tr:hover > .danger, -.table-hover > tbody > tr.danger:hover > th { - background-color: #ebcccc; -} -.table-responsive { - min-height: 0.01%; - overflow-x: auto; -} -@media screen and (max-width: 767px) { - .table-responsive { - width: 100%; - margin-bottom: 15px; - overflow-y: hidden; - -ms-overflow-style: -ms-autohiding-scrollbar; - border: 1px solid #ddd; - } - .table-responsive > .table { - margin-bottom: 0; - } - .table-responsive > .table > thead > tr > th, - .table-responsive > .table > tbody > tr > th, - .table-responsive > .table > tfoot > tr > th, - .table-responsive > .table > thead > tr > td, - .table-responsive > .table > tbody > tr > td, - .table-responsive > .table > tfoot > tr > td { - white-space: nowrap; - } - .table-responsive > .table-bordered { - border: 0; - } - .table-responsive > .table-bordered > thead > tr > th:first-child, - .table-responsive > .table-bordered > tbody > tr > th:first-child, - .table-responsive > .table-bordered > tfoot > tr > th:first-child, - .table-responsive > .table-bordered > thead > tr > td:first-child, - .table-responsive > .table-bordered > tbody > tr > td:first-child, - .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; - } - .table-responsive > .table-bordered > thead > tr > th:last-child, - .table-responsive > .table-bordered > tbody > tr > th:last-child, - .table-responsive > .table-bordered > tfoot > tr > th:last-child, - .table-responsive > .table-bordered > thead > tr > td:last-child, - .table-responsive > .table-bordered > tbody > tr > td:last-child, - .table-responsive > .table-bordered > tfoot > tr > td:last-child { - border-right: 0; - } - .table-responsive > .table-bordered > tbody > tr:last-child > th, - .table-responsive > .table-bordered > tfoot > tr:last-child > th, - .table-responsive > .table-bordered > tbody > tr:last-child > td, - .table-responsive > .table-bordered > tfoot > 
tr:last-child > td { - border-bottom: 0; - } -} -fieldset { - min-width: 0; - padding: 0; - margin: 0; - border: 0; -} -legend { - display: block; - width: 100%; - padding: 0; - margin-bottom: 20px; - font-size: 21px; - line-height: inherit; - color: #333333; - border: 0; - border-bottom: 1px solid #e5e5e5; -} -label { - display: inline-block; - max-width: 100%; - margin-bottom: 5px; - font-weight: 700; -} -input[type="search"] { - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; -} -input[type="radio"], -input[type="checkbox"] { - margin: 4px 0 0; - margin-top: 1px \9; - line-height: normal; -} -input[type="radio"][disabled], -input[type="checkbox"][disabled], -input[type="radio"].disabled, -input[type="checkbox"].disabled, -fieldset[disabled] input[type="radio"], -fieldset[disabled] input[type="checkbox"] { - cursor: not-allowed; -} -input[type="file"] { - display: block; -} -input[type="range"] { - display: block; - width: 100%; -} -select[multiple], -select[size] { - height: auto; -} -input[type="file"]:focus, -input[type="radio"]:focus, -input[type="checkbox"]:focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -output { - display: block; - padding-top: 7px; - font-size: 14px; - line-height: 1.42857143; - color: #555555; -} -.form-control { - display: block; - width: 100%; - height: 34px; - padding: 6px 12px; - font-size: 14px; - line-height: 1.42857143; - color: #555555; - background-color: #fff; - background-image: none; - border: 1px solid #ccc; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; - -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; - -webkit-transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; - transition: border-color ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; - transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s; - transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s, -webkit-box-shadow ease-in-out .15s; -} -.form-control:focus { - border-color: #66afe9; - outline: 0; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6); -} -.form-control::-moz-placeholder { - color: #999; - opacity: 1; -} -.form-control:-ms-input-placeholder { - color: #999; -} -.form-control::-webkit-input-placeholder { - color: #999; -} -.form-control::-ms-expand { - background-color: transparent; - border: 0; -} -.form-control[disabled], -.form-control[readonly], -fieldset[disabled] .form-control { - background-color: #eeeeee; - opacity: 1; -} -.form-control[disabled], -fieldset[disabled] .form-control { - cursor: not-allowed; -} -textarea.form-control { - height: auto; -} -@media screen and (-webkit-min-device-pixel-ratio: 0) { - input[type="date"].form-control, - input[type="time"].form-control, - input[type="datetime-local"].form-control, - input[type="month"].form-control { - line-height: 34px; - } - input[type="date"].input-sm, - input[type="time"].input-sm, - input[type="datetime-local"].input-sm, - input[type="month"].input-sm, - .input-group-sm input[type="date"], - .input-group-sm input[type="time"], - .input-group-sm input[type="datetime-local"], - 
.input-group-sm input[type="month"] { - line-height: 30px; - } - input[type="date"].input-lg, - input[type="time"].input-lg, - input[type="datetime-local"].input-lg, - input[type="month"].input-lg, - .input-group-lg input[type="date"], - .input-group-lg input[type="time"], - .input-group-lg input[type="datetime-local"], - .input-group-lg input[type="month"] { - line-height: 46px; - } -} -.form-group { - margin-bottom: 15px; -} -.radio, -.checkbox { - position: relative; - display: block; - margin-top: 10px; - margin-bottom: 10px; -} -.radio.disabled label, -.checkbox.disabled label, -fieldset[disabled] .radio label, -fieldset[disabled] .checkbox label { - cursor: not-allowed; -} -.radio label, -.checkbox label { - min-height: 20px; - padding-left: 20px; - margin-bottom: 0; - font-weight: 400; - cursor: pointer; -} -.radio input[type="radio"], -.radio-inline input[type="radio"], -.checkbox input[type="checkbox"], -.checkbox-inline input[type="checkbox"] { - position: absolute; - margin-top: 4px \9; - margin-left: -20px; -} -.radio + .radio, -.checkbox + .checkbox { - margin-top: -5px; -} -.radio-inline, -.checkbox-inline { - position: relative; - display: inline-block; - padding-left: 20px; - margin-bottom: 0; - font-weight: 400; - vertical-align: middle; - cursor: pointer; -} -.radio-inline.disabled, -.checkbox-inline.disabled, -fieldset[disabled] .radio-inline, -fieldset[disabled] .checkbox-inline { - cursor: not-allowed; -} -.radio-inline + .radio-inline, -.checkbox-inline + .checkbox-inline { - margin-top: 0; - margin-left: 10px; -} -.form-control-static { - min-height: 34px; - padding-top: 7px; - padding-bottom: 7px; - margin-bottom: 0; -} -.form-control-static.input-lg, -.form-control-static.input-sm { - padding-right: 0; - padding-left: 0; -} -.input-sm { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -select.input-sm { - height: 30px; - line-height: 30px; -} -textarea.input-sm, -select[multiple].input-sm { - height: auto; -} -.form-group-sm .form-control { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.form-group-sm select.form-control { - height: 30px; - line-height: 30px; -} -.form-group-sm textarea.form-control, -.form-group-sm select[multiple].form-control { - height: auto; -} -.form-group-sm .form-control-static { - height: 30px; - min-height: 32px; - padding: 6px 10px; - font-size: 12px; - line-height: 1.5; -} -.input-lg { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -select.input-lg { - height: 46px; - line-height: 46px; -} -textarea.input-lg, -select[multiple].input-lg { - height: auto; -} -.form-group-lg .form-control { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.form-group-lg select.form-control { - height: 46px; - line-height: 46px; -} -.form-group-lg textarea.form-control, -.form-group-lg select[multiple].form-control { - height: auto; -} -.form-group-lg .form-control-static { - height: 46px; - min-height: 38px; - padding: 11px 16px; - font-size: 18px; - line-height: 1.3333333; -} -.has-feedback { - position: relative; -} -.has-feedback .form-control { - padding-right: 42.5px; -} -.form-control-feedback { - position: absolute; - top: 0; - right: 0; - z-index: 2; - display: block; - width: 34px; - height: 34px; - line-height: 34px; - text-align: center; - pointer-events: none; -} -.input-lg + .form-control-feedback, -.input-group-lg + 
.form-control-feedback, -.form-group-lg .form-control + .form-control-feedback { - width: 46px; - height: 46px; - line-height: 46px; -} -.input-sm + .form-control-feedback, -.input-group-sm + .form-control-feedback, -.form-group-sm .form-control + .form-control-feedback { - width: 30px; - height: 30px; - line-height: 30px; -} -.has-success .help-block, -.has-success .control-label, -.has-success .radio, -.has-success .checkbox, -.has-success .radio-inline, -.has-success .checkbox-inline, -.has-success.radio label, -.has-success.checkbox label, -.has-success.radio-inline label, -.has-success.checkbox-inline label { - color: #3c763d; -} -.has-success .form-control { - border-color: #3c763d; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-success .form-control:focus { - border-color: #2b542c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168; -} -.has-success .input-group-addon { - color: #3c763d; - background-color: #dff0d8; - border-color: #3c763d; -} -.has-success .form-control-feedback { - color: #3c763d; -} -.has-warning .help-block, -.has-warning .control-label, -.has-warning .radio, -.has-warning .checkbox, -.has-warning .radio-inline, -.has-warning .checkbox-inline, -.has-warning.radio label, -.has-warning.checkbox label, -.has-warning.radio-inline label, -.has-warning.checkbox-inline label { - color: #8a6d3b; -} -.has-warning .form-control { - border-color: #8a6d3b; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-warning .form-control:focus { - border-color: #66512c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b; -} -.has-warning .input-group-addon { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #8a6d3b; -} -.has-warning .form-control-feedback { - color: #8a6d3b; -} -.has-error .help-block, -.has-error .control-label, -.has-error .radio, -.has-error .checkbox, -.has-error .radio-inline, -.has-error .checkbox-inline, -.has-error.radio label, -.has-error.checkbox label, -.has-error.radio-inline label, -.has-error.checkbox-inline label { - color: #a94442; -} -.has-error .form-control { - border-color: #a94442; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); -} -.has-error .form-control:focus { - border-color: #843534; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483; -} -.has-error .input-group-addon { - color: #a94442; - background-color: #f2dede; - border-color: #a94442; -} -.has-error .form-control-feedback { - color: #a94442; -} -.has-feedback label ~ .form-control-feedback { - top: 25px; -} -.has-feedback label.sr-only ~ .form-control-feedback { - top: 0; -} -.help-block { - display: block; - margin-top: 5px; - margin-bottom: 10px; - color: #737373; -} -@media (min-width: 768px) { - .form-inline .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .form-control { - display: inline-block; - width: auto; - vertical-align: middle; - } - .form-inline .form-control-static { - display: inline-block; - } - .form-inline .input-group { - display: inline-table; - vertical-align: middle; - } - .form-inline .input-group 
.input-group-addon, - .form-inline .input-group .input-group-btn, - .form-inline .input-group .form-control { - width: auto; - } - .form-inline .input-group > .form-control { - width: 100%; - } - .form-inline .control-label { - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio, - .form-inline .checkbox { - display: inline-block; - margin-top: 0; - margin-bottom: 0; - vertical-align: middle; - } - .form-inline .radio label, - .form-inline .checkbox label { - padding-left: 0; - } - .form-inline .radio input[type="radio"], - .form-inline .checkbox input[type="checkbox"] { - position: relative; - margin-left: 0; - } - .form-inline .has-feedback .form-control-feedback { - top: 0; - } -} -.form-horizontal .radio, -.form-horizontal .checkbox, -.form-horizontal .radio-inline, -.form-horizontal .checkbox-inline { - padding-top: 7px; - margin-top: 0; - margin-bottom: 0; -} -.form-horizontal .radio, -.form-horizontal .checkbox { - min-height: 27px; -} -.form-horizontal .form-group { - margin-right: -15px; - margin-left: -15px; -} -@media (min-width: 768px) { - .form-horizontal .control-label { - padding-top: 7px; - margin-bottom: 0; - text-align: right; - } -} -.form-horizontal .has-feedback .form-control-feedback { - right: 15px; -} -@media (min-width: 768px) { - .form-horizontal .form-group-lg .control-label { - padding-top: 11px; - font-size: 18px; - } -} -@media (min-width: 768px) { - .form-horizontal .form-group-sm .control-label { - padding-top: 6px; - font-size: 12px; - } -} -.btn { - display: inline-block; - margin-bottom: 0; - font-weight: normal; - text-align: center; - white-space: nowrap; - vertical-align: middle; - -ms-touch-action: manipulation; - touch-action: manipulation; - cursor: pointer; - background-image: none; - border: 1px solid transparent; - padding: 6px 12px; - font-size: 14px; - line-height: 1.42857143; - border-radius: 4px; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} -.btn:focus, -.btn:active:focus, -.btn.active:focus, -.btn.focus, -.btn:active.focus, -.btn.active.focus { - outline: 5px auto -webkit-focus-ring-color; - outline-offset: -2px; -} -.btn:hover, -.btn:focus, -.btn.focus { - color: #333; - text-decoration: none; -} -.btn:active, -.btn.active { - background-image: none; - outline: 0; - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} -.btn.disabled, -.btn[disabled], -fieldset[disabled] .btn { - cursor: not-allowed; - filter: alpha(opacity=65); - opacity: 0.65; - -webkit-box-shadow: none; - box-shadow: none; -} -a.btn.disabled, -fieldset[disabled] a.btn { - pointer-events: none; -} -.btn-default { - color: #333; - background-color: #fff; - border-color: #ccc; -} -.btn-default:focus, -.btn-default.focus { - color: #333; - background-color: #e6e6e6; - border-color: #8c8c8c; -} -.btn-default:hover { - color: #333; - background-color: #e6e6e6; - border-color: #adadad; -} -.btn-default:active, -.btn-default.active, -.open > .dropdown-toggle.btn-default { - color: #333; - background-color: #e6e6e6; - background-image: none; - border-color: #adadad; -} -.btn-default:active:hover, -.btn-default.active:hover, -.open > .dropdown-toggle.btn-default:hover, -.btn-default:active:focus, -.btn-default.active:focus, -.open > .dropdown-toggle.btn-default:focus, -.btn-default:active.focus, -.btn-default.active.focus, -.open > .dropdown-toggle.btn-default.focus { - color: #333; - background-color: #d4d4d4; - border-color: #8c8c8c; -} 
-.btn-default.disabled:hover, -.btn-default[disabled]:hover, -fieldset[disabled] .btn-default:hover, -.btn-default.disabled:focus, -.btn-default[disabled]:focus, -fieldset[disabled] .btn-default:focus, -.btn-default.disabled.focus, -.btn-default[disabled].focus, -fieldset[disabled] .btn-default.focus { - background-color: #fff; - border-color: #ccc; -} -.btn-default .badge { - color: #fff; - background-color: #333; -} -.btn-primary { - color: #fff; - background-color: #337ab7; - border-color: #2e6da4; -} -.btn-primary:focus, -.btn-primary.focus { - color: #fff; - background-color: #286090; - border-color: #122b40; -} -.btn-primary:hover { - color: #fff; - background-color: #286090; - border-color: #204d74; -} -.btn-primary:active, -.btn-primary.active, -.open > .dropdown-toggle.btn-primary { - color: #fff; - background-color: #286090; - background-image: none; - border-color: #204d74; -} -.btn-primary:active:hover, -.btn-primary.active:hover, -.open > .dropdown-toggle.btn-primary:hover, -.btn-primary:active:focus, -.btn-primary.active:focus, -.open > .dropdown-toggle.btn-primary:focus, -.btn-primary:active.focus, -.btn-primary.active.focus, -.open > .dropdown-toggle.btn-primary.focus { - color: #fff; - background-color: #204d74; - border-color: #122b40; -} -.btn-primary.disabled:hover, -.btn-primary[disabled]:hover, -fieldset[disabled] .btn-primary:hover, -.btn-primary.disabled:focus, -.btn-primary[disabled]:focus, -fieldset[disabled] .btn-primary:focus, -.btn-primary.disabled.focus, -.btn-primary[disabled].focus, -fieldset[disabled] .btn-primary.focus { - background-color: #337ab7; - border-color: #2e6da4; -} -.btn-primary .badge { - color: #337ab7; - background-color: #fff; -} -.btn-success { - color: #fff; - background-color: #5cb85c; - border-color: #4cae4c; -} -.btn-success:focus, -.btn-success.focus { - color: #fff; - background-color: #449d44; - border-color: #255625; -} -.btn-success:hover { - color: #fff; - background-color: #449d44; - border-color: #398439; -} -.btn-success:active, -.btn-success.active, -.open > .dropdown-toggle.btn-success { - color: #fff; - background-color: #449d44; - background-image: none; - border-color: #398439; -} -.btn-success:active:hover, -.btn-success.active:hover, -.open > .dropdown-toggle.btn-success:hover, -.btn-success:active:focus, -.btn-success.active:focus, -.open > .dropdown-toggle.btn-success:focus, -.btn-success:active.focus, -.btn-success.active.focus, -.open > .dropdown-toggle.btn-success.focus { - color: #fff; - background-color: #398439; - border-color: #255625; -} -.btn-success.disabled:hover, -.btn-success[disabled]:hover, -fieldset[disabled] .btn-success:hover, -.btn-success.disabled:focus, -.btn-success[disabled]:focus, -fieldset[disabled] .btn-success:focus, -.btn-success.disabled.focus, -.btn-success[disabled].focus, -fieldset[disabled] .btn-success.focus { - background-color: #5cb85c; - border-color: #4cae4c; -} -.btn-success .badge { - color: #5cb85c; - background-color: #fff; -} -.btn-info { - color: #fff; - background-color: #5bc0de; - border-color: #46b8da; -} -.btn-info:focus, -.btn-info.focus { - color: #fff; - background-color: #31b0d5; - border-color: #1b6d85; -} -.btn-info:hover { - color: #fff; - background-color: #31b0d5; - border-color: #269abc; -} -.btn-info:active, -.btn-info.active, -.open > .dropdown-toggle.btn-info { - color: #fff; - background-color: #31b0d5; - background-image: none; - border-color: #269abc; -} -.btn-info:active:hover, -.btn-info.active:hover, -.open > .dropdown-toggle.btn-info:hover, 
-.btn-info:active:focus, -.btn-info.active:focus, -.open > .dropdown-toggle.btn-info:focus, -.btn-info:active.focus, -.btn-info.active.focus, -.open > .dropdown-toggle.btn-info.focus { - color: #fff; - background-color: #269abc; - border-color: #1b6d85; -} -.btn-info.disabled:hover, -.btn-info[disabled]:hover, -fieldset[disabled] .btn-info:hover, -.btn-info.disabled:focus, -.btn-info[disabled]:focus, -fieldset[disabled] .btn-info:focus, -.btn-info.disabled.focus, -.btn-info[disabled].focus, -fieldset[disabled] .btn-info.focus { - background-color: #5bc0de; - border-color: #46b8da; -} -.btn-info .badge { - color: #5bc0de; - background-color: #fff; -} -.btn-warning { - color: #fff; - background-color: #f0ad4e; - border-color: #eea236; -} -.btn-warning:focus, -.btn-warning.focus { - color: #fff; - background-color: #ec971f; - border-color: #985f0d; -} -.btn-warning:hover { - color: #fff; - background-color: #ec971f; - border-color: #d58512; -} -.btn-warning:active, -.btn-warning.active, -.open > .dropdown-toggle.btn-warning { - color: #fff; - background-color: #ec971f; - background-image: none; - border-color: #d58512; -} -.btn-warning:active:hover, -.btn-warning.active:hover, -.open > .dropdown-toggle.btn-warning:hover, -.btn-warning:active:focus, -.btn-warning.active:focus, -.open > .dropdown-toggle.btn-warning:focus, -.btn-warning:active.focus, -.btn-warning.active.focus, -.open > .dropdown-toggle.btn-warning.focus { - color: #fff; - background-color: #d58512; - border-color: #985f0d; -} -.btn-warning.disabled:hover, -.btn-warning[disabled]:hover, -fieldset[disabled] .btn-warning:hover, -.btn-warning.disabled:focus, -.btn-warning[disabled]:focus, -fieldset[disabled] .btn-warning:focus, -.btn-warning.disabled.focus, -.btn-warning[disabled].focus, -fieldset[disabled] .btn-warning.focus { - background-color: #f0ad4e; - border-color: #eea236; -} -.btn-warning .badge { - color: #f0ad4e; - background-color: #fff; -} -.btn-danger { - color: #fff; - background-color: #d9534f; - border-color: #d43f3a; -} -.btn-danger:focus, -.btn-danger.focus { - color: #fff; - background-color: #c9302c; - border-color: #761c19; -} -.btn-danger:hover { - color: #fff; - background-color: #c9302c; - border-color: #ac2925; -} -.btn-danger:active, -.btn-danger.active, -.open > .dropdown-toggle.btn-danger { - color: #fff; - background-color: #c9302c; - background-image: none; - border-color: #ac2925; -} -.btn-danger:active:hover, -.btn-danger.active:hover, -.open > .dropdown-toggle.btn-danger:hover, -.btn-danger:active:focus, -.btn-danger.active:focus, -.open > .dropdown-toggle.btn-danger:focus, -.btn-danger:active.focus, -.btn-danger.active.focus, -.open > .dropdown-toggle.btn-danger.focus { - color: #fff; - background-color: #ac2925; - border-color: #761c19; -} -.btn-danger.disabled:hover, -.btn-danger[disabled]:hover, -fieldset[disabled] .btn-danger:hover, -.btn-danger.disabled:focus, -.btn-danger[disabled]:focus, -fieldset[disabled] .btn-danger:focus, -.btn-danger.disabled.focus, -.btn-danger[disabled].focus, -fieldset[disabled] .btn-danger.focus { - background-color: #d9534f; - border-color: #d43f3a; -} -.btn-danger .badge { - color: #d9534f; - background-color: #fff; -} -.btn-link { - font-weight: 400; - color: #337ab7; - border-radius: 0; -} -.btn-link, -.btn-link:active, -.btn-link.active, -.btn-link[disabled], -fieldset[disabled] .btn-link { - background-color: transparent; - -webkit-box-shadow: none; - box-shadow: none; -} -.btn-link, -.btn-link:hover, -.btn-link:focus, -.btn-link:active { - border-color: 
transparent; -} -.btn-link:hover, -.btn-link:focus { - color: #23527c; - text-decoration: underline; - background-color: transparent; -} -.btn-link[disabled]:hover, -fieldset[disabled] .btn-link:hover, -.btn-link[disabled]:focus, -fieldset[disabled] .btn-link:focus { - color: #777777; - text-decoration: none; -} -.btn-lg, -.btn-group-lg > .btn { - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -.btn-sm, -.btn-group-sm > .btn { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-xs, -.btn-group-xs > .btn { - padding: 1px 5px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -.btn-block { - display: block; - width: 100%; -} -.btn-block + .btn-block { - margin-top: 5px; -} -input[type="submit"].btn-block, -input[type="reset"].btn-block, -input[type="button"].btn-block { - width: 100%; -} -.fade { - opacity: 0; - -webkit-transition: opacity 0.15s linear; - -o-transition: opacity 0.15s linear; - transition: opacity 0.15s linear; -} -.fade.in { - opacity: 1; -} -.collapse { - display: none; -} -.collapse.in { - display: block; -} -tr.collapse.in { - display: table-row; -} -tbody.collapse.in { - display: table-row-group; -} -.collapsing { - position: relative; - height: 0; - overflow: hidden; - -webkit-transition-property: height, visibility; - -o-transition-property: height, visibility; - transition-property: height, visibility; - -webkit-transition-duration: 0.35s; - -o-transition-duration: 0.35s; - transition-duration: 0.35s; - -webkit-transition-timing-function: ease; - -o-transition-timing-function: ease; - transition-timing-function: ease; -} -.caret { - display: inline-block; - width: 0; - height: 0; - margin-left: 2px; - vertical-align: middle; - border-top: 4px dashed; - border-top: 4px solid \9; - border-right: 4px solid transparent; - border-left: 4px solid transparent; -} -.dropup, -.dropdown { - position: relative; -} -.dropdown-toggle:focus { - outline: 0; -} -.dropdown-menu { - position: absolute; - top: 100%; - left: 0; - z-index: 1000; - display: none; - float: left; - min-width: 160px; - padding: 5px 0; - margin: 2px 0 0; - font-size: 14px; - text-align: left; - list-style: none; - background-color: #fff; - background-clip: padding-box; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.15); - border-radius: 4px; - -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); - box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175); -} -.dropdown-menu.pull-right { - right: 0; - left: auto; -} -.dropdown-menu .divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #e5e5e5; -} -.dropdown-menu > li > a { - display: block; - padding: 3px 20px; - clear: both; - font-weight: 400; - line-height: 1.42857143; - color: #333333; - white-space: nowrap; -} -.dropdown-menu > li > a:hover, -.dropdown-menu > li > a:focus { - color: #262626; - text-decoration: none; - background-color: #f5f5f5; -} -.dropdown-menu > .active > a, -.dropdown-menu > .active > a:hover, -.dropdown-menu > .active > a:focus { - color: #fff; - text-decoration: none; - background-color: #337ab7; - outline: 0; -} -.dropdown-menu > .disabled > a, -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - color: #777777; -} -.dropdown-menu > .disabled > a:hover, -.dropdown-menu > .disabled > a:focus { - text-decoration: none; - cursor: not-allowed; - background-color: transparent; - background-image: none; - filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); -} -.open > 
.dropdown-menu { - display: block; -} -.open > a { - outline: 0; -} -.dropdown-menu-right { - right: 0; - left: auto; -} -.dropdown-menu-left { - right: auto; - left: 0; -} -.dropdown-header { - display: block; - padding: 3px 20px; - font-size: 12px; - line-height: 1.42857143; - color: #777777; - white-space: nowrap; -} -.dropdown-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 990; -} -.pull-right > .dropdown-menu { - right: 0; - left: auto; -} -.dropup .caret, -.navbar-fixed-bottom .dropdown .caret { - content: ""; - border-top: 0; - border-bottom: 4px dashed; - border-bottom: 4px solid \9; -} -.dropup .dropdown-menu, -.navbar-fixed-bottom .dropdown .dropdown-menu { - top: auto; - bottom: 100%; - margin-bottom: 2px; -} -@media (min-width: 768px) { - .navbar-right .dropdown-menu { - right: 0; - left: auto; - } - .navbar-right .dropdown-menu-left { - right: auto; - left: 0; - } -} -.btn-group, -.btn-group-vertical { - position: relative; - display: inline-block; - vertical-align: middle; -} -.btn-group > .btn, -.btn-group-vertical > .btn { - position: relative; - float: left; -} -.btn-group > .btn:hover, -.btn-group-vertical > .btn:hover, -.btn-group > .btn:focus, -.btn-group-vertical > .btn:focus, -.btn-group > .btn:active, -.btn-group-vertical > .btn:active, -.btn-group > .btn.active, -.btn-group-vertical > .btn.active { - z-index: 2; -} -.btn-group .btn + .btn, -.btn-group .btn + .btn-group, -.btn-group .btn-group + .btn, -.btn-group .btn-group + .btn-group { - margin-left: -1px; -} -.btn-toolbar { - margin-left: -5px; -} -.btn-toolbar .btn, -.btn-toolbar .btn-group, -.btn-toolbar .input-group { - float: left; -} -.btn-toolbar > .btn, -.btn-toolbar > .btn-group, -.btn-toolbar > .input-group { - margin-left: 5px; -} -.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) { - border-radius: 0; -} -.btn-group > .btn:first-child { - margin-left: 0; -} -.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.btn-group > .btn:last-child:not(:first-child), -.btn-group > .dropdown-toggle:not(:first-child) { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group > .btn-group { - float: left; -} -.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn { - border-radius: 0; -} -.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child, -.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group .dropdown-toggle:active, -.btn-group.open .dropdown-toggle { - outline: 0; -} -.btn-group > .btn + .dropdown-toggle { - padding-right: 8px; - padding-left: 8px; -} -.btn-group > .btn-lg + .dropdown-toggle { - padding-right: 12px; - padding-left: 12px; -} -.btn-group.open .dropdown-toggle { - -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); - box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125); -} -.btn-group.open .dropdown-toggle.btn-link { - -webkit-box-shadow: none; - box-shadow: none; -} -.btn .caret { - margin-left: 0; -} -.btn-lg .caret { - border-width: 5px 5px 0; - border-bottom-width: 0; -} -.dropup .btn-lg .caret { - border-width: 0 5px 5px; -} -.btn-group-vertical > .btn, -.btn-group-vertical > .btn-group, -.btn-group-vertical > .btn-group > .btn { - display: block; - float: none; 
- width: 100%; - max-width: 100%; -} -.btn-group-vertical > .btn-group > .btn { - float: none; -} -.btn-group-vertical > .btn + .btn, -.btn-group-vertical > .btn + .btn-group, -.btn-group-vertical > .btn-group + .btn, -.btn-group-vertical > .btn-group + .btn-group { - margin-top: -1px; - margin-left: 0; -} -.btn-group-vertical > .btn:not(:first-child):not(:last-child) { - border-radius: 0; -} -.btn-group-vertical > .btn:first-child:not(:last-child) { - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group-vertical > .btn:last-child:not(:first-child) { - border-top-left-radius: 0; - border-top-right-radius: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} -.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn { - border-radius: 0; -} -.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child, -.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle { - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child { - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.btn-group-justified { - display: table; - width: 100%; - table-layout: fixed; - border-collapse: separate; -} -.btn-group-justified > .btn, -.btn-group-justified > .btn-group { - display: table-cell; - float: none; - width: 1%; -} -.btn-group-justified > .btn-group .btn { - width: 100%; -} -.btn-group-justified > .btn-group .dropdown-menu { - left: auto; -} -[data-toggle="buttons"] > .btn input[type="radio"], -[data-toggle="buttons"] > .btn-group > .btn input[type="radio"], -[data-toggle="buttons"] > .btn input[type="checkbox"], -[data-toggle="buttons"] > .btn-group > .btn input[type="checkbox"] { - position: absolute; - clip: rect(0, 0, 0, 0); - pointer-events: none; -} -.input-group { - position: relative; - display: table; - border-collapse: separate; -} -.input-group[class*="col-"] { - float: none; - padding-right: 0; - padding-left: 0; -} -.input-group .form-control { - position: relative; - z-index: 2; - float: left; - width: 100%; - margin-bottom: 0; -} -.input-group .form-control:focus { - z-index: 3; -} -.input-group-lg > .form-control, -.input-group-lg > .input-group-addon, -.input-group-lg > .input-group-btn > .btn { - height: 46px; - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; - border-radius: 6px; -} -select.input-group-lg > .form-control, -select.input-group-lg > .input-group-addon, -select.input-group-lg > .input-group-btn > .btn { - height: 46px; - line-height: 46px; -} -textarea.input-group-lg > .form-control, -textarea.input-group-lg > .input-group-addon, -textarea.input-group-lg > .input-group-btn > .btn, -select[multiple].input-group-lg > .form-control, -select[multiple].input-group-lg > .input-group-addon, -select[multiple].input-group-lg > .input-group-btn > .btn { - height: auto; -} -.input-group-sm > .form-control, -.input-group-sm > .input-group-addon, -.input-group-sm > .input-group-btn > .btn { - height: 30px; - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; - border-radius: 3px; -} -select.input-group-sm > .form-control, -select.input-group-sm > .input-group-addon, -select.input-group-sm > .input-group-btn > .btn { - height: 30px; - line-height: 30px; -} -textarea.input-group-sm > .form-control, -textarea.input-group-sm > .input-group-addon, -textarea.input-group-sm > .input-group-btn > .btn, 
-select[multiple].input-group-sm > .form-control, -select[multiple].input-group-sm > .input-group-addon, -select[multiple].input-group-sm > .input-group-btn > .btn { - height: auto; -} -.input-group-addon, -.input-group-btn, -.input-group .form-control { - display: table-cell; -} -.input-group-addon:not(:first-child):not(:last-child), -.input-group-btn:not(:first-child):not(:last-child), -.input-group .form-control:not(:first-child):not(:last-child) { - border-radius: 0; -} -.input-group-addon, -.input-group-btn { - width: 1%; - white-space: nowrap; - vertical-align: middle; -} -.input-group-addon { - padding: 6px 12px; - font-size: 14px; - font-weight: 400; - line-height: 1; - color: #555555; - text-align: center; - background-color: #eeeeee; - border: 1px solid #ccc; - border-radius: 4px; -} -.input-group-addon.input-sm { - padding: 5px 10px; - font-size: 12px; - border-radius: 3px; -} -.input-group-addon.input-lg { - padding: 10px 16px; - font-size: 18px; - border-radius: 6px; -} -.input-group-addon input[type="radio"], -.input-group-addon input[type="checkbox"] { - margin-top: 0; -} -.input-group .form-control:first-child, -.input-group-addon:first-child, -.input-group-btn:first-child > .btn, -.input-group-btn:first-child > .btn-group > .btn, -.input-group-btn:first-child > .dropdown-toggle, -.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle), -.input-group-btn:last-child > .btn-group:not(:last-child) > .btn { - border-top-right-radius: 0; - border-bottom-right-radius: 0; -} -.input-group-addon:first-child { - border-right: 0; -} -.input-group .form-control:last-child, -.input-group-addon:last-child, -.input-group-btn:last-child > .btn, -.input-group-btn:last-child > .btn-group > .btn, -.input-group-btn:last-child > .dropdown-toggle, -.input-group-btn:first-child > .btn:not(:first-child), -.input-group-btn:first-child > .btn-group:not(:first-child) > .btn { - border-top-left-radius: 0; - border-bottom-left-radius: 0; -} -.input-group-addon:last-child { - border-left: 0; -} -.input-group-btn { - position: relative; - font-size: 0; - white-space: nowrap; -} -.input-group-btn > .btn { - position: relative; -} -.input-group-btn > .btn + .btn { - margin-left: -1px; -} -.input-group-btn > .btn:hover, -.input-group-btn > .btn:focus, -.input-group-btn > .btn:active { - z-index: 2; -} -.input-group-btn:first-child > .btn, -.input-group-btn:first-child > .btn-group { - margin-right: -1px; -} -.input-group-btn:last-child > .btn, -.input-group-btn:last-child > .btn-group { - z-index: 2; - margin-left: -1px; -} -.nav { - padding-left: 0; - margin-bottom: 0; - list-style: none; -} -.nav > li { - position: relative; - display: block; -} -.nav > li > a { - position: relative; - display: block; - padding: 10px 15px; -} -.nav > li > a:hover, -.nav > li > a:focus { - text-decoration: none; - background-color: #eeeeee; -} -.nav > li.disabled > a { - color: #777777; -} -.nav > li.disabled > a:hover, -.nav > li.disabled > a:focus { - color: #777777; - text-decoration: none; - cursor: not-allowed; - background-color: transparent; -} -.nav .open > a, -.nav .open > a:hover, -.nav .open > a:focus { - background-color: #eeeeee; - border-color: #337ab7; -} -.nav .nav-divider { - height: 1px; - margin: 9px 0; - overflow: hidden; - background-color: #e5e5e5; -} -.nav > li > a > img { - max-width: none; -} -.nav-tabs { - border-bottom: 1px solid #ddd; -} -.nav-tabs > li { - float: left; - margin-bottom: -1px; -} -.nav-tabs > li > a { - margin-right: 2px; - line-height: 1.42857143; - border: 
1px solid transparent; - border-radius: 4px 4px 0 0; -} -.nav-tabs > li > a:hover { - border-color: #eeeeee #eeeeee #ddd; -} -.nav-tabs > li.active > a, -.nav-tabs > li.active > a:hover, -.nav-tabs > li.active > a:focus { - color: #555555; - cursor: default; - background-color: #fff; - border: 1px solid #ddd; - border-bottom-color: transparent; -} -.nav-tabs.nav-justified { - width: 100%; - border-bottom: 0; -} -.nav-tabs.nav-justified > li { - float: none; -} -.nav-tabs.nav-justified > li > a { - margin-bottom: 5px; - text-align: center; -} -.nav-tabs.nav-justified > .dropdown .dropdown-menu { - top: auto; - left: auto; -} -@media (min-width: 768px) { - .nav-tabs.nav-justified > li { - display: table-cell; - width: 1%; - } - .nav-tabs.nav-justified > li > a { - margin-bottom: 0; - } -} -.nav-tabs.nav-justified > li > a { - margin-right: 0; - border-radius: 4px; -} -.nav-tabs.nav-justified > .active > a, -.nav-tabs.nav-justified > .active > a:hover, -.nav-tabs.nav-justified > .active > a:focus { - border: 1px solid #ddd; -} -@media (min-width: 768px) { - .nav-tabs.nav-justified > li > a { - border-bottom: 1px solid #ddd; - border-radius: 4px 4px 0 0; - } - .nav-tabs.nav-justified > .active > a, - .nav-tabs.nav-justified > .active > a:hover, - .nav-tabs.nav-justified > .active > a:focus { - border-bottom-color: #fff; - } -} -.nav-pills > li { - float: left; -} -.nav-pills > li > a { - border-radius: 4px; -} -.nav-pills > li + li { - margin-left: 2px; -} -.nav-pills > li.active > a, -.nav-pills > li.active > a:hover, -.nav-pills > li.active > a:focus { - color: #fff; - background-color: #337ab7; -} -.nav-stacked > li { - float: none; -} -.nav-stacked > li + li { - margin-top: 2px; - margin-left: 0; -} -.nav-justified { - width: 100%; -} -.nav-justified > li { - float: none; -} -.nav-justified > li > a { - margin-bottom: 5px; - text-align: center; -} -.nav-justified > .dropdown .dropdown-menu { - top: auto; - left: auto; -} -@media (min-width: 768px) { - .nav-justified > li { - display: table-cell; - width: 1%; - } - .nav-justified > li > a { - margin-bottom: 0; - } -} -.nav-tabs-justified { - border-bottom: 0; -} -.nav-tabs-justified > li > a { - margin-right: 0; - border-radius: 4px; -} -.nav-tabs-justified > .active > a, -.nav-tabs-justified > .active > a:hover, -.nav-tabs-justified > .active > a:focus { - border: 1px solid #ddd; -} -@media (min-width: 768px) { - .nav-tabs-justified > li > a { - border-bottom: 1px solid #ddd; - border-radius: 4px 4px 0 0; - } - .nav-tabs-justified > .active > a, - .nav-tabs-justified > .active > a:hover, - .nav-tabs-justified > .active > a:focus { - border-bottom-color: #fff; - } -} -.tab-content > .tab-pane { - display: none; -} -.tab-content > .active { - display: block; -} -.nav-tabs .dropdown-menu { - margin-top: -1px; - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.navbar { - position: relative; - min-height: 50px; - margin-bottom: 20px; - border: 1px solid transparent; -} -@media (min-width: 768px) { - .navbar { - border-radius: 4px; - } -} -@media (min-width: 768px) { - .navbar-header { - float: left; - } -} -.navbar-collapse { - padding-right: 15px; - padding-left: 15px; - overflow-x: visible; - border-top: 1px solid transparent; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); - -webkit-overflow-scrolling: touch; -} -.navbar-collapse.in { - overflow-y: auto; -} -@media (min-width: 768px) { - .navbar-collapse { - width: auto; - border-top: 0; - -webkit-box-shadow: 
none; - box-shadow: none; - } - .navbar-collapse.collapse { - display: block !important; - height: auto !important; - padding-bottom: 0; - overflow: visible !important; - } - .navbar-collapse.in { - overflow-y: visible; - } - .navbar-fixed-top .navbar-collapse, - .navbar-static-top .navbar-collapse, - .navbar-fixed-bottom .navbar-collapse { - padding-right: 0; - padding-left: 0; - } -} -.navbar-fixed-top, -.navbar-fixed-bottom { - position: fixed; - right: 0; - left: 0; - z-index: 1030; -} -.navbar-fixed-top .navbar-collapse, -.navbar-fixed-bottom .navbar-collapse { - max-height: 340px; -} -@media (max-device-width: 480px) and (orientation: landscape) { - .navbar-fixed-top .navbar-collapse, - .navbar-fixed-bottom .navbar-collapse { - max-height: 200px; - } -} -@media (min-width: 768px) { - .navbar-fixed-top, - .navbar-fixed-bottom { - border-radius: 0; - } -} -.navbar-fixed-top { - top: 0; - border-width: 0 0 1px; -} -.navbar-fixed-bottom { - bottom: 0; - margin-bottom: 0; - border-width: 1px 0 0; -} -.container > .navbar-header, -.container-fluid > .navbar-header, -.container > .navbar-collapse, -.container-fluid > .navbar-collapse { - margin-right: -15px; - margin-left: -15px; -} -@media (min-width: 768px) { - .container > .navbar-header, - .container-fluid > .navbar-header, - .container > .navbar-collapse, - .container-fluid > .navbar-collapse { - margin-right: 0; - margin-left: 0; - } -} -.navbar-static-top { - z-index: 1000; - border-width: 0 0 1px; -} -@media (min-width: 768px) { - .navbar-static-top { - border-radius: 0; - } -} -.navbar-brand { - float: left; - height: 50px; - padding: 15px 15px; - font-size: 18px; - line-height: 20px; -} -.navbar-brand:hover, -.navbar-brand:focus { - text-decoration: none; -} -.navbar-brand > img { - display: block; -} -@media (min-width: 768px) { - .navbar > .container .navbar-brand, - .navbar > .container-fluid .navbar-brand { - margin-left: -15px; - } -} -.navbar-toggle { - position: relative; - float: right; - padding: 9px 10px; - margin-right: 15px; - margin-top: 8px; - margin-bottom: 8px; - background-color: transparent; - background-image: none; - border: 1px solid transparent; - border-radius: 4px; -} -.navbar-toggle:focus { - outline: 0; -} -.navbar-toggle .icon-bar { - display: block; - width: 22px; - height: 2px; - border-radius: 1px; -} -.navbar-toggle .icon-bar + .icon-bar { - margin-top: 4px; -} -@media (min-width: 768px) { - .navbar-toggle { - display: none; - } -} -.navbar-nav { - margin: 7.5px -15px; -} -.navbar-nav > li > a { - padding-top: 10px; - padding-bottom: 10px; - line-height: 20px; -} -@media (max-width: 767px) { - .navbar-nav .open .dropdown-menu { - position: static; - float: none; - width: auto; - margin-top: 0; - background-color: transparent; - border: 0; - -webkit-box-shadow: none; - box-shadow: none; - } - .navbar-nav .open .dropdown-menu > li > a, - .navbar-nav .open .dropdown-menu .dropdown-header { - padding: 5px 15px 5px 25px; - } - .navbar-nav .open .dropdown-menu > li > a { - line-height: 20px; - } - .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-nav .open .dropdown-menu > li > a:focus { - background-image: none; - } -} -@media (min-width: 768px) { - .navbar-nav { - float: left; - margin: 0; - } - .navbar-nav > li { - float: left; - } - .navbar-nav > li > a { - padding-top: 15px; - padding-bottom: 15px; - } -} -.navbar-form { - padding: 10px 15px; - margin-right: -15px; - margin-left: -15px; - border-top: 1px solid transparent; - border-bottom: 1px solid transparent; - -webkit-box-shadow: inset 0 
1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - margin-top: 8px; - margin-bottom: 8px; -} -@media (min-width: 768px) { - .navbar-form .form-group { - display: inline-block; - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .form-control { - display: inline-block; - width: auto; - vertical-align: middle; - } - .navbar-form .form-control-static { - display: inline-block; - } - .navbar-form .input-group { - display: inline-table; - vertical-align: middle; - } - .navbar-form .input-group .input-group-addon, - .navbar-form .input-group .input-group-btn, - .navbar-form .input-group .form-control { - width: auto; - } - .navbar-form .input-group > .form-control { - width: 100%; - } - .navbar-form .control-label { - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .radio, - .navbar-form .checkbox { - display: inline-block; - margin-top: 0; - margin-bottom: 0; - vertical-align: middle; - } - .navbar-form .radio label, - .navbar-form .checkbox label { - padding-left: 0; - } - .navbar-form .radio input[type="radio"], - .navbar-form .checkbox input[type="checkbox"] { - position: relative; - margin-left: 0; - } - .navbar-form .has-feedback .form-control-feedback { - top: 0; - } -} -@media (max-width: 767px) { - .navbar-form .form-group { - margin-bottom: 5px; - } - .navbar-form .form-group:last-child { - margin-bottom: 0; - } -} -@media (min-width: 768px) { - .navbar-form { - width: auto; - padding-top: 0; - padding-bottom: 0; - margin-right: 0; - margin-left: 0; - border: 0; - -webkit-box-shadow: none; - box-shadow: none; - } -} -.navbar-nav > li > .dropdown-menu { - margin-top: 0; - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu { - margin-bottom: 0; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom-right-radius: 0; - border-bottom-left-radius: 0; -} -.navbar-btn { - margin-top: 8px; - margin-bottom: 8px; -} -.navbar-btn.btn-sm { - margin-top: 10px; - margin-bottom: 10px; -} -.navbar-btn.btn-xs { - margin-top: 14px; - margin-bottom: 14px; -} -.navbar-text { - margin-top: 15px; - margin-bottom: 15px; -} -@media (min-width: 768px) { - .navbar-text { - float: left; - margin-right: 15px; - margin-left: 15px; - } -} -@media (min-width: 768px) { - .navbar-left { - float: left !important; - } - .navbar-right { - float: right !important; - margin-right: -15px; - } - .navbar-right ~ .navbar-right { - margin-right: 0; - } -} -.navbar-default { - background-color: #f8f8f8; - border-color: #e7e7e7; -} -.navbar-default .navbar-brand { - color: #777; -} -.navbar-default .navbar-brand:hover, -.navbar-default .navbar-brand:focus { - color: #5e5e5e; - background-color: transparent; -} -.navbar-default .navbar-text { - color: #777; -} -.navbar-default .navbar-nav > li > a { - color: #777; -} -.navbar-default .navbar-nav > li > a:hover, -.navbar-default .navbar-nav > li > a:focus { - color: #333; - background-color: transparent; -} -.navbar-default .navbar-nav > .active > a, -.navbar-default .navbar-nav > .active > a:hover, -.navbar-default .navbar-nav > .active > a:focus { - color: #555; - background-color: #e7e7e7; -} -.navbar-default .navbar-nav > .disabled > a, -.navbar-default .navbar-nav > .disabled > a:hover, -.navbar-default .navbar-nav > .disabled > a:focus { - color: #ccc; - background-color: transparent; -} -.navbar-default .navbar-nav > .open > a, -.navbar-default .navbar-nav > 
.open > a:hover, -.navbar-default .navbar-nav > .open > a:focus { - color: #555; - background-color: #e7e7e7; -} -@media (max-width: 767px) { - .navbar-default .navbar-nav .open .dropdown-menu > li > a { - color: #777; - } - .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus { - color: #333; - background-color: transparent; - } - .navbar-default .navbar-nav .open .dropdown-menu > .active > a, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #555; - background-color: #e7e7e7; - } - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #ccc; - background-color: transparent; - } -} -.navbar-default .navbar-toggle { - border-color: #ddd; -} -.navbar-default .navbar-toggle:hover, -.navbar-default .navbar-toggle:focus { - background-color: #ddd; -} -.navbar-default .navbar-toggle .icon-bar { - background-color: #888; -} -.navbar-default .navbar-collapse, -.navbar-default .navbar-form { - border-color: #e7e7e7; -} -.navbar-default .navbar-link { - color: #777; -} -.navbar-default .navbar-link:hover { - color: #333; -} -.navbar-default .btn-link { - color: #777; -} -.navbar-default .btn-link:hover, -.navbar-default .btn-link:focus { - color: #333; -} -.navbar-default .btn-link[disabled]:hover, -fieldset[disabled] .navbar-default .btn-link:hover, -.navbar-default .btn-link[disabled]:focus, -fieldset[disabled] .navbar-default .btn-link:focus { - color: #ccc; -} -.navbar-inverse { - background-color: #222; - border-color: #080808; -} -.navbar-inverse .navbar-brand { - color: #9d9d9d; -} -.navbar-inverse .navbar-brand:hover, -.navbar-inverse .navbar-brand:focus { - color: #fff; - background-color: transparent; -} -.navbar-inverse .navbar-text { - color: #9d9d9d; -} -.navbar-inverse .navbar-nav > li > a { - color: #9d9d9d; -} -.navbar-inverse .navbar-nav > li > a:hover, -.navbar-inverse .navbar-nav > li > a:focus { - color: #fff; - background-color: transparent; -} -.navbar-inverse .navbar-nav > .active > a, -.navbar-inverse .navbar-nav > .active > a:hover, -.navbar-inverse .navbar-nav > .active > a:focus { - color: #fff; - background-color: #080808; -} -.navbar-inverse .navbar-nav > .disabled > a, -.navbar-inverse .navbar-nav > .disabled > a:hover, -.navbar-inverse .navbar-nav > .disabled > a:focus { - color: #444; - background-color: transparent; -} -.navbar-inverse .navbar-nav > .open > a, -.navbar-inverse .navbar-nav > .open > a:hover, -.navbar-inverse .navbar-nav > .open > a:focus { - color: #fff; - background-color: #080808; -} -@media (max-width: 767px) { - .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header { - border-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu .divider { - background-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a { - color: #9d9d9d; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus { - color: #fff; - background-color: transparent; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus { - color: #fff; - 
background-color: #080808; - } - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover, - .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus { - color: #444; - background-color: transparent; - } -} -.navbar-inverse .navbar-toggle { - border-color: #333; -} -.navbar-inverse .navbar-toggle:hover, -.navbar-inverse .navbar-toggle:focus { - background-color: #333; -} -.navbar-inverse .navbar-toggle .icon-bar { - background-color: #fff; -} -.navbar-inverse .navbar-collapse, -.navbar-inverse .navbar-form { - border-color: #101010; -} -.navbar-inverse .navbar-link { - color: #9d9d9d; -} -.navbar-inverse .navbar-link:hover { - color: #fff; -} -.navbar-inverse .btn-link { - color: #9d9d9d; -} -.navbar-inverse .btn-link:hover, -.navbar-inverse .btn-link:focus { - color: #fff; -} -.navbar-inverse .btn-link[disabled]:hover, -fieldset[disabled] .navbar-inverse .btn-link:hover, -.navbar-inverse .btn-link[disabled]:focus, -fieldset[disabled] .navbar-inverse .btn-link:focus { - color: #444; -} -.breadcrumb { - padding: 8px 15px; - margin-bottom: 20px; - list-style: none; - background-color: #f5f5f5; - border-radius: 4px; -} -.breadcrumb > li { - display: inline-block; -} -.breadcrumb > li + li:before { - padding: 0 5px; - color: #ccc; - content: "/\00a0"; -} -.breadcrumb > .active { - color: #777777; -} -.pagination { - display: inline-block; - padding-left: 0; - margin: 20px 0; - border-radius: 4px; -} -.pagination > li { - display: inline; -} -.pagination > li > a, -.pagination > li > span { - position: relative; - float: left; - padding: 6px 12px; - margin-left: -1px; - line-height: 1.42857143; - color: #337ab7; - text-decoration: none; - background-color: #fff; - border: 1px solid #ddd; -} -.pagination > li > a:hover, -.pagination > li > span:hover, -.pagination > li > a:focus, -.pagination > li > span:focus { - z-index: 2; - color: #23527c; - background-color: #eeeeee; - border-color: #ddd; -} -.pagination > li:first-child > a, -.pagination > li:first-child > span { - margin-left: 0; - border-top-left-radius: 4px; - border-bottom-left-radius: 4px; -} -.pagination > li:last-child > a, -.pagination > li:last-child > span { - border-top-right-radius: 4px; - border-bottom-right-radius: 4px; -} -.pagination > .active > a, -.pagination > .active > span, -.pagination > .active > a:hover, -.pagination > .active > span:hover, -.pagination > .active > a:focus, -.pagination > .active > span:focus { - z-index: 3; - color: #fff; - cursor: default; - background-color: #337ab7; - border-color: #337ab7; -} -.pagination > .disabled > span, -.pagination > .disabled > span:hover, -.pagination > .disabled > span:focus, -.pagination > .disabled > a, -.pagination > .disabled > a:hover, -.pagination > .disabled > a:focus { - color: #777777; - cursor: not-allowed; - background-color: #fff; - border-color: #ddd; -} -.pagination-lg > li > a, -.pagination-lg > li > span { - padding: 10px 16px; - font-size: 18px; - line-height: 1.3333333; -} -.pagination-lg > li:first-child > a, -.pagination-lg > li:first-child > span { - border-top-left-radius: 6px; - border-bottom-left-radius: 6px; -} -.pagination-lg > li:last-child > a, -.pagination-lg > li:last-child > span { - border-top-right-radius: 6px; - border-bottom-right-radius: 6px; -} -.pagination-sm > li > a, -.pagination-sm > li > span { - padding: 5px 10px; - font-size: 12px; - line-height: 1.5; -} -.pagination-sm > li:first-child > a, -.pagination-sm > li:first-child > span 
{ - border-top-left-radius: 3px; - border-bottom-left-radius: 3px; -} -.pagination-sm > li:last-child > a, -.pagination-sm > li:last-child > span { - border-top-right-radius: 3px; - border-bottom-right-radius: 3px; -} -.pager { - padding-left: 0; - margin: 20px 0; - text-align: center; - list-style: none; -} -.pager li { - display: inline; -} -.pager li > a, -.pager li > span { - display: inline-block; - padding: 5px 14px; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 15px; -} -.pager li > a:hover, -.pager li > a:focus { - text-decoration: none; - background-color: #eeeeee; -} -.pager .next > a, -.pager .next > span { - float: right; -} -.pager .previous > a, -.pager .previous > span { - float: left; -} -.pager .disabled > a, -.pager .disabled > a:hover, -.pager .disabled > a:focus, -.pager .disabled > span { - color: #777777; - cursor: not-allowed; - background-color: #fff; -} -.label { - display: inline; - padding: 0.2em 0.6em 0.3em; - font-size: 75%; - font-weight: 700; - line-height: 1; - color: #fff; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: 0.25em; -} -a.label:hover, -a.label:focus { - color: #fff; - text-decoration: none; - cursor: pointer; -} -.label:empty { - display: none; -} -.btn .label { - position: relative; - top: -1px; -} -.label-default { - background-color: #777777; -} -.label-default[href]:hover, -.label-default[href]:focus { - background-color: #5e5e5e; -} -.label-primary { - background-color: #337ab7; -} -.label-primary[href]:hover, -.label-primary[href]:focus { - background-color: #286090; -} -.label-success { - background-color: #5cb85c; -} -.label-success[href]:hover, -.label-success[href]:focus { - background-color: #449d44; -} -.label-info { - background-color: #5bc0de; -} -.label-info[href]:hover, -.label-info[href]:focus { - background-color: #31b0d5; -} -.label-warning { - background-color: #f0ad4e; -} -.label-warning[href]:hover, -.label-warning[href]:focus { - background-color: #ec971f; -} -.label-danger { - background-color: #d9534f; -} -.label-danger[href]:hover, -.label-danger[href]:focus { - background-color: #c9302c; -} -.badge { - display: inline-block; - min-width: 10px; - padding: 3px 7px; - font-size: 12px; - font-weight: bold; - line-height: 1; - color: #fff; - text-align: center; - white-space: nowrap; - vertical-align: middle; - background-color: #777777; - border-radius: 10px; -} -.badge:empty { - display: none; -} -.btn .badge { - position: relative; - top: -1px; -} -.btn-xs .badge, -.btn-group-xs > .btn .badge { - top: 0; - padding: 1px 5px; -} -a.badge:hover, -a.badge:focus { - color: #fff; - text-decoration: none; - cursor: pointer; -} -.list-group-item.active > .badge, -.nav-pills > .active > a > .badge { - color: #337ab7; - background-color: #fff; -} -.list-group-item > .badge { - float: right; -} -.list-group-item > .badge + .badge { - margin-right: 5px; -} -.nav-pills > li > a > .badge { - margin-left: 3px; -} -.jumbotron { - padding-top: 30px; - padding-bottom: 30px; - margin-bottom: 30px; - color: inherit; - background-color: #eeeeee; -} -.jumbotron h1, -.jumbotron .h1 { - color: inherit; -} -.jumbotron p { - margin-bottom: 15px; - font-size: 21px; - font-weight: 200; -} -.jumbotron > hr { - border-top-color: #d5d5d5; -} -.container .jumbotron, -.container-fluid .jumbotron { - padding-right: 15px; - padding-left: 15px; - border-radius: 6px; -} -.jumbotron .container { - max-width: 100%; -} -@media screen and (min-width: 768px) { - .jumbotron { - padding-top: 48px; - 
padding-bottom: 48px; - } - .container .jumbotron, - .container-fluid .jumbotron { - padding-right: 60px; - padding-left: 60px; - } - .jumbotron h1, - .jumbotron .h1 { - font-size: 63px; - } -} -.thumbnail { - display: block; - padding: 4px; - margin-bottom: 20px; - line-height: 1.42857143; - background-color: #fff; - border: 1px solid #ddd; - border-radius: 4px; - -webkit-transition: border 0.2s ease-in-out; - -o-transition: border 0.2s ease-in-out; - transition: border 0.2s ease-in-out; -} -.thumbnail > img, -.thumbnail a > img { - margin-right: auto; - margin-left: auto; -} -a.thumbnail:hover, -a.thumbnail:focus, -a.thumbnail.active { - border-color: #337ab7; -} -.thumbnail .caption { - padding: 9px; - color: #333333; -} -.alert { - padding: 15px; - margin-bottom: 20px; - border: 1px solid transparent; - border-radius: 4px; -} -.alert h4 { - margin-top: 0; - color: inherit; -} -.alert .alert-link { - font-weight: bold; -} -.alert > p, -.alert > ul { - margin-bottom: 0; -} -.alert > p + p { - margin-top: 5px; -} -.alert-dismissable, -.alert-dismissible { - padding-right: 35px; -} -.alert-dismissable .close, -.alert-dismissible .close { - position: relative; - top: -2px; - right: -21px; - color: inherit; -} -.alert-success { - color: #3c763d; - background-color: #dff0d8; - border-color: #d6e9c6; -} -.alert-success hr { - border-top-color: #c9e2b3; -} -.alert-success .alert-link { - color: #2b542c; -} -.alert-info { - color: #31708f; - background-color: #d9edf7; - border-color: #bce8f1; -} -.alert-info hr { - border-top-color: #a6e1ec; -} -.alert-info .alert-link { - color: #245269; -} -.alert-warning { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #faebcc; -} -.alert-warning hr { - border-top-color: #f7e1b5; -} -.alert-warning .alert-link { - color: #66512c; -} -.alert-danger { - color: #a94442; - background-color: #f2dede; - border-color: #ebccd1; -} -.alert-danger hr { - border-top-color: #e4b9c0; -} -.alert-danger .alert-link { - color: #843534; -} -@-webkit-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -@-o-keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -@keyframes progress-bar-stripes { - from { - background-position: 40px 0; - } - to { - background-position: 0 0; - } -} -.progress { - height: 20px; - margin-bottom: 20px; - overflow: hidden; - background-color: #f5f5f5; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); - box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1); -} -.progress-bar { - float: left; - width: 0%; - height: 100%; - font-size: 12px; - line-height: 20px; - color: #fff; - text-align: center; - background-color: #337ab7; - -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15); - -webkit-transition: width 0.6s ease; - -o-transition: width 0.6s ease; - transition: width 0.6s ease; -} -.progress-striped .progress-bar, -.progress-bar-striped { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 
25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - -webkit-background-size: 40px 40px; - background-size: 40px 40px; -} -.progress.active .progress-bar, -.progress-bar.active { - -webkit-animation: progress-bar-stripes 2s linear infinite; - -o-animation: progress-bar-stripes 2s linear infinite; - animation: progress-bar-stripes 2s linear infinite; -} -.progress-bar-success { - background-color: #5cb85c; -} -.progress-striped .progress-bar-success { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} -.progress-bar-info { - background-color: #5bc0de; -} -.progress-striped .progress-bar-info { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} -.progress-bar-warning { - background-color: #f0ad4e; -} -.progress-striped .progress-bar-warning { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} -.progress-bar-danger { - background-color: #d9534f; -} -.progress-striped .progress-bar-danger { - background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); - background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent); -} -.media { - margin-top: 15px; -} -.media:first-child { - margin-top: 0; -} -.media, -.media-body { - overflow: hidden; - zoom: 1; -} -.media-body { - width: 10000px; -} -.media-object { - display: block; -} -.media-object.img-thumbnail { - max-width: none; -} -.media-right, 
-.media > .pull-right { - padding-left: 10px; -} -.media-left, -.media > .pull-left { - padding-right: 10px; -} -.media-left, -.media-right, -.media-body { - display: table-cell; - vertical-align: top; -} -.media-middle { - vertical-align: middle; -} -.media-bottom { - vertical-align: bottom; -} -.media-heading { - margin-top: 0; - margin-bottom: 5px; -} -.media-list { - padding-left: 0; - list-style: none; -} -.list-group { - padding-left: 0; - margin-bottom: 20px; -} -.list-group-item { - position: relative; - display: block; - padding: 10px 15px; - margin-bottom: -1px; - background-color: #fff; - border: 1px solid #ddd; -} -.list-group-item:first-child { - border-top-left-radius: 4px; - border-top-right-radius: 4px; -} -.list-group-item:last-child { - margin-bottom: 0; - border-bottom-right-radius: 4px; - border-bottom-left-radius: 4px; -} -.list-group-item.disabled, -.list-group-item.disabled:hover, -.list-group-item.disabled:focus { - color: #777777; - cursor: not-allowed; - background-color: #eeeeee; -} -.list-group-item.disabled .list-group-item-heading, -.list-group-item.disabled:hover .list-group-item-heading, -.list-group-item.disabled:focus .list-group-item-heading { - color: inherit; -} -.list-group-item.disabled .list-group-item-text, -.list-group-item.disabled:hover .list-group-item-text, -.list-group-item.disabled:focus .list-group-item-text { - color: #777777; -} -.list-group-item.active, -.list-group-item.active:hover, -.list-group-item.active:focus { - z-index: 2; - color: #fff; - background-color: #337ab7; - border-color: #337ab7; -} -.list-group-item.active .list-group-item-heading, -.list-group-item.active:hover .list-group-item-heading, -.list-group-item.active:focus .list-group-item-heading, -.list-group-item.active .list-group-item-heading > small, -.list-group-item.active:hover .list-group-item-heading > small, -.list-group-item.active:focus .list-group-item-heading > small, -.list-group-item.active .list-group-item-heading > .small, -.list-group-item.active:hover .list-group-item-heading > .small, -.list-group-item.active:focus .list-group-item-heading > .small { - color: inherit; -} -.list-group-item.active .list-group-item-text, -.list-group-item.active:hover .list-group-item-text, -.list-group-item.active:focus .list-group-item-text { - color: #c7ddef; -} -a.list-group-item, -button.list-group-item { - color: #555; -} -a.list-group-item .list-group-item-heading, -button.list-group-item .list-group-item-heading { - color: #333; -} -a.list-group-item:hover, -button.list-group-item:hover, -a.list-group-item:focus, -button.list-group-item:focus { - color: #555; - text-decoration: none; - background-color: #f5f5f5; -} -button.list-group-item { - width: 100%; - text-align: left; -} -.list-group-item-success { - color: #3c763d; - background-color: #dff0d8; -} -a.list-group-item-success, -button.list-group-item-success { - color: #3c763d; -} -a.list-group-item-success .list-group-item-heading, -button.list-group-item-success .list-group-item-heading { - color: inherit; -} -a.list-group-item-success:hover, -button.list-group-item-success:hover, -a.list-group-item-success:focus, -button.list-group-item-success:focus { - color: #3c763d; - background-color: #d0e9c6; -} -a.list-group-item-success.active, -button.list-group-item-success.active, -a.list-group-item-success.active:hover, -button.list-group-item-success.active:hover, -a.list-group-item-success.active:focus, -button.list-group-item-success.active:focus { - color: #fff; - background-color: #3c763d; - border-color: 
#3c763d; -} -.list-group-item-info { - color: #31708f; - background-color: #d9edf7; -} -a.list-group-item-info, -button.list-group-item-info { - color: #31708f; -} -a.list-group-item-info .list-group-item-heading, -button.list-group-item-info .list-group-item-heading { - color: inherit; -} -a.list-group-item-info:hover, -button.list-group-item-info:hover, -a.list-group-item-info:focus, -button.list-group-item-info:focus { - color: #31708f; - background-color: #c4e3f3; -} -a.list-group-item-info.active, -button.list-group-item-info.active, -a.list-group-item-info.active:hover, -button.list-group-item-info.active:hover, -a.list-group-item-info.active:focus, -button.list-group-item-info.active:focus { - color: #fff; - background-color: #31708f; - border-color: #31708f; -} -.list-group-item-warning { - color: #8a6d3b; - background-color: #fcf8e3; -} -a.list-group-item-warning, -button.list-group-item-warning { - color: #8a6d3b; -} -a.list-group-item-warning .list-group-item-heading, -button.list-group-item-warning .list-group-item-heading { - color: inherit; -} -a.list-group-item-warning:hover, -button.list-group-item-warning:hover, -a.list-group-item-warning:focus, -button.list-group-item-warning:focus { - color: #8a6d3b; - background-color: #faf2cc; -} -a.list-group-item-warning.active, -button.list-group-item-warning.active, -a.list-group-item-warning.active:hover, -button.list-group-item-warning.active:hover, -a.list-group-item-warning.active:focus, -button.list-group-item-warning.active:focus { - color: #fff; - background-color: #8a6d3b; - border-color: #8a6d3b; -} -.list-group-item-danger { - color: #a94442; - background-color: #f2dede; -} -a.list-group-item-danger, -button.list-group-item-danger { - color: #a94442; -} -a.list-group-item-danger .list-group-item-heading, -button.list-group-item-danger .list-group-item-heading { - color: inherit; -} -a.list-group-item-danger:hover, -button.list-group-item-danger:hover, -a.list-group-item-danger:focus, -button.list-group-item-danger:focus { - color: #a94442; - background-color: #ebcccc; -} -a.list-group-item-danger.active, -button.list-group-item-danger.active, -a.list-group-item-danger.active:hover, -button.list-group-item-danger.active:hover, -a.list-group-item-danger.active:focus, -button.list-group-item-danger.active:focus { - color: #fff; - background-color: #a94442; - border-color: #a94442; -} -.list-group-item-heading { - margin-top: 0; - margin-bottom: 5px; -} -.list-group-item-text { - margin-bottom: 0; - line-height: 1.3; -} -.panel { - margin-bottom: 20px; - background-color: #fff; - border: 1px solid transparent; - border-radius: 4px; - -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05); -} -.panel-body { - padding: 15px; -} -.panel-heading { - padding: 10px 15px; - border-bottom: 1px solid transparent; - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel-heading > .dropdown .dropdown-toggle { - color: inherit; -} -.panel-title { - margin-top: 0; - margin-bottom: 0; - font-size: 16px; - color: inherit; -} -.panel-title > a, -.panel-title > small, -.panel-title > .small, -.panel-title > small > a, -.panel-title > .small > a { - color: inherit; -} -.panel-footer { - padding: 10px 15px; - background-color: #f5f5f5; - border-top: 1px solid #ddd; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .list-group, -.panel > .panel-collapse > .list-group { - margin-bottom: 0; -} -.panel > .list-group .list-group-item, -.panel > .panel-collapse > 
.list-group .list-group-item { - border-width: 1px 0; - border-radius: 0; -} -.panel > .list-group:first-child .list-group-item:first-child, -.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child { - border-top: 0; - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .list-group:last-child .list-group-item:last-child, -.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child { - border-bottom: 0; - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child { - border-top-left-radius: 0; - border-top-right-radius: 0; -} -.panel-heading + .list-group .list-group-item:first-child { - border-top-width: 0; -} -.list-group + .panel-footer { - border-top-width: 0; -} -.panel > .table, -.panel > .table-responsive > .table, -.panel > .panel-collapse > .table { - margin-bottom: 0; -} -.panel > .table caption, -.panel > .table-responsive > .table caption, -.panel > .panel-collapse > .table caption { - padding-right: 15px; - padding-left: 15px; -} -.panel > .table:first-child, -.panel > .table-responsive:first-child > .table:first-child { - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child { - border-top-left-radius: 3px; - border-top-right-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child td:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child, -.panel > .table:first-child > thead:first-child > tr:first-child th:first-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child, -.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child { - border-top-left-radius: 3px; -} -.panel > .table:first-child > thead:first-child > tr:first-child td:last-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child, -.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child, -.panel > .table:first-child > thead:first-child > tr:first-child th:last-child, -.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child, -.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child, -.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child { - border-top-right-radius: 3px; -} -.panel > .table:last-child, -.panel > .table-responsive:last-child > .table:last-child { - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child, -.panel > 
.table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child { - border-bottom-right-radius: 3px; - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child, -.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child { - border-bottom-left-radius: 3px; -} -.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child, -.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child, -.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child, -.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child, -.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child { - border-bottom-right-radius: 3px; -} -.panel > .panel-body + .table, -.panel > .panel-body + .table-responsive, -.panel > .table + .panel-body, -.panel > .table-responsive + .panel-body { - border-top: 1px solid #ddd; -} -.panel > .table > tbody:first-child > tr:first-child th, -.panel > .table > tbody:first-child > tr:first-child td { - border-top: 0; -} -.panel > .table-bordered, -.panel > .table-responsive > .table-bordered { - border: 0; -} -.panel > .table-bordered > thead > tr > th:first-child, -.panel > .table-responsive > .table-bordered > thead > tr > th:first-child, -.panel > .table-bordered > tbody > tr > th:first-child, -.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child, -.panel > .table-bordered > tfoot > tr > th:first-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child, -.panel > .table-bordered > thead > tr > td:first-child, -.panel > .table-responsive > .table-bordered > thead > tr > td:first-child, -.panel > .table-bordered > tbody > tr > td:first-child, -.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child, -.panel > .table-bordered > tfoot > tr > td:first-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child { - border-left: 0; -} -.panel > .table-bordered > thead > tr > th:last-child, -.panel > .table-responsive > .table-bordered > thead > tr > th:last-child, -.panel > .table-bordered > tbody > tr > th:last-child, -.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child, -.panel > .table-bordered > tfoot > tr > th:last-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child, -.panel > 
.table-bordered > thead > tr > td:last-child, -.panel > .table-responsive > .table-bordered > thead > tr > td:last-child, -.panel > .table-bordered > tbody > tr > td:last-child, -.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child, -.panel > .table-bordered > tfoot > tr > td:last-child, -.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child { - border-right: 0; -} -.panel > .table-bordered > thead > tr:first-child > td, -.panel > .table-responsive > .table-bordered > thead > tr:first-child > td, -.panel > .table-bordered > tbody > tr:first-child > td, -.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td, -.panel > .table-bordered > thead > tr:first-child > th, -.panel > .table-responsive > .table-bordered > thead > tr:first-child > th, -.panel > .table-bordered > tbody > tr:first-child > th, -.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th { - border-bottom: 0; -} -.panel > .table-bordered > tbody > tr:last-child > td, -.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td, -.panel > .table-bordered > tfoot > tr:last-child > td, -.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td, -.panel > .table-bordered > tbody > tr:last-child > th, -.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th, -.panel > .table-bordered > tfoot > tr:last-child > th, -.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th { - border-bottom: 0; -} -.panel > .table-responsive { - margin-bottom: 0; - border: 0; -} -.panel-group { - margin-bottom: 20px; -} -.panel-group .panel { - margin-bottom: 0; - border-radius: 4px; -} -.panel-group .panel + .panel { - margin-top: 5px; -} -.panel-group .panel-heading { - border-bottom: 0; -} -.panel-group .panel-heading + .panel-collapse > .panel-body, -.panel-group .panel-heading + .panel-collapse > .list-group { - border-top: 1px solid #ddd; -} -.panel-group .panel-footer { - border-top: 0; -} -.panel-group .panel-footer + .panel-collapse .panel-body { - border-bottom: 1px solid #ddd; -} -.panel-default { - border-color: #ddd; -} -.panel-default > .panel-heading { - color: #333333; - background-color: #f5f5f5; - border-color: #ddd; -} -.panel-default > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #ddd; -} -.panel-default > .panel-heading .badge { - color: #f5f5f5; - background-color: #333333; -} -.panel-default > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #ddd; -} -.panel-primary { - border-color: #337ab7; -} -.panel-primary > .panel-heading { - color: #fff; - background-color: #337ab7; - border-color: #337ab7; -} -.panel-primary > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #337ab7; -} -.panel-primary > .panel-heading .badge { - color: #337ab7; - background-color: #fff; -} -.panel-primary > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #337ab7; -} -.panel-success { - border-color: #d6e9c6; -} -.panel-success > .panel-heading { - color: #3c763d; - background-color: #dff0d8; - border-color: #d6e9c6; -} -.panel-success > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #d6e9c6; -} -.panel-success > .panel-heading .badge { - color: #dff0d8; - background-color: #3c763d; -} -.panel-success > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #d6e9c6; -} -.panel-info { - border-color: #bce8f1; -} -.panel-info > .panel-heading { - color: #31708f; - background-color: 
#d9edf7; - border-color: #bce8f1; -} -.panel-info > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #bce8f1; -} -.panel-info > .panel-heading .badge { - color: #d9edf7; - background-color: #31708f; -} -.panel-info > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #bce8f1; -} -.panel-warning { - border-color: #faebcc; -} -.panel-warning > .panel-heading { - color: #8a6d3b; - background-color: #fcf8e3; - border-color: #faebcc; -} -.panel-warning > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #faebcc; -} -.panel-warning > .panel-heading .badge { - color: #fcf8e3; - background-color: #8a6d3b; -} -.panel-warning > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #faebcc; -} -.panel-danger { - border-color: #ebccd1; -} -.panel-danger > .panel-heading { - color: #a94442; - background-color: #f2dede; - border-color: #ebccd1; -} -.panel-danger > .panel-heading + .panel-collapse > .panel-body { - border-top-color: #ebccd1; -} -.panel-danger > .panel-heading .badge { - color: #f2dede; - background-color: #a94442; -} -.panel-danger > .panel-footer + .panel-collapse > .panel-body { - border-bottom-color: #ebccd1; -} -.embed-responsive { - position: relative; - display: block; - height: 0; - padding: 0; - overflow: hidden; -} -.embed-responsive .embed-responsive-item, -.embed-responsive iframe, -.embed-responsive embed, -.embed-responsive object, -.embed-responsive video { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 100%; - height: 100%; - border: 0; -} -.embed-responsive-16by9 { - padding-bottom: 56.25%; -} -.embed-responsive-4by3 { - padding-bottom: 75%; -} -.well { - min-height: 20px; - padding: 19px; - margin-bottom: 20px; - background-color: #f5f5f5; - border: 1px solid #e3e3e3; - border-radius: 4px; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05); -} -.well blockquote { - border-color: #ddd; - border-color: rgba(0, 0, 0, 0.15); -} -.well-lg { - padding: 24px; - border-radius: 6px; -} -.well-sm { - padding: 9px; - border-radius: 3px; -} -.close { - float: right; - font-size: 21px; - font-weight: bold; - line-height: 1; - color: #000; - text-shadow: 0 1px 0 #fff; - filter: alpha(opacity=20); - opacity: 0.2; -} -.close:hover, -.close:focus { - color: #000; - text-decoration: none; - cursor: pointer; - filter: alpha(opacity=50); - opacity: 0.5; -} -button.close { - padding: 0; - cursor: pointer; - background: transparent; - border: 0; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; -} -.modal-open { - overflow: hidden; -} -.modal { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1050; - display: none; - overflow: hidden; - -webkit-overflow-scrolling: touch; - outline: 0; -} -.modal.fade .modal-dialog { - -webkit-transform: translate(0, -25%); - -ms-transform: translate(0, -25%); - -o-transform: translate(0, -25%); - transform: translate(0, -25%); - -webkit-transition: -webkit-transform 0.3s ease-out; - -o-transition: -o-transform 0.3s ease-out; - transition: -webkit-transform 0.3s ease-out; - transition: transform 0.3s ease-out; - transition: transform 0.3s ease-out, -webkit-transform 0.3s ease-out, -o-transform 0.3s ease-out; -} -.modal.in .modal-dialog { - -webkit-transform: translate(0, 0); - -ms-transform: translate(0, 0); - -o-transform: translate(0, 0); - transform: translate(0, 0); -} -.modal-open .modal { - overflow-x: hidden; - overflow-y: auto; -} -.modal-dialog { - 
position: relative; - width: auto; - margin: 10px; -} -.modal-content { - position: relative; - background-color: #fff; - background-clip: padding-box; - border: 1px solid #999; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5); - outline: 0; -} -.modal-backdrop { - position: fixed; - top: 0; - right: 0; - bottom: 0; - left: 0; - z-index: 1040; - background-color: #000; -} -.modal-backdrop.fade { - filter: alpha(opacity=0); - opacity: 0; -} -.modal-backdrop.in { - filter: alpha(opacity=50); - opacity: 0.5; -} -.modal-header { - padding: 15px; - border-bottom: 1px solid #e5e5e5; -} -.modal-header .close { - margin-top: -2px; -} -.modal-title { - margin: 0; - line-height: 1.42857143; -} -.modal-body { - position: relative; - padding: 15px; -} -.modal-footer { - padding: 15px; - text-align: right; - border-top: 1px solid #e5e5e5; -} -.modal-footer .btn + .btn { - margin-bottom: 0; - margin-left: 5px; -} -.modal-footer .btn-group .btn + .btn { - margin-left: -1px; -} -.modal-footer .btn-block + .btn-block { - margin-left: 0; -} -.modal-scrollbar-measure { - position: absolute; - top: -9999px; - width: 50px; - height: 50px; - overflow: scroll; -} -@media (min-width: 768px) { - .modal-dialog { - width: 600px; - margin: 30px auto; - } - .modal-content { - -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); - } - .modal-sm { - width: 300px; - } -} -@media (min-width: 992px) { - .modal-lg { - width: 900px; - } -} -.tooltip { - position: absolute; - z-index: 1070; - display: block; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-style: normal; - font-weight: 400; - line-height: 1.42857143; - line-break: auto; - text-align: left; - text-align: start; - text-decoration: none; - text-shadow: none; - text-transform: none; - letter-spacing: normal; - word-break: normal; - word-spacing: normal; - word-wrap: normal; - white-space: normal; - font-size: 12px; - filter: alpha(opacity=0); - opacity: 0; -} -.tooltip.in { - filter: alpha(opacity=90); - opacity: 0.9; -} -.tooltip.top { - padding: 5px 0; - margin-top: -3px; -} -.tooltip.right { - padding: 0 5px; - margin-left: 3px; -} -.tooltip.bottom { - padding: 5px 0; - margin-top: 3px; -} -.tooltip.left { - padding: 0 5px; - margin-left: -3px; -} -.tooltip.top .tooltip-arrow { - bottom: 0; - left: 50%; - margin-left: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.top-left .tooltip-arrow { - right: 5px; - bottom: 0; - margin-bottom: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.top-right .tooltip-arrow { - bottom: 0; - left: 5px; - margin-bottom: -5px; - border-width: 5px 5px 0; - border-top-color: #000; -} -.tooltip.right .tooltip-arrow { - top: 50%; - left: 0; - margin-top: -5px; - border-width: 5px 5px 5px 0; - border-right-color: #000; -} -.tooltip.left .tooltip-arrow { - top: 50%; - right: 0; - margin-top: -5px; - border-width: 5px 0 5px 5px; - border-left-color: #000; -} -.tooltip.bottom .tooltip-arrow { - top: 0; - left: 50%; - margin-left: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.tooltip.bottom-left .tooltip-arrow { - top: 0; - right: 5px; - margin-top: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.tooltip.bottom-right .tooltip-arrow { - top: 0; - left: 5px; - margin-top: -5px; - border-width: 0 5px 5px; - border-bottom-color: #000; -} -.tooltip-inner { - max-width: 200px; - padding: 3px 
8px; - color: #fff; - text-align: center; - background-color: #000; - border-radius: 4px; -} -.tooltip-arrow { - position: absolute; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} -.popover { - position: absolute; - top: 0; - left: 0; - z-index: 1060; - display: none; - max-width: 276px; - padding: 1px; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-style: normal; - font-weight: 400; - line-height: 1.42857143; - line-break: auto; - text-align: left; - text-align: start; - text-decoration: none; - text-shadow: none; - text-transform: none; - letter-spacing: normal; - word-break: normal; - word-spacing: normal; - word-wrap: normal; - white-space: normal; - font-size: 14px; - background-color: #fff; - background-clip: padding-box; - border: 1px solid #ccc; - border: 1px solid rgba(0, 0, 0, 0.2); - border-radius: 6px; - -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); - box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); -} -.popover.top { - margin-top: -10px; -} -.popover.right { - margin-left: 10px; -} -.popover.bottom { - margin-top: 10px; -} -.popover.left { - margin-left: -10px; -} -.popover > .arrow { - border-width: 11px; -} -.popover > .arrow, -.popover > .arrow:after { - position: absolute; - display: block; - width: 0; - height: 0; - border-color: transparent; - border-style: solid; -} -.popover > .arrow:after { - content: ""; - border-width: 10px; -} -.popover.top > .arrow { - bottom: -11px; - left: 50%; - margin-left: -11px; - border-top-color: #999999; - border-top-color: rgba(0, 0, 0, 0.25); - border-bottom-width: 0; -} -.popover.top > .arrow:after { - bottom: 1px; - margin-left: -10px; - content: " "; - border-top-color: #fff; - border-bottom-width: 0; -} -.popover.right > .arrow { - top: 50%; - left: -11px; - margin-top: -11px; - border-right-color: #999999; - border-right-color: rgba(0, 0, 0, 0.25); - border-left-width: 0; -} -.popover.right > .arrow:after { - bottom: -10px; - left: 1px; - content: " "; - border-right-color: #fff; - border-left-width: 0; -} -.popover.bottom > .arrow { - top: -11px; - left: 50%; - margin-left: -11px; - border-top-width: 0; - border-bottom-color: #999999; - border-bottom-color: rgba(0, 0, 0, 0.25); -} -.popover.bottom > .arrow:after { - top: 1px; - margin-left: -10px; - content: " "; - border-top-width: 0; - border-bottom-color: #fff; -} -.popover.left > .arrow { - top: 50%; - right: -11px; - margin-top: -11px; - border-right-width: 0; - border-left-color: #999999; - border-left-color: rgba(0, 0, 0, 0.25); -} -.popover.left > .arrow:after { - right: 1px; - bottom: -10px; - content: " "; - border-right-width: 0; - border-left-color: #fff; -} -.popover-title { - padding: 8px 14px; - margin: 0; - font-size: 14px; - background-color: #f7f7f7; - border-bottom: 1px solid #ebebeb; - border-radius: 5px 5px 0 0; -} -.popover-content { - padding: 9px 14px; -} -.carousel { - position: relative; -} -.carousel-inner { - position: relative; - width: 100%; - overflow: hidden; -} -.carousel-inner > .item { - position: relative; - display: none; - -webkit-transition: 0.6s ease-in-out left; - -o-transition: 0.6s ease-in-out left; - transition: 0.6s ease-in-out left; -} -.carousel-inner > .item > img, -.carousel-inner > .item > a > img { - line-height: 1; -} -@media all and (transform-3d), (-webkit-transform-3d) { - .carousel-inner > .item { - -webkit-transition: -webkit-transform 0.6s ease-in-out; - -o-transition: -o-transform 0.6s ease-in-out; - transition: -webkit-transform 0.6s ease-in-out; - transition: 
transform 0.6s ease-in-out; - transition: transform 0.6s ease-in-out, -webkit-transform 0.6s ease-in-out, -o-transform 0.6s ease-in-out; - -webkit-backface-visibility: hidden; - backface-visibility: hidden; - -webkit-perspective: 1000px; - perspective: 1000px; - } - .carousel-inner > .item.next, - .carousel-inner > .item.active.right { - -webkit-transform: translate3d(100%, 0, 0); - transform: translate3d(100%, 0, 0); - left: 0; - } - .carousel-inner > .item.prev, - .carousel-inner > .item.active.left { - -webkit-transform: translate3d(-100%, 0, 0); - transform: translate3d(-100%, 0, 0); - left: 0; - } - .carousel-inner > .item.next.left, - .carousel-inner > .item.prev.right, - .carousel-inner > .item.active { - -webkit-transform: translate3d(0, 0, 0); - transform: translate3d(0, 0, 0); - left: 0; - } -} -.carousel-inner > .active, -.carousel-inner > .next, -.carousel-inner > .prev { - display: block; -} -.carousel-inner > .active { - left: 0; -} -.carousel-inner > .next, -.carousel-inner > .prev { - position: absolute; - top: 0; - width: 100%; -} -.carousel-inner > .next { - left: 100%; -} -.carousel-inner > .prev { - left: -100%; -} -.carousel-inner > .next.left, -.carousel-inner > .prev.right { - left: 0; -} -.carousel-inner > .active.left { - left: -100%; -} -.carousel-inner > .active.right { - left: 100%; -} -.carousel-control { - position: absolute; - top: 0; - bottom: 0; - left: 0; - width: 15%; - font-size: 20px; - color: #fff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); - background-color: rgba(0, 0, 0, 0); - filter: alpha(opacity=50); - opacity: 0.5; -} -.carousel-control.left { - background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); - background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); - background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.5)), to(rgba(0, 0, 0, 0.0001))); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1); - background-repeat: repeat-x; -} -.carousel-control.right { - right: 0; - left: auto; - background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); - background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); - background-image: -webkit-gradient(linear, left top, right top, from(rgba(0, 0, 0, 0.0001)), to(rgba(0, 0, 0, 0.5))); - background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%); - filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1); - background-repeat: repeat-x; -} -.carousel-control:hover, -.carousel-control:focus { - color: #fff; - text-decoration: none; - outline: 0; - filter: alpha(opacity=90); - opacity: 0.9; -} -.carousel-control .icon-prev, -.carousel-control .icon-next, -.carousel-control .glyphicon-chevron-left, -.carousel-control .glyphicon-chevron-right { - position: absolute; - top: 50%; - z-index: 5; - display: inline-block; - margin-top: -10px; -} -.carousel-control .icon-prev, -.carousel-control .glyphicon-chevron-left { - left: 50%; - margin-left: -10px; -} -.carousel-control .icon-next, -.carousel-control .glyphicon-chevron-right { - right: 50%; - margin-right: -10px; -} -.carousel-control .icon-prev, -.carousel-control .icon-next { - width: 20px; - 
height: 20px; - font-family: serif; - line-height: 1; -} -.carousel-control .icon-prev:before { - content: "\2039"; -} -.carousel-control .icon-next:before { - content: "\203a"; -} -.carousel-indicators { - position: absolute; - bottom: 10px; - left: 50%; - z-index: 15; - width: 60%; - padding-left: 0; - margin-left: -30%; - text-align: center; - list-style: none; -} -.carousel-indicators li { - display: inline-block; - width: 10px; - height: 10px; - margin: 1px; - text-indent: -999px; - cursor: pointer; - background-color: #000 \9; - background-color: rgba(0, 0, 0, 0); - border: 1px solid #fff; - border-radius: 10px; -} -.carousel-indicators .active { - width: 12px; - height: 12px; - margin: 0; - background-color: #fff; -} -.carousel-caption { - position: absolute; - right: 15%; - bottom: 20px; - left: 15%; - z-index: 10; - padding-top: 20px; - padding-bottom: 20px; - color: #fff; - text-align: center; - text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6); -} -.carousel-caption .btn { - text-shadow: none; -} -@media screen and (min-width: 768px) { - .carousel-control .glyphicon-chevron-left, - .carousel-control .glyphicon-chevron-right, - .carousel-control .icon-prev, - .carousel-control .icon-next { - width: 30px; - height: 30px; - margin-top: -10px; - font-size: 30px; - } - .carousel-control .glyphicon-chevron-left, - .carousel-control .icon-prev { - margin-left: -10px; - } - .carousel-control .glyphicon-chevron-right, - .carousel-control .icon-next { - margin-right: -10px; - } - .carousel-caption { - right: 20%; - left: 20%; - padding-bottom: 30px; - } - .carousel-indicators { - bottom: 20px; - } -} -.clearfix:before, -.clearfix:after, -.dl-horizontal dd:before, -.dl-horizontal dd:after, -.container:before, -.container:after, -.container-fluid:before, -.container-fluid:after, -.row:before, -.row:after, -.form-horizontal .form-group:before, -.form-horizontal .form-group:after, -.btn-toolbar:before, -.btn-toolbar:after, -.btn-group-vertical > .btn-group:before, -.btn-group-vertical > .btn-group:after, -.nav:before, -.nav:after, -.navbar:before, -.navbar:after, -.navbar-header:before, -.navbar-header:after, -.navbar-collapse:before, -.navbar-collapse:after, -.pager:before, -.pager:after, -.panel-body:before, -.panel-body:after, -.modal-header:before, -.modal-header:after, -.modal-footer:before, -.modal-footer:after { - display: table; - content: " "; -} -.clearfix:after, -.dl-horizontal dd:after, -.container:after, -.container-fluid:after, -.row:after, -.form-horizontal .form-group:after, -.btn-toolbar:after, -.btn-group-vertical > .btn-group:after, -.nav:after, -.navbar:after, -.navbar-header:after, -.navbar-collapse:after, -.pager:after, -.panel-body:after, -.modal-header:after, -.modal-footer:after { - clear: both; -} -.center-block { - display: block; - margin-right: auto; - margin-left: auto; -} -.pull-right { - float: right !important; -} -.pull-left { - float: left !important; -} -.hide { - display: none !important; -} -.show { - display: block !important; -} -.invisible { - visibility: hidden; -} -.text-hide { - font: 0/0 a; - color: transparent; - text-shadow: none; - background-color: transparent; - border: 0; -} -.hidden { - display: none !important; -} -.affix { - position: fixed; -} -@-ms-viewport { - width: device-width; -} -.visible-xs, -.visible-sm, -.visible-md, -.visible-lg { - display: none !important; -} -.visible-xs-block, -.visible-xs-inline, -.visible-xs-inline-block, -.visible-sm-block, -.visible-sm-inline, -.visible-sm-inline-block, -.visible-md-block, -.visible-md-inline, 
-.visible-md-inline-block, -.visible-lg-block, -.visible-lg-inline, -.visible-lg-inline-block { - display: none !important; -} -@media (max-width: 767px) { - .visible-xs { - display: block !important; - } - table.visible-xs { - display: table !important; - } - tr.visible-xs { - display: table-row !important; - } - th.visible-xs, - td.visible-xs { - display: table-cell !important; - } -} -@media (max-width: 767px) { - .visible-xs-block { - display: block !important; - } -} -@media (max-width: 767px) { - .visible-xs-inline { - display: inline !important; - } -} -@media (max-width: 767px) { - .visible-xs-inline-block { - display: inline-block !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm { - display: block !important; - } - table.visible-sm { - display: table !important; - } - tr.visible-sm { - display: table-row !important; - } - th.visible-sm, - td.visible-sm { - display: table-cell !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-block { - display: block !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-inline { - display: inline !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .visible-sm-inline-block { - display: inline-block !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md { - display: block !important; - } - table.visible-md { - display: table !important; - } - tr.visible-md { - display: table-row !important; - } - th.visible-md, - td.visible-md { - display: table-cell !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-block { - display: block !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-inline { - display: inline !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .visible-md-inline-block { - display: inline-block !important; - } -} -@media (min-width: 1200px) { - .visible-lg { - display: block !important; - } - table.visible-lg { - display: table !important; - } - tr.visible-lg { - display: table-row !important; - } - th.visible-lg, - td.visible-lg { - display: table-cell !important; - } -} -@media (min-width: 1200px) { - .visible-lg-block { - display: block !important; - } -} -@media (min-width: 1200px) { - .visible-lg-inline { - display: inline !important; - } -} -@media (min-width: 1200px) { - .visible-lg-inline-block { - display: inline-block !important; - } -} -@media (max-width: 767px) { - .hidden-xs { - display: none !important; - } -} -@media (min-width: 768px) and (max-width: 991px) { - .hidden-sm { - display: none !important; - } -} -@media (min-width: 992px) and (max-width: 1199px) { - .hidden-md { - display: none !important; - } -} -@media (min-width: 1200px) { - .hidden-lg { - display: none !important; - } -} -.visible-print { - display: none !important; -} -@media print { - .visible-print { - display: block !important; - } - table.visible-print { - display: table !important; - } - tr.visible-print { - display: table-row !important; - } - th.visible-print, - td.visible-print { - display: table-cell !important; - } -} -.visible-print-block { - display: none !important; -} -@media print { - .visible-print-block { - display: block !important; - } -} -.visible-print-inline { - display: none !important; -} -@media print { - .visible-print-inline { - display: inline !important; - } -} -.visible-print-inline-block { - display: none !important; -} -@media print { - .visible-print-inline-block { - display: 
inline-block !important; - } -} -@media print { - .hidden-print { - display: none !important; - } -} -/*# sourceMappingURL=bootstrap.css.map */ \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map deleted file mode 100644 index caac3e61254..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1/css/bootstrap.css.map +++ /dev/null @@ -1 +0,0 @@ -{"version":3,"sources":["bootstrap.css","less/normalize.less","less/print.less","less/glyphicons.less","less/scaffolding.less","less/mixins/vendor-prefixes.less","less/mixins/tab-focus.less","less/mixins/image.less","less/type.less","less/mixins/text-emphasis.less","less/mixins/background-variant.less","less/mixins/text-overflow.less","less/code.less","less/grid.less","less/mixins/grid.less","less/mixins/grid-framework.less","less/tables.less","less/mixins/table-row.less","less/forms.less","less/mixins/forms.less","less/buttons.less","less/mixins/buttons.less","less/mixins/opacity.less","less/component-animations.less","less/dropdowns.less","less/mixins/nav-divider.less","less/mixins/reset-filter.less","less/button-groups.less","less/mixins/border-radius.less","less/input-groups.less","less/navs.less","less/navbar.less","less/mixins/nav-vertical-align.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/mixins/pagination.less","less/pager.less","less/labels.less","less/mixins/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/mixins/alerts.less","less/progress-bars.less","less/mixins/gradients.less","less/mixins/progress-bar.less","less/media.less","less/list-group.less","less/mixins/list-group.less","less/panels.less","less/mixins/panels.less","less/responsive-embed.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/mixins/reset-text.less","less/popovers.less","less/carousel.less","less/mixins/clearfix.less","less/mixins/center-block.less","less/mixins/hide-text.less","less/responsive-utilities.less","less/mixins/responsive-visibility.less"],"names":[],"mappings":"AAAA;;;;GAIG;AACH,4EAA4E;ACK5E;EACE,wBAAA;EACA,2BAAA;EACA,+BAAA;CDHD;ACUD;EACE,UAAA;CDRD;ACqBD;;;;;;;;;;;;;EAaE,eAAA;CDnBD;AC2BD;;;;EAIE,sBAAA;EACA,yBAAA;CDzBD;ACiCD;EACE,cAAA;EACA,UAAA;CD/BD;ACuCD;;EAEE,cAAA;CDrCD;AC+CD;EACE,8BAAA;CD7CD;ACqDD;;EAEE,WAAA;CDnDD;AC8DD;EACE,oBAAA;EACA,2BAAA;EACA,0CAAA;EAAA,uCAAA;EAAA,kCAAA;CD5DD;ACmED;;EAEE,kBAAA;CDjED;ACwED;EACE,mBAAA;CDtED;AC8ED;EACE,eAAA;EACA,iBAAA;CD5ED;ACmFD;EACE,iBAAA;EACA,YAAA;CDjFD;ACwFD;EACE,eAAA;CDtFD;AC6FD;;EAEE,eAAA;EACA,eAAA;EACA,mBAAA;EACA,yBAAA;CD3FD;AC8FD;EACE,YAAA;CD5FD;AC+FD;EACE,gBAAA;CD7FD;ACuGD;EACE,UAAA;CDrGD;AC4GD;EACE,iBAAA;CD1GD;ACoHD;EACE,iBAAA;CDlHD;ACyHD;EACE,gCAAA;EAAA,6BAAA;EAAA,wBAAA;EACA,UAAA;CDvHD;AC8HD;EACE,eAAA;CD5HD;ACmID;;;;EAIE,kCAAA;EACA,eAAA;CDjID;ACmJD;;;;;EAKE,eAAA;EACA,cAAA;EACA,UAAA;CDjJD;ACwJD;EACE,kBAAA;CDtJD;ACgKD;;EAEE,qBAAA;CD9JD;ACyKD;;;;EAIE,2BAAA;EACA,gBAAA;CDvKD;AC8KD;;EAEE,gBAAA;CD5KD;ACmLD;;EAEE,UAAA;EACA,WAAA;CDjLD;ACyLD;EACE,oBAAA;CDvLD;ACkMD;;EAEE,+BAAA;EAAA,4BAAA;EAAA,uBAAA;EACA,WAAA;CDhMD;ACyMD;;EAEE,aAAA;CDvMD;AC+MD;EACE,8BAAA;EACA,gCAAA;EAAA,6BAAA;EAAA,wBAAA;CD7MD;ACsND;;EAEE,yBAAA;CDpND;AC2ND;EACE,0BAAA;EACA,cAAA;EACA,+BAAA;CDzND;ACiOD;EACE,UAAA;EACA,WAAA;CD/ND;ACsOD;EACE,eAAA;CDpOD;AC4OD;EACE,kBAAA;CD1OD;ACoPD;EACE,0BAAA;EACA,kBAAA;CDlPD;ACqPD;;EAEE,WAAA;CDn
PD;AACD,qFAAqF;AEhLrF;EACE;;;IAGE,uBAAA;IACA,6BAAA;IACA,mCAAA;IACA,oCAAA;IAAA,4BAAA;GFkLD;EE/KD;;IAEE,2BAAA;GFiLD;EE9KD;IACE,6BAAA;GFgLD;EE7KD;IACE,8BAAA;GF+KD;EE1KD;;IAEE,YAAA;GF4KD;EEzKD;;IAEE,uBAAA;IACA,yBAAA;GF2KD;EExKD;IACE,4BAAA;GF0KD;EEvKD;;IAEE,yBAAA;GFyKD;EEtKD;IACE,2BAAA;GFwKD;EErKD;;;IAGE,WAAA;IACA,UAAA;GFuKD;EEpKD;;IAEE,wBAAA;GFsKD;EEhKD;IACE,cAAA;GFkKD;EEhKD;;IAGI,kCAAA;GFiKH;EE9JD;IACE,uBAAA;GFgKD;EE7JD;IACE,qCAAA;GF+JD;EEhKD;;IAKI,kCAAA;GF+JH;EE5JD;;IAGI,kCAAA;GF6JH;CACF;AGnPD;EACE,oCAAA;EACA,sDAAA;EACA,gYAAA;CHqPD;AG7OD;EACE,mBAAA;EACA,SAAA;EACA,sBAAA;EACA,oCAAA;EACA,mBAAA;EACA,iBAAA;EACA,eAAA;EACA,oCAAA;EACA,mCAAA;CH+OD;AG3OmC;EAAW,iBAAA;CH8O9C;AG7OmC;EAAW,iBAAA;CHgP9C;AG9OmC;;EAAW,iBAAA;CHkP9C;AGjPmC;EAAW,iBAAA;CHoP9C;AGnPmC;EAAW,iBAAA;CHsP9C;AGrPmC;EAAW,iBAAA;CHwP9C;AGvPmC;EAAW,iBAAA;CH0P9C;AGzPmC;EAAW,iBAAA;CH4P9C;AG3PmC;EAAW,iBAAA;CH8P9C;AG7PmC;EAAW,iBAAA;CHgQ9C;AG/PmC;EAAW,iBAAA;CHkQ9C;AGjQmC;EAAW,iBAAA;CHoQ9C;AGnQmC;EAAW,iBAAA;CHsQ9C;AGrQmC;EAAW,iBAAA;CHwQ9C;AGvQmC;EAAW,iBAAA;CH0Q9C;AGzQmC;EAAW,iBAAA;CH4Q9C;AG3QmC;EAAW,iBAAA;CH8Q9C;AG7QmC;EAAW,iBAAA;CHgR9C;AG/QmC;EAAW,iBAAA;CHkR9C;AGjRmC;EAAW,iBAAA;CHoR9C;AGnRmC;EAAW,iBAAA;CHsR9C;AGrRmC;EAAW,iBAAA;CHwR9C;AGvRmC;EAAW,iBAAA;CH0R9C;AGzRmC;EAAW,iBAAA;CH4R9C;AG3RmC;EAAW,iBAAA;CH8R9C;AG7RmC;EAAW,iBAAA;CHgS9C;AG/RmC;EAAW,iBAAA;CHkS9C;AGjSmC;EAAW,iBAAA;CHoS9C;AGnSmC;EAAW,iBAAA;CHsS9C;AGrSmC;EAAW,iBAAA;CHwS9C;AGvSmC;EAAW,iBAAA;CH0S9C;AGzSmC;EAAW,iBAAA;CH4S9C;AG3SmC;EAAW,iBAAA;CH8S9C;AG7SmC;EAAW,iBAAA;CHgT9C;AG/SmC;EAAW,iBAAA;CHkT9C;AGjTmC;EAAW,iBAAA;CHoT9C;AGnTmC;EAAW,iBAAA;CHsT9C;AGrTmC;EAAW,iBAAA;CHwT9C;AGvTmC;EAAW,iBAAA;CH0T9C;AGzTmC;EAAW,iBAAA;CH4T9C;AG3TmC;EAAW,iBAAA;CH8T9C;AG7TmC;EAAW,iBAAA;CHgU9C;AG/TmC;EAAW,iBAAA;CHkU9C;AGjUmC;EAAW,iBAAA;CHoU9C;AGnUmC;EAAW,iBAAA;CHsU9C;AGrUmC;EAAW,iBAAA;CHwU9C;AGvUmC;EAAW,iBAAA;CH0U9C;AGzUmC;EAAW,iBAAA;CH4U9C;AG3UmC;EAAW,iBAAA;CH8U9C;AG7UmC;EAAW,iBAAA;CHgV9C;AG/UmC;EAAW,iBAAA;CHkV9C;AGjVmC;EAAW,iBAAA;CHoV9C;AGnVmC;EAAW,iBAAA;CHsV9C;AGrVmC;EAAW,iBAAA;CHwV9C;AGvVmC;EAAW,iBAAA;CH0V9C;AGzVmC;EAAW,iBAAA;CH4V9C;AG3VmC;EAAW,iBAAA;CH8V9C;AG7VmC;EAAW,iBAAA;CHgW9C;AG/VmC;EAAW,iBAAA;CHkW9C;AGjWmC;EAAW,iBAAA;CHoW9C;AGnWmC;EAAW,iBAAA;CHsW9C;AGrWmC;EAAW,iBAAA;CHwW9C;AGvWmC;EAAW,iBAAA;CH0W9C;AGzWmC;EAAW,iBAAA;CH4W9C;AG3WmC;EAAW,iBAAA;CH8W9C;AG7WmC;EAAW,iBAAA;CHgX9C;AG/WmC;EAAW,iBAAA;CHkX9C;AGjXmC;EAAW,iBAAA;CHoX9C;AGnXmC;EAAW,iBAAA;CHsX9C;AGrXmC;EAAW,iBAAA;CHwX9C;AGvXmC;EAAW,iBAAA;CH0X9C;AGzXmC;EAAW,iBAAA;CH4X9C;AG3XmC;EAAW,iBAAA;CH8X9C;AG7XmC;EAAW,iBAAA;CHgY9C;AG/XmC;EAAW,iBAAA;CHkY9C;AGjYmC;EAAW,iBAAA;CHoY9C;AGnYmC;EAAW,iBAAA;CHsY9C;AGrYmC;EAAW,iBAAA;CHwY9C;AGvYmC;EAAW,iBAAA;CH0Y9C;AGzYmC;EAAW,iBAAA;CH4Y9C;AG3YmC;EAAW,iBAAA;CH8Y9C;AG7YmC;EAAW,iBAAA;CHgZ9C;AG/YmC;EAAW,iBAAA;CHkZ9C;AGjZmC;EAAW,iBAAA;CHoZ9C;AGnZmC;EAAW,iBAAA;CHsZ9C;AGrZmC;EAAW,iBAAA;CHwZ9C;AGvZmC;EAAW,iBAAA;CH0Z9C;AGzZmC;EAAW,iBAAA;CH4Z9C;AG3ZmC;EAAW,iBAAA;CH8Z9C;AG7ZmC;EAAW,iBAAA;CHga9C;AG/ZmC;EAAW,iBAAA;CHka9C;AGjamC;EAAW,iBAAA;CHoa9C;AGnamC;EAAW,iBAAA;CHsa9C;AGramC;EAAW,iBAAA;CHwa9C;AGvamC;EAAW,iBAAA;CH0a9C;AGzamC;EAAW,iBAAA;CH4a9C;AG3amC;EAAW,iBAAA;CH8a9C;AG7amC;EAAW,iBAAA;CHgb9C;AG/amC;EAAW,iBAAA;CHkb9C;AGjbmC;EAAW,iBAAA;CHob9C;AGnbmC;EAAW,iBAAA;CHsb9C;AGrbmC;EAAW,iBAAA;CHwb9C;AGvbmC;EAAW,iBAAA;CH0b9C;AGzbmC;EAAW,iBAAA;CH4b9C;AG3bmC;EAAW,iBAAA;CH8b9C;AG7bmC;EAAW,iBAAA;CHgc9C;AG/bmC;EAAW,iBAAA;CHkc9C;AGjcmC;EAAW,iBAAA;CHoc9C;AGncmC;EAAW,iBAAA;CHsc9C;AGrcmC;EAAW,iBAAA;CHwc9C;AGvcmC;EAAW,iBAAA;CH0c9C;AGzcmC;EAAW,iBAAA;CH4c9C;AG3cmC;EAAW,iBAAA;CH8c9C;AG7cmC;EAAW,iBAAA;CHgd9C;AG/cmC;EAAW,iBAAA;CHkd9C;AGjdmC;EAAW,iBAAA;CHod9C;AGndmC;EA
AW,iBAAA;CHsd9C;AGrdmC;EAAW,iBAAA;CHwd9C;AGvdmC;EAAW,iBAAA;CH0d9C;AGzdmC;EAAW,iBAAA;CH4d9C;AG3dmC;EAAW,iBAAA;CH8d9C;AG7dmC;EAAW,iBAAA;CHge9C;AG/dmC;EAAW,iBAAA;CHke9C;AGjemC;EAAW,iBAAA;CHoe9C;AGnemC;EAAW,iBAAA;CHse9C;AGremC;EAAW,iBAAA;CHwe9C;AGvemC;EAAW,iBAAA;CH0e9C;AGzemC;EAAW,iBAAA;CH4e9C;AG3emC;EAAW,iBAAA;CH8e9C;AG7emC;EAAW,iBAAA;CHgf9C;AG/emC;EAAW,iBAAA;CHkf9C;AGjfmC;EAAW,iBAAA;CHof9C;AGnfmC;EAAW,iBAAA;CHsf9C;AGrfmC;EAAW,iBAAA;CHwf9C;AGvfmC;EAAW,iBAAA;CH0f9C;AGzfmC;EAAW,iBAAA;CH4f9C;AG3fmC;EAAW,iBAAA;CH8f9C;AG7fmC;EAAW,iBAAA;CHggB9C;AG/fmC;EAAW,iBAAA;CHkgB9C;AGjgBmC;EAAW,iBAAA;CHogB9C;AGngBmC;EAAW,iBAAA;CHsgB9C;AGrgBmC;EAAW,iBAAA;CHwgB9C;AGvgBmC;EAAW,iBAAA;CH0gB9C;AGzgBmC;EAAW,iBAAA;CH4gB9C;AG3gBmC;EAAW,iBAAA;CH8gB9C;AG7gBmC;EAAW,iBAAA;CHghB9C;AG/gBmC;EAAW,iBAAA;CHkhB9C;AGjhBmC;EAAW,iBAAA;CHohB9C;AGnhBmC;EAAW,iBAAA;CHshB9C;AGrhBmC;EAAW,iBAAA;CHwhB9C;AGvhBmC;EAAW,iBAAA;CH0hB9C;AGzhBmC;EAAW,iBAAA;CH4hB9C;AG3hBmC;EAAW,iBAAA;CH8hB9C;AG7hBmC;EAAW,iBAAA;CHgiB9C;AG/hBmC;EAAW,iBAAA;CHkiB9C;AGjiBmC;EAAW,iBAAA;CHoiB9C;AGniBmC;EAAW,iBAAA;CHsiB9C;AGriBmC;EAAW,iBAAA;CHwiB9C;AGviBmC;EAAW,iBAAA;CH0iB9C;AGziBmC;EAAW,iBAAA;CH4iB9C;AG3iBmC;EAAW,iBAAA;CH8iB9C;AG7iBmC;EAAW,iBAAA;CHgjB9C;AG/iBmC;EAAW,iBAAA;CHkjB9C;AGjjBmC;EAAW,iBAAA;CHojB9C;AGnjBmC;EAAW,iBAAA;CHsjB9C;AGrjBmC;EAAW,iBAAA;CHwjB9C;AGvjBmC;EAAW,iBAAA;CH0jB9C;AGzjBmC;EAAW,iBAAA;CH4jB9C;AG3jBmC;EAAW,iBAAA;CH8jB9C;AG7jBmC;EAAW,iBAAA;CHgkB9C;AG/jBmC;EAAW,iBAAA;CHkkB9C;AGjkBmC;EAAW,iBAAA;CHokB9C;AGnkBmC;EAAW,iBAAA;CHskB9C;AGrkBmC;EAAW,iBAAA;CHwkB9C;AGvkBmC;EAAW,iBAAA;CH0kB9C;AGzkBmC;EAAW,iBAAA;CH4kB9C;AG3kBmC;EAAW,iBAAA;CH8kB9C;AG7kBmC;EAAW,iBAAA;CHglB9C;AG/kBmC;EAAW,iBAAA;CHklB9C;AGjlBmC;EAAW,iBAAA;CHolB9C;AGnlBmC;EAAW,iBAAA;CHslB9C;AGrlBmC;EAAW,iBAAA;CHwlB9C;AGvlBmC;EAAW,iBAAA;CH0lB9C;AGzlBmC;EAAW,iBAAA;CH4lB9C;AG3lBmC;EAAW,iBAAA;CH8lB9C;AG7lBmC;EAAW,iBAAA;CHgmB9C;AG/lBmC;EAAW,iBAAA;CHkmB9C;AGjmBmC;EAAW,iBAAA;CHomB9C;AGnmBmC;EAAW,iBAAA;CHsmB9C;AGrmBmC;EAAW,iBAAA;CHwmB9C;AGvmBmC;EAAW,iBAAA;CH0mB9C;AGzmBmC;EAAW,iBAAA;CH4mB9C;AG3mBmC;EAAW,iBAAA;CH8mB9C;AG7mBmC;EAAW,iBAAA;CHgnB9C;AG/mBmC;EAAW,iBAAA;CHknB9C;AGjnBmC;EAAW,iBAAA;CHonB9C;AGnnBmC;EAAW,iBAAA;CHsnB9C;AGrnBmC;EAAW,iBAAA;CHwnB9C;AGvnBmC;EAAW,iBAAA;CH0nB9C;AGznBmC;EAAW,iBAAA;CH4nB9C;AG3nBmC;EAAW,iBAAA;CH8nB9C;AG7nBmC;EAAW,iBAAA;CHgoB9C;AG/nBmC;EAAW,iBAAA;CHkoB9C;AGjoBmC;EAAW,iBAAA;CHooB9C;AGnoBmC;EAAW,iBAAA;CHsoB9C;AGroBmC;EAAW,iBAAA;CHwoB9C;AG/nBmC;EAAW,iBAAA;CHkoB9C;AGjoBmC;EAAW,iBAAA;CHooB9C;AGnoBmC;EAAW,iBAAA;CHsoB9C;AGroBmC;EAAW,iBAAA;CHwoB9C;AGvoBmC;EAAW,iBAAA;CH0oB9C;AGzoBmC;EAAW,iBAAA;CH4oB9C;AG3oBmC;EAAW,iBAAA;CH8oB9C;AG7oBmC;EAAW,iBAAA;CHgpB9C;AG/oBmC;EAAW,iBAAA;CHkpB9C;AGjpBmC;EAAW,iBAAA;CHopB9C;AGnpBmC;EAAW,iBAAA;CHspB9C;AGrpBmC;EAAW,iBAAA;CHwpB9C;AGvpBmC;EAAW,iBAAA;CH0pB9C;AGzpBmC;EAAW,iBAAA;CH4pB9C;AG3pBmC;EAAW,iBAAA;CH8pB9C;AG7pBmC;EAAW,iBAAA;CHgqB9C;AG/pBmC;EAAW,iBAAA;CHkqB9C;AGjqBmC;EAAW,iBAAA;CHoqB9C;AGnqBmC;EAAW,iBAAA;CHsqB9C;AGrqBmC;EAAW,iBAAA;CHwqB9C;AGvqBmC;EAAW,iBAAA;CH0qB9C;AGzqBmC;EAAW,iBAAA;CH4qB9C;AG3qBmC;EAAW,iBAAA;CH8qB9C;AG7qBmC;EAAW,iBAAA;CHgrB9C;AG/qBmC;EAAW,iBAAA;CHkrB9C;AGjrBmC;EAAW,iBAAA;CHorB9C;AGnrBmC;EAAW,iBAAA;CHsrB9C;AGrrBmC;EAAW,iBAAA;CHwrB9C;AGvrBmC;EAAW,iBAAA;CH0rB9C;AGzrBmC;EAAW,iBAAA;CH4rB9C;AG3rBmC;EAAW,iBAAA;CH8rB9C;AG7rBmC;EAAW,iBAAA;CHgsB9C;AG/rBmC;EAAW,iBAAA;CHksB9C;AGjsBmC;EAAW,iBAAA;CHosB9C;AGnsBmC;EAAW,iBAAA;CHssB9C;AGrsBmC;EAAW,iBAAA;CHwsB9C;AGvsBmC;EAAW,iBAAA;CH0sB9C;AGzsBmC;EAAW,iBAAA;CH4sB9C;AG3sBmC;EAAW,iBAAA;CH8sB9C;AG7sBmC;EAAW,iBAAA;CHgtB9C;AG/sBmC;EAAW,iBAAA;CHktB9C;AGjtBmC;EAAW,iBAAA;CHotB9C;AGntBmC;EAAW,iBAAA;CHstB9C;AGrtBmC;EAAW,iBAA
A;CHwtB9C;AGvtBmC;EAAW,iBAAA;CH0tB9C;AGztBmC;EAAW,iBAAA;CH4tB9C;AG3tBmC;EAAW,iBAAA;CH8tB9C;AG7tBmC;EAAW,iBAAA;CHguB9C;AG/tBmC;EAAW,iBAAA;CHkuB9C;AGjuBmC;EAAW,iBAAA;CHouB9C;AGnuBmC;EAAW,iBAAA;CHsuB9C;AGruBmC;EAAW,iBAAA;CHwuB9C;AGvuBmC;EAAW,iBAAA;CH0uB9C;AGzuBmC;EAAW,iBAAA;CH4uB9C;AG3uBmC;EAAW,iBAAA;CH8uB9C;AG7uBmC;EAAW,iBAAA;CHgvB9C;AIxhCD;ECkEE,+BAAA;EACG,4BAAA;EACK,uBAAA;CLy9BT;AI1hCD;;EC+DE,+BAAA;EACG,4BAAA;EACK,uBAAA;CL+9BT;AIxhCD;EACE,gBAAA;EACA,8CAAA;CJ0hCD;AIvhCD;EACE,4DAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,uBAAA;CJyhCD;AIrhCD;;;;EAIE,qBAAA;EACA,mBAAA;EACA,qBAAA;CJuhCD;AIjhCD;EACE,eAAA;EACA,sBAAA;CJmhCD;AIjhCC;;EAEE,eAAA;EACA,2BAAA;CJmhCH;AIhhCC;EEnDA,2CAAA;EACA,qBAAA;CNskCD;AIzgCD;EACE,UAAA;CJ2gCD;AIrgCD;EACE,uBAAA;CJugCD;AIngCD;;;;;EG1EE,eAAA;EACA,gBAAA;EACA,aAAA;CPolCD;AIvgCD;EACE,mBAAA;CJygCD;AIngCD;EACE,aAAA;EACA,wBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;EC+FA,yCAAA;EACK,oCAAA;EACG,iCAAA;EE5LR,sBAAA;EACA,gBAAA;EACA,aAAA;CPomCD;AIngCD;EACE,mBAAA;CJqgCD;AI//BD;EACE,iBAAA;EACA,oBAAA;EACA,UAAA;EACA,8BAAA;CJigCD;AIz/BD;EACE,mBAAA;EACA,WAAA;EACA,YAAA;EACA,WAAA;EACA,aAAA;EACA,iBAAA;EACA,uBAAA;EACA,UAAA;CJ2/BD;AIn/BC;;EAEE,iBAAA;EACA,YAAA;EACA,aAAA;EACA,UAAA;EACA,kBAAA;EACA,WAAA;CJq/BH;AI1+BD;EACE,gBAAA;CJ4+BD;AQjoCD;;;;;;;;;;;;EAEE,qBAAA;EACA,iBAAA;EACA,iBAAA;EACA,eAAA;CR6oCD;AQlpCD;;;;;;;;;;;;;;;;;;;;;;;;EASI,iBAAA;EACA,eAAA;EACA,eAAA;CRmqCH;AQ/pCD;;;;;;EAGE,iBAAA;EACA,oBAAA;CRoqCD;AQxqCD;;;;;;;;;;;;EAQI,eAAA;CR8qCH;AQ3qCD;;;;;;EAGE,iBAAA;EACA,oBAAA;CRgrCD;AQprCD;;;;;;;;;;;;EAQI,eAAA;CR0rCH;AQtrCD;;EAAU,gBAAA;CR0rCT;AQzrCD;;EAAU,gBAAA;CR6rCT;AQ5rCD;;EAAU,gBAAA;CRgsCT;AQ/rCD;;EAAU,gBAAA;CRmsCT;AQlsCD;;EAAU,gBAAA;CRssCT;AQrsCD;;EAAU,gBAAA;CRysCT;AQnsCD;EACE,iBAAA;CRqsCD;AQlsCD;EACE,oBAAA;EACA,gBAAA;EACA,iBAAA;EACA,iBAAA;CRosCD;AQlsCC;EAAA;IACE,gBAAA;GRqsCD;CACF;AQ7rCD;;EAEE,eAAA;CR+rCD;AQ5rCD;;EAEE,eAAA;EACA,0BAAA;CR8rCD;AQ1rCD;EAAuB,iBAAA;CR6rCtB;AQ5rCD;EAAuB,kBAAA;CR+rCtB;AQ9rCD;EAAuB,mBAAA;CRisCtB;AQhsCD;EAAuB,oBAAA;CRmsCtB;AQlsCD;EAAuB,oBAAA;CRqsCtB;AQlsCD;EAAuB,0BAAA;CRqsCtB;AQpsCD;EAAuB,0BAAA;CRusCtB;AQtsCD;EAAuB,2BAAA;CRysCtB;AQtsCD;EACE,eAAA;CRwsCD;AQtsCD;ECvGE,eAAA;CTgzCD;AS/yCC;;EAEE,eAAA;CTizCH;AQ1sCD;EC1GE,eAAA;CTuzCD;AStzCC;;EAEE,eAAA;CTwzCH;AQ9sCD;EC7GE,eAAA;CT8zCD;AS7zCC;;EAEE,eAAA;CT+zCH;AQltCD;EChHE,eAAA;CTq0CD;ASp0CC;;EAEE,eAAA;CTs0CH;AQttCD;ECnHE,eAAA;CT40CD;AS30CC;;EAEE,eAAA;CT60CH;AQttCD;EAGE,YAAA;EE7HA,0BAAA;CVo1CD;AUn1CC;;EAEE,0BAAA;CVq1CH;AQxtCD;EEhIE,0BAAA;CV21CD;AU11CC;;EAEE,0BAAA;CV41CH;AQ5tCD;EEnIE,0BAAA;CVk2CD;AUj2CC;;EAEE,0BAAA;CVm2CH;AQhuCD;EEtIE,0BAAA;CVy2CD;AUx2CC;;EAEE,0BAAA;CV02CH;AQpuCD;EEzIE,0BAAA;CVg3CD;AU/2CC;;EAEE,0BAAA;CVi3CH;AQnuCD;EACE,oBAAA;EACA,oBAAA;EACA,iCAAA;CRquCD;AQ7tCD;;EAEE,cAAA;EACA,oBAAA;CR+tCD;AQluCD;;;;EAMI,iBAAA;CRkuCH;AQ3tCD;EACE,gBAAA;EACA,iBAAA;CR6tCD;AQztCD;EALE,gBAAA;EACA,iBAAA;EAMA,kBAAA;CR4tCD;AQ9tCD;EAKI,sBAAA;EACA,mBAAA;EACA,kBAAA;CR4tCH;AQvtCD;EACE,cAAA;EACA,oBAAA;CRytCD;AQvtCD;;EAEE,wBAAA;CRytCD;AQvtCD;EACE,iBAAA;CRytCD;AQvtCD;EACE,eAAA;CRytCD;AQ5sCC;EAAA;IAEI,YAAA;IACA,aAAA;IACA,YAAA;IACA,kBAAA;IGxNJ,iBAAA;IACA,wBAAA;IACA,oBAAA;GXu6CC;EQttCD;IASI,mBAAA;GRgtCH;CACF;AQtsCD;;EAEE,aAAA;CRwsCD;AQrsCD;EACE,eAAA;EA9IqB,0BAAA;CRs1CtB;AQnsCD;EACE,mBAAA;EACA,iBAAA;EACA,kBAAA;EACA,+BAAA;CRqsCD;AQhsCG;;;EACE,iBAAA;CRosCL;AQ9sCD;;;EAmBI,eAAA;EACA,eAAA;EACA,wBAAA;EACA,eAAA;CRgsCH;AQ9rCG;;;EACE,uBAAA;CRksCL;AQ1rCD;;EAEE,oBAAA;EACA,gBAAA;EACA,kBAAA;EACA,gCAAA;EACA,eAAA;CR4rCD;AQtrCG;;;;;;EAAW,YAAA;CR8rCd;AQ7rCG;;;;;;EACE,uBAAA;CRosCL;AQ9rCD;EACE,oBAAA;EACA,mBAAA;EACA,wBAAA;CRgsCD;AYx+CD;;;;EAIE,+DAAA;CZ0+CD;AYt+CD;EACE,iBAAA;EACA,eAAA;
EACA,eAAA;EACA,0BAAA;EACA,mBAAA;CZw+CD;AYp+CD;EACE,iBAAA;EACA,eAAA;EACA,YAAA;EACA,uBAAA;EACA,mBAAA;EACA,uDAAA;EAAA,+CAAA;CZs+CD;AY5+CD;EASI,WAAA;EACA,gBAAA;EACA,iBAAA;EACA,yBAAA;EAAA,iBAAA;CZs+CH;AYj+CD;EACE,eAAA;EACA,eAAA;EACA,iBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,sBAAA;EACA,sBAAA;EACA,0BAAA;EACA,uBAAA;EACA,mBAAA;CZm+CD;AY9+CD;EAeI,WAAA;EACA,mBAAA;EACA,eAAA;EACA,sBAAA;EACA,8BAAA;EACA,iBAAA;CZk+CH;AY79CD;EACE,kBAAA;EACA,mBAAA;CZ+9CD;AazhDD;ECHE,oBAAA;EACA,mBAAA;EACA,mBAAA;EACA,kBAAA;Cd+hDD;Aa5hDC;EAAA;IACE,aAAA;Gb+hDD;CACF;Aa9hDC;EAAA;IACE,aAAA;GbiiDD;CACF;AahiDC;EAAA;IACE,cAAA;GbmiDD;CACF;Aa1hDD;ECvBE,oBAAA;EACA,mBAAA;EACA,mBAAA;EACA,kBAAA;CdojDD;AavhDD;ECvBE,oBAAA;EACA,mBAAA;CdijDD;AavhDD;EACE,gBAAA;EACA,eAAA;CbyhDD;Aa3hDD;EAKI,iBAAA;EACA,gBAAA;CbyhDH;AczkDA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;ECiBK,mBAAA;EAEA,gBAAA;EAEA,oBAAA;EACA,mBAAA;CfwmDL;Ac9nDA;;;;;;;;;;;;ECuCK,YAAA;CfqmDL;Ac5oDA;EC+CG,YAAA;CfgmDH;Ac/oDA;EC+CG,oBAAA;CfmmDH;AclpDA;EC+CG,oBAAA;CfsmDH;AcrpDA;EC+CG,WAAA;CfymDH;AcxpDA;EC+CG,oBAAA;Cf4mDH;Ac3pDA;EC+CG,oBAAA;Cf+mDH;Ac9pDA;EC+CG,WAAA;CfknDH;AcjqDA;EC+CG,oBAAA;CfqnDH;AcpqDA;EC+CG,oBAAA;CfwnDH;AcvqDA;EC+CG,WAAA;Cf2nDH;Ac1qDA;EC+CG,oBAAA;Cf8nDH;Ac7qDA;EC+CG,mBAAA;CfioDH;AchrDA;EC8DG,YAAA;CfqnDH;AcnrDA;EC8DG,oBAAA;CfwnDH;ActrDA;EC8DG,oBAAA;Cf2nDH;AczrDA;EC8DG,WAAA;Cf8nDH;Ac5rDA;EC8DG,oBAAA;CfioDH;Ac/rDA;EC8DG,oBAAA;CfooDH;AclsDA;EC8DG,WAAA;CfuoDH;AcrsDA;EC8DG,oBAAA;Cf0oDH;AcxsDA;EC8DG,oBAAA;Cf6oDH;Ac3sDA;EC8DG,WAAA;CfgpDH;Ac9sDA;EC8DG,oBAAA;CfmpDH;AcjtDA;EC8DG,mBAAA;CfspDH;AcptDA;ECmEG,YAAA;CfopDH;AcvtDA;ECoDG,WAAA;CfsqDH;Ac1tDA;ECoDG,mBAAA;CfyqDH;Ac7tDA;ECoDG,mBAAA;Cf4qDH;AchuDA;ECoDG,UAAA;Cf+qDH;AcnuDA;ECoDG,mBAAA;CfkrDH;ActuDA;ECoDG,mBAAA;CfqrDH;AczuDA;ECoDG,UAAA;CfwrDH;Ac5uDA;ECoDG,mBAAA;Cf2rDH;Ac/uDA;ECoDG,mBAAA;Cf8rDH;AclvDA;ECoDG,UAAA;CfisDH;AcrvDA;ECoDG,mBAAA;CfosDH;AcxvDA;ECoDG,kBAAA;CfusDH;Ac3vDA;ECyDG,WAAA;CfqsDH;Ac9vDA;ECwEG,kBAAA;CfyrDH;AcjwDA;ECwEG,0BAAA;Cf4rDH;AcpwDA;ECwEG,0BAAA;Cf+rDH;AcvwDA;ECwEG,iBAAA;CfksDH;Ac1wDA;ECwEG,0BAAA;CfqsDH;Ac7wDA;ECwEG,0BAAA;CfwsDH;AchxDA;ECwEG,iBAAA;Cf2sDH;AcnxDA;ECwEG,0BAAA;Cf8sDH;ActxDA;ECwEG,0BAAA;CfitDH;AczxDA;ECwEG,iBAAA;CfotDH;Ac5xDA;ECwEG,0BAAA;CfutDH;Ac/xDA;ECwEG,yBAAA;Cf0tDH;AclyDA;ECwEG,gBAAA;Cf6tDH;Aa5tDD;ECzEC;;;;;;;;;;;;ICuCK,YAAA;Gf6wDH;EcpzDF;IC+CG,YAAA;GfwwDD;EcvzDF;IC+CG,oBAAA;Gf2wDD;Ec1zDF;IC+CG,oBAAA;Gf8wDD;Ec7zDF;IC+CG,WAAA;GfixDD;Ech0DF;IC+CG,oBAAA;GfoxDD;Ecn0DF;IC+CG,oBAAA;GfuxDD;Ect0DF;IC+CG,WAAA;Gf0xDD;Ecz0DF;IC+CG,oBAAA;Gf6xDD;Ec50DF;IC+CG,oBAAA;GfgyDD;Ec/0DF;IC+CG,WAAA;GfmyDD;Ecl1DF;IC+CG,oBAAA;GfsyDD;Ecr1DF;IC+CG,mBAAA;GfyyDD;Ecx1DF;IC8DG,YAAA;Gf6xDD;Ec31DF;IC8DG,oBAAA;GfgyDD;Ec91DF;IC8DG,oBAAA;GfmyDD;Ecj2DF;IC8DG,WAAA;GfsyDD;Ecp2DF;IC8DG,oBAAA;GfyyDD;Ecv2DF;IC8DG,oBAAA;Gf4yDD;Ec12DF;IC8DG,WAAA;Gf+yDD;Ec72DF;IC8DG,oBAAA;GfkzDD;Ech3DF;IC8DG,oBAAA;GfqzDD;Ecn3DF;IC8DG,WAAA;GfwzDD;Ect3DF;IC8DG,oBAAA;Gf2zDD;Ecz3DF;IC8DG,mBAAA;Gf8zDD;Ec53DF;ICmEG,YAAA;Gf4zDD;Ec/3DF;ICoDG,WAAA;Gf80DD;Ecl4DF;ICoDG,mBAAA;Gfi1DD;Ecr4DF;ICoDG,mBAAA;Gfo1DD;Ecx4DF;ICoDG,UAAA;Gfu1DD;Ec34DF;ICoDG,mBAAA;Gf01DD;Ec94DF;ICoDG,mBAAA;Gf61DD;Ecj5DF;ICoDG,UAAA;Gfg2DD;Ecp5DF;ICoDG,mBAAA;Gfm2DD;Ecv5DF;ICoDG,mBAAA;Gfs2DD;Ec15DF;ICoDG,UAAA;Gfy2DD;Ec75DF;ICoDG,mBAAA;Gf42DD;Ech6DF;ICoDG,kBAAA;Gf+2DD;Ecn6DF;ICyDG,WAAA;Gf62DD;Ect6DF;ICwEG,kBAAA;Gfi2DD;Ecz6DF;ICwEG,0BAAA;Gfo2DD;Ec56DF;ICwEG,0BAAA;Gfu2DD;Ec/6DF;ICwEG,iBAAA;Gf02DD;Ecl7DF;ICwEG,0BAAA;Gf62DD;Ecr7DF;ICwEG,0BAAA;Gfg3DD;Ecx7DF;ICwEG,iBAAA;Gfm3DD;Ec37DF;ICwEG,0BAAA;Gfs3DD;Ec97DF;ICwEG,0BAAA;Gfy3DD;Ecj8DF;ICwEG,iBAAA;Gf43DD;Ecp8DF;ICwEG,0BAAA;Gf+3DD;Ecv8DF;ICwEG,yBAAA;Gfk4DD;Ec18
DF;ICwEG,gBAAA;Gfq4DD;CACF;Aa53DD;EClFC;;;;;;;;;;;;ICuCK,YAAA;Gfs7DH;Ec79DF;IC+CG,YAAA;Gfi7DD;Ech+DF;IC+CG,oBAAA;Gfo7DD;Ecn+DF;IC+CG,oBAAA;Gfu7DD;Ect+DF;IC+CG,WAAA;Gf07DD;Ecz+DF;IC+CG,oBAAA;Gf67DD;Ec5+DF;IC+CG,oBAAA;Gfg8DD;Ec/+DF;IC+CG,WAAA;Gfm8DD;Ecl/DF;IC+CG,oBAAA;Gfs8DD;Ecr/DF;IC+CG,oBAAA;Gfy8DD;Ecx/DF;IC+CG,WAAA;Gf48DD;Ec3/DF;IC+CG,oBAAA;Gf+8DD;Ec9/DF;IC+CG,mBAAA;Gfk9DD;EcjgEF;IC8DG,YAAA;Gfs8DD;EcpgEF;IC8DG,oBAAA;Gfy8DD;EcvgEF;IC8DG,oBAAA;Gf48DD;Ec1gEF;IC8DG,WAAA;Gf+8DD;Ec7gEF;IC8DG,oBAAA;Gfk9DD;EchhEF;IC8DG,oBAAA;Gfq9DD;EcnhEF;IC8DG,WAAA;Gfw9DD;EcthEF;IC8DG,oBAAA;Gf29DD;EczhEF;IC8DG,oBAAA;Gf89DD;Ec5hEF;IC8DG,WAAA;Gfi+DD;Ec/hEF;IC8DG,oBAAA;Gfo+DD;EcliEF;IC8DG,mBAAA;Gfu+DD;EcriEF;ICmEG,YAAA;Gfq+DD;EcxiEF;ICoDG,WAAA;Gfu/DD;Ec3iEF;ICoDG,mBAAA;Gf0/DD;Ec9iEF;ICoDG,mBAAA;Gf6/DD;EcjjEF;ICoDG,UAAA;GfggED;EcpjEF;ICoDG,mBAAA;GfmgED;EcvjEF;ICoDG,mBAAA;GfsgED;Ec1jEF;ICoDG,UAAA;GfygED;Ec7jEF;ICoDG,mBAAA;Gf4gED;EchkEF;ICoDG,mBAAA;Gf+gED;EcnkEF;ICoDG,UAAA;GfkhED;EctkEF;ICoDG,mBAAA;GfqhED;EczkEF;ICoDG,kBAAA;GfwhED;Ec5kEF;ICyDG,WAAA;GfshED;Ec/kEF;ICwEG,kBAAA;Gf0gED;EcllEF;ICwEG,0BAAA;Gf6gED;EcrlEF;ICwEG,0BAAA;GfghED;EcxlEF;ICwEG,iBAAA;GfmhED;Ec3lEF;ICwEG,0BAAA;GfshED;Ec9lEF;ICwEG,0BAAA;GfyhED;EcjmEF;ICwEG,iBAAA;Gf4hED;EcpmEF;ICwEG,0BAAA;Gf+hED;EcvmEF;ICwEG,0BAAA;GfkiED;Ec1mEF;ICwEG,iBAAA;GfqiED;Ec7mEF;ICwEG,0BAAA;GfwiED;EchnEF;ICwEG,yBAAA;Gf2iED;EcnnEF;ICwEG,gBAAA;Gf8iED;CACF;Aa5hED;EC3FC;;;;;;;;;;;;ICuCK,YAAA;Gf+lEH;EctoEF;IC+CG,YAAA;Gf0lED;EczoEF;IC+CG,oBAAA;Gf6lED;Ec5oEF;IC+CG,oBAAA;GfgmED;Ec/oEF;IC+CG,WAAA;GfmmED;EclpEF;IC+CG,oBAAA;GfsmED;EcrpEF;IC+CG,oBAAA;GfymED;EcxpEF;IC+CG,WAAA;Gf4mED;Ec3pEF;IC+CG,oBAAA;Gf+mED;Ec9pEF;IC+CG,oBAAA;GfknED;EcjqEF;IC+CG,WAAA;GfqnED;EcpqEF;IC+CG,oBAAA;GfwnED;EcvqEF;IC+CG,mBAAA;Gf2nED;Ec1qEF;IC8DG,YAAA;Gf+mED;Ec7qEF;IC8DG,oBAAA;GfknED;EchrEF;IC8DG,oBAAA;GfqnED;EcnrEF;IC8DG,WAAA;GfwnED;EctrEF;IC8DG,oBAAA;Gf2nED;EczrEF;IC8DG,oBAAA;Gf8nED;Ec5rEF;IC8DG,WAAA;GfioED;Ec/rEF;IC8DG,oBAAA;GfooED;EclsEF;IC8DG,oBAAA;GfuoED;EcrsEF;IC8DG,WAAA;Gf0oED;EcxsEF;IC8DG,oBAAA;Gf6oED;Ec3sEF;IC8DG,mBAAA;GfgpED;Ec9sEF;ICmEG,YAAA;Gf8oED;EcjtEF;ICoDG,WAAA;GfgqED;EcptEF;ICoDG,mBAAA;GfmqED;EcvtEF;ICoDG,mBAAA;GfsqED;Ec1tEF;ICoDG,UAAA;GfyqED;Ec7tEF;ICoDG,mBAAA;Gf4qED;EchuEF;ICoDG,mBAAA;Gf+qED;EcnuEF;ICoDG,UAAA;GfkrED;EctuEF;ICoDG,mBAAA;GfqrED;EczuEF;ICoDG,mBAAA;GfwrED;Ec5uEF;ICoDG,UAAA;Gf2rED;Ec/uEF;ICoDG,mBAAA;Gf8rED;EclvEF;ICoDG,kBAAA;GfisED;EcrvEF;ICyDG,WAAA;Gf+rED;EcxvEF;ICwEG,kBAAA;GfmrED;Ec3vEF;ICwEG,0BAAA;GfsrED;Ec9vEF;ICwEG,0BAAA;GfyrED;EcjwEF;ICwEG,iBAAA;Gf4rED;EcpwEF;ICwEG,0BAAA;Gf+rED;EcvwEF;ICwEG,0BAAA;GfksED;Ec1wEF;ICwEG,iBAAA;GfqsED;Ec7wEF;ICwEG,0BAAA;GfwsED;EchxEF;ICwEG,0BAAA;Gf2sED;EcnxEF;ICwEG,iBAAA;Gf8sED;EctxEF;ICwEG,0BAAA;GfitED;EczxEF;ICwEG,yBAAA;GfotED;Ec5xEF;ICwEG,gBAAA;GfutED;CACF;AgBzxED;EACE,8BAAA;ChB2xED;AgB5xED;EAQI,iBAAA;EACA,sBAAA;EACA,YAAA;ChBuxEH;AgBlxEG;;EACE,iBAAA;EACA,oBAAA;EACA,YAAA;ChBqxEL;AgBhxED;EACE,iBAAA;EACA,oBAAA;EACA,eAAA;EACA,iBAAA;ChBkxED;AgB/wED;EACE,iBAAA;ChBixED;AgB3wED;EACE,YAAA;EACA,gBAAA;EACA,oBAAA;ChB6wED;AgBhxED;;;;;;EAWQ,aAAA;EACA,wBAAA;EACA,oBAAA;EACA,2BAAA;ChB6wEP;AgB3xED;EAoBI,uBAAA;EACA,8BAAA;ChB0wEH;AgB/xED;;;;;;EA8BQ,cAAA;ChBywEP;AgBvyED;EAoCI,2BAAA;ChBswEH;AgB1yED;EAyCI,uBAAA;ChBowEH;AgB7vED;;;;;;EAOQ,aAAA;ChB8vEP;AgBnvED;EACE,uBAAA;ChBqvED;AgBtvED;;;;;;EAQQ,uBAAA;ChBsvEP;AgB9vED;;EAeM,yBAAA;ChBmvEL;AgBzuED;EAEI,0BAAA;ChB0uEH;AgBjuED;EAEI,0BAAA;ChBkuEH;AiBj3EC;;;;;;;;;;;;EAOI,0BAAA;CjBw3EL;AiBl3EC;;;;;EAMI,0BAAA;CjBm3EL;AiBt4EC;;;;;;;;;;;;EAOI,0BAAA;CjB64EL;AiBv4EC;;;;;EAMI,0BAAA;CjBw4EL;AiB35EC;;;;;;;;;;;;EAOI,0BAAA;CjBk6EL;AiB55EC;;;;;E
AMI,0BAAA;CjB65EL;AiBh7EC;;;;;;;;;;;;EAOI,0BAAA;CjBu7EL;AiBj7EC;;;;;EAMI,0BAAA;CjBk7EL;AiBr8EC;;;;;;;;;;;;EAOI,0BAAA;CjB48EL;AiBt8EC;;;;;EAMI,0BAAA;CjBu8EL;AgBnzED;EACE,kBAAA;EACA,iBAAA;ChBqzED;AgBnzEC;EAAA;IACE,YAAA;IACA,oBAAA;IACA,mBAAA;IACA,6CAAA;IACA,uBAAA;GhBszED;EgB3zED;IASI,iBAAA;GhBqzEH;EgB9zED;;;;;;IAkBU,oBAAA;GhBozET;EgBt0ED;IA0BI,UAAA;GhB+yEH;EgBz0ED;;;;;;IAmCU,eAAA;GhB8yET;EgBj1ED;;;;;;IAuCU,gBAAA;GhBkzET;EgBz1ED;;;;IAoDU,iBAAA;GhB2yET;CACF;AkBrgFD;EAIE,aAAA;EACA,WAAA;EACA,UAAA;EACA,UAAA;ClBogFD;AkBjgFD;EACE,eAAA;EACA,YAAA;EACA,WAAA;EACA,oBAAA;EACA,gBAAA;EACA,qBAAA;EACA,eAAA;EACA,UAAA;EACA,iCAAA;ClBmgFD;AkBhgFD;EACE,sBAAA;EACA,gBAAA;EACA,mBAAA;EACA,iBAAA;ClBkgFD;AkBx/ED;Eb6BE,+BAAA;EACG,4BAAA;EACK,uBAAA;EarBR,yBAAA;EACA,sBAAA;EAAA,iBAAA;ClBo/ED;AkBh/ED;;EAEE,gBAAA;EACA,mBAAA;EACA,oBAAA;ClBk/ED;AkB5+EC;;;;;;EAGE,oBAAA;ClBi/EH;AkB7+ED;EACE,eAAA;ClB++ED;AkB3+ED;EACE,eAAA;EACA,YAAA;ClB6+ED;AkBz+ED;;EAEE,aAAA;ClB2+ED;AkBv+ED;;;EZ1FE,2CAAA;EACA,qBAAA;CNskFD;AkBt+ED;EACE,eAAA;EACA,iBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;ClBw+ED;AkB98ED;EACE,eAAA;EACA,YAAA;EACA,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,uBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;Eb3EA,yDAAA;EACQ,iDAAA;EAyHR,+EAAA;EACK,0EAAA;EACG,uFAAA;EAAA,+EAAA;EAAA,uEAAA;EAAA,4GAAA;CLo6ET;AmB9iFC;EACE,sBAAA;EACA,WAAA;EdYF,0FAAA;EACQ,kFAAA;CLqiFT;AKpgFC;EACE,YAAA;EACA,WAAA;CLsgFH;AKpgFC;EAA0B,YAAA;CLugF3B;AKtgFC;EAAgC,YAAA;CLygFjC;AkB19EC;EACE,8BAAA;EACA,UAAA;ClB49EH;AkBp9EC;;;EAGE,0BAAA;EACA,WAAA;ClBs9EH;AkBn9EC;;EAEE,oBAAA;ClBq9EH;AkBj9EC;EACE,aAAA;ClBm9EH;AkBr8ED;EAKI;;;;IACE,kBAAA;GlBs8EH;EkBn8EC;;;;;;;;IAEE,kBAAA;GlB28EH;EkBx8EC;;;;;;;;IAEE,kBAAA;GlBg9EH;CACF;AkBt8ED;EACE,oBAAA;ClBw8ED;AkBh8ED;;EAEE,mBAAA;EACA,eAAA;EACA,iBAAA;EACA,oBAAA;ClBk8ED;AkB/7EC;;;;EAGI,oBAAA;ClBk8EL;AkB78ED;;EAgBI,iBAAA;EACA,mBAAA;EACA,iBAAA;EACA,iBAAA;EACA,gBAAA;ClBi8EH;AkB97ED;;;;EAIE,mBAAA;EACA,mBAAA;EACA,mBAAA;ClBg8ED;AkB77ED;;EAEE,iBAAA;ClB+7ED;AkB37ED;;EAEE,mBAAA;EACA,sBAAA;EACA,mBAAA;EACA,iBAAA;EACA,iBAAA;EACA,uBAAA;EACA,gBAAA;ClB67ED;AkB17EC;;;;EAEE,oBAAA;ClB87EH;AkB37ED;;EAEE,cAAA;EACA,kBAAA;ClB67ED;AkBp7ED;EACE,iBAAA;EAEA,iBAAA;EACA,oBAAA;EAEA,iBAAA;ClBo7ED;AkBl7EC;;EAEE,iBAAA;EACA,gBAAA;ClBo7EH;AkBv6ED;EC3PE,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CnBqqFD;AmBnqFC;EACE,aAAA;EACA,kBAAA;CnBqqFH;AmBlqFC;;EAEE,aAAA;CnBoqFH;AkBn7ED;EAEI,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;ClBo7EH;AkB17ED;EASI,aAAA;EACA,kBAAA;ClBo7EH;AkB97ED;;EAcI,aAAA;ClBo7EH;AkBl8ED;EAiBI,aAAA;EACA,iBAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;ClBo7EH;AkBh7ED;ECvRE,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CnB0sFD;AmBxsFC;EACE,aAAA;EACA,kBAAA;CnB0sFH;AmBvsFC;;EAEE,aAAA;CnBysFH;AkB57ED;EAEI,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;ClB67EH;AkBn8ED;EASI,aAAA;EACA,kBAAA;ClB67EH;AkBv8ED;;EAcI,aAAA;ClB67EH;AkB38ED;EAiBI,aAAA;EACA,iBAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;ClB67EH;AkBp7ED;EAEE,mBAAA;ClBq7ED;AkBv7ED;EAMI,sBAAA;ClBo7EH;AkBh7ED;EACE,mBAAA;EACA,OAAA;EACA,SAAA;EACA,WAAA;EACA,eAAA;EACA,YAAA;EACA,aAAA;EACA,kBAAA;EACA,mBAAA;EACA,qBAAA;ClBk7ED;AkBh7ED;;;EAGE,YAAA;EACA,aAAA;EACA,kBAAA;ClBk7ED;AkBh7ED;;;EAGE,YAAA;EACA,aAAA;EACA,kBAAA;ClBk7ED;AkB96ED;;;;;;;;;;EClZI,eAAA;CnB40FH;AkB17ED;EC9YI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CL2xFT;AmB30FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CLgyFT;AkBp8ED;ECpYI,eAAA;EACA,0BAAA;EACA,sBAAA;CnB20FH;AkBz8ED;EC9XI,eAAA;CnB00FH;AkBz8ED;;;;;;;;;;ECrZI,eAAA;CnB02FH;AkBr9ED;ECjZI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CLyzFT;AmBz2FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CL8zFT;AkB/9ED;ECvYI,eAAA;EACA,0BAAA;EACA,sBAAA;CnBy2FH;AkBp+ED;ECjYI,eAAA;CnBw2FH
;AkBp+ED;;;;;;;;;;ECxZI,eAAA;CnBw4FH;AkBh/ED;ECpZI,sBAAA;EdiDF,yDAAA;EACQ,iDAAA;CLu1FT;AmBv4FG;EACE,sBAAA;Ed8CJ,0EAAA;EACQ,kEAAA;CL41FT;AkB1/ED;EC1YI,eAAA;EACA,0BAAA;EACA,sBAAA;CnBu4FH;AkB//ED;ECpYI,eAAA;CnBs4FH;AkB3/EC;EACE,UAAA;ClB6/EH;AkB3/EC;EACE,OAAA;ClB6/EH;AkBn/ED;EACE,eAAA;EACA,gBAAA;EACA,oBAAA;EACA,eAAA;ClBq/ED;AkBn+EC;EAAA;IAGI,sBAAA;IACA,iBAAA;IACA,uBAAA;GlBo+EH;EkBz+ED;IAUI,sBAAA;IACA,YAAA;IACA,uBAAA;GlBk+EH;EkB9+ED;IAiBI,sBAAA;GlBg+EH;EkBj/ED;IAqBI,sBAAA;IACA,uBAAA;GlB+9EH;EkBr/ED;;;IA2BM,YAAA;GlB+9EL;EkB1/ED;IAiCI,YAAA;GlB49EH;EkB7/ED;IAqCI,iBAAA;IACA,uBAAA;GlB29EH;EkBjgFD;;IA6CI,sBAAA;IACA,cAAA;IACA,iBAAA;IACA,uBAAA;GlBw9EH;EkBxgFD;;IAmDM,gBAAA;GlBy9EL;EkB5gFD;;IAwDI,mBAAA;IACA,eAAA;GlBw9EH;EkBjhFD;IA8DI,OAAA;GlBs9EH;CACF;AkB58ED;;;;EASI,iBAAA;EACA,cAAA;EACA,iBAAA;ClBy8EH;AkBp9ED;;EAiBI,iBAAA;ClBu8EH;AkBx9ED;EJ9gBE,oBAAA;EACA,mBAAA;Cdy+FD;AkBj8EC;EAAA;IAEI,iBAAA;IACA,iBAAA;IACA,kBAAA;GlBm8EH;CACF;AkBn+ED;EAwCI,YAAA;ClB87EH;AkBt7EG;EAAA;IAEI,kBAAA;IACA,gBAAA;GlBw7EL;CACF;AkBp7EG;EAAA;IAEI,iBAAA;IACA,gBAAA;GlBs7EL;CACF;AoBrgGD;EACE,sBAAA;EACA,iBAAA;EACA,oBAAA;EACA,mBAAA;EACA,oBAAA;EACA,uBAAA;EACA,+BAAA;EAAA,2BAAA;EACA,gBAAA;EACA,uBAAA;EACA,8BAAA;ECoCA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,mBAAA;EhBqKA,0BAAA;EACG,uBAAA;EACC,sBAAA;EACI,kBAAA;CLg0FT;AoBxgGG;;;;;;EdrBF,2CAAA;EACA,qBAAA;CNqiGD;AoB3gGC;;;EAGE,YAAA;EACA,sBAAA;CpB6gGH;AoB1gGC;;EAEE,uBAAA;EACA,WAAA;Ef2BF,yDAAA;EACQ,iDAAA;CLk/FT;AoB1gGC;;;EAGE,oBAAA;EE9CF,0BAAA;EACA,cAAA;EjBiEA,yBAAA;EACQ,iBAAA;CL2/FT;AoB1gGG;;EAEE,qBAAA;CpB4gGL;AoBngGD;EC7DE,YAAA;EACA,uBAAA;EACA,mBAAA;CrBmkGD;AqBjkGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmkGH;AqBjkGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmkGH;AqBjkGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBmkGH;AqBjkGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBykGL;AqBnkGG;;;;;;;;;EAGE,uBAAA;EACA,mBAAA;CrB2kGL;AoBpjGD;EClBI,YAAA;EACA,uBAAA;CrBykGH;AoBrjGD;EChEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGD;AqBtnGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGH;AqBtnGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwnGH;AqBtnGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBwnGH;AqBtnGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB8nGL;AqBxnGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrBgoGL;AoBtmGD;ECrBI,eAAA;EACA,uBAAA;CrB8nGH;AoBtmGD;ECpEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGD;AqB3qGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGH;AqB3qGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6qGH;AqB3qGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrB6qGH;AqB3qGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBmrGL;AqB7qGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrBqrGL;AoBvpGD;ECzBI,eAAA;EACA,uBAAA;CrBmrGH;AoBvpGD;ECxEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGD;AqBhuGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGH;AqBhuGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBkuGH;AqBhuGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBkuGH;AqBhuGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBwuGL;AqBluGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrB0uGL;AoBxsGD;EC7BI,eAAA;EACA,uBAAA;CrBwuGH;AoBxsGD;EC5EE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGD;AqBrxGC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGH;AqBrxGC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBuxGH;AqBrxGC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrBuxGH;AqBrxGG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB6xGL;AqBvxGG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;CrB+xGL;AoBzvGD;ECjCI,eAAA;EACA,uBAAA;CrB6xGH;AoBzvGD;EChFE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GD;AqB10GC;;EAEE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GH;AqB10GC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CrB40GH;AqB10GC;;;EAGE,YAAA;EACA,0BAAA;EACA,uBAAA;EACA,sBAAA;CrB40GH;AqB10GG;;;;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;CrBk1GL;AqB50GG;;;;;;;;;EAGE,0BAAA;EACA,sBAAA;
CrBo1GL;AoB1yGD;ECrCI,eAAA;EACA,uBAAA;CrBk1GH;AoBryGD;EACE,iBAAA;EACA,eAAA;EACA,iBAAA;CpBuyGD;AoBryGC;;;;;EAKE,8BAAA;EfnCF,yBAAA;EACQ,iBAAA;CL20GT;AoBtyGC;;;;EAIE,0BAAA;CpBwyGH;AoBtyGC;;EAEE,eAAA;EACA,2BAAA;EACA,8BAAA;CpBwyGH;AoBpyGG;;;;EAEE,eAAA;EACA,sBAAA;CpBwyGL;AoB/xGD;;EC9EE,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CrBi3GD;AoBlyGD;;EClFE,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CrBw3GD;AoBryGD;;ECtFE,iBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CrB+3GD;AoBpyGD;EACE,eAAA;EACA,YAAA;CpBsyGD;AoBlyGD;EACE,gBAAA;CpBoyGD;AoB7xGC;;;EACE,YAAA;CpBiyGH;AuB37GD;EACE,WAAA;ElBoLA,yCAAA;EACK,oCAAA;EACG,iCAAA;CL0wGT;AuB77GC;EACE,WAAA;CvB+7GH;AuB37GD;EACE,cAAA;CvB67GD;AuB37GC;EAAY,eAAA;CvB87Gb;AuB77GC;EAAY,mBAAA;CvBg8Gb;AuB/7GC;EAAY,yBAAA;CvBk8Gb;AuB/7GD;EACE,mBAAA;EACA,UAAA;EACA,iBAAA;ElBsKA,gDAAA;EACQ,2CAAA;EAAA,wCAAA;EAOR,mCAAA;EACQ,8BAAA;EAAA,2BAAA;EAGR,yCAAA;EACQ,oCAAA;EAAA,iCAAA;CLoxGT;AwBh+GD;EACE,sBAAA;EACA,SAAA;EACA,UAAA;EACA,iBAAA;EACA,uBAAA;EACA,uBAAA;EACA,yBAAA;EACA,oCAAA;EACA,mCAAA;CxBk+GD;AwB99GD;;EAEE,mBAAA;CxBg+GD;AwB59GD;EACE,WAAA;CxB89GD;AwB19GD;EACE,mBAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,YAAA;EACA,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,gBAAA;EACA,iBAAA;EACA,iBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,sCAAA;EACA,mBAAA;EnBuBA,oDAAA;EACQ,4CAAA;CLs8GT;AwBx9GC;EACE,SAAA;EACA,WAAA;CxB09GH;AwBn/GD;ECzBE,YAAA;EACA,cAAA;EACA,iBAAA;EACA,0BAAA;CzB+gHD;AwBz/GD;EAmCI,eAAA;EACA,kBAAA;EACA,YAAA;EACA,iBAAA;EACA,wBAAA;EACA,eAAA;EACA,oBAAA;CxBy9GH;AwBv9GG;;EAEE,eAAA;EACA,sBAAA;EACA,0BAAA;CxBy9GL;AwBl9GC;;;EAGE,YAAA;EACA,sBAAA;EACA,0BAAA;EACA,WAAA;CxBo9GH;AwB38GC;;;EAGE,eAAA;CxB68GH;AwBz8GC;;EAEE,sBAAA;EACA,oBAAA;EACA,8BAAA;EACA,uBAAA;EEzGF,oEAAA;C1BqjHD;AwBt8GD;EAGI,eAAA;CxBs8GH;AwBz8GD;EAQI,WAAA;CxBo8GH;AwB57GD;EACE,SAAA;EACA,WAAA;CxB87GD;AwBt7GD;EACE,YAAA;EACA,QAAA;CxBw7GD;AwBp7GD;EACE,eAAA;EACA,kBAAA;EACA,gBAAA;EACA,wBAAA;EACA,eAAA;EACA,oBAAA;CxBs7GD;AwBl7GD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,aAAA;CxBo7GD;AwBh7GD;EACE,SAAA;EACA,WAAA;CxBk7GD;AwB16GD;;EAII,YAAA;EACA,cAAA;EACA,0BAAA;EACA,4BAAA;CxB06GH;AwBj7GD;;EAWI,UAAA;EACA,aAAA;EACA,mBAAA;CxB06GH;AwBj6GD;EACE;IApEA,SAAA;IACA,WAAA;GxBw+GC;EwBr6GD;IA1DA,YAAA;IACA,QAAA;GxBk+GC;CACF;A2B7mHD;;EAEE,mBAAA;EACA,sBAAA;EACA,uBAAA;C3B+mHD;A2BnnHD;;EAMI,mBAAA;EACA,YAAA;C3BinHH;A2B/mHG;;;;;;;;EAIE,WAAA;C3BqnHL;A2B/mHD;;;;EAKI,kBAAA;C3BgnHH;A2B3mHD;EACE,kBAAA;C3B6mHD;A2B9mHD;;;EAOI,YAAA;C3B4mHH;A2BnnHD;;;EAYI,iBAAA;C3B4mHH;A2BxmHD;EACE,iBAAA;C3B0mHD;A2BtmHD;EACE,eAAA;C3BwmHD;A2BvmHC;ECpDA,2BAAA;EACA,8BAAA;C5B8pHD;A2BtmHD;;ECjDE,0BAAA;EACA,6BAAA;C5B2pHD;A2BrmHD;EACE,YAAA;C3BumHD;A2BrmHD;EACE,iBAAA;C3BumHD;A2BrmHD;;ECrEE,2BAAA;EACA,8BAAA;C5B8qHD;A2BpmHD;ECnEE,0BAAA;EACA,6BAAA;C5B0qHD;A2BnmHD;;EAEE,WAAA;C3BqmHD;A2BplHD;EACE,mBAAA;EACA,kBAAA;C3BslHD;A2BplHD;EACE,oBAAA;EACA,mBAAA;C3BslHD;A2BjlHD;EtB/CE,yDAAA;EACQ,iDAAA;CLmoHT;A2BjlHC;EtBnDA,yBAAA;EACQ,iBAAA;CLuoHT;A2B9kHD;EACE,eAAA;C3BglHD;A2B7kHD;EACE,wBAAA;EACA,uBAAA;C3B+kHD;A2B5kHD;EACE,wBAAA;C3B8kHD;A2BvkHD;;;EAII,eAAA;EACA,YAAA;EACA,YAAA;EACA,gBAAA;C3BwkHH;A2B/kHD;EAcM,YAAA;C3BokHL;A2BllHD;;;;EAsBI,iBAAA;EACA,eAAA;C3BkkHH;A2B7jHC;EACE,iBAAA;C3B+jHH;A2B7jHC;EC7KA,4BAAA;EACA,6BAAA;EAOA,8BAAA;EACA,6BAAA;C5BuuHD;A2B/jHC;ECjLA,0BAAA;EACA,2BAAA;EAOA,gCAAA;EACA,+BAAA;C5B6uHD;A2BhkHD;EACE,iBAAA;C3BkkHD;A2BhkHD;;ECjLE,8BAAA;EACA,6BAAA;C5BqvHD;A2B/jHD;EC/LE,0BAAA;EACA,2BAAA;C5BiwHD;A2B3jHD;EACE,eAAA;EACA,YAAA;EACA,oBAAA;EACA,0BAAA;C3B6jHD;A2BjkHD;;EAOI,oBAAA;EACA,YAAA;EACA,UAAA;C3B8jHH;A2BvkHD;EAYI,YAAA;C3B8jHH;A2B1kHD;EAgBI,WAAA;C3B6jHH;A2B5iHD;;;;EAKM,mBAAA;EACA,uBAAA;EACA,qBAAA;C3B6iHL;A6BvxHD;EACE,mBAAA
;EACA,eAAA;EACA,0BAAA;C7ByxHD;A6BtxHC;EACE,YAAA;EACA,iBAAA;EACA,gBAAA;C7BwxHH;A6BjyHD;EAeI,mBAAA;EACA,WAAA;EAKA,YAAA;EAEA,YAAA;EACA,iBAAA;C7BgxHH;A6B9wHG;EACE,WAAA;C7BgxHL;A6BtwHD;;;EVwBE,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,uBAAA;EACA,mBAAA;CnBmvHD;AmBjvHC;;;EACE,aAAA;EACA,kBAAA;CnBqvHH;AmBlvHC;;;;;;EAEE,aAAA;CnBwvHH;A6BxxHD;;;EVmBE,aAAA;EACA,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,mBAAA;CnB0wHD;AmBxwHC;;;EACE,aAAA;EACA,kBAAA;CnB4wHH;AmBzwHC;;;;;;EAEE,aAAA;CnB+wHH;A6BtyHD;;;EAGE,oBAAA;C7BwyHD;A6BtyHC;;;EACE,iBAAA;C7B0yHH;A6BtyHD;;EAEE,UAAA;EACA,oBAAA;EACA,uBAAA;C7BwyHD;A6BnyHD;EACE,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,eAAA;EACA,eAAA;EACA,mBAAA;EACA,0BAAA;EACA,uBAAA;EACA,mBAAA;C7BqyHD;A6BlyHC;EACE,kBAAA;EACA,gBAAA;EACA,mBAAA;C7BoyHH;A6BlyHC;EACE,mBAAA;EACA,gBAAA;EACA,mBAAA;C7BoyHH;A6BxzHD;;EA0BI,cAAA;C7BkyHH;A6B7xHD;;;;;;;EDtGE,2BAAA;EACA,8BAAA;C5B44HD;A6B9xHD;EACE,gBAAA;C7BgyHD;A6B9xHD;;;;;;;ED1GE,0BAAA;EACA,6BAAA;C5Bi5HD;A6B/xHD;EACE,eAAA;C7BiyHD;A6B5xHD;EACE,mBAAA;EAGA,aAAA;EACA,oBAAA;C7B4xHD;A6BjyHD;EAUI,mBAAA;C7B0xHH;A6BpyHD;EAYM,kBAAA;C7B2xHL;A6BxxHG;;;EAGE,WAAA;C7B0xHL;A6BrxHC;;EAGI,mBAAA;C7BsxHL;A6BnxHC;;EAGI,WAAA;EACA,kBAAA;C7BoxHL;A8Bn7HD;EACE,gBAAA;EACA,iBAAA;EACA,iBAAA;C9Bq7HD;A8Bx7HD;EAOI,mBAAA;EACA,eAAA;C9Bo7HH;A8B57HD;EAWM,mBAAA;EACA,eAAA;EACA,mBAAA;C9Bo7HL;A8Bn7HK;;EAEE,sBAAA;EACA,0BAAA;C9Bq7HP;A8Bh7HG;EACE,eAAA;C9Bk7HL;A8Bh7HK;;EAEE,eAAA;EACA,sBAAA;EACA,oBAAA;EACA,8BAAA;C9Bk7HP;A8B36HG;;;EAGE,0BAAA;EACA,sBAAA;C9B66HL;A8Bt9HD;ELLE,YAAA;EACA,cAAA;EACA,iBAAA;EACA,0BAAA;CzB89HD;A8B59HD;EA0DI,gBAAA;C9Bq6HH;A8B55HD;EACE,8BAAA;C9B85HD;A8B/5HD;EAGI,YAAA;EAEA,oBAAA;C9B85HH;A8Bn6HD;EASM,kBAAA;EACA,wBAAA;EACA,8BAAA;EACA,2BAAA;C9B65HL;A8B55HK;EACE,mCAAA;C9B85HP;A8Bx5HK;;;EAGE,eAAA;EACA,gBAAA;EACA,uBAAA;EACA,uBAAA;EACA,iCAAA;C9B05HP;A8Br5HC;EAqDA,YAAA;EA8BA,iBAAA;C9Bs0HD;A8Bz5HC;EAwDE,YAAA;C9Bo2HH;A8B55HC;EA0DI,mBAAA;EACA,mBAAA;C9Bq2HL;A8Bh6HC;EAgEE,UAAA;EACA,WAAA;C9Bm2HH;A8Bh2HC;EAAA;IAEI,oBAAA;IACA,UAAA;G9Bk2HH;E8Br2HD;IAKM,iBAAA;G9Bm2HL;CACF;A8B76HC;EAuFE,gBAAA;EACA,mBAAA;C9By1HH;A8Bj7HC;;;EA8FE,uBAAA;C9Bw1HH;A8Br1HC;EAAA;IAEI,8BAAA;IACA,2BAAA;G9Bu1HH;E8B11HD;;;IAQI,0BAAA;G9Bu1HH;CACF;A8Bx7HD;EAEI,YAAA;C9By7HH;A8B37HD;EAMM,mBAAA;C9Bw7HL;A8B97HD;EASM,iBAAA;C9Bw7HL;A8Bn7HK;;;EAGE,YAAA;EACA,0BAAA;C9Bq7HP;A8B76HD;EAEI,YAAA;C9B86HH;A8Bh7HD;EAIM,gBAAA;EACA,eAAA;C9B+6HL;A8Bn6HD;EACE,YAAA;C9Bq6HD;A8Bt6HD;EAII,YAAA;C9Bq6HH;A8Bz6HD;EAMM,mBAAA;EACA,mBAAA;C9Bs6HL;A8B76HD;EAYI,UAAA;EACA,WAAA;C9Bo6HH;A8Bj6HC;EAAA;IAEI,oBAAA;IACA,UAAA;G9Bm6HH;E8Bt6HD;IAKM,iBAAA;G9Bo6HL;CACF;A8B55HD;EACE,iBAAA;C9B85HD;A8B/5HD;EAKI,gBAAA;EACA,mBAAA;C9B65HH;A8Bn6HD;;;EAYI,uBAAA;C9B45HH;A8Bz5HC;EAAA;IAEI,8BAAA;IACA,2BAAA;G9B25HH;E8B95HD;;;IAQI,0BAAA;G9B25HH;CACF;A8Bl5HD;EAEI,cAAA;C9Bm5HH;A8Br5HD;EAKI,eAAA;C9Bm5HH;A8B14HD;EAEE,iBAAA;EF7OA,0BAAA;EACA,2BAAA;C5BynID;A+BjnID;EACE,mBAAA;EACA,iBAAA;EACA,oBAAA;EACA,8BAAA;C/BmnID;A+B9mIC;EAAA;IACE,mBAAA;G/BinID;CACF;A+BrmIC;EAAA;IACE,YAAA;G/BwmID;CACF;A+B1lID;EACE,oBAAA;EACA,mBAAA;EACA,oBAAA;EACA,kCAAA;EACA,2DAAA;EAAA,mDAAA;EAEA,kCAAA;C/B2lID;A+BzlIC;EACE,iBAAA;C/B2lIH;A+BxlIC;EAAA;IACE,YAAA;IACA,cAAA;IACA,yBAAA;IAAA,iBAAA;G/B2lID;E+BzlIC;IACE,0BAAA;IACA,wBAAA;IACA,kBAAA;IACA,6BAAA;G/B2lIH;E+BxlIC;IACE,oBAAA;G/B0lIH;E+BrlIC;;;IAGE,iBAAA;IACA,gBAAA;G/BulIH;CACF;A+BnlID;;EAWE,gBAAA;EACA,SAAA;EACA,QAAA;EACA,cAAA;C/B4kID;A+B1lID;;EAGI,kBAAA;C/B2lIH;A+BzlIG;EAAA;;IACE,kBAAA;G/B6lIH;CACF;A+BnlIC;EAAA;;IACE,iBAAA;G/BulID;CACF;A+BplID;EACE,OAAA;EACA,sBAAA;C/BslID;A+BplID;EACE,UAAA;EACA,iBAAA;EACA,sBAAA;C/BslID;A+B9kID;;;;EAII,oBAAA;EACA,mBAAA;C/BglIH;A+B9kIG;EAAA;;;;IACE,gBAAA;IACA,eAAA;G/BolIH;CACF;A
+BxkID;EACE,cAAA;EACA,sBAAA;C/B0kID;A+BxkIC;EAAA;IACE,iBAAA;G/B2kID;CACF;A+BrkID;EACE,YAAA;EACA,aAAA;EACA,mBAAA;EACA,gBAAA;EACA,kBAAA;C/BukID;A+BrkIC;;EAEE,sBAAA;C/BukIH;A+BhlID;EAaI,eAAA;C/BskIH;A+BnkIC;EACE;;IAEE,mBAAA;G/BqkIH;CACF;A+B3jID;EACE,mBAAA;EACA,aAAA;EACA,kBAAA;EACA,mBAAA;EC9LA,gBAAA;EACA,mBAAA;ED+LA,8BAAA;EACA,uBAAA;EACA,8BAAA;EACA,mBAAA;C/B8jID;A+B1jIC;EACE,WAAA;C/B4jIH;A+B1kID;EAmBI,eAAA;EACA,YAAA;EACA,YAAA;EACA,mBAAA;C/B0jIH;A+BhlID;EAyBI,gBAAA;C/B0jIH;A+BvjIC;EAAA;IACE,cAAA;G/B0jID;CACF;A+BjjID;EACE,oBAAA;C/BmjID;A+BpjID;EAII,kBAAA;EACA,qBAAA;EACA,kBAAA;C/BmjIH;A+BhjIC;EAAA;IAGI,iBAAA;IACA,YAAA;IACA,YAAA;IACA,cAAA;IACA,8BAAA;IACA,UAAA;IACA,yBAAA;IAAA,iBAAA;G/BijIH;E+B1jID;;IAYM,2BAAA;G/BkjIL;E+B9jID;IAeM,kBAAA;G/BkjIL;E+BjjIK;;IAEE,uBAAA;G/BmjIP;CACF;A+B7iIC;EAAA;IACE,YAAA;IACA,UAAA;G/BgjID;E+BljID;IAKI,YAAA;G/BgjIH;E+BrjID;IAOM,kBAAA;IACA,qBAAA;G/BijIL;CACF;A+BtiID;EACE,mBAAA;EACA,oBAAA;EACA,mBAAA;EACA,kCAAA;EACA,qCAAA;E1B5NA,6FAAA;EACQ,qFAAA;E2BjER,gBAAA;EACA,mBAAA;ChCu0ID;AkB13HC;EAAA;IAGI,sBAAA;IACA,iBAAA;IACA,uBAAA;GlB23HH;EkBh4HD;IAUI,sBAAA;IACA,YAAA;IACA,uBAAA;GlBy3HH;EkBr4HD;IAiBI,sBAAA;GlBu3HH;EkBx4HD;IAqBI,sBAAA;IACA,uBAAA;GlBs3HH;EkB54HD;;;IA2BM,YAAA;GlBs3HL;EkBj5HD;IAiCI,YAAA;GlBm3HH;EkBp5HD;IAqCI,iBAAA;IACA,uBAAA;GlBk3HH;EkBx5HD;;IA6CI,sBAAA;IACA,cAAA;IACA,iBAAA;IACA,uBAAA;GlB+2HH;EkB/5HD;;IAmDM,gBAAA;GlBg3HL;EkBn6HD;;IAwDI,mBAAA;IACA,eAAA;GlB+2HH;EkBx6HD;IA8DI,OAAA;GlB62HH;CACF;A+BtlIG;EAAA;IACE,mBAAA;G/BylIH;E+BvlIG;IACE,iBAAA;G/BylIL;CACF;A+BjlIC;EAAA;IACE,YAAA;IACA,eAAA;IACA,kBAAA;IACA,gBAAA;IACA,eAAA;IACA,UAAA;I1BvPF,yBAAA;IACQ,iBAAA;GL40IP;CACF;A+B9kID;EACE,cAAA;EHpUA,0BAAA;EACA,2BAAA;C5Bq5ID;A+B9kID;EACE,iBAAA;EHzUA,4BAAA;EACA,6BAAA;EAOA,8BAAA;EACA,6BAAA;C5Bo5ID;A+B1kID;EChVE,gBAAA;EACA,mBAAA;ChC65ID;A+B3kIC;ECnVA,iBAAA;EACA,oBAAA;ChCi6ID;A+B5kIC;ECtVA,iBAAA;EACA,oBAAA;ChCq6ID;A+BtkID;EChWE,iBAAA;EACA,oBAAA;ChCy6ID;A+BvkIC;EAAA;IACE,YAAA;IACA,mBAAA;IACA,kBAAA;G/B0kID;CACF;A+B9jID;EACE;IEtWA,uBAAA;GjCu6IC;E+BhkID;IE1WA,wBAAA;IF4WE,oBAAA;G/BkkID;E+BpkID;IAKI,gBAAA;G/BkkIH;CACF;A+BzjID;EACE,0BAAA;EACA,sBAAA;C/B2jID;A+B7jID;EAKI,YAAA;C/B2jIH;A+B1jIG;;EAEE,eAAA;EACA,8BAAA;C/B4jIL;A+BrkID;EAcI,YAAA;C/B0jIH;A+BxkID;EAmBM,YAAA;C/BwjIL;A+BtjIK;;EAEE,YAAA;EACA,8BAAA;C/BwjIP;A+BpjIK;;;EAGE,YAAA;EACA,0BAAA;C/BsjIP;A+BljIK;;;EAGE,YAAA;EACA,8BAAA;C/BojIP;A+B7iIK;;;EAGE,YAAA;EACA,0BAAA;C/B+iIP;A+B3iIG;EAAA;IAIM,YAAA;G/B2iIP;E+B1iIO;;IAEE,YAAA;IACA,8BAAA;G/B4iIT;E+BxiIO;;;IAGE,YAAA;IACA,0BAAA;G/B0iIT;E+BtiIO;;;IAGE,YAAA;IACA,8BAAA;G/BwiIT;CACF;A+BxnID;EAuFI,mBAAA;C/BoiIH;A+BniIG;;EAEE,uBAAA;C/BqiIL;A+B/nID;EA6FM,uBAAA;C/BqiIL;A+BloID;;EAmGI,sBAAA;C/BmiIH;A+BtoID;EA4GI,YAAA;C/B6hIH;A+B5hIG;EACE,YAAA;C/B8hIL;A+B5oID;EAmHI,YAAA;C/B4hIH;A+B3hIG;;EAEE,YAAA;C/B6hIL;A+BzhIK;;;;EAEE,YAAA;C/B6hIP;A+BrhID;EACE,uBAAA;EACA,sBAAA;C/BuhID;A+BzhID;EAKI,eAAA;C/BuhIH;A+BthIG;;EAEE,YAAA;EACA,8BAAA;C/BwhIL;A+BjiID;EAcI,eAAA;C/BshIH;A+BpiID;EAmBM,eAAA;C/BohIL;A+BlhIK;;EAEE,YAAA;EACA,8BAAA;C/BohIP;A+BhhIK;;;EAGE,YAAA;EACA,0BAAA;C/BkhIP;A+B9gIK;;;EAGE,YAAA;EACA,8BAAA;C/BghIP;A+B1gIK;;;EAGE,YAAA;EACA,0BAAA;C/B4gIP;A+BxgIG;EAAA;IAIM,sBAAA;G/BwgIP;E+B5gIC;IAOM,0BAAA;G/BwgIP;E+B/gIC;IAUM,eAAA;G/BwgIP;E+BvgIO;;IAEE,YAAA;IACA,8BAAA;G/BygIT;E+BrgIO;;;IAGE,YAAA;IACA,0BAAA;G/BugIT;E+BngIO;;;IAGE,YAAA;IACA,8BAAA;G/BqgIT;CACF;A+B1lID;EA6FI,mBAAA;C/BggIH;A+B//HG;;EAEE,uBAAA;C/BigIL;A+BjmID;EAmGM,uBAAA;C/BigIL;A+BpmID;;EAyGI,sBAAA;C/B+/HH;A+BxmID;EA6GI,eAAA;C/B8/HH;A+B7/HG;EACE,YAAA;C/B+/HL;A+B9mID;EAoHI,eAAA;C/B6/HH;A+B5/HG;;EAEE,YAAA;C/B8/HL;A+B1/HK;;;;EAEE,YAAA;C/B8/HP;AkCpoJD;EACE,kBAAA;EACA,oBAAA;EA
CA,iBAAA;EACA,0BAAA;EACA,mBAAA;ClCsoJD;AkC3oJD;EAQI,sBAAA;ClCsoJH;AkC9oJD;EAWM,eAAA;EACA,YAAA;EACA,kBAAA;ClCsoJL;AkCnpJD;EAkBI,eAAA;ClCooJH;AmCxpJD;EACE,sBAAA;EACA,gBAAA;EACA,eAAA;EACA,mBAAA;CnC0pJD;AmC9pJD;EAOI,gBAAA;CnC0pJH;AmCjqJD;;EAUM,mBAAA;EACA,YAAA;EACA,kBAAA;EACA,kBAAA;EACA,wBAAA;EACA,eAAA;EACA,sBAAA;EACA,uBAAA;EACA,uBAAA;CnC2pJL;AmCzpJK;;;;EAEE,WAAA;EACA,eAAA;EACA,0BAAA;EACA,mBAAA;CnC6pJP;AmC1pJG;;EAGI,eAAA;EPnBN,4BAAA;EACA,+BAAA;C5B+qJD;AmCzpJG;;EP/BF,6BAAA;EACA,gCAAA;C5B4rJD;AmCppJG;;;;;;EAGE,WAAA;EACA,YAAA;EACA,gBAAA;EACA,0BAAA;EACA,sBAAA;CnCypJL;AmC7sJD;;;;;;EA+DM,eAAA;EACA,oBAAA;EACA,uBAAA;EACA,mBAAA;CnCspJL;AmC7oJD;;ECxEM,mBAAA;EACA,gBAAA;EACA,uBAAA;CpCytJL;AoCvtJG;;ERKF,4BAAA;EACA,+BAAA;C5BstJD;AoCttJG;;ERTF,6BAAA;EACA,gCAAA;C5BmuJD;AmCxpJD;;EC7EM,kBAAA;EACA,gBAAA;EACA,iBAAA;CpCyuJL;AoCvuJG;;ERKF,4BAAA;EACA,+BAAA;C5BsuJD;AoCtuJG;;ERTF,6BAAA;EACA,gCAAA;C5BmvJD;AqCtvJD;EACE,gBAAA;EACA,eAAA;EACA,mBAAA;EACA,iBAAA;CrCwvJD;AqC5vJD;EAOI,gBAAA;CrCwvJH;AqC/vJD;;EAUM,sBAAA;EACA,kBAAA;EACA,uBAAA;EACA,uBAAA;EACA,oBAAA;CrCyvJL;AqCvwJD;;EAmBM,sBAAA;EACA,0BAAA;CrCwvJL;AqC5wJD;;EA2BM,aAAA;CrCqvJL;AqChxJD;;EAkCM,YAAA;CrCkvJL;AqCpxJD;;;;EA2CM,eAAA;EACA,oBAAA;EACA,uBAAA;CrC+uJL;AsC7xJD;EACE,gBAAA;EACA,2BAAA;EACA,eAAA;EACA,iBAAA;EACA,eAAA;EACA,YAAA;EACA,mBAAA;EACA,oBAAA;EACA,yBAAA;EACA,sBAAA;CtC+xJD;AsC3xJG;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;CtC6xJL;AsCxxJC;EACE,cAAA;CtC0xJH;AsCtxJC;EACE,mBAAA;EACA,UAAA;CtCwxJH;AsCjxJD;ECtCE,0BAAA;CvC0zJD;AuCvzJG;;EAEE,0BAAA;CvCyzJL;AsCpxJD;EC1CE,0BAAA;CvCi0JD;AuC9zJG;;EAEE,0BAAA;CvCg0JL;AsCvxJD;EC9CE,0BAAA;CvCw0JD;AuCr0JG;;EAEE,0BAAA;CvCu0JL;AsC1xJD;EClDE,0BAAA;CvC+0JD;AuC50JG;;EAEE,0BAAA;CvC80JL;AsC7xJD;ECtDE,0BAAA;CvCs1JD;AuCn1JG;;EAEE,0BAAA;CvCq1JL;AsChyJD;EC1DE,0BAAA;CvC61JD;AuC11JG;;EAEE,0BAAA;CvC41JL;AwC91JD;EACE,sBAAA;EACA,gBAAA;EACA,iBAAA;EACA,gBAAA;EACA,kBAAA;EACA,eAAA;EACA,YAAA;EACA,mBAAA;EACA,oBAAA;EACA,uBAAA;EACA,0BAAA;EACA,oBAAA;CxCg2JD;AwC71JC;EACE,cAAA;CxC+1JH;AwC31JC;EACE,mBAAA;EACA,UAAA;CxC61JH;AwC11JC;;EAEE,OAAA;EACA,iBAAA;CxC41JH;AwCv1JG;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;CxCy1JL;AwCp1JC;;EAEE,eAAA;EACA,uBAAA;CxCs1JH;AwCn1JC;EACE,aAAA;CxCq1JH;AwCl1JC;EACE,kBAAA;CxCo1JH;AwCj1JC;EACE,iBAAA;CxCm1JH;AyC74JD;EACE,kBAAA;EACA,qBAAA;EACA,oBAAA;EACA,eAAA;EACA,0BAAA;CzC+4JD;AyCp5JD;;EASI,eAAA;CzC+4JH;AyCx5JD;EAaI,oBAAA;EACA,gBAAA;EACA,iBAAA;CzC84JH;AyC75JD;EAmBI,0BAAA;CzC64JH;AyC14JC;;EAEE,oBAAA;EACA,mBAAA;EACA,mBAAA;CzC44JH;AyCt6JD;EA8BI,gBAAA;CzC24JH;AyCx4JC;EAAA;IACE,kBAAA;IACA,qBAAA;GzC24JD;EyCz4JC;;IAEE,oBAAA;IACA,mBAAA;GzC24JH;EyCl5JD;;IAYI,gBAAA;GzC04JH;CACF;A0Cr7JD;EACE,eAAA;EACA,aAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;EACA,uBAAA;EACA,mBAAA;ErCiLA,4CAAA;EACK,uCAAA;EACG,oCAAA;CLuwJT;A0Cj8JD;;EAaI,mBAAA;EACA,kBAAA;C1Cw7JH;A0Cp7JC;;;EAGE,sBAAA;C1Cs7JH;A0C38JD;EA0BI,aAAA;EACA,eAAA;C1Co7JH;A2C/8JD;EACE,cAAA;EACA,oBAAA;EACA,8BAAA;EACA,mBAAA;C3Ci9JD;A2Cr9JD;EAQI,cAAA;EACA,eAAA;C3Cg9JH;A2Cz9JD;EAcI,kBAAA;C3C88JH;A2C59JD;;EAoBI,iBAAA;C3C48JH;A2Ch+JD;EAwBI,gBAAA;C3C28JH;A2Cl8JD;;EAEE,oBAAA;C3Co8JD;A2Ct8JD;;EAMI,mBAAA;EACA,UAAA;EACA,aAAA;EACA,eAAA;C3Co8JH;A2C57JD;ECvDE,eAAA;EACA,0BAAA;EACA,sBAAA;C5Cs/JD;A2Cj8JD;EClDI,0BAAA;C5Cs/JH;A2Cp8JD;EC9CI,eAAA;C5Cq/JH;A2Cn8JD;EC3DE,eAAA;EACA,0BAAA;EACA,sBAAA;C5CigKD;A2Cx8JD;ECtDI,0BAAA;C5CigKH;A2C38JD;EClDI,eAAA;C5CggKH;A2C18JD;EC/DE,eAAA;EACA,0BAAA;EACA,sBAAA;C5C4gKD;A2C/8JD;EC1DI,0BAAA;C5C4gKH;A2Cl9JD;ECtDI,eAAA;C5C2gKH;A2Cj9JD;ECnEE,eAAA;EACA,0BAAA;EACA,sBAAA;C5CuhKD;A2Ct9JD;EC9DI,0BAAA;C5CuhKH;A2Cz9JD;EC1DI,eAAA;C5CshKH;A6CvhKD;EACE;IAAQ,4BAAA;G7C0hKP;E6CzhKD;IAAQ,yBAAA;G7C4hKP;CACF;A6CzhKD;EACE;IAAQ,4BAAA;G7C4hKP;E6
C3hKD;IAAQ,yBAAA;G7C8hKP;CACF;A6CjiKD;EACE;IAAQ,4BAAA;G7C4hKP;E6C3hKD;IAAQ,yBAAA;G7C8hKP;CACF;A6CvhKD;EACE,aAAA;EACA,oBAAA;EACA,iBAAA;EACA,0BAAA;EACA,mBAAA;ExCsCA,uDAAA;EACQ,+CAAA;CLo/JT;A6CthKD;EACE,YAAA;EACA,UAAA;EACA,aAAA;EACA,gBAAA;EACA,kBAAA;EACA,YAAA;EACA,mBAAA;EACA,0BAAA;ExCyBA,uDAAA;EACQ,+CAAA;EAyHR,oCAAA;EACK,+BAAA;EACG,4BAAA;CLw4JT;A6CnhKD;;ECDI,8MAAA;EACA,yMAAA;EACA,sMAAA;EDEF,mCAAA;EAAA,2BAAA;C7CuhKD;A6ChhKD;;ExC5CE,2DAAA;EACK,sDAAA;EACG,mDAAA;CLgkKT;A6C7gKD;EEvEE,0BAAA;C/CulKD;A+CplKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9CuiKH;A6CjhKD;EE3EE,0BAAA;C/C+lKD;A+C5lKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9C+iKH;A6CrhKD;EE/EE,0BAAA;C/CumKD;A+CpmKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9CujKH;A6CzhKD;EEnFE,0BAAA;C/C+mKD;A+C5mKC;EDgDE,8MAAA;EACA,yMAAA;EACA,sMAAA;C9C+jKH;AgDvnKD;EAEE,iBAAA;ChDwnKD;AgDtnKC;EACE,cAAA;ChDwnKH;AgDpnKD;;EAEE,iBAAA;EACA,QAAA;ChDsnKD;AgDnnKD;EACE,eAAA;ChDqnKD;AgDlnKD;EACE,eAAA;ChDonKD;AgDjnKC;EACE,gBAAA;ChDmnKH;AgD/mKD;;EAEE,mBAAA;ChDinKD;AgD9mKD;;EAEE,oBAAA;ChDgnKD;AgD7mKD;;;EAGE,oBAAA;EACA,oBAAA;ChD+mKD;AgD5mKD;EACE,uBAAA;ChD8mKD;AgD3mKD;EACE,uBAAA;ChD6mKD;AgDzmKD;EACE,cAAA;EACA,mBAAA;ChD2mKD;AgDrmKD;EACE,gBAAA;EACA,iBAAA;ChDumKD;AiD5pKD;EAEE,gBAAA;EACA,oBAAA;CjD6pKD;AiDrpKD;EACE,mBAAA;EACA,eAAA;EACA,mBAAA;EAEA,oBAAA;EACA,uBAAA;EACA,uBAAA;CjDspKD;AiDnpKC;ErB7BA,4BAAA;EACA,6BAAA;C5BmrKD;AiDppKC;EACE,iBAAA;ErBzBF,gCAAA;EACA,+BAAA;C5BgrKD;AiDnpKC;;;EAGE,eAAA;EACA,oBAAA;EACA,0BAAA;CjDqpKH;AiD1pKC;;;EASI,eAAA;CjDspKL;AiD/pKC;;;EAYI,eAAA;CjDwpKL;AiDnpKC;;;EAGE,WAAA;EACA,YAAA;EACA,0BAAA;EACA,sBAAA;CjDqpKH;AiD3pKC;;;;;;;;;EAYI,eAAA;CjD0pKL;AiDtqKC;;;EAeI,eAAA;CjD4pKL;AiDjpKD;;EAEE,YAAA;CjDmpKD;AiDrpKD;;EAKI,YAAA;CjDopKH;AiDhpKC;;;;EAEE,YAAA;EACA,sBAAA;EACA,0BAAA;CjDopKH;AiDhpKD;EACE,YAAA;EACA,iBAAA;CjDkpKD;AczvKA;EoCIG,eAAA;EACA,0BAAA;ClDwvKH;AkDtvKG;;EAEE,eAAA;ClDwvKL;AkD1vKG;;EAKI,eAAA;ClDyvKP;AkDtvKK;;;;EAEE,eAAA;EACA,0BAAA;ClD0vKP;AkDxvKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClD6vKP;ActxKA;EoCIG,eAAA;EACA,0BAAA;ClDqxKH;AkDnxKG;;EAEE,eAAA;ClDqxKL;AkDvxKG;;EAKI,eAAA;ClDsxKP;AkDnxKK;;;;EAEE,eAAA;EACA,0BAAA;ClDuxKP;AkDrxKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClD0xKP;AcnzKA;EoCIG,eAAA;EACA,0BAAA;ClDkzKH;AkDhzKG;;EAEE,eAAA;ClDkzKL;AkDpzKG;;EAKI,eAAA;ClDmzKP;AkDhzKK;;;;EAEE,eAAA;EACA,0BAAA;ClDozKP;AkDlzKK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClDuzKP;Ach1KA;EoCIG,eAAA;EACA,0BAAA;ClD+0KH;AkD70KG;;EAEE,eAAA;ClD+0KL;AkDj1KG;;EAKI,eAAA;ClDg1KP;AkD70KK;;;;EAEE,eAAA;EACA,0BAAA;ClDi1KP;AkD/0KK;;;;;;EAGE,YAAA;EACA,0BAAA;EACA,sBAAA;ClDo1KP;AiDnvKD;EACE,cAAA;EACA,mBAAA;CjDqvKD;AiDnvKD;EACE,iBAAA;EACA,iBAAA;CjDqvKD;AmD72KD;EACE,oBAAA;EACA,uBAAA;EACA,8BAAA;EACA,mBAAA;E9C0DA,kDAAA;EACQ,0CAAA;CLszKT;AmD52KD;EACE,cAAA;CnD82KD;AmDz2KD;EACE,mBAAA;EACA,qCAAA;EvBtBA,4BAAA;EACA,6BAAA;C5Bk4KD;AmD/2KD;EAMI,eAAA;CnD42KH;AmDv2KD;EACE,cAAA;EACA,iBAAA;EACA,gBAAA;EACA,eAAA;CnDy2KD;AmD72KD;;;;;EAWI,eAAA;CnDy2KH;AmDp2KD;EACE,mBAAA;EACA,0BAAA;EACA,2BAAA;EvB1CA,gCAAA;EACA,+BAAA;C5Bi5KD;AmD91KD;;EAGI,iBAAA;CnD+1KH;AmDl2KD;;EAMM,oBAAA;EACA,iBAAA;CnDg2KL;AmD51KG;;EAEI,cAAA;EvBzEN,4BAAA;EACA,6BAAA;C5Bw6KD;AmD11KG;;EAEI,iBAAA;EvBzEN,gCAAA;EACA,+BAAA;C5Bs6KD;AmDn3KD;EvB5DE,0BAAA;EACA,2BAAA;C5Bk7KD;AmDt1KD;EAEI,oBAAA;CnDu1KH;AmDp1KD;EACE,oBAAA;CnDs1KD;AmD90KD;;;EAII,iBAAA;CnD+0KH;AmDn1KD;;;EAOM,oBAAA;EACA,mBAAA;CnDi1KL;AmDz1KD;;EvB3GE,4BAAA;EACA,6BAAA;C5Bw8KD;AmD91KD;;;;EAmBQ,4BAAA;EACA,6BAAA;CnDi1KP;AmDr2KD;;;;;;;;EAwBU,4BAAA;CnDu1KT;AmD/2KD;;;;;;;;EA4BU,6BAAA;CnD61KT;AmDz3KD;;EvBnGE,gCAAA;EACA,+BAAA;C5Bg+KD;AmD93KD;;;;EAyCQ,gCAAA;EACA,+BAAA;CnD21KP;AmDr4KD;;;;;;;;EA8CU,+BAAA;CnDi2KT;AmD/4KD;;;;;;;;EAkDU,gCAAA;C
nDu2KT;AmDz5KD;;;;EA2DI,2BAAA;CnDo2KH;AmD/5KD;;EA+DI,cAAA;CnDo2KH;AmDn6KD;;EAmEI,UAAA;CnDo2KH;AmDv6KD;;;;;;;;;;;;EA0EU,eAAA;CnD22KT;AmDr7KD;;;;;;;;;;;;EA8EU,gBAAA;CnDq3KT;AmDn8KD;;;;;;;;EAuFU,iBAAA;CnDs3KT;AmD78KD;;;;;;;;EAgGU,iBAAA;CnDu3KT;AmDv9KD;EAsGI,iBAAA;EACA,UAAA;CnDo3KH;AmD12KD;EACE,oBAAA;CnD42KD;AmD72KD;EAKI,iBAAA;EACA,mBAAA;CnD22KH;AmDj3KD;EASM,gBAAA;CnD22KL;AmDp3KD;EAcI,iBAAA;CnDy2KH;AmDv3KD;;EAkBM,2BAAA;CnDy2KL;AmD33KD;EAuBI,cAAA;CnDu2KH;AmD93KD;EAyBM,8BAAA;CnDw2KL;AmDj2KD;EC5PE,mBAAA;CpDgmLD;AoD9lLC;EACE,eAAA;EACA,0BAAA;EACA,mBAAA;CpDgmLH;AoDnmLC;EAMI,uBAAA;CpDgmLL;AoDtmLC;EASI,eAAA;EACA,0BAAA;CpDgmLL;AoD7lLC;EAEI,0BAAA;CpD8lLL;AmDh3KD;EC/PE,sBAAA;CpDknLD;AoDhnLC;EACE,YAAA;EACA,0BAAA;EACA,sBAAA;CpDknLH;AoDrnLC;EAMI,0BAAA;CpDknLL;AoDxnLC;EASI,eAAA;EACA,uBAAA;CpDknLL;AoD/mLC;EAEI,6BAAA;CpDgnLL;AmD/3KD;EClQE,sBAAA;CpDooLD;AoDloLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDooLH;AoDvoLC;EAMI,0BAAA;CpDooLL;AoD1oLC;EASI,eAAA;EACA,0BAAA;CpDooLL;AoDjoLC;EAEI,6BAAA;CpDkoLL;AmD94KD;ECrQE,sBAAA;CpDspLD;AoDppLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDspLH;AoDzpLC;EAMI,0BAAA;CpDspLL;AoD5pLC;EASI,eAAA;EACA,0BAAA;CpDspLL;AoDnpLC;EAEI,6BAAA;CpDopLL;AmD75KD;ECxQE,sBAAA;CpDwqLD;AoDtqLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpDwqLH;AoD3qLC;EAMI,0BAAA;CpDwqLL;AoD9qLC;EASI,eAAA;EACA,0BAAA;CpDwqLL;AoDrqLC;EAEI,6BAAA;CpDsqLL;AmD56KD;EC3QE,sBAAA;CpD0rLD;AoDxrLC;EACE,eAAA;EACA,0BAAA;EACA,sBAAA;CpD0rLH;AoD7rLC;EAMI,0BAAA;CpD0rLL;AoDhsLC;EASI,eAAA;EACA,0BAAA;CpD0rLL;AoDvrLC;EAEI,6BAAA;CpDwrLL;AqDxsLD;EACE,mBAAA;EACA,eAAA;EACA,UAAA;EACA,WAAA;EACA,iBAAA;CrD0sLD;AqD/sLD;;;;;EAYI,mBAAA;EACA,OAAA;EACA,UAAA;EACA,QAAA;EACA,YAAA;EACA,aAAA;EACA,UAAA;CrD0sLH;AqDrsLD;EACE,uBAAA;CrDusLD;AqDnsLD;EACE,oBAAA;CrDqsLD;AsDhuLD;EACE,iBAAA;EACA,cAAA;EACA,oBAAA;EACA,0BAAA;EACA,0BAAA;EACA,mBAAA;EjD0DA,wDAAA;EACQ,gDAAA;CLyqLT;AsD1uLD;EASI,mBAAA;EACA,kCAAA;CtDouLH;AsD/tLD;EACE,cAAA;EACA,mBAAA;CtDiuLD;AsD/tLD;EACE,aAAA;EACA,mBAAA;CtDiuLD;AuDrvLD;EACE,aAAA;EACA,gBAAA;EACA,kBAAA;EACA,eAAA;EACA,YAAA;EACA,0BAAA;EjCTA,0BAAA;EACA,aAAA;CtBiwLD;AuDtvLC;;EAEE,YAAA;EACA,sBAAA;EACA,gBAAA;EjChBF,0BAAA;EACA,aAAA;CtBywLD;AuDlvLC;EACE,WAAA;EACA,gBAAA;EACA,wBAAA;EACA,UAAA;EACA,yBAAA;EACA,sBAAA;EAAA,iBAAA;CvDovLH;AwD5wLD;EACE,iBAAA;CxD8wLD;AwD1wLD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,iBAAA;EACA,kCAAA;EAIA,WAAA;CxDywLD;AwDtwLC;EnDiHA,sCAAA;EACI,kCAAA;EACC,iCAAA;EACG,8BAAA;EAkER,oDAAA;EAEK,0CAAA;EACG,4CAAA;EAAA,oCAAA;EAAA,iGAAA;CLulLT;AwD5wLC;EnD6GA,mCAAA;EACI,+BAAA;EACC,8BAAA;EACG,2BAAA;CLkqLT;AwDhxLD;EACE,mBAAA;EACA,iBAAA;CxDkxLD;AwD9wLD;EACE,mBAAA;EACA,YAAA;EACA,aAAA;CxDgxLD;AwD5wLD;EACE,mBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,qCAAA;EACA,mBAAA;EnDcA,iDAAA;EACQ,yCAAA;EmDZR,WAAA;CxD8wLD;AwD1wLD;EACE,gBAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EACA,QAAA;EACA,cAAA;EACA,uBAAA;CxD4wLD;AwD1wLC;ElCpEA,yBAAA;EACA,WAAA;CtBi1LD;AwD7wLC;ElCrEA,0BAAA;EACA,aAAA;CtBq1LD;AwD5wLD;EACE,cAAA;EACA,iCAAA;CxD8wLD;AwD1wLD;EACE,iBAAA;CxD4wLD;AwDxwLD;EACE,UAAA;EACA,wBAAA;CxD0wLD;AwDrwLD;EACE,mBAAA;EACA,cAAA;CxDuwLD;AwDnwLD;EACE,cAAA;EACA,kBAAA;EACA,8BAAA;CxDqwLD;AwDxwLD;EAQI,iBAAA;EACA,iBAAA;CxDmwLH;AwD5wLD;EAaI,kBAAA;CxDkwLH;AwD/wLD;EAiBI,eAAA;CxDiwLH;AwD5vLD;EACE,mBAAA;EACA,aAAA;EACA,YAAA;EACA,aAAA;EACA,iBAAA;CxD8vLD;AwD1vLD;EAEE;IACE,aAAA;IACA,kBAAA;GxD2vLD;EwDzvLD;InDrEA,kDAAA;IACQ,0CAAA;GLi0LP;EwDxvLD;IAAY,aAAA;GxD2vLX;CACF;AwDzvLD;EACE;IAAY,aAAA;GxD4vLX;CACF;AyD34LD;EACE,mBAAA;EACA,cAAA;EACA,eAAA;ECRA,4DAAA;EAEA,mBAAA;EACA,iBAAA;EACA,wBAAA;EACA,iBAAA;EACA,iBAAA;EACA,kBAAA;EACA,sBAAA;EACA,kBAAA;EACA,qBAAA;EACA,uBAAA;EACA,mBAAA;EACA,qBAAA;EACA,kBAAA;
EACA,oBAAA;EDHA,gBAAA;EnCTA,yBAAA;EACA,WAAA;CtBm6LD;AyDv5LC;EnCbA,0BAAA;EACA,aAAA;CtBu6LD;AyD15LC;EACE,eAAA;EACA,iBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,iBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,gBAAA;CzD45LH;AyD15LC;EACE,eAAA;EACA,kBAAA;CzD45LH;AyDx5LC;EACE,UAAA;EACA,UAAA;EACA,kBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,WAAA;EACA,UAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,UAAA;EACA,UAAA;EACA,oBAAA;EACA,wBAAA;EACA,uBAAA;CzD05LH;AyDx5LC;EACE,SAAA;EACA,QAAA;EACA,iBAAA;EACA,4BAAA;EACA,yBAAA;CzD05LH;AyDx5LC;EACE,SAAA;EACA,SAAA;EACA,iBAAA;EACA,4BAAA;EACA,wBAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,UAAA;EACA,kBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,WAAA;EACA,iBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDx5LC;EACE,OAAA;EACA,UAAA;EACA,iBAAA;EACA,wBAAA;EACA,0BAAA;CzD05LH;AyDr5LD;EACE,iBAAA;EACA,iBAAA;EACA,YAAA;EACA,mBAAA;EACA,uBAAA;EACA,mBAAA;CzDu5LD;AyDn5LD;EACE,mBAAA;EACA,SAAA;EACA,UAAA;EACA,0BAAA;EACA,oBAAA;CzDq5LD;A2D9/LD;EACE,mBAAA;EACA,OAAA;EACA,QAAA;EACA,cAAA;EACA,cAAA;EACA,iBAAA;EACA,aAAA;EDXA,4DAAA;EAEA,mBAAA;EACA,iBAAA;EACA,wBAAA;EACA,iBAAA;EACA,iBAAA;EACA,kBAAA;EACA,sBAAA;EACA,kBAAA;EACA,qBAAA;EACA,uBAAA;EACA,mBAAA;EACA,qBAAA;EACA,kBAAA;EACA,oBAAA;ECAA,gBAAA;EACA,uBAAA;EACA,6BAAA;EACA,uBAAA;EACA,qCAAA;EACA,mBAAA;EtDiDA,kDAAA;EACQ,0CAAA;CL49LT;A2D1gMC;EAAQ,kBAAA;C3D6gMT;A2D5gMC;EAAU,kBAAA;C3D+gMX;A2D9gMC;EAAW,iBAAA;C3DihMZ;A2DhhMC;EAAS,mBAAA;C3DmhMV;A2D1iMD;EA4BI,mBAAA;C3DihMH;A2D/gMG;;EAEE,mBAAA;EACA,eAAA;EACA,SAAA;EACA,UAAA;EACA,0BAAA;EACA,oBAAA;C3DihML;A2D9gMG;EACE,YAAA;EACA,mBAAA;C3DghML;A2D5gMC;EACE,cAAA;EACA,UAAA;EACA,mBAAA;EACA,0BAAA;EACA,sCAAA;EACA,uBAAA;C3D8gMH;A2D7gMG;EACE,YAAA;EACA,mBAAA;EACA,aAAA;EACA,uBAAA;EACA,uBAAA;C3D+gML;A2D5gMC;EACE,SAAA;EACA,YAAA;EACA,kBAAA;EACA,4BAAA;EACA,wCAAA;EACA,qBAAA;C3D8gMH;A2D7gMG;EACE,cAAA;EACA,UAAA;EACA,aAAA;EACA,yBAAA;EACA,qBAAA;C3D+gML;A2D5gMC;EACE,WAAA;EACA,UAAA;EACA,mBAAA;EACA,oBAAA;EACA,6BAAA;EACA,yCAAA;C3D8gMH;A2D7gMG;EACE,SAAA;EACA,mBAAA;EACA,aAAA;EACA,oBAAA;EACA,0BAAA;C3D+gML;A2D3gMC;EACE,SAAA;EACA,aAAA;EACA,kBAAA;EACA,sBAAA;EACA,2BAAA;EACA,uCAAA;C3D6gMH;A2D5gMG;EACE,WAAA;EACA,cAAA;EACA,aAAA;EACA,sBAAA;EACA,wBAAA;C3D8gML;A2DzgMD;EACE,kBAAA;EACA,UAAA;EACA,gBAAA;EACA,0BAAA;EACA,iCAAA;EACA,2BAAA;C3D2gMD;A2DxgMD;EACE,kBAAA;C3D0gMD;A4D9nMD;EACE,mBAAA;C5DgoMD;A4D7nMD;EACE,mBAAA;EACA,YAAA;EACA,iBAAA;C5D+nMD;A4DloMD;EAMI,mBAAA;EACA,cAAA;EvD6KF,0CAAA;EACK,qCAAA;EACG,kCAAA;CLm9LT;A4DzoMD;;EAcM,eAAA;C5D+nML;A4D3nMG;EAAA;IvDuLF,uDAAA;IAEK,6CAAA;IACG,+CAAA;IAAA,uCAAA;IAAA,0GAAA;IA7JR,oCAAA;IAEQ,4BAAA;IA+GR,4BAAA;IAEQ,oBAAA;GLw/LP;E4DnoMG;;IvDmHJ,2CAAA;IACQ,mCAAA;IuDjHF,QAAA;G5DsoML;E4DpoMG;;IvD8GJ,4CAAA;IACQ,oCAAA;IuD5GF,QAAA;G5DuoML;E4DroMG;;;IvDyGJ,wCAAA;IACQ,gCAAA;IuDtGF,QAAA;G5DwoML;CACF;A4D9qMD;;;EA6CI,eAAA;C5DsoMH;A4DnrMD;EAiDI,QAAA;C5DqoMH;A4DtrMD;;EAsDI,mBAAA;EACA,OAAA;EACA,YAAA;C5DooMH;A4D5rMD;EA4DI,WAAA;C5DmoMH;A4D/rMD;EA+DI,YAAA;C5DmoMH;A4DlsMD;;EAmEI,QAAA;C5DmoMH;A4DtsMD;EAuEI,YAAA;C5DkoMH;A4DzsMD;EA0EI,WAAA;C5DkoMH;A4D1nMD;EACE,mBAAA;EACA,OAAA;EACA,UAAA;EACA,QAAA;EACA,WAAA;EACA,gBAAA;EACA,YAAA;EACA,mBAAA;EACA,0CAAA;EACA,mCAAA;EtCpGA,0BAAA;EACA,aAAA;CtBiuMD;A4DxnMC;EdrGE,mGAAA;EACA,8FAAA;EACA,qHAAA;EAAA,+FAAA;EACA,uHAAA;EACA,4BAAA;C9CguMH;A4D5nMC;EACE,SAAA;EACA,WAAA;Ed1GA,mGAAA;EACA,8FAAA;EACA,qHAAA;EAAA,+FAAA;EACA,uHAAA;EACA,4BAAA;C9CyuMH;A4D9nMC;;EAEE,YAAA;EACA,sBAAA;EACA,WAAA;EtCxHF,0BAAA;EACA,aAAA;CtByvMD;A4DhqMD;;;;EAuCI,mBAAA;EACA,SAAA;EACA,WAAA;EACA,sBAAA;EACA,kBAAA;C5D+nMH;A4D1qMD;;EA+CI,UAAA;EACA,mBAAA;C5D+nMH;A4D/qMD;;EAoDI,WAAA;EACA,oBAAA;C5D+nMH;A4DprMD;;EAyDI,YAAA;EACA,aAAA;EACA,mBAAA;EACA,eAAA;C5D+nMH;A4D3n
MG;EACE,iBAAA;C5D6nML;A4DznMG;EACE,iBAAA;C5D2nML;A4DjnMD;EACE,mBAAA;EACA,aAAA;EACA,UAAA;EACA,YAAA;EACA,WAAA;EACA,gBAAA;EACA,kBAAA;EACA,mBAAA;EACA,iBAAA;C5DmnMD;A4D5nMD;EAYI,sBAAA;EACA,YAAA;EACA,aAAA;EACA,YAAA;EACA,oBAAA;EACA,gBAAA;EAUA,0BAAA;EACA,mCAAA;EAEA,uBAAA;EACA,oBAAA;C5DymMH;A4DxoMD;EAmCI,YAAA;EACA,aAAA;EACA,UAAA;EACA,uBAAA;C5DwmMH;A4DjmMD;EACE,mBAAA;EACA,WAAA;EACA,aAAA;EACA,UAAA;EACA,YAAA;EACA,kBAAA;EACA,qBAAA;EACA,YAAA;EACA,mBAAA;EACA,0CAAA;C5DmmMD;A4DjmMC;EACE,kBAAA;C5DmmMH;A4D7lMD;EAGE;;;;IAKI,YAAA;IACA,aAAA;IACA,kBAAA;IACA,gBAAA;G5D4lMH;E4DpmMD;;IAYI,mBAAA;G5D4lMH;E4DxmMD;;IAgBI,oBAAA;G5D4lMH;E4DvlMD;IACE,WAAA;IACA,UAAA;IACA,qBAAA;G5DylMD;E4DrlMD;IACE,aAAA;G5DulMD;CACF;A6Dz1MC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;EAEE,eAAA;EACA,aAAA;C7Dy3MH;A6Dv3MC;;;;;;;;;;;;;;;;EACE,YAAA;C7Dw4MH;AiC94MD;E6BVE,eAAA;EACA,mBAAA;EACA,kBAAA;C9D25MD;AiCh5MD;EACE,wBAAA;CjCk5MD;AiCh5MD;EACE,uBAAA;CjCk5MD;AiC14MD;EACE,yBAAA;CjC44MD;AiC14MD;EACE,0BAAA;CjC44MD;AiC14MD;EACE,mBAAA;CjC44MD;AiC14MD;E8BzBE,YAAA;EACA,mBAAA;EACA,kBAAA;EACA,8BAAA;EACA,UAAA;C/Ds6MD;AiCx4MD;EACE,yBAAA;CjC04MD;AiCn4MD;EACE,gBAAA;CjCq4MD;AgEt6MD;EACE,oBAAA;ChEw6MD;AgEl6MD;;;;EClBE,yBAAA;CjE07MD;AgEj6MD;;;;;;;;;;;;EAYE,yBAAA;ChEm6MD;AgE/5MC;EAAA;ICjDA,0BAAA;GjEo9MC;EiEn9MD;IAAU,0BAAA;GjEs9MT;EiEr9MD;IAAU,8BAAA;GjEw9MT;EiEv9MD;;IACU,+BAAA;GjE09MT;CACF;AgEz6MC;EAAA;IACE,0BAAA;GhE46MD;CACF;AgEz6MC;EAAA;IACE,2BAAA;GhE46MD;CACF;AgEz6MC;EAAA;IACE,iCAAA;GhE46MD;CACF;AgEx6MC;EAAA;ICtEA,0BAAA;GjEk/MC;EiEj/MD;IAAU,0BAAA;GjEo/MT;EiEn/MD;IAAU,8BAAA;GjEs/MT;EiEr/MD;;IACU,+BAAA;GjEw/MT;CACF;AgEl7MC;EAAA;IACE,0BAAA;GhEq7MD;CACF;AgEl7MC;EAAA;IACE,2BAAA;GhEq7MD;CACF;AgEl7MC;EAAA;IACE,iCAAA;GhEq7MD;CACF;AgEj7MC;EAAA;IC3FA,0BAAA;GjEghNC;EiE/gND;IAAU,0BAAA;GjEkhNT;EiEjhND;IAAU,8BAAA;GjEohNT;EiEnhND;;IACU,+BAAA;GjEshNT;CACF;AgE37MC;EAAA;IACE,0BAAA;GhE87MD;CACF;AgE37MC;EAAA;IACE,2BAAA;GhE87MD;CACF;AgE37MC;EAAA;IACE,iCAAA;GhE87MD;CACF;AgE17MC;EAAA;IChHA,0BAAA;GjE8iNC;EiE7iND;IAAU,0BAAA;GjEgjNT;EiE/iND;IAAU,8BAAA;GjEkjNT;EiEjjND;;IACU,+BAAA;GjEojNT;CACF;AgEp8MC;EAAA;IACE,0BAAA;GhEu8MD;CACF;AgEp8MC;EAAA;IACE,2BAAA;GhEu8MD;CACF;AgEp8MC;EAAA;IACE,iCAAA;GhEu8MD;CACF;AgEn8MC;EAAA;IC7HA,yBAAA;GjEokNC;CACF;AgEn8MC;EAAA;IClIA,yBAAA;GjEykNC;CACF;AgEn8MC;EAAA;ICvIA,yBAAA;GjE8kNC;CACF;AgEn8MC;EAAA;IC5IA,yBAAA;GjEmlNC;CACF;AgE77MD;ECvJE,yBAAA;CjEulND;AgE77MC;EAAA;IClKA,0BAAA;GjEmmNC;EiElmND;IAAU,0BAAA;GjEqmNT;EiEpmND;IAAU,8BAAA;GjEumNT;EiEtmND;;IACU,+BAAA;GjEymNT;CACF;AgEx8MD;EACE,yBAAA;ChE08MD;AgEx8MC;EAAA;IACE,0BAAA;GhE28MD;CACF;AgEz8MD;EACE,yBAAA;ChE28MD;AgEz8MC;EAAA;IACE,2BAAA;GhE48MD;CACF;AgE18MD;EACE,yBAAA;ChE48MD;AgE18MC;EAAA;IACE,iCAAA;GhE68MD;CACF;AgEz8MC;EAAA;ICrLA,yBAAA;GjEkoNC;CACF","file":"bootstrap.css","sourcesContent":["/*!\n * Bootstrap v3.4.1 (https://getbootstrap.com/)\n * Copyright 2011-2019 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n/*! 
normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\nhtml {\n font-family: sans-serif;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n}\nbody {\n margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block;\n vertical-align: baseline;\n}\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n[hidden],\ntemplate {\n display: none;\n}\na {\n background-color: transparent;\n}\na:active,\na:hover {\n outline: 0;\n}\nabbr[title] {\n border-bottom: none;\n text-decoration: underline;\n text-decoration: underline dotted;\n}\nb,\nstrong {\n font-weight: bold;\n}\ndfn {\n font-style: italic;\n}\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\nmark {\n background: #ff0;\n color: #000;\n}\nsmall {\n font-size: 80%;\n}\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\nsup {\n top: -0.5em;\n}\nsub {\n bottom: -0.25em;\n}\nimg {\n border: 0;\n}\nsvg:not(:root) {\n overflow: hidden;\n}\nfigure {\n margin: 1em 40px;\n}\nhr {\n box-sizing: content-box;\n height: 0;\n}\npre {\n overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n color: inherit;\n font: inherit;\n margin: 0;\n}\nbutton {\n overflow: visible;\n}\nbutton,\nselect {\n text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button;\n cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n border: 0;\n padding: 0;\n}\ninput {\n line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n box-sizing: border-box;\n padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\ninput[type=\"search\"] {\n -webkit-appearance: textfield;\n box-sizing: content-box;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\nfieldset {\n border: 1px solid #c0c0c0;\n margin: 0 2px;\n padding: 0.35em 0.625em 0.75em;\n}\nlegend {\n border: 0;\n padding: 0;\n}\ntextarea {\n overflow: auto;\n}\noptgroup {\n font-weight: bold;\n}\ntable {\n border-collapse: collapse;\n border-spacing: 0;\n}\ntd,\nth {\n padding: 0;\n}\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n@media print {\n *,\n *:before,\n *:after {\n color: #000 !important;\n text-shadow: none !important;\n background: transparent !important;\n box-shadow: none !important;\n }\n a,\n a:visited {\n text-decoration: underline;\n }\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n pre,\n blockquote {\n border: 1px solid #999;\n page-break-inside: avoid;\n }\n thead {\n display: table-header-group;\n }\n tr,\n img {\n page-break-inside: avoid;\n }\n img {\n max-width: 100% !important;\n }\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n h2,\n h3 {\n page-break-after: avoid;\n }\n .navbar {\n display: none;\n }\n .btn > .caret,\n .dropup > .btn > .caret {\n border-top-color: #000 !important;\n }\n .label {\n border: 1px solid #000;\n }\n .table {\n border-collapse: collapse !important;\n }\n .table td,\n .table th {\n background-color: #fff !important;\n }\n .table-bordered th,\n .table-bordered td {\n border: 1px solid #ddd !important;\n }\n}\n@font-face {\n font-family: \"Glyphicons Halflings\";\n src: url(\"../fonts/glyphicons-halflings-regular.eot\");\n src: url(\"../fonts/glyphicons-halflings-regular.eot?#iefix\") format(\"embedded-opentype\"), url(\"../fonts/glyphicons-halflings-regular.woff2\") format(\"woff2\"), url(\"../fonts/glyphicons-halflings-regular.woff\") format(\"woff\"), url(\"../fonts/glyphicons-halflings-regular.ttf\") format(\"truetype\"), url(\"../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular\") format(\"svg\");\n}\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: \"Glyphicons Halflings\";\n font-style: normal;\n font-weight: 400;\n line-height: 1;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n content: \"\\002a\";\n}\n.glyphicon-plus:before {\n content: \"\\002b\";\n}\n.glyphicon-euro:before,\n.glyphicon-eur:before {\n content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n content: \"\\270f\";\n}\n.glyphicon-glass:before {\n content: \"\\e001\";\n}\n.glyphicon-music:before {\n content: \"\\e002\";\n}\n.glyphicon-search:before {\n content: \"\\e003\";\n}\n.glyphicon-heart:before {\n content: \"\\e005\";\n}\n.glyphicon-star:before {\n content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n content: \"\\e007\";\n}\n.glyphicon-user:before {\n content: \"\\e008\";\n}\n.glyphicon-film:before {\n content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n content: \"\\e010\";\n}\n.glyphicon-th:before {\n content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n content: \"\\e012\";\n}\n.glyphicon-ok:before {\n content: \"\\e013\";\n}\n.glyphicon-remove:before {\n content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n content: \"\\e016\";\n}\n.glyphicon-off:before {\n content: \"\\e017\";\n}\n.glyphicon-signal:before {\n content: \"\\e018\";\n}\n.glyphicon-cog:before {\n content: \"\\e019\";\n}\n.glyphicon-trash:before {\n content: \"\\e020\";\n}\n.glyphicon-home:before {\n content: \"\\e021\";\n}\n.glyphicon-file:before {\n content: \"\\e022\";\n}\n.glyphicon-time:before {\n content: 
\"\\e023\";\n}\n.glyphicon-road:before {\n content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n content: \"\\e025\";\n}\n.glyphicon-download:before {\n content: \"\\e026\";\n}\n.glyphicon-upload:before {\n content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n content: \"\\e032\";\n}\n.glyphicon-lock:before {\n content: \"\\e033\";\n}\n.glyphicon-flag:before {\n content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n content: \"\\e040\";\n}\n.glyphicon-tag:before {\n content: \"\\e041\";\n}\n.glyphicon-tags:before {\n content: \"\\e042\";\n}\n.glyphicon-book:before {\n content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n content: \"\\e044\";\n}\n.glyphicon-print:before {\n content: \"\\e045\";\n}\n.glyphicon-camera:before {\n content: \"\\e046\";\n}\n.glyphicon-font:before {\n content: \"\\e047\";\n}\n.glyphicon-bold:before {\n content: \"\\e048\";\n}\n.glyphicon-italic:before {\n content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n content: \"\\e055\";\n}\n.glyphicon-list:before {\n content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n content: \"\\e059\";\n}\n.glyphicon-picture:before {\n content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n content: \"\\e063\";\n}\n.glyphicon-tint:before {\n content: \"\\e064\";\n}\n.glyphicon-edit:before {\n content: \"\\e065\";\n}\n.glyphicon-share:before {\n content: \"\\e066\";\n}\n.glyphicon-check:before {\n content: \"\\e067\";\n}\n.glyphicon-move:before {\n content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n content: \"\\e070\";\n}\n.glyphicon-backward:before {\n content: \"\\e071\";\n}\n.glyphicon-play:before {\n content: \"\\e072\";\n}\n.glyphicon-pause:before {\n content: \"\\e073\";\n}\n.glyphicon-stop:before {\n content: \"\\e074\";\n}\n.glyphicon-forward:before {\n content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n content: \"\\e077\";\n}\n.glyphicon-eject:before {\n content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n content: 
\"\\e087\";\n}\n.glyphicon-remove-circle:before {\n content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n content: \"\\e101\";\n}\n.glyphicon-gift:before {\n content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n content: \"\\e103\";\n}\n.glyphicon-fire:before {\n content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n content: \"\\e107\";\n}\n.glyphicon-plane:before {\n content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n content: \"\\e109\";\n}\n.glyphicon-random:before {\n content: \"\\e110\";\n}\n.glyphicon-comment:before {\n content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n content: \"\\e122\";\n}\n.glyphicon-bell:before {\n content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n content: \"\\e134\";\n}\n.glyphicon-globe:before {\n content: \"\\e135\";\n}\n.glyphicon-wrench:before {\n content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n content: \"\\e137\";\n}\n.glyphicon-filter:before {\n content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n content: \"\\e143\";\n}\n.glyphicon-link:before {\n content: \"\\e144\";\n}\n.glyphicon-phone:before {\n content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n content: \"\\e146\";\n}\n.glyphicon-usd:before {\n content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n content: \"\\e149\";\n}\n.glyphicon-sort:before {\n content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n 
content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n content: \"\\e157\";\n}\n.glyphicon-expand:before {\n content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n content: \"\\e161\";\n}\n.glyphicon-flash:before {\n content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n content: \"\\e164\";\n}\n.glyphicon-record:before {\n content: \"\\e165\";\n}\n.glyphicon-save:before {\n content: \"\\e166\";\n}\n.glyphicon-open:before {\n content: \"\\e167\";\n}\n.glyphicon-saved:before {\n content: \"\\e168\";\n}\n.glyphicon-import:before {\n content: \"\\e169\";\n}\n.glyphicon-export:before {\n content: \"\\e170\";\n}\n.glyphicon-send:before {\n content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n content: \"\\e179\";\n}\n.glyphicon-header:before {\n content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n content: \"\\e183\";\n}\n.glyphicon-tower:before {\n content: \"\\e184\";\n}\n.glyphicon-stats:before {\n content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n content: \"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n content: \"\\e200\";\n}\n.glyphicon-cd:before {\n content: \"\\e201\";\n}\n.glyphicon-save-file:before {\n content: \"\\e202\";\n}\n.glyphicon-open-file:before {\n content: \"\\e203\";\n}\n.glyphicon-level-up:before {\n content: \"\\e204\";\n}\n.glyphicon-copy:before {\n content: \"\\e205\";\n}\n.glyphicon-paste:before {\n content: \"\\e206\";\n}\n.glyphicon-alert:before {\n content: \"\\e209\";\n}\n.glyphicon-equalizer:before {\n content: \"\\e210\";\n}\n.glyphicon-king:before {\n content: \"\\e211\";\n}\n.glyphicon-queen:before {\n content: \"\\e212\";\n}\n.glyphicon-pawn:before {\n content: \"\\e213\";\n}\n.glyphicon-bishop:before {\n content: \"\\e214\";\n}\n.glyphicon-knight:before {\n content: \"\\e215\";\n}\n.glyphicon-baby-formula:before {\n content: \"\\e216\";\n}\n.glyphicon-tent:before {\n content: \"\\26fa\";\n}\n.glyphicon-blackboard:before {\n content: 
\"\\e218\";\n}\n.glyphicon-bed:before {\n content: \"\\e219\";\n}\n.glyphicon-apple:before {\n content: \"\\f8ff\";\n}\n.glyphicon-erase:before {\n content: \"\\e221\";\n}\n.glyphicon-hourglass:before {\n content: \"\\231b\";\n}\n.glyphicon-lamp:before {\n content: \"\\e223\";\n}\n.glyphicon-duplicate:before {\n content: \"\\e224\";\n}\n.glyphicon-piggy-bank:before {\n content: \"\\e225\";\n}\n.glyphicon-scissors:before {\n content: \"\\e226\";\n}\n.glyphicon-bitcoin:before {\n content: \"\\e227\";\n}\n.glyphicon-btc:before {\n content: \"\\e227\";\n}\n.glyphicon-xbt:before {\n content: \"\\e227\";\n}\n.glyphicon-yen:before {\n content: \"\\00a5\";\n}\n.glyphicon-jpy:before {\n content: \"\\00a5\";\n}\n.glyphicon-ruble:before {\n content: \"\\20bd\";\n}\n.glyphicon-rub:before {\n content: \"\\20bd\";\n}\n.glyphicon-scale:before {\n content: \"\\e230\";\n}\n.glyphicon-ice-lolly:before {\n content: \"\\e231\";\n}\n.glyphicon-ice-lolly-tasted:before {\n content: \"\\e232\";\n}\n.glyphicon-education:before {\n content: \"\\e233\";\n}\n.glyphicon-option-horizontal:before {\n content: \"\\e234\";\n}\n.glyphicon-option-vertical:before {\n content: \"\\e235\";\n}\n.glyphicon-menu-hamburger:before {\n content: \"\\e236\";\n}\n.glyphicon-modal-window:before {\n content: \"\\e237\";\n}\n.glyphicon-oil:before {\n content: \"\\e238\";\n}\n.glyphicon-grain:before {\n content: \"\\e239\";\n}\n.glyphicon-sunglasses:before {\n content: \"\\e240\";\n}\n.glyphicon-text-size:before {\n content: \"\\e241\";\n}\n.glyphicon-text-color:before {\n content: \"\\e242\";\n}\n.glyphicon-text-background:before {\n content: \"\\e243\";\n}\n.glyphicon-object-align-top:before {\n content: \"\\e244\";\n}\n.glyphicon-object-align-bottom:before {\n content: \"\\e245\";\n}\n.glyphicon-object-align-horizontal:before {\n content: \"\\e246\";\n}\n.glyphicon-object-align-left:before {\n content: \"\\e247\";\n}\n.glyphicon-object-align-vertical:before {\n content: \"\\e248\";\n}\n.glyphicon-object-align-right:before {\n content: \"\\e249\";\n}\n.glyphicon-triangle-right:before {\n content: \"\\e250\";\n}\n.glyphicon-triangle-left:before {\n content: \"\\e251\";\n}\n.glyphicon-triangle-bottom:before {\n content: \"\\e252\";\n}\n.glyphicon-triangle-top:before {\n content: \"\\e253\";\n}\n.glyphicon-console:before {\n content: \"\\e254\";\n}\n.glyphicon-superscript:before {\n content: \"\\e255\";\n}\n.glyphicon-subscript:before {\n content: \"\\e256\";\n}\n.glyphicon-menu-left:before {\n content: \"\\e257\";\n}\n.glyphicon-menu-right:before {\n content: \"\\e258\";\n}\n.glyphicon-menu-down:before {\n content: \"\\e259\";\n}\n.glyphicon-menu-up:before {\n content: \"\\e260\";\n}\n* {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\n*:before,\n*:after {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n}\nhtml {\n font-size: 10px;\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-size: 14px;\n line-height: 1.42857143;\n color: #333333;\n background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\na {\n color: #337ab7;\n text-decoration: none;\n}\na:hover,\na:focus {\n color: #23527c;\n text-decoration: underline;\n}\na:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\nfigure {\n margin: 0;\n}\nimg {\n vertical-align: middle;\n}\n.img-responsive,\n.thumbnail > 
img,\n.thumbnail a > img,\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n display: block;\n max-width: 100%;\n height: auto;\n}\n.img-rounded {\n border-radius: 6px;\n}\n.img-thumbnail {\n padding: 4px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: all 0.2s ease-in-out;\n -o-transition: all 0.2s ease-in-out;\n transition: all 0.2s ease-in-out;\n display: inline-block;\n max-width: 100%;\n height: auto;\n}\n.img-circle {\n border-radius: 50%;\n}\nhr {\n margin-top: 20px;\n margin-bottom: 20px;\n border: 0;\n border-top: 1px solid #eeeeee;\n}\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n border: 0;\n}\n.sr-only-focusable:active,\n.sr-only-focusable:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n}\n[role=\"button\"] {\n cursor: pointer;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n font-family: inherit;\n font-weight: 500;\n line-height: 1.1;\n color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n font-weight: 400;\n line-height: 1;\n color: #777777;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n margin-top: 20px;\n margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n margin-top: 10px;\n margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n font-size: 75%;\n}\nh1,\n.h1 {\n font-size: 36px;\n}\nh2,\n.h2 {\n font-size: 30px;\n}\nh3,\n.h3 {\n font-size: 24px;\n}\nh4,\n.h4 {\n font-size: 18px;\n}\nh5,\n.h5 {\n font-size: 14px;\n}\nh6,\n.h6 {\n font-size: 12px;\n}\np {\n margin: 0 0 10px;\n}\n.lead {\n margin-bottom: 20px;\n font-size: 16px;\n font-weight: 300;\n line-height: 1.4;\n}\n@media (min-width: 768px) {\n .lead {\n font-size: 21px;\n }\n}\nsmall,\n.small {\n font-size: 85%;\n}\nmark,\n.mark {\n padding: 0.2em;\n background-color: #fcf8e3;\n}\n.text-left {\n text-align: left;\n}\n.text-right {\n text-align: right;\n}\n.text-center {\n text-align: center;\n}\n.text-justify {\n text-align: justify;\n}\n.text-nowrap {\n white-space: nowrap;\n}\n.text-lowercase {\n text-transform: lowercase;\n}\n.text-uppercase {\n text-transform: uppercase;\n}\n.text-capitalize {\n text-transform: capitalize;\n}\n.text-muted {\n color: #777777;\n}\n.text-primary {\n color: #337ab7;\n}\na.text-primary:hover,\na.text-primary:focus {\n color: #286090;\n}\n.text-success {\n color: #3c763d;\n}\na.text-success:hover,\na.text-success:focus {\n color: #2b542c;\n}\n.text-info {\n color: #31708f;\n}\na.text-info:hover,\na.text-info:focus {\n color: #245269;\n}\n.text-warning {\n color: #8a6d3b;\n}\na.text-warning:hover,\na.text-warning:focus {\n color: #66512c;\n}\n.text-danger {\n color: #a94442;\n}\na.text-danger:hover,\na.text-danger:focus {\n color: #843534;\n}\n.bg-primary {\n color: #fff;\n background-color: #337ab7;\n}\na.bg-primary:hover,\na.bg-primary:focus {\n background-color: #286090;\n}\n.bg-success 
{\n background-color: #dff0d8;\n}\na.bg-success:hover,\na.bg-success:focus {\n background-color: #c1e2b3;\n}\n.bg-info {\n background-color: #d9edf7;\n}\na.bg-info:hover,\na.bg-info:focus {\n background-color: #afd9ee;\n}\n.bg-warning {\n background-color: #fcf8e3;\n}\na.bg-warning:hover,\na.bg-warning:focus {\n background-color: #f7ecb5;\n}\n.bg-danger {\n background-color: #f2dede;\n}\na.bg-danger:hover,\na.bg-danger:focus {\n background-color: #e4b9b9;\n}\n.page-header {\n padding-bottom: 9px;\n margin: 40px 0 20px;\n border-bottom: 1px solid #eeeeee;\n}\nul,\nol {\n margin-top: 0;\n margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n margin-bottom: 0;\n}\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n.list-inline {\n padding-left: 0;\n list-style: none;\n margin-left: -5px;\n}\n.list-inline > li {\n display: inline-block;\n padding-right: 5px;\n padding-left: 5px;\n}\ndl {\n margin-top: 0;\n margin-bottom: 20px;\n}\ndt,\ndd {\n line-height: 1.42857143;\n}\ndt {\n font-weight: 700;\n}\ndd {\n margin-left: 0;\n}\n@media (min-width: 768px) {\n .dl-horizontal dt {\n float: left;\n width: 160px;\n clear: left;\n text-align: right;\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n }\n .dl-horizontal dd {\n margin-left: 180px;\n }\n}\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n}\n.initialism {\n font-size: 90%;\n text-transform: uppercase;\n}\nblockquote {\n padding: 10px 20px;\n margin: 0 0 20px;\n font-size: 17.5px;\n border-left: 5px solid #eeeeee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n display: block;\n font-size: 80%;\n line-height: 1.42857143;\n color: #777777;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n content: \"\\2014 \\00A0\";\n}\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n text-align: right;\n border-right: 5px solid #eeeeee;\n border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n content: \"\";\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n content: \"\\00A0 \\2014\";\n}\naddress {\n margin-bottom: 20px;\n font-style: normal;\n line-height: 1.42857143;\n}\ncode,\nkbd,\npre,\nsamp {\n font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: #c7254e;\n background-color: #f9f2f4;\n border-radius: 4px;\n}\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: #fff;\n background-color: #333;\n border-radius: 3px;\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.25);\n}\nkbd kbd {\n padding: 0;\n font-size: 100%;\n font-weight: 700;\n box-shadow: none;\n}\npre {\n display: block;\n padding: 9.5px;\n margin: 0 0 10px;\n font-size: 13px;\n line-height: 1.42857143;\n color: #333333;\n word-break: break-all;\n word-wrap: break-word;\n background-color: #f5f5f5;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\npre code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n}\n.pre-scrollable {\n max-height: 340px;\n 
overflow-y: scroll;\n}\n.container {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n@media (min-width: 768px) {\n .container {\n width: 750px;\n }\n}\n@media (min-width: 992px) {\n .container {\n width: 970px;\n }\n}\n@media (min-width: 1200px) {\n .container {\n width: 1170px;\n }\n}\n.container-fluid {\n padding-right: 15px;\n padding-left: 15px;\n margin-right: auto;\n margin-left: auto;\n}\n.row {\n margin-right: -15px;\n margin-left: -15px;\n}\n.row-no-gutters {\n margin-right: 0;\n margin-left: 0;\n}\n.row-no-gutters [class*=\"col-\"] {\n padding-right: 0;\n padding-left: 0;\n}\n.col-xs-1,\n.col-sm-1,\n.col-md-1,\n.col-lg-1,\n.col-xs-2,\n.col-sm-2,\n.col-md-2,\n.col-lg-2,\n.col-xs-3,\n.col-sm-3,\n.col-md-3,\n.col-lg-3,\n.col-xs-4,\n.col-sm-4,\n.col-md-4,\n.col-lg-4,\n.col-xs-5,\n.col-sm-5,\n.col-md-5,\n.col-lg-5,\n.col-xs-6,\n.col-sm-6,\n.col-md-6,\n.col-lg-6,\n.col-xs-7,\n.col-sm-7,\n.col-md-7,\n.col-lg-7,\n.col-xs-8,\n.col-sm-8,\n.col-md-8,\n.col-lg-8,\n.col-xs-9,\n.col-sm-9,\n.col-md-9,\n.col-lg-9,\n.col-xs-10,\n.col-sm-10,\n.col-md-10,\n.col-lg-10,\n.col-xs-11,\n.col-sm-11,\n.col-md-11,\n.col-lg-11,\n.col-xs-12,\n.col-sm-12,\n.col-md-12,\n.col-lg-12 {\n position: relative;\n min-height: 1px;\n padding-right: 15px;\n padding-left: 15px;\n}\n.col-xs-1,\n.col-xs-2,\n.col-xs-3,\n.col-xs-4,\n.col-xs-5,\n.col-xs-6,\n.col-xs-7,\n.col-xs-8,\n.col-xs-9,\n.col-xs-10,\n.col-xs-11,\n.col-xs-12 {\n float: left;\n}\n.col-xs-12 {\n width: 100%;\n}\n.col-xs-11 {\n width: 91.66666667%;\n}\n.col-xs-10 {\n width: 83.33333333%;\n}\n.col-xs-9 {\n width: 75%;\n}\n.col-xs-8 {\n width: 66.66666667%;\n}\n.col-xs-7 {\n width: 58.33333333%;\n}\n.col-xs-6 {\n width: 50%;\n}\n.col-xs-5 {\n width: 41.66666667%;\n}\n.col-xs-4 {\n width: 33.33333333%;\n}\n.col-xs-3 {\n width: 25%;\n}\n.col-xs-2 {\n width: 16.66666667%;\n}\n.col-xs-1 {\n width: 8.33333333%;\n}\n.col-xs-pull-12 {\n right: 100%;\n}\n.col-xs-pull-11 {\n right: 91.66666667%;\n}\n.col-xs-pull-10 {\n right: 83.33333333%;\n}\n.col-xs-pull-9 {\n right: 75%;\n}\n.col-xs-pull-8 {\n right: 66.66666667%;\n}\n.col-xs-pull-7 {\n right: 58.33333333%;\n}\n.col-xs-pull-6 {\n right: 50%;\n}\n.col-xs-pull-5 {\n right: 41.66666667%;\n}\n.col-xs-pull-4 {\n right: 33.33333333%;\n}\n.col-xs-pull-3 {\n right: 25%;\n}\n.col-xs-pull-2 {\n right: 16.66666667%;\n}\n.col-xs-pull-1 {\n right: 8.33333333%;\n}\n.col-xs-pull-0 {\n right: auto;\n}\n.col-xs-push-12 {\n left: 100%;\n}\n.col-xs-push-11 {\n left: 91.66666667%;\n}\n.col-xs-push-10 {\n left: 83.33333333%;\n}\n.col-xs-push-9 {\n left: 75%;\n}\n.col-xs-push-8 {\n left: 66.66666667%;\n}\n.col-xs-push-7 {\n left: 58.33333333%;\n}\n.col-xs-push-6 {\n left: 50%;\n}\n.col-xs-push-5 {\n left: 41.66666667%;\n}\n.col-xs-push-4 {\n left: 33.33333333%;\n}\n.col-xs-push-3 {\n left: 25%;\n}\n.col-xs-push-2 {\n left: 16.66666667%;\n}\n.col-xs-push-1 {\n left: 8.33333333%;\n}\n.col-xs-push-0 {\n left: auto;\n}\n.col-xs-offset-12 {\n margin-left: 100%;\n}\n.col-xs-offset-11 {\n margin-left: 91.66666667%;\n}\n.col-xs-offset-10 {\n margin-left: 83.33333333%;\n}\n.col-xs-offset-9 {\n margin-left: 75%;\n}\n.col-xs-offset-8 {\n margin-left: 66.66666667%;\n}\n.col-xs-offset-7 {\n margin-left: 58.33333333%;\n}\n.col-xs-offset-6 {\n margin-left: 50%;\n}\n.col-xs-offset-5 {\n margin-left: 41.66666667%;\n}\n.col-xs-offset-4 {\n margin-left: 33.33333333%;\n}\n.col-xs-offset-3 {\n margin-left: 25%;\n}\n.col-xs-offset-2 {\n margin-left: 16.66666667%;\n}\n.col-xs-offset-1 {\n margin-left: 
8.33333333%;\n}\n.col-xs-offset-0 {\n margin-left: 0%;\n}\n@media (min-width: 768px) {\n .col-sm-1,\n .col-sm-2,\n .col-sm-3,\n .col-sm-4,\n .col-sm-5,\n .col-sm-6,\n .col-sm-7,\n .col-sm-8,\n .col-sm-9,\n .col-sm-10,\n .col-sm-11,\n .col-sm-12 {\n float: left;\n }\n .col-sm-12 {\n width: 100%;\n }\n .col-sm-11 {\n width: 91.66666667%;\n }\n .col-sm-10 {\n width: 83.33333333%;\n }\n .col-sm-9 {\n width: 75%;\n }\n .col-sm-8 {\n width: 66.66666667%;\n }\n .col-sm-7 {\n width: 58.33333333%;\n }\n .col-sm-6 {\n width: 50%;\n }\n .col-sm-5 {\n width: 41.66666667%;\n }\n .col-sm-4 {\n width: 33.33333333%;\n }\n .col-sm-3 {\n width: 25%;\n }\n .col-sm-2 {\n width: 16.66666667%;\n }\n .col-sm-1 {\n width: 8.33333333%;\n }\n .col-sm-pull-12 {\n right: 100%;\n }\n .col-sm-pull-11 {\n right: 91.66666667%;\n }\n .col-sm-pull-10 {\n right: 83.33333333%;\n }\n .col-sm-pull-9 {\n right: 75%;\n }\n .col-sm-pull-8 {\n right: 66.66666667%;\n }\n .col-sm-pull-7 {\n right: 58.33333333%;\n }\n .col-sm-pull-6 {\n right: 50%;\n }\n .col-sm-pull-5 {\n right: 41.66666667%;\n }\n .col-sm-pull-4 {\n right: 33.33333333%;\n }\n .col-sm-pull-3 {\n right: 25%;\n }\n .col-sm-pull-2 {\n right: 16.66666667%;\n }\n .col-sm-pull-1 {\n right: 8.33333333%;\n }\n .col-sm-pull-0 {\n right: auto;\n }\n .col-sm-push-12 {\n left: 100%;\n }\n .col-sm-push-11 {\n left: 91.66666667%;\n }\n .col-sm-push-10 {\n left: 83.33333333%;\n }\n .col-sm-push-9 {\n left: 75%;\n }\n .col-sm-push-8 {\n left: 66.66666667%;\n }\n .col-sm-push-7 {\n left: 58.33333333%;\n }\n .col-sm-push-6 {\n left: 50%;\n }\n .col-sm-push-5 {\n left: 41.66666667%;\n }\n .col-sm-push-4 {\n left: 33.33333333%;\n }\n .col-sm-push-3 {\n left: 25%;\n }\n .col-sm-push-2 {\n left: 16.66666667%;\n }\n .col-sm-push-1 {\n left: 8.33333333%;\n }\n .col-sm-push-0 {\n left: auto;\n }\n .col-sm-offset-12 {\n margin-left: 100%;\n }\n .col-sm-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-sm-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-sm-offset-9 {\n margin-left: 75%;\n }\n .col-sm-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-sm-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-sm-offset-6 {\n margin-left: 50%;\n }\n .col-sm-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-sm-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-sm-offset-3 {\n margin-left: 25%;\n }\n .col-sm-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-sm-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-sm-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 992px) {\n .col-md-1,\n .col-md-2,\n .col-md-3,\n .col-md-4,\n .col-md-5,\n .col-md-6,\n .col-md-7,\n .col-md-8,\n .col-md-9,\n .col-md-10,\n .col-md-11,\n .col-md-12 {\n float: left;\n }\n .col-md-12 {\n width: 100%;\n }\n .col-md-11 {\n width: 91.66666667%;\n }\n .col-md-10 {\n width: 83.33333333%;\n }\n .col-md-9 {\n width: 75%;\n }\n .col-md-8 {\n width: 66.66666667%;\n }\n .col-md-7 {\n width: 58.33333333%;\n }\n .col-md-6 {\n width: 50%;\n }\n .col-md-5 {\n width: 41.66666667%;\n }\n .col-md-4 {\n width: 33.33333333%;\n }\n .col-md-3 {\n width: 25%;\n }\n .col-md-2 {\n width: 16.66666667%;\n }\n .col-md-1 {\n width: 8.33333333%;\n }\n .col-md-pull-12 {\n right: 100%;\n }\n .col-md-pull-11 {\n right: 91.66666667%;\n }\n .col-md-pull-10 {\n right: 83.33333333%;\n }\n .col-md-pull-9 {\n right: 75%;\n }\n .col-md-pull-8 {\n right: 66.66666667%;\n }\n .col-md-pull-7 {\n right: 58.33333333%;\n }\n .col-md-pull-6 {\n right: 50%;\n }\n .col-md-pull-5 {\n right: 41.66666667%;\n }\n .col-md-pull-4 {\n right: 33.33333333%;\n }\n 
.col-md-pull-3 {\n right: 25%;\n }\n .col-md-pull-2 {\n right: 16.66666667%;\n }\n .col-md-pull-1 {\n right: 8.33333333%;\n }\n .col-md-pull-0 {\n right: auto;\n }\n .col-md-push-12 {\n left: 100%;\n }\n .col-md-push-11 {\n left: 91.66666667%;\n }\n .col-md-push-10 {\n left: 83.33333333%;\n }\n .col-md-push-9 {\n left: 75%;\n }\n .col-md-push-8 {\n left: 66.66666667%;\n }\n .col-md-push-7 {\n left: 58.33333333%;\n }\n .col-md-push-6 {\n left: 50%;\n }\n .col-md-push-5 {\n left: 41.66666667%;\n }\n .col-md-push-4 {\n left: 33.33333333%;\n }\n .col-md-push-3 {\n left: 25%;\n }\n .col-md-push-2 {\n left: 16.66666667%;\n }\n .col-md-push-1 {\n left: 8.33333333%;\n }\n .col-md-push-0 {\n left: auto;\n }\n .col-md-offset-12 {\n margin-left: 100%;\n }\n .col-md-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-md-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-md-offset-9 {\n margin-left: 75%;\n }\n .col-md-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-md-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-md-offset-6 {\n margin-left: 50%;\n }\n .col-md-offset-5 {\n margin-left: 41.66666667%;\n }\n .col-md-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-md-offset-3 {\n margin-left: 25%;\n }\n .col-md-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-md-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-md-offset-0 {\n margin-left: 0%;\n }\n}\n@media (min-width: 1200px) {\n .col-lg-1,\n .col-lg-2,\n .col-lg-3,\n .col-lg-4,\n .col-lg-5,\n .col-lg-6,\n .col-lg-7,\n .col-lg-8,\n .col-lg-9,\n .col-lg-10,\n .col-lg-11,\n .col-lg-12 {\n float: left;\n }\n .col-lg-12 {\n width: 100%;\n }\n .col-lg-11 {\n width: 91.66666667%;\n }\n .col-lg-10 {\n width: 83.33333333%;\n }\n .col-lg-9 {\n width: 75%;\n }\n .col-lg-8 {\n width: 66.66666667%;\n }\n .col-lg-7 {\n width: 58.33333333%;\n }\n .col-lg-6 {\n width: 50%;\n }\n .col-lg-5 {\n width: 41.66666667%;\n }\n .col-lg-4 {\n width: 33.33333333%;\n }\n .col-lg-3 {\n width: 25%;\n }\n .col-lg-2 {\n width: 16.66666667%;\n }\n .col-lg-1 {\n width: 8.33333333%;\n }\n .col-lg-pull-12 {\n right: 100%;\n }\n .col-lg-pull-11 {\n right: 91.66666667%;\n }\n .col-lg-pull-10 {\n right: 83.33333333%;\n }\n .col-lg-pull-9 {\n right: 75%;\n }\n .col-lg-pull-8 {\n right: 66.66666667%;\n }\n .col-lg-pull-7 {\n right: 58.33333333%;\n }\n .col-lg-pull-6 {\n right: 50%;\n }\n .col-lg-pull-5 {\n right: 41.66666667%;\n }\n .col-lg-pull-4 {\n right: 33.33333333%;\n }\n .col-lg-pull-3 {\n right: 25%;\n }\n .col-lg-pull-2 {\n right: 16.66666667%;\n }\n .col-lg-pull-1 {\n right: 8.33333333%;\n }\n .col-lg-pull-0 {\n right: auto;\n }\n .col-lg-push-12 {\n left: 100%;\n }\n .col-lg-push-11 {\n left: 91.66666667%;\n }\n .col-lg-push-10 {\n left: 83.33333333%;\n }\n .col-lg-push-9 {\n left: 75%;\n }\n .col-lg-push-8 {\n left: 66.66666667%;\n }\n .col-lg-push-7 {\n left: 58.33333333%;\n }\n .col-lg-push-6 {\n left: 50%;\n }\n .col-lg-push-5 {\n left: 41.66666667%;\n }\n .col-lg-push-4 {\n left: 33.33333333%;\n }\n .col-lg-push-3 {\n left: 25%;\n }\n .col-lg-push-2 {\n left: 16.66666667%;\n }\n .col-lg-push-1 {\n left: 8.33333333%;\n }\n .col-lg-push-0 {\n left: auto;\n }\n .col-lg-offset-12 {\n margin-left: 100%;\n }\n .col-lg-offset-11 {\n margin-left: 91.66666667%;\n }\n .col-lg-offset-10 {\n margin-left: 83.33333333%;\n }\n .col-lg-offset-9 {\n margin-left: 75%;\n }\n .col-lg-offset-8 {\n margin-left: 66.66666667%;\n }\n .col-lg-offset-7 {\n margin-left: 58.33333333%;\n }\n .col-lg-offset-6 {\n margin-left: 50%;\n }\n .col-lg-offset-5 {\n margin-left: 41.66666667%;\n }\n 
.col-lg-offset-4 {\n margin-left: 33.33333333%;\n }\n .col-lg-offset-3 {\n margin-left: 25%;\n }\n .col-lg-offset-2 {\n margin-left: 16.66666667%;\n }\n .col-lg-offset-1 {\n margin-left: 8.33333333%;\n }\n .col-lg-offset-0 {\n margin-left: 0%;\n }\n}\ntable {\n background-color: transparent;\n}\ntable col[class*=\"col-\"] {\n position: static;\n display: table-column;\n float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n position: static;\n display: table-cell;\n float: none;\n}\ncaption {\n padding-top: 8px;\n padding-bottom: 8px;\n color: #777777;\n text-align: left;\n}\nth {\n text-align: left;\n}\n.table {\n width: 100%;\n max-width: 100%;\n margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n padding: 8px;\n line-height: 1.42857143;\n vertical-align: top;\n border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n border-top: 0;\n}\n.table > tbody + tbody {\n border-top: 2px solid #ddd;\n}\n.table .table {\n background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n padding: 5px;\n}\n.table-bordered {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-of-type(odd) {\n background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover {\n background-color: #f5f5f5;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr:hover > .active,\n.table-hover > tbody > tr.active:hover > th {\n background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > 
tr.success:hover > td,\n.table-hover > tbody > tr:hover > .success,\n.table-hover > tbody > tr.success:hover > th {\n background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr:hover > .info,\n.table-hover > tbody > tr.info:hover > th {\n background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr:hover > .warning,\n.table-hover > tbody > tr.warning:hover > th {\n background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr:hover > .danger,\n.table-hover > tbody > tr.danger:hover > th {\n background-color: #ebcccc;\n}\n.table-responsive {\n min-height: 0.01%;\n overflow-x: auto;\n}\n@media screen and (max-width: 767px) {\n .table-responsive {\n width: 100%;\n margin-bottom: 15px;\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid #ddd;\n }\n .table-responsive > .table {\n margin-bottom: 0;\n }\n .table-responsive > .table > thead > tr > th,\n .table-responsive > .table > tbody > tr > th,\n .table-responsive > .table > tfoot > tr > th,\n .table-responsive > .table > thead > tr > td,\n .table-responsive > .table > tbody > tr > td,\n .table-responsive > .table > tfoot > tr > td {\n white-space: nowrap;\n }\n .table-responsive > .table-bordered {\n border: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:first-child,\n .table-responsive > .table-bordered > tbody > tr > th:first-child,\n .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n .table-responsive > .table-bordered > thead > tr > td:first-child,\n .table-responsive > .table-bordered > tbody > tr > td:first-child,\n .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n }\n .table-responsive > .table-bordered > thead > tr > th:last-child,\n .table-responsive > .table-bordered > tbody > tr > th:last-child,\n .table-responsive > .table-bordered > tfoot > tr 
> th:last-child,\n .table-responsive > .table-bordered > thead > tr > td:last-child,\n .table-responsive > .table-bordered > tbody > tr > td:last-child,\n .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n }\n .table-responsive > .table-bordered > tbody > tr:last-child > th,\n .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n .table-responsive > .table-bordered > tbody > tr:last-child > td,\n .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n border-bottom: 0;\n }\n}\nfieldset {\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: 20px;\n font-size: 21px;\n line-height: inherit;\n color: #333333;\n border: 0;\n border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n display: inline-block;\n max-width: 100%;\n margin-bottom: 5px;\n font-weight: 700;\n}\ninput[type=\"search\"] {\n -webkit-box-sizing: border-box;\n -moz-box-sizing: border-box;\n box-sizing: border-box;\n -webkit-appearance: none;\n appearance: none;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9;\n line-height: normal;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\ninput[type=\"radio\"].disabled,\ninput[type=\"checkbox\"].disabled,\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"] {\n cursor: not-allowed;\n}\ninput[type=\"file\"] {\n display: block;\n}\ninput[type=\"range\"] {\n display: block;\n width: 100%;\n}\nselect[multiple],\nselect[size] {\n height: auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\noutput {\n display: block;\n padding-top: 7px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555555;\n}\n.form-control {\n display: block;\n width: 100%;\n height: 34px;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n color: #555555;\n background-color: #fff;\n background-image: none;\n border: 1px solid #ccc;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n -o-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n border-color: #66afe9;\n outline: 0;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 8px rgba(102, 175, 233, 0.6);\n}\n.form-control::-moz-placeholder {\n color: #999;\n opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n color: #999;\n}\n.form-control::-webkit-input-placeholder {\n color: #999;\n}\n.form-control::-ms-expand {\n background-color: transparent;\n border: 0;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n background-color: #eeeeee;\n opacity: 1;\n}\n.form-control[disabled],\nfieldset[disabled] .form-control {\n cursor: not-allowed;\n}\ntextarea.form-control {\n height: auto;\n}\n@media screen and (-webkit-min-device-pixel-ratio: 0) {\n input[type=\"date\"].form-control,\n input[type=\"time\"].form-control,\n input[type=\"datetime-local\"].form-control,\n input[type=\"month\"].form-control {\n line-height: 34px;\n }\n input[type=\"date\"].input-sm,\n 
input[type=\"time\"].input-sm,\n input[type=\"datetime-local\"].input-sm,\n input[type=\"month\"].input-sm,\n .input-group-sm input[type=\"date\"],\n .input-group-sm input[type=\"time\"],\n .input-group-sm input[type=\"datetime-local\"],\n .input-group-sm input[type=\"month\"] {\n line-height: 30px;\n }\n input[type=\"date\"].input-lg,\n input[type=\"time\"].input-lg,\n input[type=\"datetime-local\"].input-lg,\n input[type=\"month\"].input-lg,\n .input-group-lg input[type=\"date\"],\n .input-group-lg input[type=\"time\"],\n .input-group-lg input[type=\"datetime-local\"],\n .input-group-lg input[type=\"month\"] {\n line-height: 46px;\n }\n}\n.form-group {\n margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n position: relative;\n display: block;\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.radio.disabled label,\n.checkbox.disabled label,\nfieldset[disabled] .radio label,\nfieldset[disabled] .checkbox label {\n cursor: not-allowed;\n}\n.radio label,\n.checkbox label {\n min-height: 20px;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: 400;\n cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n position: absolute;\n margin-top: 4px \\9;\n margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n position: relative;\n display: inline-block;\n padding-left: 20px;\n margin-bottom: 0;\n font-weight: 400;\n vertical-align: middle;\n cursor: pointer;\n}\n.radio-inline.disabled,\n.checkbox-inline.disabled,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox-inline {\n cursor: not-allowed;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n margin-top: 0;\n margin-left: 10px;\n}\n.form-control-static {\n min-height: 34px;\n padding-top: 7px;\n padding-bottom: 7px;\n margin-bottom: 0;\n}\n.form-control-static.input-lg,\n.form-control-static.input-sm {\n padding-right: 0;\n padding-left: 0;\n}\n.input-sm {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-sm {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n height: auto;\n}\n.form-group-sm .form-control {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.form-group-sm select.form-control {\n height: 30px;\n line-height: 30px;\n}\n.form-group-sm textarea.form-control,\n.form-group-sm select[multiple].form-control {\n height: auto;\n}\n.form-group-sm .form-control-static {\n height: 30px;\n min-height: 32px;\n padding: 6px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.input-lg {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-lg {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n height: auto;\n}\n.form-group-lg .form-control {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.form-group-lg select.form-control {\n height: 46px;\n line-height: 46px;\n}\n.form-group-lg textarea.form-control,\n.form-group-lg select[multiple].form-control {\n height: auto;\n}\n.form-group-lg .form-control-static {\n height: 46px;\n min-height: 38px;\n padding: 11px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.has-feedback {\n position: relative;\n}\n.has-feedback .form-control {\n padding-right: 
42.5px;\n}\n.form-control-feedback {\n position: absolute;\n top: 0;\n right: 0;\n z-index: 2;\n display: block;\n width: 34px;\n height: 34px;\n line-height: 34px;\n text-align: center;\n pointer-events: none;\n}\n.input-lg + .form-control-feedback,\n.input-group-lg + .form-control-feedback,\n.form-group-lg .form-control + .form-control-feedback {\n width: 46px;\n height: 46px;\n line-height: 46px;\n}\n.input-sm + .form-control-feedback,\n.input-group-sm + .form-control-feedback,\n.form-group-sm .form-control + .form-control-feedback {\n width: 30px;\n height: 30px;\n line-height: 30px;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline,\n.has-success.radio label,\n.has-success.checkbox label,\n.has-success.radio-inline label,\n.has-success.checkbox-inline label {\n color: #3c763d;\n}\n.has-success .form-control {\n border-color: #3c763d;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-success .form-control:focus {\n border-color: #2b542c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline,\n.has-warning.radio label,\n.has-warning.checkbox label,\n.has-warning.radio-inline label,\n.has-warning.checkbox-inline label {\n color: #8a6d3b;\n}\n.has-warning .form-control {\n border-color: #8a6d3b;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-warning .form-control:focus {\n border-color: #66512c;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline,\n.has-error.radio label,\n.has-error.checkbox label,\n.has-error.radio-inline label,\n.has-error.checkbox-inline label {\n color: #a94442;\n}\n.has-error .form-control {\n border-color: #a94442;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);\n}\n.has-error .form-control:focus {\n border-color: #843534;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n color: #a94442;\n background-color: #f2dede;\n border-color: #a94442;\n}\n.has-error .form-control-feedback {\n color: #a94442;\n}\n.has-feedback label ~ .form-control-feedback {\n top: 25px;\n}\n.has-feedback label.sr-only ~ .form-control-feedback {\n top: 0;\n}\n.help-block {\n display: block;\n margin-top: 5px;\n margin-bottom: 10px;\n color: #737373;\n}\n@media (min-width: 768px) {\n .form-inline .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline 
.form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .form-inline .form-control-static {\n display: inline-block;\n }\n .form-inline .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .form-inline .input-group .input-group-addon,\n .form-inline .input-group .input-group-btn,\n .form-inline .input-group .form-control {\n width: auto;\n }\n .form-inline .input-group > .form-control {\n width: 100%;\n }\n .form-inline .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio,\n .form-inline .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .form-inline .radio label,\n .form-inline .checkbox label {\n padding-left: 0;\n }\n .form-inline .radio input[type=\"radio\"],\n .form-inline .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .form-inline .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n padding-top: 7px;\n margin-top: 0;\n margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n min-height: 27px;\n}\n.form-horizontal .form-group {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .control-label {\n padding-top: 7px;\n margin-bottom: 0;\n text-align: right;\n }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n right: 15px;\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-lg .control-label {\n padding-top: 11px;\n font-size: 18px;\n }\n}\n@media (min-width: 768px) {\n .form-horizontal .form-group-sm .control-label {\n padding-top: 6px;\n font-size: 12px;\n }\n}\n.btn {\n display: inline-block;\n margin-bottom: 0;\n font-weight: normal;\n text-align: center;\n white-space: nowrap;\n vertical-align: middle;\n touch-action: manipulation;\n cursor: pointer;\n background-image: none;\n border: 1px solid transparent;\n padding: 6px 12px;\n font-size: 14px;\n line-height: 1.42857143;\n border-radius: 4px;\n -webkit-user-select: none;\n -moz-user-select: none;\n -ms-user-select: none;\n user-select: none;\n}\n.btn:focus,\n.btn:active:focus,\n.btn.active:focus,\n.btn.focus,\n.btn:active.focus,\n.btn.active.focus {\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus,\n.btn.focus {\n color: #333;\n text-decoration: none;\n}\n.btn:active,\n.btn.active {\n background-image: none;\n outline: 0;\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n cursor: not-allowed;\n filter: alpha(opacity=65);\n opacity: 0.65;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\na.btn.disabled,\nfieldset[disabled] a.btn {\n pointer-events: none;\n}\n.btn-default {\n color: #333;\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default:focus,\n.btn-default.focus {\n color: #333;\n background-color: #e6e6e6;\n border-color: #8c8c8c;\n}\n.btn-default:hover {\n color: #333;\n background-color: #e6e6e6;\n border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open > .dropdown-toggle.btn-default {\n color: #333;\n background-color: #e6e6e6;\n background-image: none;\n border-color: #adadad;\n}\n.btn-default:active:hover,\n.btn-default.active:hover,\n.open > 
.dropdown-toggle.btn-default:hover,\n.btn-default:active:focus,\n.btn-default.active:focus,\n.open > .dropdown-toggle.btn-default:focus,\n.btn-default:active.focus,\n.btn-default.active.focus,\n.open > .dropdown-toggle.btn-default.focus {\n color: #333;\n background-color: #d4d4d4;\n border-color: #8c8c8c;\n}\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled.focus,\n.btn-default[disabled].focus,\nfieldset[disabled] .btn-default.focus {\n background-color: #fff;\n border-color: #ccc;\n}\n.btn-default .badge {\n color: #fff;\n background-color: #333;\n}\n.btn-primary {\n color: #fff;\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary:focus,\n.btn-primary.focus {\n color: #fff;\n background-color: #286090;\n border-color: #122b40;\n}\n.btn-primary:hover {\n color: #fff;\n background-color: #286090;\n border-color: #204d74;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open > .dropdown-toggle.btn-primary {\n color: #fff;\n background-color: #286090;\n background-image: none;\n border-color: #204d74;\n}\n.btn-primary:active:hover,\n.btn-primary.active:hover,\n.open > .dropdown-toggle.btn-primary:hover,\n.btn-primary:active:focus,\n.btn-primary.active:focus,\n.open > .dropdown-toggle.btn-primary:focus,\n.btn-primary:active.focus,\n.btn-primary.active.focus,\n.open > .dropdown-toggle.btn-primary.focus {\n color: #fff;\n background-color: #204d74;\n border-color: #122b40;\n}\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled.focus,\n.btn-primary[disabled].focus,\nfieldset[disabled] .btn-primary.focus {\n background-color: #337ab7;\n border-color: #2e6da4;\n}\n.btn-primary .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.btn-success {\n color: #fff;\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success:focus,\n.btn-success.focus {\n color: #fff;\n background-color: #449d44;\n border-color: #255625;\n}\n.btn-success:hover {\n color: #fff;\n background-color: #449d44;\n border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open > .dropdown-toggle.btn-success {\n color: #fff;\n background-color: #449d44;\n background-image: none;\n border-color: #398439;\n}\n.btn-success:active:hover,\n.btn-success.active:hover,\n.open > .dropdown-toggle.btn-success:hover,\n.btn-success:active:focus,\n.btn-success.active:focus,\n.open > .dropdown-toggle.btn-success:focus,\n.btn-success:active.focus,\n.btn-success.active.focus,\n.open > .dropdown-toggle.btn-success.focus {\n color: #fff;\n background-color: #398439;\n border-color: #255625;\n}\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled.focus,\n.btn-success[disabled].focus,\nfieldset[disabled] .btn-success.focus {\n background-color: #5cb85c;\n border-color: #4cae4c;\n}\n.btn-success .badge {\n color: #5cb85c;\n background-color: #fff;\n}\n.btn-info {\n color: #fff;\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info:focus,\n.btn-info.focus {\n color: #fff;\n background-color: #31b0d5;\n border-color: #1b6d85;\n}\n.btn-info:hover {\n color: #fff;\n background-color: 
#31b0d5;\n border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open > .dropdown-toggle.btn-info {\n color: #fff;\n background-color: #31b0d5;\n background-image: none;\n border-color: #269abc;\n}\n.btn-info:active:hover,\n.btn-info.active:hover,\n.open > .dropdown-toggle.btn-info:hover,\n.btn-info:active:focus,\n.btn-info.active:focus,\n.open > .dropdown-toggle.btn-info:focus,\n.btn-info:active.focus,\n.btn-info.active.focus,\n.open > .dropdown-toggle.btn-info.focus {\n color: #fff;\n background-color: #269abc;\n border-color: #1b6d85;\n}\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled.focus,\n.btn-info[disabled].focus,\nfieldset[disabled] .btn-info.focus {\n background-color: #5bc0de;\n border-color: #46b8da;\n}\n.btn-info .badge {\n color: #5bc0de;\n background-color: #fff;\n}\n.btn-warning {\n color: #fff;\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning:focus,\n.btn-warning.focus {\n color: #fff;\n background-color: #ec971f;\n border-color: #985f0d;\n}\n.btn-warning:hover {\n color: #fff;\n background-color: #ec971f;\n border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open > .dropdown-toggle.btn-warning {\n color: #fff;\n background-color: #ec971f;\n background-image: none;\n border-color: #d58512;\n}\n.btn-warning:active:hover,\n.btn-warning.active:hover,\n.open > .dropdown-toggle.btn-warning:hover,\n.btn-warning:active:focus,\n.btn-warning.active:focus,\n.open > .dropdown-toggle.btn-warning:focus,\n.btn-warning:active.focus,\n.btn-warning.active.focus,\n.open > .dropdown-toggle.btn-warning.focus {\n color: #fff;\n background-color: #d58512;\n border-color: #985f0d;\n}\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled.focus,\n.btn-warning[disabled].focus,\nfieldset[disabled] .btn-warning.focus {\n background-color: #f0ad4e;\n border-color: #eea236;\n}\n.btn-warning .badge {\n color: #f0ad4e;\n background-color: #fff;\n}\n.btn-danger {\n color: #fff;\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger:focus,\n.btn-danger.focus {\n color: #fff;\n background-color: #c9302c;\n border-color: #761c19;\n}\n.btn-danger:hover {\n color: #fff;\n background-color: #c9302c;\n border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open > .dropdown-toggle.btn-danger {\n color: #fff;\n background-color: #c9302c;\n background-image: none;\n border-color: #ac2925;\n}\n.btn-danger:active:hover,\n.btn-danger.active:hover,\n.open > .dropdown-toggle.btn-danger:hover,\n.btn-danger:active:focus,\n.btn-danger.active:focus,\n.open > .dropdown-toggle.btn-danger:focus,\n.btn-danger:active.focus,\n.btn-danger.active.focus,\n.open > .dropdown-toggle.btn-danger.focus {\n color: #fff;\n background-color: #ac2925;\n border-color: #761c19;\n}\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] .btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled.focus,\n.btn-danger[disabled].focus,\nfieldset[disabled] .btn-danger.focus {\n background-color: #d9534f;\n border-color: #d43f3a;\n}\n.btn-danger .badge {\n color: #d9534f;\n background-color: #fff;\n}\n.btn-link {\n font-weight: 400;\n color: 
#337ab7;\n border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link.active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n background-color: transparent;\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n color: #23527c;\n text-decoration: underline;\n background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n color: #777777;\n text-decoration: none;\n}\n.btn-lg,\n.btn-group-lg > .btn {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\n.btn-sm,\n.btn-group-sm > .btn {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-xs,\n.btn-group-xs > .btn {\n padding: 1px 5px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\n.btn-block {\n display: block;\n width: 100%;\n}\n.btn-block + .btn-block {\n margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n width: 100%;\n}\n.fade {\n opacity: 0;\n -webkit-transition: opacity 0.15s linear;\n -o-transition: opacity 0.15s linear;\n transition: opacity 0.15s linear;\n}\n.fade.in {\n opacity: 1;\n}\n.collapse {\n display: none;\n}\n.collapse.in {\n display: block;\n}\ntr.collapse.in {\n display: table-row;\n}\ntbody.collapse.in {\n display: table-row-group;\n}\n.collapsing {\n position: relative;\n height: 0;\n overflow: hidden;\n -webkit-transition-property: height, visibility;\n transition-property: height, visibility;\n -webkit-transition-duration: 0.35s;\n transition-duration: 0.35s;\n -webkit-transition-timing-function: ease;\n transition-timing-function: ease;\n}\n.caret {\n display: inline-block;\n width: 0;\n height: 0;\n margin-left: 2px;\n vertical-align: middle;\n border-top: 4px dashed;\n border-top: 4px solid \\9;\n border-right: 4px solid transparent;\n border-left: 4px solid transparent;\n}\n.dropup,\n.dropdown {\n position: relative;\n}\n.dropdown-toggle:focus {\n outline: 0;\n}\n.dropdown-menu {\n position: absolute;\n top: 100%;\n left: 0;\n z-index: 1000;\n display: none;\n float: left;\n min-width: 160px;\n padding: 5px 0;\n margin: 2px 0 0;\n font-size: 14px;\n text-align: left;\n list-style: none;\n background-color: #fff;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, 0.15);\n border-radius: 4px;\n -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n box-shadow: 0 6px 12px rgba(0, 0, 0, 0.175);\n}\n.dropdown-menu.pull-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu .divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n display: block;\n padding: 3px 20px;\n clear: both;\n font-weight: 400;\n line-height: 1.42857143;\n color: #333333;\n white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n color: #262626;\n text-decoration: none;\n background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n color: #fff;\n text-decoration: none;\n background-color: #337ab7;\n outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n color: #777777;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus 
{\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n background-image: none;\n filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n display: block;\n}\n.open > a {\n outline: 0;\n}\n.dropdown-menu-right {\n right: 0;\n left: auto;\n}\n.dropdown-menu-left {\n right: auto;\n left: 0;\n}\n.dropdown-header {\n display: block;\n padding: 3px 20px;\n font-size: 12px;\n line-height: 1.42857143;\n color: #777777;\n white-space: nowrap;\n}\n.dropdown-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 990;\n}\n.pull-right > .dropdown-menu {\n right: 0;\n left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n content: \"\";\n border-top: 0;\n border-bottom: 4px dashed;\n border-bottom: 4px solid \\9;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n top: auto;\n bottom: 100%;\n margin-bottom: 2px;\n}\n@media (min-width: 768px) {\n .navbar-right .dropdown-menu {\n right: 0;\n left: auto;\n }\n .navbar-right .dropdown-menu-left {\n right: auto;\n left: 0;\n }\n}\n.btn-group,\n.btn-group-vertical {\n position: relative;\n display: inline-block;\n vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n position: relative;\n float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n z-index: 2;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + .btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n margin-left: -1px;\n}\n.btn-toolbar {\n margin-left: -5px;\n}\n.btn-toolbar .btn,\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n border-radius: 0;\n}\n.btn-group > .btn:first-child {\n margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n outline: 0;\n}\n.btn-group > .btn + .dropdown-toggle {\n padding-right: 8px;\n padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n padding-right: 12px;\n padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n box-shadow: inset 0 3px 5px rgba(0, 0, 0, 0.125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n -webkit-box-shadow: none;\n box-shadow: none;\n}\n.btn .caret {\n margin-left: 0;\n}\n.btn-lg .caret {\n border-width: 5px 5px 0;\n border-bottom-width: 
0;\n}\n.dropup .btn-lg .caret {\n border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n display: block;\n float: none;\n width: 100%;\n max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n margin-top: -1px;\n margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.btn-group-justified {\n display: table;\n width: 100%;\n table-layout: fixed;\n border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n display: table-cell;\n float: none;\n width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n width: 100%;\n}\n.btn-group-justified > .btn-group .dropdown-menu {\n left: auto;\n}\n[data-toggle=\"buttons\"] > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn input[type=\"checkbox\"],\n[data-toggle=\"buttons\"] > .btn-group > .btn input[type=\"checkbox\"] {\n position: absolute;\n clip: rect(0, 0, 0, 0);\n pointer-events: none;\n}\n.input-group {\n position: relative;\n display: table;\n border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n float: none;\n padding-right: 0;\n padding-left: 0;\n}\n.input-group .form-control {\n position: relative;\n z-index: 2;\n float: left;\n width: 100%;\n margin-bottom: 0;\n}\n.input-group .form-control:focus {\n z-index: 3;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n height: 46px;\n line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > 
.input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n height: 30px;\n line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n width: 1%;\n white-space: nowrap;\n vertical-align: middle;\n}\n.input-group-addon {\n padding: 6px 12px;\n font-size: 14px;\n font-weight: 400;\n line-height: 1;\n color: #555555;\n text-align: center;\n background-color: #eeeeee;\n border: 1px solid #ccc;\n border-radius: 4px;\n}\n.input-group-addon.input-sm {\n padding: 5px 10px;\n font-size: 12px;\n border-radius: 3px;\n}\n.input-group-addon.input-lg {\n padding: 10px 16px;\n font-size: 18px;\n border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n border-top-right-radius: 0;\n border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n border-top-left-radius: 0;\n border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n border-left: 0;\n}\n.input-group-btn {\n position: relative;\n font-size: 0;\n white-space: nowrap;\n}\n.input-group-btn > .btn {\n position: relative;\n}\n.input-group-btn > .btn + .btn {\n margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n z-index: 2;\n margin-left: -1px;\n}\n.nav {\n padding-left: 0;\n margin-bottom: 0;\n list-style: none;\n}\n.nav > li {\n position: relative;\n display: block;\n}\n.nav > li > a {\n position: relative;\n display: block;\n padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.nav > li.disabled > a {\n color: #777777;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n color: #777777;\n text-decoration: none;\n cursor: not-allowed;\n background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n background-color: #eeeeee;\n border-color: #337ab7;\n}\n.nav .nav-divider {\n height: 1px;\n margin: 9px 0;\n overflow: hidden;\n 
background-color: #e5e5e5;\n}\n.nav > li > a > img {\n max-width: none;\n}\n.nav-tabs {\n border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n float: left;\n margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n margin-right: 2px;\n line-height: 1.42857143;\n border: 1px solid transparent;\n border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n border-color: #eeeeee #eeeeee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n color: #555555;\n cursor: default;\n background-color: #fff;\n border: 1px solid #ddd;\n border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n width: 100%;\n border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n float: none;\n}\n.nav-tabs.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-tabs.nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs.nav-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs.nav-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs.nav-justified > .active > a,\n .nav-tabs.nav-justified > .active > a:hover,\n .nav-tabs.nav-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.nav-pills > li {\n float: left;\n}\n.nav-pills > li > a {\n border-radius: 4px;\n}\n.nav-pills > li + li {\n margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n color: #fff;\n background-color: #337ab7;\n}\n.nav-stacked > li {\n float: none;\n}\n.nav-stacked > li + li {\n margin-top: 2px;\n margin-left: 0;\n}\n.nav-justified {\n width: 100%;\n}\n.nav-justified > li {\n float: none;\n}\n.nav-justified > li > a {\n margin-bottom: 5px;\n text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n top: auto;\n left: auto;\n}\n@media (min-width: 768px) {\n .nav-justified > li {\n display: table-cell;\n width: 1%;\n }\n .nav-justified > li > a {\n margin-bottom: 0;\n }\n}\n.nav-tabs-justified {\n border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n margin-right: 0;\n border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n .nav-tabs-justified > li > a {\n border-bottom: 1px solid #ddd;\n border-radius: 4px 4px 0 0;\n }\n .nav-tabs-justified > .active > a,\n .nav-tabs-justified > .active > a:hover,\n .nav-tabs-justified > .active > a:focus {\n border-bottom-color: #fff;\n }\n}\n.tab-content > .tab-pane {\n display: none;\n}\n.tab-content > .active {\n display: block;\n}\n.nav-tabs .dropdown-menu {\n margin-top: -1px;\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.navbar {\n position: relative;\n min-height: 50px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n .navbar {\n border-radius: 4px;\n }\n}\n@media (min-width: 768px) {\n .navbar-header {\n float: left;\n }\n}\n.navbar-collapse {\n padding-right: 15px;\n padding-left: 15px;\n overflow-x: visible;\n border-top: 1px solid transparent;\n box-shadow: inset 0 1px 0 rgba(255, 255, 
255, 0.1);\n -webkit-overflow-scrolling: touch;\n}\n.navbar-collapse.in {\n overflow-y: auto;\n}\n@media (min-width: 768px) {\n .navbar-collapse {\n width: auto;\n border-top: 0;\n box-shadow: none;\n }\n .navbar-collapse.collapse {\n display: block !important;\n height: auto !important;\n padding-bottom: 0;\n overflow: visible !important;\n }\n .navbar-collapse.in {\n overflow-y: visible;\n }\n .navbar-fixed-top .navbar-collapse,\n .navbar-static-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n padding-right: 0;\n padding-left: 0;\n }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n position: fixed;\n right: 0;\n left: 0;\n z-index: 1030;\n}\n.navbar-fixed-top .navbar-collapse,\n.navbar-fixed-bottom .navbar-collapse {\n max-height: 340px;\n}\n@media (max-device-width: 480px) and (orientation: landscape) {\n .navbar-fixed-top .navbar-collapse,\n .navbar-fixed-bottom .navbar-collapse {\n max-height: 200px;\n }\n}\n@media (min-width: 768px) {\n .navbar-fixed-top,\n .navbar-fixed-bottom {\n border-radius: 0;\n }\n}\n.navbar-fixed-top {\n top: 0;\n border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n bottom: 0;\n margin-bottom: 0;\n border-width: 1px 0 0;\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n margin-right: -15px;\n margin-left: -15px;\n}\n@media (min-width: 768px) {\n .container > .navbar-header,\n .container-fluid > .navbar-header,\n .container > .navbar-collapse,\n .container-fluid > .navbar-collapse {\n margin-right: 0;\n margin-left: 0;\n }\n}\n.navbar-static-top {\n z-index: 1000;\n border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n .navbar-static-top {\n border-radius: 0;\n }\n}\n.navbar-brand {\n float: left;\n height: 50px;\n padding: 15px 15px;\n font-size: 18px;\n line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n text-decoration: none;\n}\n.navbar-brand > img {\n display: block;\n}\n@media (min-width: 768px) {\n .navbar > .container .navbar-brand,\n .navbar > .container-fluid .navbar-brand {\n margin-left: -15px;\n }\n}\n.navbar-toggle {\n position: relative;\n float: right;\n padding: 9px 10px;\n margin-right: 15px;\n margin-top: 8px;\n margin-bottom: 8px;\n background-color: transparent;\n background-image: none;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.navbar-toggle:focus {\n outline: 0;\n}\n.navbar-toggle .icon-bar {\n display: block;\n width: 22px;\n height: 2px;\n border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n margin-top: 4px;\n}\n@media (min-width: 768px) {\n .navbar-toggle {\n display: none;\n }\n}\n.navbar-nav {\n margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n padding-top: 10px;\n padding-bottom: 10px;\n line-height: 20px;\n}\n@media (max-width: 767px) {\n .navbar-nav .open .dropdown-menu {\n position: static;\n float: none;\n width: auto;\n margin-top: 0;\n background-color: transparent;\n border: 0;\n box-shadow: none;\n }\n .navbar-nav .open .dropdown-menu > li > a,\n .navbar-nav .open .dropdown-menu .dropdown-header {\n padding: 5px 15px 5px 25px;\n }\n .navbar-nav .open .dropdown-menu > li > a {\n line-height: 20px;\n }\n .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-nav .open .dropdown-menu > li > a:focus {\n background-image: none;\n }\n}\n@media (min-width: 768px) {\n .navbar-nav {\n float: left;\n margin: 0;\n }\n .navbar-nav > li {\n float: left;\n }\n .navbar-nav > li > a {\n padding-top: 15px;\n padding-bottom: 15px;\n }\n}\n.navbar-form {\n padding: 10px 15px;\n 
margin-right: -15px;\n margin-left: -15px;\n border-top: 1px solid transparent;\n border-bottom: 1px solid transparent;\n -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1);\n margin-top: 8px;\n margin-bottom: 8px;\n}\n@media (min-width: 768px) {\n .navbar-form .form-group {\n display: inline-block;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .form-control {\n display: inline-block;\n width: auto;\n vertical-align: middle;\n }\n .navbar-form .form-control-static {\n display: inline-block;\n }\n .navbar-form .input-group {\n display: inline-table;\n vertical-align: middle;\n }\n .navbar-form .input-group .input-group-addon,\n .navbar-form .input-group .input-group-btn,\n .navbar-form .input-group .form-control {\n width: auto;\n }\n .navbar-form .input-group > .form-control {\n width: 100%;\n }\n .navbar-form .control-label {\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio,\n .navbar-form .checkbox {\n display: inline-block;\n margin-top: 0;\n margin-bottom: 0;\n vertical-align: middle;\n }\n .navbar-form .radio label,\n .navbar-form .checkbox label {\n padding-left: 0;\n }\n .navbar-form .radio input[type=\"radio\"],\n .navbar-form .checkbox input[type=\"checkbox\"] {\n position: relative;\n margin-left: 0;\n }\n .navbar-form .has-feedback .form-control-feedback {\n top: 0;\n }\n}\n@media (max-width: 767px) {\n .navbar-form .form-group {\n margin-bottom: 5px;\n }\n .navbar-form .form-group:last-child {\n margin-bottom: 0;\n }\n}\n@media (min-width: 768px) {\n .navbar-form {\n width: auto;\n padding-top: 0;\n padding-bottom: 0;\n margin-right: 0;\n margin-left: 0;\n border: 0;\n -webkit-box-shadow: none;\n box-shadow: none;\n }\n}\n.navbar-nav > li > .dropdown-menu {\n margin-top: 0;\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n margin-bottom: 0;\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n border-bottom-right-radius: 0;\n border-bottom-left-radius: 0;\n}\n.navbar-btn {\n margin-top: 8px;\n margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n margin-top: 10px;\n margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n margin-top: 14px;\n margin-bottom: 14px;\n}\n.navbar-text {\n margin-top: 15px;\n margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n .navbar-text {\n float: left;\n margin-right: 15px;\n margin-left: 15px;\n }\n}\n@media (min-width: 768px) {\n .navbar-left {\n float: left !important;\n }\n .navbar-right {\n float: right !important;\n margin-right: -15px;\n }\n .navbar-right ~ .navbar-right {\n margin-right: 0;\n }\n}\n.navbar-default {\n background-color: #f8f8f8;\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n color: #5e5e5e;\n background-color: transparent;\n}\n.navbar-default .navbar-text {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n color: #333;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default 
.navbar-nav > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n color: #555;\n background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n color: #777;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #333;\n background-color: transparent;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #555;\n background-color: #e7e7e7;\n }\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #ccc;\n background-color: transparent;\n }\n}\n.navbar-default .navbar-toggle {\n border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n border-color: #e7e7e7;\n}\n.navbar-default .navbar-link {\n color: #777;\n}\n.navbar-default .navbar-link:hover {\n color: #333;\n}\n.navbar-default .btn-link {\n color: #777;\n}\n.navbar-default .btn-link:hover,\n.navbar-default .btn-link:focus {\n color: #333;\n}\n.navbar-default .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-default .btn-link:hover,\n.navbar-default .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-default .btn-link:focus {\n color: #ccc;\n}\n.navbar-inverse {\n background-color: #222;\n border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse .navbar-nav > li > a:focus {\n color: #fff;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n color: #fff;\n background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n color: #fff;\n background-color: #080808;\n}\n@media (max-width: 767px) {\n .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n border-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n color: #9d9d9d;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n color: #fff;\n background-color: transparent;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n 
.navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n color: #fff;\n background-color: #080808;\n }\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n color: #444;\n background-color: transparent;\n }\n}\n.navbar-inverse .navbar-toggle {\n border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n border-color: #101010;\n}\n.navbar-inverse .navbar-link {\n color: #9d9d9d;\n}\n.navbar-inverse .navbar-link:hover {\n color: #fff;\n}\n.navbar-inverse .btn-link {\n color: #9d9d9d;\n}\n.navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link:focus {\n color: #fff;\n}\n.navbar-inverse .btn-link[disabled]:hover,\nfieldset[disabled] .navbar-inverse .btn-link:hover,\n.navbar-inverse .btn-link[disabled]:focus,\nfieldset[disabled] .navbar-inverse .btn-link:focus {\n color: #444;\n}\n.breadcrumb {\n padding: 8px 15px;\n margin-bottom: 20px;\n list-style: none;\n background-color: #f5f5f5;\n border-radius: 4px;\n}\n.breadcrumb > li {\n display: inline-block;\n}\n.breadcrumb > li + li:before {\n padding: 0 5px;\n color: #ccc;\n content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n color: #777777;\n}\n.pagination {\n display: inline-block;\n padding-left: 0;\n margin: 20px 0;\n border-radius: 4px;\n}\n.pagination > li {\n display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n position: relative;\n float: left;\n padding: 6px 12px;\n margin-left: -1px;\n line-height: 1.42857143;\n color: #337ab7;\n text-decoration: none;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n z-index: 2;\n color: #23527c;\n background-color: #eeeeee;\n border-color: #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n margin-left: 0;\n border-top-left-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n border-top-right-radius: 4px;\n border-bottom-right-radius: 4px;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n z-index: 3;\n color: #fff;\n cursor: default;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n color: #777777;\n cursor: not-allowed;\n background-color: #fff;\n border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n padding: 10px 16px;\n font-size: 18px;\n line-height: 1.3333333;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n border-top-left-radius: 6px;\n border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n border-top-right-radius: 6px;\n border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > 
a,\n.pagination-sm > li > span {\n padding: 5px 10px;\n font-size: 12px;\n line-height: 1.5;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n border-top-left-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n border-top-right-radius: 3px;\n border-bottom-right-radius: 3px;\n}\n.pager {\n padding-left: 0;\n margin: 20px 0;\n text-align: center;\n list-style: none;\n}\n.pager li {\n display: inline;\n}\n.pager li > a,\n.pager li > span {\n display: inline-block;\n padding: 5px 14px;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n text-decoration: none;\n background-color: #eeeeee;\n}\n.pager .next > a,\n.pager .next > span {\n float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n color: #777777;\n cursor: not-allowed;\n background-color: #fff;\n}\n.label {\n display: inline;\n padding: 0.2em 0.6em 0.3em;\n font-size: 75%;\n font-weight: 700;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: baseline;\n border-radius: 0.25em;\n}\na.label:hover,\na.label:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.label:empty {\n display: none;\n}\n.btn .label {\n position: relative;\n top: -1px;\n}\n.label-default {\n background-color: #777777;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n background-color: #5e5e5e;\n}\n.label-primary {\n background-color: #337ab7;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n background-color: #286090;\n}\n.label-success {\n background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n background-color: #449d44;\n}\n.label-info {\n background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n background-color: #31b0d5;\n}\n.label-warning {\n background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n background-color: #ec971f;\n}\n.label-danger {\n background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n background-color: #c9302c;\n}\n.badge {\n display: inline-block;\n min-width: 10px;\n padding: 3px 7px;\n font-size: 12px;\n font-weight: bold;\n line-height: 1;\n color: #fff;\n text-align: center;\n white-space: nowrap;\n vertical-align: middle;\n background-color: #777777;\n border-radius: 10px;\n}\n.badge:empty {\n display: none;\n}\n.btn .badge {\n position: relative;\n top: -1px;\n}\n.btn-xs .badge,\n.btn-group-xs > .btn .badge {\n top: 0;\n padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n color: #fff;\n text-decoration: none;\n cursor: pointer;\n}\n.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.list-group-item > .badge {\n float: right;\n}\n.list-group-item > .badge + .badge {\n margin-right: 5px;\n}\n.nav-pills > li > a > .badge {\n margin-left: 3px;\n}\n.jumbotron {\n padding-top: 30px;\n padding-bottom: 30px;\n margin-bottom: 30px;\n color: inherit;\n background-color: #eeeeee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n color: inherit;\n}\n.jumbotron p {\n margin-bottom: 15px;\n font-size: 21px;\n font-weight: 200;\n}\n.jumbotron > hr {\n border-top-color: #d5d5d5;\n}\n.container .jumbotron,\n.container-fluid .jumbotron {\n padding-right: 
15px;\n padding-left: 15px;\n border-radius: 6px;\n}\n.jumbotron .container {\n max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n .jumbotron {\n padding-top: 48px;\n padding-bottom: 48px;\n }\n .container .jumbotron,\n .container-fluid .jumbotron {\n padding-right: 60px;\n padding-left: 60px;\n }\n .jumbotron h1,\n .jumbotron .h1 {\n font-size: 63px;\n }\n}\n.thumbnail {\n display: block;\n padding: 4px;\n margin-bottom: 20px;\n line-height: 1.42857143;\n background-color: #fff;\n border: 1px solid #ddd;\n border-radius: 4px;\n -webkit-transition: border 0.2s ease-in-out;\n -o-transition: border 0.2s ease-in-out;\n transition: border 0.2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n margin-right: auto;\n margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n border-color: #337ab7;\n}\n.thumbnail .caption {\n padding: 9px;\n color: #333333;\n}\n.alert {\n padding: 15px;\n margin-bottom: 20px;\n border: 1px solid transparent;\n border-radius: 4px;\n}\n.alert h4 {\n margin-top: 0;\n color: inherit;\n}\n.alert .alert-link {\n font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n margin-bottom: 0;\n}\n.alert > p + p {\n margin-top: 5px;\n}\n.alert-dismissable,\n.alert-dismissible {\n padding-right: 35px;\n}\n.alert-dismissable .close,\n.alert-dismissible .close {\n position: relative;\n top: -2px;\n right: -21px;\n color: inherit;\n}\n.alert-success {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.alert-success hr {\n border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n color: #2b542c;\n}\n.alert-info {\n color: #31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.alert-info hr {\n border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n color: #245269;\n}\n.alert-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.alert-warning hr {\n border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n color: #66512c;\n}\n.alert-danger {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.alert-danger hr {\n border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n@keyframes progress-bar-stripes {\n from {\n background-position: 40px 0;\n }\n to {\n background-position: 0 0;\n }\n}\n.progress {\n height: 20px;\n margin-bottom: 20px;\n overflow: hidden;\n background-color: #f5f5f5;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.1);\n}\n.progress-bar {\n float: left;\n width: 0%;\n height: 100%;\n font-size: 12px;\n line-height: 20px;\n color: #fff;\n text-align: center;\n background-color: #337ab7;\n -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, 0.15);\n -webkit-transition: width 0.6s ease;\n -o-transition: width 0.6s ease;\n transition: width 0.6s ease;\n}\n.progress-striped .progress-bar,\n.progress-bar-striped {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: 
linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-size: 40px 40px;\n}\n.progress.active .progress-bar,\n.progress-bar.active {\n -webkit-animation: progress-bar-stripes 2s linear infinite;\n -o-animation: progress-bar-stripes 2s linear infinite;\n animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: -o-linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);\n}\n.media {\n margin-top: 15px;\n}\n.media:first-child {\n margin-top: 0;\n}\n.media,\n.media-body {\n overflow: hidden;\n zoom: 1;\n}\n.media-body {\n width: 10000px;\n}\n.media-object {\n display: block;\n}\n.media-object.img-thumbnail {\n 
max-width: none;\n}\n.media-right,\n.media > .pull-right {\n padding-left: 10px;\n}\n.media-left,\n.media > .pull-left {\n padding-right: 10px;\n}\n.media-left,\n.media-right,\n.media-body {\n display: table-cell;\n vertical-align: top;\n}\n.media-middle {\n vertical-align: middle;\n}\n.media-bottom {\n vertical-align: bottom;\n}\n.media-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.media-list {\n padding-left: 0;\n list-style: none;\n}\n.list-group {\n padding-left: 0;\n margin-bottom: 20px;\n}\n.list-group-item {\n position: relative;\n display: block;\n padding: 10px 15px;\n margin-bottom: -1px;\n background-color: #fff;\n border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n border-top-left-radius: 4px;\n border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n margin-bottom: 0;\n border-bottom-right-radius: 4px;\n border-bottom-left-radius: 4px;\n}\n.list-group-item.disabled,\n.list-group-item.disabled:hover,\n.list-group-item.disabled:focus {\n color: #777777;\n cursor: not-allowed;\n background-color: #eeeeee;\n}\n.list-group-item.disabled .list-group-item-heading,\n.list-group-item.disabled:hover .list-group-item-heading,\n.list-group-item.disabled:focus .list-group-item-heading {\n color: inherit;\n}\n.list-group-item.disabled .list-group-item-text,\n.list-group-item.disabled:hover .list-group-item-text,\n.list-group-item.disabled:focus .list-group-item-text {\n color: #777777;\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n z-index: 2;\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.list-group-item.active .list-group-item-heading,\n.list-group-item.active:hover .list-group-item-heading,\n.list-group-item.active:focus .list-group-item-heading,\n.list-group-item.active .list-group-item-heading > small,\n.list-group-item.active:hover .list-group-item-heading > small,\n.list-group-item.active:focus .list-group-item-heading > small,\n.list-group-item.active .list-group-item-heading > .small,\n.list-group-item.active:hover .list-group-item-heading > .small,\n.list-group-item.active:focus .list-group-item-heading > .small {\n color: inherit;\n}\n.list-group-item.active .list-group-item-text,\n.list-group-item.active:hover .list-group-item-text,\n.list-group-item.active:focus .list-group-item-text {\n color: #c7ddef;\n}\na.list-group-item,\nbutton.list-group-item {\n color: #555;\n}\na.list-group-item .list-group-item-heading,\nbutton.list-group-item .list-group-item-heading {\n color: #333;\n}\na.list-group-item:hover,\nbutton.list-group-item:hover,\na.list-group-item:focus,\nbutton.list-group-item:focus {\n color: #555;\n text-decoration: none;\n background-color: #f5f5f5;\n}\nbutton.list-group-item {\n width: 100%;\n text-align: left;\n}\n.list-group-item-success {\n color: #3c763d;\n background-color: #dff0d8;\n}\na.list-group-item-success,\nbutton.list-group-item-success {\n color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading,\nbutton.list-group-item-success .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-success:hover,\nbutton.list-group-item-success:hover,\na.list-group-item-success:focus,\nbutton.list-group-item-success:focus {\n color: #3c763d;\n background-color: #d0e9c6;\n}\na.list-group-item-success.active,\nbutton.list-group-item-success.active,\na.list-group-item-success.active:hover,\nbutton.list-group-item-success.active:hover,\na.list-group-item-success.active:focus,\nbutton.list-group-item-success.active:focus {\n color: #fff;\n 
background-color: #3c763d;\n border-color: #3c763d;\n}\n.list-group-item-info {\n color: #31708f;\n background-color: #d9edf7;\n}\na.list-group-item-info,\nbutton.list-group-item-info {\n color: #31708f;\n}\na.list-group-item-info .list-group-item-heading,\nbutton.list-group-item-info .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-info:hover,\nbutton.list-group-item-info:hover,\na.list-group-item-info:focus,\nbutton.list-group-item-info:focus {\n color: #31708f;\n background-color: #c4e3f3;\n}\na.list-group-item-info.active,\nbutton.list-group-item-info.active,\na.list-group-item-info.active:hover,\nbutton.list-group-item-info.active:hover,\na.list-group-item-info.active:focus,\nbutton.list-group-item-info.active:focus {\n color: #fff;\n background-color: #31708f;\n border-color: #31708f;\n}\n.list-group-item-warning {\n color: #8a6d3b;\n background-color: #fcf8e3;\n}\na.list-group-item-warning,\nbutton.list-group-item-warning {\n color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading,\nbutton.list-group-item-warning .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-warning:hover,\nbutton.list-group-item-warning:hover,\na.list-group-item-warning:focus,\nbutton.list-group-item-warning:focus {\n color: #8a6d3b;\n background-color: #faf2cc;\n}\na.list-group-item-warning.active,\nbutton.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\nbutton.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus,\nbutton.list-group-item-warning.active:focus {\n color: #fff;\n background-color: #8a6d3b;\n border-color: #8a6d3b;\n}\n.list-group-item-danger {\n color: #a94442;\n background-color: #f2dede;\n}\na.list-group-item-danger,\nbutton.list-group-item-danger {\n color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading,\nbutton.list-group-item-danger .list-group-item-heading {\n color: inherit;\n}\na.list-group-item-danger:hover,\nbutton.list-group-item-danger:hover,\na.list-group-item-danger:focus,\nbutton.list-group-item-danger:focus {\n color: #a94442;\n background-color: #ebcccc;\n}\na.list-group-item-danger.active,\nbutton.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\nbutton.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus,\nbutton.list-group-item-danger.active:focus {\n color: #fff;\n background-color: #a94442;\n border-color: #a94442;\n}\n.list-group-item-heading {\n margin-top: 0;\n margin-bottom: 5px;\n}\n.list-group-item-text {\n margin-bottom: 0;\n line-height: 1.3;\n}\n.panel {\n margin-bottom: 20px;\n background-color: #fff;\n border: 1px solid transparent;\n border-radius: 4px;\n -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n box-shadow: 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.panel-body {\n padding: 15px;\n}\n.panel-heading {\n padding: 10px 15px;\n border-bottom: 1px solid transparent;\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n color: inherit;\n}\n.panel-title {\n margin-top: 0;\n margin-bottom: 0;\n font-size: 16px;\n color: inherit;\n}\n.panel-title > a,\n.panel-title > small,\n.panel-title > .small,\n.panel-title > small > a,\n.panel-title > .small > a {\n color: inherit;\n}\n.panel-footer {\n padding: 10px 15px;\n background-color: #f5f5f5;\n border-top: 1px solid #ddd;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .list-group,\n.panel > .panel-collapse > .list-group {\n margin-bottom: 0;\n}\n.panel > .list-group 
.list-group-item,\n.panel > .panel-collapse > .list-group .list-group-item {\n border-width: 1px 0;\n border-radius: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child,\n.panel > .panel-collapse > .list-group:first-child .list-group-item:first-child {\n border-top: 0;\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child,\n.panel > .panel-collapse > .list-group:last-child .list-group-item:last-child {\n border-bottom: 0;\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .panel-heading + .panel-collapse > .list-group .list-group-item:first-child {\n border-top-left-radius: 0;\n border-top-right-radius: 0;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n border-top-width: 0;\n}\n.list-group + .panel-footer {\n border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table,\n.panel > .panel-collapse > .table {\n margin-bottom: 0;\n}\n.panel > .table caption,\n.panel > .table-responsive > .table caption,\n.panel > .panel-collapse > .table caption {\n padding-right: 15px;\n padding-left: 15px;\n}\n.panel > .table:first-child,\n.panel > .table-responsive:first-child > .table:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child {\n border-top-left-radius: 3px;\n border-top-right-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n border-top-right-radius: 3px;\n}\n.panel > .table:last-child,\n.panel > .table-responsive:last-child > .table:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > 
tbody:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child {\n border-bottom-right-radius: 3px;\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive,\n.panel > .table + .panel-body,\n.panel > .table-responsive + .panel-body {\n border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot 
> tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th {\n border-bottom: 0;\n}\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th {\n border-bottom: 0;\n}\n.panel > .table-responsive {\n margin-bottom: 0;\n border: 0;\n}\n.panel-group {\n margin-bottom: 20px;\n}\n.panel-group .panel {\n margin-bottom: 0;\n border-radius: 4px;\n}\n.panel-group .panel + .panel {\n margin-top: 5px;\n}\n.panel-group .panel-heading {\n border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse > .panel-body,\n.panel-group .panel-heading + .panel-collapse > .list-group {\n border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n border-bottom: 1px solid #ddd;\n}\n.panel-default {\n border-color: #ddd;\n}\n.panel-default > .panel-heading {\n color: #333333;\n background-color: #f5f5f5;\n border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ddd;\n}\n.panel-default > .panel-heading .badge {\n color: #f5f5f5;\n background-color: #333333;\n}\n.panel-default > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ddd;\n}\n.panel-primary {\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading {\n color: #fff;\n background-color: #337ab7;\n border-color: #337ab7;\n}\n.panel-primary > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #337ab7;\n}\n.panel-primary > .panel-heading .badge {\n color: #337ab7;\n background-color: #fff;\n}\n.panel-primary > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #337ab7;\n}\n.panel-success {\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n color: #3c763d;\n background-color: #dff0d8;\n border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #d6e9c6;\n}\n.panel-success > .panel-heading .badge {\n color: #dff0d8;\n background-color: #3c763d;\n}\n.panel-success > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #d6e9c6;\n}\n.panel-info {\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n color: 
#31708f;\n background-color: #d9edf7;\n border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #bce8f1;\n}\n.panel-info > .panel-heading .badge {\n color: #d9edf7;\n background-color: #31708f;\n}\n.panel-info > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #bce8f1;\n}\n.panel-warning {\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n color: #8a6d3b;\n background-color: #fcf8e3;\n border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #faebcc;\n}\n.panel-warning > .panel-heading .badge {\n color: #fcf8e3;\n background-color: #8a6d3b;\n}\n.panel-warning > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #faebcc;\n}\n.panel-danger {\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n color: #a94442;\n background-color: #f2dede;\n border-color: #ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse > .panel-body {\n border-top-color: #ebccd1;\n}\n.panel-danger > .panel-heading .badge {\n color: #f2dede;\n background-color: #a94442;\n}\n.panel-danger > .panel-footer + .panel-collapse > .panel-body {\n border-bottom-color: #ebccd1;\n}\n.embed-responsive {\n position: relative;\n display: block;\n height: 0;\n padding: 0;\n overflow: hidden;\n}\n.embed-responsive .embed-responsive-item,\n.embed-responsive iframe,\n.embed-responsive embed,\n.embed-responsive object,\n.embed-responsive video {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 100%;\n height: 100%;\n border: 0;\n}\n.embed-responsive-16by9 {\n padding-bottom: 56.25%;\n}\n.embed-responsive-4by3 {\n padding-bottom: 75%;\n}\n.well {\n min-height: 20px;\n padding: 19px;\n margin-bottom: 20px;\n background-color: #f5f5f5;\n border: 1px solid #e3e3e3;\n border-radius: 4px;\n -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.05);\n}\n.well blockquote {\n border-color: #ddd;\n border-color: rgba(0, 0, 0, 0.15);\n}\n.well-lg {\n padding: 24px;\n border-radius: 6px;\n}\n.well-sm {\n padding: 9px;\n border-radius: 3px;\n}\n.close {\n float: right;\n font-size: 21px;\n font-weight: bold;\n line-height: 1;\n color: #000;\n text-shadow: 0 1px 0 #fff;\n filter: alpha(opacity=20);\n opacity: 0.2;\n}\n.close:hover,\n.close:focus {\n color: #000;\n text-decoration: none;\n cursor: pointer;\n filter: alpha(opacity=50);\n opacity: 0.5;\n}\nbutton.close {\n padding: 0;\n cursor: pointer;\n background: transparent;\n border: 0;\n -webkit-appearance: none;\n appearance: none;\n}\n.modal-open {\n overflow: hidden;\n}\n.modal {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1050;\n display: none;\n overflow: hidden;\n -webkit-overflow-scrolling: touch;\n outline: 0;\n}\n.modal.fade .modal-dialog {\n -webkit-transform: translate(0, -25%);\n -ms-transform: translate(0, -25%);\n -o-transform: translate(0, -25%);\n transform: translate(0, -25%);\n -webkit-transition: -webkit-transform 0.3s ease-out;\n -moz-transition: -moz-transform 0.3s ease-out;\n -o-transition: -o-transform 0.3s ease-out;\n transition: transform 0.3s ease-out;\n}\n.modal.in .modal-dialog {\n -webkit-transform: translate(0, 0);\n -ms-transform: translate(0, 0);\n -o-transform: translate(0, 0);\n transform: translate(0, 0);\n}\n.modal-open .modal {\n overflow-x: hidden;\n overflow-y: auto;\n}\n.modal-dialog {\n position: relative;\n width: auto;\n margin: 10px;\n}\n.modal-content {\n position: relative;\n 
background-color: #fff;\n background-clip: padding-box;\n border: 1px solid #999;\n border: 1px solid rgba(0, 0, 0, 0.2);\n border-radius: 6px;\n -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);\n outline: 0;\n}\n.modal-backdrop {\n position: fixed;\n top: 0;\n right: 0;\n bottom: 0;\n left: 0;\n z-index: 1040;\n background-color: #000;\n}\n.modal-backdrop.fade {\n filter: alpha(opacity=0);\n opacity: 0;\n}\n.modal-backdrop.in {\n filter: alpha(opacity=50);\n opacity: 0.5;\n}\n.modal-header {\n padding: 15px;\n border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n margin-top: -2px;\n}\n.modal-title {\n margin: 0;\n line-height: 1.42857143;\n}\n.modal-body {\n position: relative;\n padding: 15px;\n}\n.modal-footer {\n padding: 15px;\n text-align: right;\n border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n margin-bottom: 0;\n margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n margin-left: 0;\n}\n.modal-scrollbar-measure {\n position: absolute;\n top: -9999px;\n width: 50px;\n height: 50px;\n overflow: scroll;\n}\n@media (min-width: 768px) {\n .modal-dialog {\n width: 600px;\n margin: 30px auto;\n }\n .modal-content {\n -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);\n }\n .modal-sm {\n width: 300px;\n }\n}\n@media (min-width: 992px) {\n .modal-lg {\n width: 900px;\n }\n}\n.tooltip {\n position: absolute;\n z-index: 1070;\n display: block;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-style: normal;\n font-weight: 400;\n line-height: 1.42857143;\n line-break: auto;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n letter-spacing: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n white-space: normal;\n font-size: 12px;\n filter: alpha(opacity=0);\n opacity: 0;\n}\n.tooltip.in {\n filter: alpha(opacity=90);\n opacity: 0.9;\n}\n.tooltip.top {\n padding: 5px 0;\n margin-top: -3px;\n}\n.tooltip.right {\n padding: 0 5px;\n margin-left: 3px;\n}\n.tooltip.bottom {\n padding: 5px 0;\n margin-top: 3px;\n}\n.tooltip.left {\n padding: 0 5px;\n margin-left: -3px;\n}\n.tooltip.top .tooltip-arrow {\n bottom: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n right: 5px;\n bottom: 0;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n bottom: 0;\n left: 5px;\n margin-bottom: -5px;\n border-width: 5px 5px 0;\n border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n top: 50%;\n left: 0;\n margin-top: -5px;\n border-width: 5px 5px 5px 0;\n border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n top: 50%;\n right: 0;\n margin-top: -5px;\n border-width: 5px 0 5px 5px;\n border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n top: 0;\n left: 50%;\n margin-left: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n top: 0;\n right: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n top: 0;\n left: 5px;\n margin-top: -5px;\n border-width: 0 5px 5px;\n border-bottom-color: #000;\n}\n.tooltip-inner {\n max-width: 200px;\n padding: 3px 8px;\n color: #fff;\n text-align: center;\n background-color: #000;\n border-radius: 
4px;\n}\n.tooltip-arrow {\n position: absolute;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.popover {\n position: absolute;\n top: 0;\n left: 0;\n z-index: 1060;\n display: none;\n max-width: 276px;\n padding: 1px;\n font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n font-style: normal;\n font-weight: 400;\n line-height: 1.42857143;\n line-break: auto;\n text-align: left;\n text-align: start;\n text-decoration: none;\n text-shadow: none;\n text-transform: none;\n letter-spacing: normal;\n word-break: normal;\n word-spacing: normal;\n word-wrap: normal;\n white-space: normal;\n font-size: 14px;\n background-color: #fff;\n background-clip: padding-box;\n border: 1px solid #ccc;\n border: 1px solid rgba(0, 0, 0, 0.2);\n border-radius: 6px;\n -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);\n}\n.popover.top {\n margin-top: -10px;\n}\n.popover.right {\n margin-left: 10px;\n}\n.popover.bottom {\n margin-top: 10px;\n}\n.popover.left {\n margin-left: -10px;\n}\n.popover > .arrow {\n border-width: 11px;\n}\n.popover > .arrow,\n.popover > .arrow:after {\n position: absolute;\n display: block;\n width: 0;\n height: 0;\n border-color: transparent;\n border-style: solid;\n}\n.popover > .arrow:after {\n content: \"\";\n border-width: 10px;\n}\n.popover.top > .arrow {\n bottom: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-color: #999999;\n border-top-color: rgba(0, 0, 0, 0.25);\n border-bottom-width: 0;\n}\n.popover.top > .arrow:after {\n bottom: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-color: #fff;\n border-bottom-width: 0;\n}\n.popover.right > .arrow {\n top: 50%;\n left: -11px;\n margin-top: -11px;\n border-right-color: #999999;\n border-right-color: rgba(0, 0, 0, 0.25);\n border-left-width: 0;\n}\n.popover.right > .arrow:after {\n bottom: -10px;\n left: 1px;\n content: \" \";\n border-right-color: #fff;\n border-left-width: 0;\n}\n.popover.bottom > .arrow {\n top: -11px;\n left: 50%;\n margin-left: -11px;\n border-top-width: 0;\n border-bottom-color: #999999;\n border-bottom-color: rgba(0, 0, 0, 0.25);\n}\n.popover.bottom > .arrow:after {\n top: 1px;\n margin-left: -10px;\n content: \" \";\n border-top-width: 0;\n border-bottom-color: #fff;\n}\n.popover.left > .arrow {\n top: 50%;\n right: -11px;\n margin-top: -11px;\n border-right-width: 0;\n border-left-color: #999999;\n border-left-color: rgba(0, 0, 0, 0.25);\n}\n.popover.left > .arrow:after {\n right: 1px;\n bottom: -10px;\n content: \" \";\n border-right-width: 0;\n border-left-color: #fff;\n}\n.popover-title {\n padding: 8px 14px;\n margin: 0;\n font-size: 14px;\n background-color: #f7f7f7;\n border-bottom: 1px solid #ebebeb;\n border-radius: 5px 5px 0 0;\n}\n.popover-content {\n padding: 9px 14px;\n}\n.carousel {\n position: relative;\n}\n.carousel-inner {\n position: relative;\n width: 100%;\n overflow: hidden;\n}\n.carousel-inner > .item {\n position: relative;\n display: none;\n -webkit-transition: 0.6s ease-in-out left;\n -o-transition: 0.6s ease-in-out left;\n transition: 0.6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n line-height: 1;\n}\n@media all and (transform-3d), (-webkit-transform-3d) {\n .carousel-inner > .item {\n -webkit-transition: -webkit-transform 0.6s ease-in-out;\n -moz-transition: -moz-transform 0.6s ease-in-out;\n -o-transition: -o-transform 0.6s ease-in-out;\n transition: transform 0.6s ease-in-out;\n -webkit-backface-visibility: hidden;\n 
-moz-backface-visibility: hidden;\n backface-visibility: hidden;\n -webkit-perspective: 1000px;\n -moz-perspective: 1000px;\n perspective: 1000px;\n }\n .carousel-inner > .item.next,\n .carousel-inner > .item.active.right {\n -webkit-transform: translate3d(100%, 0, 0);\n transform: translate3d(100%, 0, 0);\n left: 0;\n }\n .carousel-inner > .item.prev,\n .carousel-inner > .item.active.left {\n -webkit-transform: translate3d(-100%, 0, 0);\n transform: translate3d(-100%, 0, 0);\n left: 0;\n }\n .carousel-inner > .item.next.left,\n .carousel-inner > .item.prev.right,\n .carousel-inner > .item.active {\n -webkit-transform: translate3d(0, 0, 0);\n transform: translate3d(0, 0, 0);\n left: 0;\n }\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n display: block;\n}\n.carousel-inner > .active {\n left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n position: absolute;\n top: 0;\n width: 100%;\n}\n.carousel-inner > .next {\n left: 100%;\n}\n.carousel-inner > .prev {\n left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n left: 0;\n}\n.carousel-inner > .active.left {\n left: -100%;\n}\n.carousel-inner > .active.right {\n left: 100%;\n}\n.carousel-control {\n position: absolute;\n top: 0;\n bottom: 0;\n left: 0;\n width: 15%;\n font-size: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n background-color: rgba(0, 0, 0, 0);\n filter: alpha(opacity=50);\n opacity: 0.5;\n}\n.carousel-control.left {\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n background-image: linear-gradient(to right, rgba(0, 0, 0, 0.5) 0%, rgba(0, 0, 0, 0.0001) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control.right {\n right: 0;\n left: auto;\n background-image: -webkit-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n background-image: -o-linear-gradient(left, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n background-image: linear-gradient(to right, rgba(0, 0, 0, 0.0001) 0%, rgba(0, 0, 0, 0.5) 100%);\n filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n color: #fff;\n text-decoration: none;\n outline: 0;\n filter: alpha(opacity=90);\n opacity: 0.9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n position: absolute;\n top: 50%;\n z-index: 5;\n display: inline-block;\n margin-top: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n left: 50%;\n margin-left: -10px;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n right: 50%;\n margin-right: -10px;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n width: 20px;\n height: 20px;\n font-family: serif;\n line-height: 1;\n}\n.carousel-control .icon-prev:before {\n content: \"\\2039\";\n}\n.carousel-control .icon-next:before {\n content: \"\\203a\";\n}\n.carousel-indicators {\n position: absolute;\n bottom: 10px;\n left: 50%;\n z-index: 15;\n width: 60%;\n padding-left: 0;\n margin-left: -30%;\n text-align: center;\n 
list-style: none;\n}\n.carousel-indicators li {\n display: inline-block;\n width: 10px;\n height: 10px;\n margin: 1px;\n text-indent: -999px;\n cursor: pointer;\n background-color: #000 \\9;\n background-color: rgba(0, 0, 0, 0);\n border: 1px solid #fff;\n border-radius: 10px;\n}\n.carousel-indicators .active {\n width: 12px;\n height: 12px;\n margin: 0;\n background-color: #fff;\n}\n.carousel-caption {\n position: absolute;\n right: 15%;\n bottom: 20px;\n left: 15%;\n z-index: 10;\n padding-top: 20px;\n padding-bottom: 20px;\n color: #fff;\n text-align: center;\n text-shadow: 0 1px 2px rgba(0, 0, 0, 0.6);\n}\n.carousel-caption .btn {\n text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-prev,\n .carousel-control .icon-next {\n width: 30px;\n height: 30px;\n margin-top: -10px;\n font-size: 30px;\n }\n .carousel-control .glyphicon-chevron-left,\n .carousel-control .icon-prev {\n margin-left: -10px;\n }\n .carousel-control .glyphicon-chevron-right,\n .carousel-control .icon-next {\n margin-right: -10px;\n }\n .carousel-caption {\n right: 20%;\n left: 20%;\n padding-bottom: 30px;\n }\n .carousel-indicators {\n bottom: 20px;\n }\n}\n.clearfix:before,\n.clearfix:after,\n.dl-horizontal dd:before,\n.dl-horizontal dd:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-header:before,\n.modal-header:after,\n.modal-footer:before,\n.modal-footer:after {\n display: table;\n content: \" \";\n}\n.clearfix:after,\n.dl-horizontal dd:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-header:after,\n.modal-footer:after {\n clear: both;\n}\n.center-block {\n display: block;\n margin-right: auto;\n margin-left: auto;\n}\n.pull-right {\n float: right !important;\n}\n.pull-left {\n float: left !important;\n}\n.hide {\n display: none !important;\n}\n.show {\n display: block !important;\n}\n.invisible {\n visibility: hidden;\n}\n.text-hide {\n font: 0/0 a;\n color: transparent;\n text-shadow: none;\n background-color: transparent;\n border: 0;\n}\n.hidden {\n display: none !important;\n}\n.affix {\n position: fixed;\n}\n@-ms-viewport {\n width: device-width;\n}\n.visible-xs,\n.visible-sm,\n.visible-md,\n.visible-lg {\n display: none !important;\n}\n.visible-xs-block,\n.visible-xs-inline,\n.visible-xs-inline-block,\n.visible-sm-block,\n.visible-sm-inline,\n.visible-sm-inline-block,\n.visible-md-block,\n.visible-md-inline,\n.visible-md-inline-block,\n.visible-lg-block,\n.visible-lg-inline,\n.visible-lg-inline-block {\n display: none !important;\n}\n@media (max-width: 767px) {\n .visible-xs {\n display: block !important;\n }\n table.visible-xs {\n display: table !important;\n }\n tr.visible-xs {\n display: table-row !important;\n }\n th.visible-xs,\n td.visible-xs 
{\n display: table-cell !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-block {\n display: block !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline {\n display: inline !important;\n }\n}\n@media (max-width: 767px) {\n .visible-xs-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm {\n display: block !important;\n }\n table.visible-sm {\n display: table !important;\n }\n tr.visible-sm {\n display: table-row !important;\n }\n th.visible-sm,\n td.visible-sm {\n display: table-cell !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-block {\n display: block !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline {\n display: inline !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .visible-sm-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md {\n display: block !important;\n }\n table.visible-md {\n display: table !important;\n }\n tr.visible-md {\n display: table-row !important;\n }\n th.visible-md,\n td.visible-md {\n display: table-cell !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-block {\n display: block !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline {\n display: inline !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .visible-md-inline-block {\n display: inline-block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg {\n display: block !important;\n }\n table.visible-lg {\n display: table !important;\n }\n tr.visible-lg {\n display: table-row !important;\n }\n th.visible-lg,\n td.visible-lg {\n display: table-cell !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-block {\n display: block !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline {\n display: inline !important;\n }\n}\n@media (min-width: 1200px) {\n .visible-lg-inline-block {\n display: inline-block !important;\n }\n}\n@media (max-width: 767px) {\n .hidden-xs {\n display: none !important;\n }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n .hidden-sm {\n display: none !important;\n }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n .hidden-md {\n display: none !important;\n }\n}\n@media (min-width: 1200px) {\n .hidden-lg {\n display: none !important;\n }\n}\n.visible-print {\n display: none !important;\n}\n@media print {\n .visible-print {\n display: block !important;\n }\n table.visible-print {\n display: table !important;\n }\n tr.visible-print {\n display: table-row !important;\n }\n th.visible-print,\n td.visible-print {\n display: table-cell !important;\n }\n}\n.visible-print-block {\n display: none !important;\n}\n@media print {\n .visible-print-block {\n display: block !important;\n }\n}\n.visible-print-inline {\n display: none !important;\n}\n@media print {\n .visible-print-inline {\n display: inline !important;\n }\n}\n.visible-print-inline-block {\n display: none !important;\n}\n@media print {\n .visible-print-inline-block {\n display: inline-block !important;\n }\n}\n@media print {\n .hidden-print {\n display: none !important;\n }\n}\n/*# sourceMappingURL=bootstrap.css.map */","// stylelint-disable\n\n/*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */\n\n//\n// 1. Set default font family to sans-serif.\n// 2. 
Prevent iOS and IE text size adjust after device orientation change,\n// without disabling user zoom.\n//\n\nhtml {\n font-family: sans-serif; // 1\n -ms-text-size-adjust: 100%; // 2\n -webkit-text-size-adjust: 100%; // 2\n}\n\n//\n// Remove default margin.\n//\n\nbody {\n margin: 0;\n}\n\n// HTML5 display definitions\n// ==========================================================================\n\n//\n// Correct `block` display not defined for any HTML5 element in IE 8/9.\n// Correct `block` display not defined for `details` or `summary` in IE 10/11\n// and Firefox.\n// Correct `block` display not defined for `main` in IE 11.\n//\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nmenu,\nnav,\nsection,\nsummary {\n display: block;\n}\n\n//\n// 1. Correct `inline-block` display not defined in IE 8/9.\n// 2. Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n//\n\naudio,\ncanvas,\nprogress,\nvideo {\n display: inline-block; // 1\n vertical-align: baseline; // 2\n}\n\n//\n// Prevent modern browsers from displaying `audio` without controls.\n// Remove excess height in iOS 5 devices.\n//\n\naudio:not([controls]) {\n display: none;\n height: 0;\n}\n\n//\n// Address `[hidden]` styling not present in IE 8/9/10.\n// Hide the `template` element in IE 8/9/10/11, Safari, and Firefox < 22.\n//\n\n[hidden],\ntemplate {\n display: none;\n}\n\n// Links\n// ==========================================================================\n\n//\n// Remove the gray background color from active links in IE 10.\n//\n\na {\n background-color: transparent;\n}\n\n//\n// Improve readability of focused elements when they are also in an\n// active/hover state.\n//\n\na:active,\na:hover {\n outline: 0;\n}\n\n// Text-level semantics\n// ==========================================================================\n\n//\n// 1. Remove the bottom border in Chrome 57- and Firefox 39-.\n// 2. 
Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari.\n//\n\nabbr[title] {\n border-bottom: none; // 1\n text-decoration: underline; // 2\n text-decoration: underline dotted; // 2\n}\n\n//\n// Address style set to `bolder` in Firefox 4+, Safari, and Chrome.\n//\n\nb,\nstrong {\n font-weight: bold;\n}\n\n//\n// Address styling not present in Safari and Chrome.\n//\n\ndfn {\n font-style: italic;\n}\n\n//\n// Address variable `h1` font-size and margin within `section` and `article`\n// contexts in Firefox 4+, Safari, and Chrome.\n//\n\nh1 {\n font-size: 2em;\n margin: 0.67em 0;\n}\n\n//\n// Address styling not present in IE 8/9.\n//\n\nmark {\n background: #ff0;\n color: #000;\n}\n\n//\n// Address inconsistent and variable font size in all browsers.\n//\n\nsmall {\n font-size: 80%;\n}\n\n//\n// Prevent `sub` and `sup` affecting `line-height` in all browsers.\n//\n\nsub,\nsup {\n font-size: 75%;\n line-height: 0;\n position: relative;\n vertical-align: baseline;\n}\n\nsup {\n top: -0.5em;\n}\n\nsub {\n bottom: -0.25em;\n}\n\n// Embedded content\n// ==========================================================================\n\n//\n// Remove border when inside `a` element in IE 8/9/10.\n//\n\nimg {\n border: 0;\n}\n\n//\n// Correct overflow not hidden in IE 9/10/11.\n//\n\nsvg:not(:root) {\n overflow: hidden;\n}\n\n// Grouping content\n// ==========================================================================\n\n//\n// Address margin not present in IE 8/9 and Safari.\n//\n\nfigure {\n margin: 1em 40px;\n}\n\n//\n// Address differences between Firefox and other browsers.\n//\n\nhr {\n box-sizing: content-box;\n height: 0;\n}\n\n//\n// Contain overflow in all browsers.\n//\n\npre {\n overflow: auto;\n}\n\n//\n// Address odd `em`-unit font size rendering in all browsers.\n//\n\ncode,\nkbd,\npre,\nsamp {\n font-family: monospace, monospace;\n font-size: 1em;\n}\n\n// Forms\n// ==========================================================================\n\n//\n// Known limitation: by default, Chrome and Safari on OS X allow very limited\n// styling of `select`, unless a `border` property is set.\n//\n\n//\n// 1. Correct color not being inherited.\n// Known issue: affects color of disabled elements.\n// 2. Correct font properties not being inherited.\n// 3. Address margins set differently in Firefox 4+, Safari, and Chrome.\n//\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n color: inherit; // 1\n font: inherit; // 2\n margin: 0; // 3\n}\n\n//\n// Address `overflow` set to `hidden` in IE 8/9/10/11.\n//\n\nbutton {\n overflow: visible;\n}\n\n//\n// Address inconsistent `text-transform` inheritance for `button` and `select`.\n// All other form control elements do not inherit `text-transform` values.\n// Correct `button` style inheritance in Firefox, IE 8/9/10/11, and Opera.\n// Correct `select` style inheritance in Firefox.\n//\n\nbutton,\nselect {\n text-transform: none;\n}\n\n//\n// 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n// and `video` controls.\n// 2. Correct inability to style clickable `input` types in iOS.\n// 3. 
Improve usability and consistency of cursor style between image-type\n// `input` and others.\n//\n\nbutton,\nhtml input[type=\"button\"], // 1\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n -webkit-appearance: button; // 2\n cursor: pointer; // 3\n}\n\n//\n// Re-set default cursor for disabled elements.\n//\n\nbutton[disabled],\nhtml input[disabled] {\n cursor: default;\n}\n\n//\n// Remove inner padding and border in Firefox 4+.\n//\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n border: 0;\n padding: 0;\n}\n\n//\n// Address Firefox 4+ setting `line-height` on `input` using `!important` in\n// the UA stylesheet.\n//\n\ninput {\n line-height: normal;\n}\n\n//\n// It's recommended that you don't attempt to style these elements.\n// Firefox's implementation doesn't respect box-sizing, padding, or width.\n//\n// 1. Address box sizing set to `content-box` in IE 8/9/10.\n// 2. Remove excess padding in IE 8/9/10.\n//\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n box-sizing: border-box; // 1\n padding: 0; // 2\n}\n\n//\n// Fix the cursor style for Chrome's increment/decrement buttons. For certain\n// `font-size` values of the `input`, it causes the cursor style of the\n// decrement button to change from `default` to `text`.\n//\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n//\n// 1. Address `appearance` set to `searchfield` in Safari and Chrome.\n// 2. Address `box-sizing` set to `border-box` in Safari and Chrome.\n//\n\ninput[type=\"search\"] {\n -webkit-appearance: textfield; // 1\n box-sizing: content-box; //2\n}\n\n//\n// Remove inner padding and search cancel button in Safari and Chrome on OS X.\n// Safari (but not Chrome) clips the cancel button when the search input has\n// padding (and `textfield` appearance).\n//\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n//\n// Define consistent border, margin, and padding.\n//\n\nfieldset {\n border: 1px solid #c0c0c0;\n margin: 0 2px;\n padding: 0.35em 0.625em 0.75em;\n}\n\n//\n// 1. Correct `color` not being inherited in IE 8/9/10/11.\n// 2. Remove padding so people aren't caught out if they zero out fieldsets.\n//\n\nlegend {\n border: 0; // 1\n padding: 0; // 2\n}\n\n//\n// Remove default vertical scrollbar in IE 8/9/10/11.\n//\n\ntextarea {\n overflow: auto;\n}\n\n//\n// Don't inherit the `font-weight` (applied by a rule above).\n// NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n//\n\noptgroup {\n font-weight: bold;\n}\n\n// Tables\n// ==========================================================================\n\n//\n// Remove most spacing between table cells.\n//\n\ntable {\n border-collapse: collapse;\n border-spacing: 0;\n}\n\ntd,\nth {\n padding: 0;\n}\n","// stylelint-disable declaration-no-important, selector-no-qualifying-type\n\n/*! 
Source: https://github.com/h5bp/html5-boilerplate/blob/master/src/css/main.css */\n\n// ==========================================================================\n// Print styles.\n// Inlined to avoid the additional HTTP request: h5bp.com/r\n// ==========================================================================\n\n@media print {\n *,\n *:before,\n *:after {\n color: #000 !important; // Black prints faster: h5bp.com/s\n text-shadow: none !important;\n background: transparent !important;\n box-shadow: none !important;\n }\n\n a,\n a:visited {\n text-decoration: underline;\n }\n\n a[href]:after {\n content: \" (\" attr(href) \")\";\n }\n\n abbr[title]:after {\n content: \" (\" attr(title) \")\";\n }\n\n // Don't show links that are fragment identifiers,\n // or use the `javascript:` pseudo protocol\n a[href^=\"#\"]:after,\n a[href^=\"javascript:\"]:after {\n content: \"\";\n }\n\n pre,\n blockquote {\n border: 1px solid #999;\n page-break-inside: avoid;\n }\n\n thead {\n display: table-header-group; // h5bp.com/t\n }\n\n tr,\n img {\n page-break-inside: avoid;\n }\n\n img {\n max-width: 100% !important;\n }\n\n p,\n h2,\n h3 {\n orphans: 3;\n widows: 3;\n }\n\n h2,\n h3 {\n page-break-after: avoid;\n }\n\n // Bootstrap specific changes start\n\n // Bootstrap components\n .navbar {\n display: none;\n }\n .btn,\n .dropup > .btn {\n > .caret {\n border-top-color: #000 !important;\n }\n }\n .label {\n border: 1px solid #000;\n }\n\n .table {\n border-collapse: collapse !important;\n\n td,\n th {\n background-color: #fff !important;\n }\n }\n .table-bordered {\n th,\n td {\n border: 1px solid #ddd !important;\n }\n }\n}\n","// stylelint-disable value-list-comma-newline-after, value-list-comma-space-after, indentation, declaration-colon-newline-after, font-family-no-missing-generic-family-keyword\n\n//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. 
To use, create an\n// inline element with the appropriate classes, like so:\n//\n// Star\n\n// Import the fonts\n@font-face {\n font-family: \"Glyphicons Halflings\";\n src: url(\"@{icon-font-path}@{icon-font-name}.eot\");\n src: url(\"@{icon-font-path}@{icon-font-name}.eot?#iefix\") format(\"embedded-opentype\"),\n url(\"@{icon-font-path}@{icon-font-name}.woff2\") format(\"woff2\"),\n url(\"@{icon-font-path}@{icon-font-name}.woff\") format(\"woff\"),\n url(\"@{icon-font-path}@{icon-font-name}.ttf\") format(\"truetype\"),\n url(\"@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}\") format(\"svg\");\n}\n\n// Catchall baseclass\n.glyphicon {\n position: relative;\n top: 1px;\n display: inline-block;\n font-family: \"Glyphicons Halflings\";\n font-style: normal;\n font-weight: 400;\n line-height: 1;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk { &:before { content: \"\\002a\"; } }\n.glyphicon-plus { &:before { content: \"\\002b\"; } }\n.glyphicon-euro,\n.glyphicon-eur { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil { &:before { content: \"\\270f\"; } }\n.glyphicon-glass { &:before { content: \"\\e001\"; } }\n.glyphicon-music { &:before { content: \"\\e002\"; } }\n.glyphicon-search { &:before { content: \"\\e003\"; } }\n.glyphicon-heart { &:before { content: \"\\e005\"; } }\n.glyphicon-star { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty { &:before { content: \"\\e007\"; } }\n.glyphicon-user { &:before { content: \"\\e008\"; } }\n.glyphicon-film { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large { &:before { content: \"\\e010\"; } }\n.glyphicon-th { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list { &:before { content: \"\\e012\"; } }\n.glyphicon-ok { &:before { content: \"\\e013\"; } }\n.glyphicon-remove { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out { &:before { content: \"\\e016\"; } }\n.glyphicon-off { &:before { content: \"\\e017\"; } }\n.glyphicon-signal { &:before { content: \"\\e018\"; } }\n.glyphicon-cog { &:before { content: \"\\e019\"; } }\n.glyphicon-trash { &:before { content: \"\\e020\"; } }\n.glyphicon-home { &:before { content: \"\\e021\"; } }\n.glyphicon-file { &:before { content: \"\\e022\"; } }\n.glyphicon-time { &:before { content: \"\\e023\"; } }\n.glyphicon-road { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt { &:before { content: \"\\e025\"; } }\n.glyphicon-download { &:before { content: \"\\e026\"; } }\n.glyphicon-upload { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox { &:before { content: \"\\e028\"; } }\n.glyphicon-play-circle { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt { &:before { content: \"\\e032\"; } }\n.glyphicon-lock { &:before { content: \"\\e033\"; } }\n.glyphicon-flag { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode { &:before { content: 
\"\\e040\"; } }\n.glyphicon-tag { &:before { content: \"\\e041\"; } }\n.glyphicon-tags { &:before { content: \"\\e042\"; } }\n.glyphicon-book { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark { &:before { content: \"\\e044\"; } }\n.glyphicon-print { &:before { content: \"\\e045\"; } }\n.glyphicon-camera { &:before { content: \"\\e046\"; } }\n.glyphicon-font { &:before { content: \"\\e047\"; } }\n.glyphicon-bold { &:before { content: \"\\e048\"; } }\n.glyphicon-italic { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify { &:before { content: \"\\e055\"; } }\n.glyphicon-list { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video { &:before { content: \"\\e059\"; } }\n.glyphicon-picture { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust { &:before { content: \"\\e063\"; } }\n.glyphicon-tint { &:before { content: \"\\e064\"; } }\n.glyphicon-edit { &:before { content: \"\\e065\"; } }\n.glyphicon-share { &:before { content: \"\\e066\"; } }\n.glyphicon-check { &:before { content: \"\\e067\"; } }\n.glyphicon-move { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward { &:before { content: \"\\e070\"; } }\n.glyphicon-backward { &:before { content: \"\\e071\"; } }\n.glyphicon-play { &:before { content: \"\\e072\"; } }\n.glyphicon-pause { &:before { content: \"\\e073\"; } }\n.glyphicon-stop { &:before { content: \"\\e074\"; } }\n.glyphicon-forward { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward { &:before { content: \"\\e077\"; } }\n.glyphicon-eject { &:before { content: \"\\e078\"; } }\n.glyphicon-chevron-left { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign { &:before { content: \"\\e084\"; } }\n.glyphicon-question-sign { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign { &:before { content: \"\\e101\"; } }\n.glyphicon-gift { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf { &:before { content: \"\\e103\"; } }\n.glyphicon-fire { &:before 
{ content: \"\\e104\"; } }\n.glyphicon-eye-open { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign { &:before { content: \"\\e107\"; } }\n.glyphicon-plane { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar { &:before { content: \"\\e109\"; } }\n.glyphicon-random { &:before { content: \"\\e110\"; } }\n.glyphicon-comment { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down { &:before { content: \"\\e114\"; } }\n.glyphicon-retweet { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn { &:before { content: \"\\e122\"; } }\n.glyphicon-bell { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down { &:before { content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down { &:before { content: \"\\e134\"; } }\n.glyphicon-globe { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks { &:before { content: \"\\e137\"; } }\n.glyphicon-filter { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty { &:before { content: \"\\e143\"; } }\n.glyphicon-link { &:before { content: \"\\e144\"; } }\n.glyphicon-phone { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin { &:before { content: \"\\e146\"; } }\n.glyphicon-usd { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp { &:before { content: \"\\e149\"; } }\n.glyphicon-sort { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked { &:before { content: \"\\e157\"; } }\n.glyphicon-expand { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in { &:before { content: \"\\e161\"; } }\n.glyphicon-flash { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out { &:before { content: \"\\e163\"; } 
}\n.glyphicon-new-window { &:before { content: \"\\e164\"; } }\n.glyphicon-record { &:before { content: \"\\e165\"; } }\n.glyphicon-save { &:before { content: \"\\e166\"; } }\n.glyphicon-open { &:before { content: \"\\e167\"; } }\n.glyphicon-saved { &:before { content: \"\\e168\"; } }\n.glyphicon-import { &:before { content: \"\\e169\"; } }\n.glyphicon-export { &:before { content: \"\\e170\"; } }\n.glyphicon-send { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery { &:before { content: \"\\e179\"; } }\n.glyphicon-header { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt { &:before { content: \"\\e183\"; } }\n.glyphicon-tower { &:before { content: \"\\e184\"; } }\n.glyphicon-stats { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1 { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1 { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1 { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload { &:before { content: \"\\e198\"; } }\n.glyphicon-tree-conifer { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous { &:before { content: \"\\e200\"; } }\n.glyphicon-cd { &:before { content: \"\\e201\"; } }\n.glyphicon-save-file { &:before { content: \"\\e202\"; } }\n.glyphicon-open-file { &:before { content: \"\\e203\"; } }\n.glyphicon-level-up { &:before { content: \"\\e204\"; } }\n.glyphicon-copy { &:before { content: \"\\e205\"; } }\n.glyphicon-paste { &:before { content: \"\\e206\"; } }\n// The following 2 Glyphicons are omitted for the time being because\n// they currently use Unicode codepoints that are outside the\n// Basic Multilingual Plane (BMP). 
Older buggy versions of WebKit can't handle\n// non-BMP codepoints in CSS string escapes, and thus can't display these two icons.\n// Notably, the bug affects some older versions of the Android Browser.\n// More info: https://github.com/twbs/bootstrap/issues/10106\n// .glyphicon-door { &:before { content: \"\\1f6aa\"; } }\n// .glyphicon-key { &:before { content: \"\\1f511\"; } }\n.glyphicon-alert { &:before { content: \"\\e209\"; } }\n.glyphicon-equalizer { &:before { content: \"\\e210\"; } }\n.glyphicon-king { &:before { content: \"\\e211\"; } }\n.glyphicon-queen { &:before { content: \"\\e212\"; } }\n.glyphicon-pawn { &:before { content: \"\\e213\"; } }\n.glyphicon-bishop { &:before { content: \"\\e214\"; } }\n.glyphicon-knight { &:before { content: \"\\e215\"; } }\n.glyphicon-baby-formula { &:before { content: \"\\e216\"; } }\n.glyphicon-tent { &:before { content: \"\\26fa\"; } }\n.glyphicon-blackboard { &:before { content: \"\\e218\"; } }\n.glyphicon-bed { &:before { content: \"\\e219\"; } }\n.glyphicon-apple { &:before { content: \"\\f8ff\"; } }\n.glyphicon-erase { &:before { content: \"\\e221\"; } }\n.glyphicon-hourglass { &:before { content: \"\\231b\"; } }\n.glyphicon-lamp { &:before { content: \"\\e223\"; } }\n.glyphicon-duplicate { &:before { content: \"\\e224\"; } }\n.glyphicon-piggy-bank { &:before { content: \"\\e225\"; } }\n.glyphicon-scissors { &:before { content: \"\\e226\"; } }\n.glyphicon-bitcoin { &:before { content: \"\\e227\"; } }\n.glyphicon-btc { &:before { content: \"\\e227\"; } }\n.glyphicon-xbt { &:before { content: \"\\e227\"; } }\n.glyphicon-yen { &:before { content: \"\\00a5\"; } }\n.glyphicon-jpy { &:before { content: \"\\00a5\"; } }\n.glyphicon-ruble { &:before { content: \"\\20bd\"; } }\n.glyphicon-rub { &:before { content: \"\\20bd\"; } }\n.glyphicon-scale { &:before { content: \"\\e230\"; } }\n.glyphicon-ice-lolly { &:before { content: \"\\e231\"; } }\n.glyphicon-ice-lolly-tasted { &:before { content: \"\\e232\"; } }\n.glyphicon-education { &:before { content: \"\\e233\"; } }\n.glyphicon-option-horizontal { &:before { content: \"\\e234\"; } }\n.glyphicon-option-vertical { &:before { content: \"\\e235\"; } }\n.glyphicon-menu-hamburger { &:before { content: \"\\e236\"; } }\n.glyphicon-modal-window { &:before { content: \"\\e237\"; } }\n.glyphicon-oil { &:before { content: \"\\e238\"; } }\n.glyphicon-grain { &:before { content: \"\\e239\"; } }\n.glyphicon-sunglasses { &:before { content: \"\\e240\"; } }\n.glyphicon-text-size { &:before { content: \"\\e241\"; } }\n.glyphicon-text-color { &:before { content: \"\\e242\"; } }\n.glyphicon-text-background { &:before { content: \"\\e243\"; } }\n.glyphicon-object-align-top { &:before { content: \"\\e244\"; } }\n.glyphicon-object-align-bottom { &:before { content: \"\\e245\"; } }\n.glyphicon-object-align-horizontal{ &:before { content: \"\\e246\"; } }\n.glyphicon-object-align-left { &:before { content: \"\\e247\"; } }\n.glyphicon-object-align-vertical { &:before { content: \"\\e248\"; } }\n.glyphicon-object-align-right { &:before { content: \"\\e249\"; } }\n.glyphicon-triangle-right { &:before { content: \"\\e250\"; } }\n.glyphicon-triangle-left { &:before { content: \"\\e251\"; } }\n.glyphicon-triangle-bottom { &:before { content: \"\\e252\"; } }\n.glyphicon-triangle-top { &:before { content: \"\\e253\"; } }\n.glyphicon-console { &:before { content: \"\\e254\"; } }\n.glyphicon-superscript { &:before { content: \"\\e255\"; } }\n.glyphicon-subscript { &:before { content: \"\\e256\"; } }\n.glyphicon-menu-left { &:before 
{ content: \"\\e257\"; } }\n.glyphicon-menu-right { &:before { content: \"\\e258\"; } }\n.glyphicon-menu-down { &:before { content: \"\\e259\"; } }\n.glyphicon-menu-up { &:before { content: \"\\e260\"; } }\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// https://getbootstrap.com/docs/3.4/getting-started/#third-box-sizing\n* {\n .box-sizing(border-box);\n}\n*:before,\n*:after {\n .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n font-size: 10px;\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n\nbody {\n font-family: @font-family-base;\n font-size: @font-size-base;\n line-height: @line-height-base;\n color: @text-color;\n background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\n\n\n// Links\n\na {\n color: @link-color;\n text-decoration: none;\n\n &:hover,\n &:focus {\n color: @link-hover-color;\n text-decoration: @link-hover-decoration;\n }\n\n &:focus {\n .tab-focus();\n }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n margin: 0;\n}\n\n\n// Images\n\nimg {\n vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n padding: @thumbnail-padding;\n line-height: @line-height-base;\n background-color: @thumbnail-bg;\n border: 1px solid @thumbnail-border;\n border-radius: @thumbnail-border-radius;\n .transition(all .2s ease-in-out);\n\n // Keep them at most 100% wide\n .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n margin-top: @line-height-computed;\n margin-bottom: @line-height-computed;\n border: 0;\n border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: https://a11yproject.com/posts/how-to-hide-content\n\n.sr-only {\n position: absolute;\n width: 1px;\n height: 1px;\n padding: 0;\n margin: -1px;\n overflow: hidden;\n clip: rect(0, 0, 0, 0);\n border: 0;\n}\n\n// Use in conjunction with .sr-only to only display content when it's focused.\n// Useful for \"Skip to main content\" links; see https://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1\n// Credit: HTML5 Boilerplate\n\n.sr-only-focusable {\n &:active,\n &:focus {\n position: static;\n width: auto;\n height: auto;\n margin: 0;\n overflow: visible;\n clip: auto;\n }\n}\n\n\n// iOS \"clickable elements\" fix for role=\"button\"\n//\n// Fixes \"clickability\" issue (and more generally, the firing of events such as focus as well)\n// for traditionally non-focusable elements with role=\"button\"\n// see https://developer.mozilla.org/en-US/docs/Web/Events/click#Safari_Mobile\n\n[role=\"button\"] {\n cursor: pointer;\n}\n","// stylelint-disable indentation, property-no-vendor-prefix, selector-no-vendor-prefix\n\n// Vendor Prefixes\n//\n// All vendor mixins are deprecated as of v3.2.0 due to the introduction of\n// Autoprefixer in our Gruntfile. 
They have been removed in v4.\n\n// - Animations\n// - Backface visibility\n// - Box shadow\n// - Box sizing\n// - Content columns\n// - Hyphens\n// - Placeholder text\n// - Transformations\n// - Transitions\n// - User Select\n\n\n// Animations\n.animation(@animation) {\n -webkit-animation: @animation;\n -o-animation: @animation;\n animation: @animation;\n}\n.animation-name(@name) {\n -webkit-animation-name: @name;\n animation-name: @name;\n}\n.animation-duration(@duration) {\n -webkit-animation-duration: @duration;\n animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n -webkit-animation-timing-function: @timing-function;\n animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n -webkit-animation-delay: @delay;\n animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n -webkit-animation-iteration-count: @iteration-count;\n animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n -webkit-animation-direction: @direction;\n animation-direction: @direction;\n}\n.animation-fill-mode(@fill-mode) {\n -webkit-animation-fill-mode: @fill-mode;\n animation-fill-mode: @fill-mode;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n\n.backface-visibility(@visibility) {\n -webkit-backface-visibility: @visibility;\n -moz-backface-visibility: @visibility;\n backface-visibility: @visibility;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n// supported browsers that have box shadow capabilities now support it.\n\n.box-shadow(@shadow) {\n -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n box-shadow: @shadow;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n -webkit-box-sizing: @boxmodel;\n -moz-box-sizing: @boxmodel;\n box-sizing: @boxmodel;\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n -webkit-column-count: @column-count;\n -moz-column-count: @column-count;\n column-count: @column-count;\n -webkit-column-gap: @column-gap;\n -moz-column-gap: @column-gap;\n column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n -webkit-hyphens: @mode;\n -moz-hyphens: @mode;\n -ms-hyphens: @mode; // IE10+\n -o-hyphens: @mode;\n hyphens: @mode;\n word-wrap: break-word;\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n // Firefox\n &::-moz-placeholder {\n color: @color;\n opacity: 1; // Override Firefox's unusual default opacity; see https://github.com/twbs/bootstrap/pull/11526\n }\n &:-ms-input-placeholder { color: @color; } // Internet Explorer 10+\n &::-webkit-input-placeholder { color: @color; } // Safari and Chrome\n}\n\n// Transformations\n.scale(@ratio) {\n -webkit-transform: scale(@ratio);\n -ms-transform: scale(@ratio); // IE9 only\n -o-transform: scale(@ratio);\n transform: scale(@ratio);\n}\n.scale(@ratioX; @ratioY) {\n -webkit-transform: scale(@ratioX, @ratioY);\n -ms-transform: scale(@ratioX, @ratioY); // IE9 only\n -o-transform: scale(@ratioX, @ratioY);\n transform: scale(@ratioX, @ratioY);\n}\n.scaleX(@ratio) {\n -webkit-transform: scaleX(@ratio);\n -ms-transform: scaleX(@ratio); // IE9 only\n -o-transform: scaleX(@ratio);\n transform: scaleX(@ratio);\n}\n.scaleY(@ratio) {\n -webkit-transform: scaleY(@ratio);\n -ms-transform: scaleY(@ratio); // IE9 only\n -o-transform: scaleY(@ratio);\n transform: scaleY(@ratio);\n}\n.skew(@x; @y) {\n 
-webkit-transform: skewX(@x) skewY(@y);\n -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n -o-transform: skewX(@x) skewY(@y);\n transform: skewX(@x) skewY(@y);\n}\n.translate(@x; @y) {\n -webkit-transform: translate(@x, @y);\n -ms-transform: translate(@x, @y); // IE9 only\n -o-transform: translate(@x, @y);\n transform: translate(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n -webkit-transform: translate3d(@x, @y, @z);\n transform: translate3d(@x, @y, @z);\n}\n.rotate(@degrees) {\n -webkit-transform: rotate(@degrees);\n -ms-transform: rotate(@degrees); // IE9 only\n -o-transform: rotate(@degrees);\n transform: rotate(@degrees);\n}\n.rotateX(@degrees) {\n -webkit-transform: rotateX(@degrees);\n -ms-transform: rotateX(@degrees); // IE9 only\n -o-transform: rotateX(@degrees);\n transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n -webkit-transform: rotateY(@degrees);\n -ms-transform: rotateY(@degrees); // IE9 only\n -o-transform: rotateY(@degrees);\n transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n -webkit-perspective: @perspective;\n -moz-perspective: @perspective;\n perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n -webkit-perspective-origin: @perspective;\n -moz-perspective-origin: @perspective;\n perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n -webkit-transform-origin: @origin;\n -moz-transform-origin: @origin;\n -ms-transform-origin: @origin; // IE9 only\n transform-origin: @origin;\n}\n\n\n// Transitions\n\n.transition(@transition) {\n -webkit-transition: @transition;\n -o-transition: @transition;\n transition: @transition;\n}\n.transition-property(@transition-property) {\n -webkit-transition-property: @transition-property;\n transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n -webkit-transition-delay: @transition-delay;\n transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n -webkit-transition-duration: @transition-duration;\n transition-duration: @transition-duration;\n}\n.transition-timing-function(@timing-function) {\n -webkit-transition-timing-function: @timing-function;\n transition-timing-function: @timing-function;\n}\n.transition-transform(@transition) {\n -webkit-transition: -webkit-transform @transition;\n -moz-transition: -moz-transform @transition;\n -o-transition: -o-transform @transition;\n transition: transform @transition;\n}\n\n\n// User select\n// For selecting text on the page\n\n.user-select(@select) {\n -webkit-user-select: @select;\n -moz-user-select: @select;\n -ms-user-select: @select; // IE10+\n user-select: @select;\n}\n","// WebKit-style focus\n\n.tab-focus() {\n // WebKit-specific. 
Other browsers will keep their default outline style.\n // (Initially tried to also force default via `outline: initial`,\n // but that seems to erroneously remove the outline in Firefox altogether.)\n outline: 5px auto -webkit-focus-ring-color;\n outline-offset: -2px;\n}\n","// stylelint-disable media-feature-name-no-vendor-prefix, media-feature-parentheses-space-inside, media-feature-name-no-unknown, indentation, at-rule-name-space-after\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n.img-responsive(@display: block) {\n display: @display;\n max-width: 100%; // Part 1: Set a maximum relative to the parent\n height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// Retina image\n//\n// Short retina mixin for setting background-image and -size. Note that the\n// spelling of `min--moz-device-pixel-ratio` is intentional.\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n background-image: url(\"@{file-1x}\");\n\n @media\n only screen and (-webkit-min-device-pixel-ratio: 2),\n only screen and ( min--moz-device-pixel-ratio: 2),\n only screen and ( -o-min-device-pixel-ratio: 2/1),\n only screen and ( min-device-pixel-ratio: 2),\n only screen and ( min-resolution: 192dpi),\n only screen and ( min-resolution: 2dppx) {\n background-image: url(\"@{file-2x}\");\n background-size: @width-1x @height-1x;\n }\n}\n","// stylelint-disable selector-list-comma-newline-after, selector-no-qualifying-type\n\n//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n font-family: @headings-font-family;\n font-weight: @headings-font-weight;\n line-height: @headings-line-height;\n color: @headings-color;\n\n small,\n .small {\n font-weight: 400;\n line-height: 1;\n color: @headings-small-color;\n }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n margin-top: @line-height-computed;\n margin-bottom: (@line-height-computed / 2);\n\n small,\n .small {\n font-size: 65%;\n }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n margin-top: (@line-height-computed / 2);\n margin-bottom: (@line-height-computed / 2);\n\n small,\n .small {\n font-size: 75%;\n }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n margin-bottom: @line-height-computed;\n font-size: floor((@font-size-base * 1.15));\n font-weight: 300;\n line-height: 1.4;\n\n @media (min-width: @screen-sm-min) {\n font-size: (@font-size-base * 1.5);\n }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: (12px small font / 14px base font) * 100% = about 85%\nsmall,\n.small {\n font-size: floor((100% * @font-size-small / @font-size-base));\n}\n\nmark,\n.mark {\n padding: .2em;\n background-color: @state-warning-bg;\n}\n\n// Alignment\n.text-left { text-align: left; }\n.text-right { text-align: right; }\n.text-center { text-align: center; }\n.text-justify { text-align: justify; }\n.text-nowrap { white-space: nowrap; }\n\n// Transformation\n.text-lowercase { text-transform: lowercase; }\n.text-uppercase { text-transform: uppercase; }\n.text-capitalize { text-transform: capitalize; }\n\n// Contextual colors\n.text-muted {\n color: @text-muted;\n}\n.text-primary {\n 
.text-emphasis-variant(@brand-primary);\n}\n.text-success {\n .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n // Given the contrast here, this is the only class to have its color inverted\n // automatically.\n color: #fff;\n .bg-variant(@brand-primary);\n}\n.bg-success {\n .bg-variant(@state-success-bg);\n}\n.bg-info {\n .bg-variant(@state-info-bg);\n}\n.bg-warning {\n .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n padding-bottom: ((@line-height-computed / 2) - 1);\n margin: (@line-height-computed * 2) 0 @line-height-computed;\n border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// -------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n margin-top: 0;\n margin-bottom: (@line-height-computed / 2);\n ul,\n ol {\n margin-bottom: 0;\n }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n padding-left: 0;\n list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n .list-unstyled();\n margin-left: -5px;\n\n > li {\n display: inline-block;\n padding-right: 5px;\n padding-left: 5px;\n }\n}\n\n// Description Lists\ndl {\n margin-top: 0; // Remove browser default\n margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n line-height: @line-height-base;\n}\ndt {\n font-weight: 700;\n}\ndd {\n margin-left: 0; // Undo browser default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n.dl-horizontal {\n dd {\n &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n }\n\n @media (min-width: @dl-horizontal-breakpoint) {\n dt {\n float: left;\n width: (@dl-horizontal-offset - 20);\n clear: left;\n text-align: right;\n .text-overflow();\n }\n dd {\n margin-left: @dl-horizontal-offset;\n }\n }\n}\n\n\n// Misc\n// -------------------------\n\n// Abbreviations and acronyms\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[title],\nabbr[data-original-title] {\n cursor: help;\n}\n\n.initialism {\n font-size: 90%;\n .text-uppercase();\n}\n\n// Blockquotes\nblockquote {\n padding: (@line-height-computed / 2) @line-height-computed;\n margin: 0 0 @line-height-computed;\n font-size: @blockquote-font-size;\n border-left: 5px solid @blockquote-border-color;\n\n p,\n ul,\n ol {\n &:last-child {\n margin-bottom: 0;\n }\n }\n\n // Note: Deprecated small and .small as of v3.1.0\n // Context: https://github.com/twbs/bootstrap/issues/11660\n footer,\n small,\n .small {\n display: block;\n font-size: 80%; // back to default font-size\n line-height: @line-height-base;\n color: @blockquote-small-color;\n\n &:before {\n content: \"\\2014 \\00A0\"; // em dash, nbsp\n }\n }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n padding-right: 15px;\n padding-left: 0;\n text-align: right;\n 
border-right: 5px solid @blockquote-border-color;\n border-left: 0;\n\n // Account for citation\n footer,\n small,\n .small {\n &:before { content: \"\"; }\n &:after {\n content: \"\\00A0 \\2014\"; // nbsp, em dash\n }\n }\n}\n\n// Addresses\naddress {\n margin-bottom: @line-height-computed;\n font-style: normal;\n line-height: @line-height-base;\n}\n","// Typography\n\n.text-emphasis-variant(@color) {\n color: @color;\n a&:hover,\n a&:focus {\n color: darken(@color, 10%);\n }\n}\n","// Contextual backgrounds\n\n.bg-variant(@color) {\n background-color: @color;\n a&:hover,\n a&:focus {\n background-color: darken(@color, 10%);\n }\n}\n","// Text overflow\n// Requires inline-block or block for proper styling\n\n.text-overflow() {\n overflow: hidden;\n text-overflow: ellipsis;\n white-space: nowrap;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n padding: 2px 4px;\n font-size: 90%;\n color: @code-color;\n background-color: @code-bg;\n border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n padding: 2px 4px;\n font-size: 90%;\n color: @kbd-color;\n background-color: @kbd-bg;\n border-radius: @border-radius-small;\n box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n\n kbd {\n padding: 0;\n font-size: 100%;\n font-weight: 700;\n box-shadow: none;\n }\n}\n\n// Blocks of code\npre {\n display: block;\n padding: ((@line-height-computed - 1) / 2);\n margin: 0 0 (@line-height-computed / 2);\n font-size: (@font-size-base - 1); // 14px to 13px\n line-height: @line-height-base;\n color: @pre-color;\n word-break: break-all;\n word-wrap: break-word;\n background-color: @pre-bg;\n border: 1px solid @pre-border-color;\n border-radius: @border-radius-base;\n\n // Account for some code outputs that place code tags in pre tags\n code {\n padding: 0;\n font-size: inherit;\n color: inherit;\n white-space: pre-wrap;\n background-color: transparent;\n border-radius: 0;\n }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n max-height: @pre-scrollable-max-height;\n overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n .container-fixed();\n\n @media (min-width: @screen-sm-min) {\n width: @container-sm;\n }\n @media (min-width: @screen-md-min) {\n width: @container-md;\n }\n @media (min-width: @screen-lg-min) {\n width: @container-lg;\n }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n .make-row();\n}\n\n.row-no-gutters {\n margin-right: 0;\n margin-left: 0;\n\n [class*=\"col-\"] {\n padding-right: 0;\n padding-left: 0;\n }\n}\n\n\n// Columns\n//\n// Common styles for small and large grid columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid(xs);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n .make-grid(sm);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and 
pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n .make-grid(md);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n .make-grid(lg);\n}\n","// Grid system\n//\n// Generate semantic grid columns with these mixins.\n\n// Centered container element\n.container-fixed(@gutter: @grid-gutter-width) {\n padding-right: ceil((@gutter / 2));\n padding-left: floor((@gutter / 2));\n margin-right: auto;\n margin-left: auto;\n &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n margin-right: floor((@gutter / -2));\n margin-left: ceil((@gutter / -2));\n &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n float: left;\n width: percentage((@columns / @grid-columns));\n min-height: 1px;\n padding-right: (@gutter / 2);\n padding-left: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n margin-left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-push(@columns) {\n left: percentage((@columns / @grid-columns));\n}\n.make-xs-column-pull(@columns) {\n right: percentage((@columns / @grid-columns));\n}\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-right: (@gutter / 2);\n padding-left: (@gutter / 2);\n\n @media (min-width: @screen-sm-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-offset(@columns) {\n @media (min-width: @screen-sm-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-push(@columns) {\n @media (min-width: @screen-sm-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-sm-column-pull(@columns) {\n @media (min-width: @screen-sm-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-right: (@gutter / 2);\n padding-left: (@gutter / 2);\n\n @media (min-width: @screen-md-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-offset(@columns) {\n @media (min-width: @screen-md-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-push(@columns) {\n @media (min-width: @screen-md-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-md-column-pull(@columns) {\n @media (min-width: @screen-md-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n position: relative;\n min-height: 1px;\n padding-right: (@gutter / 2);\n padding-left: (@gutter / 2);\n\n @media (min-width: @screen-lg-min) {\n float: left;\n width: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-offset(@columns) {\n @media (min-width: @screen-lg-min) {\n margin-left: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-push(@columns) {\n @media (min-width: @screen-lg-min) {\n left: percentage((@columns / @grid-columns));\n }\n}\n.make-lg-column-pull(@columns) {\n @media (min-width: @screen-lg-min) {\n right: percentage((@columns / @grid-columns));\n }\n}\n","// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of 
`@grid-columns`.\n\n.make-grid-columns() {\n // Common styles for all sizes of grid columns, widths 1-12\n .col(@index) { // initial\n @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n .col((@index + 1), @item);\n }\n .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n .col((@index + 1), ~\"@{list}, @{item}\");\n }\n .col(@index, @list) when (@index > @grid-columns) { // terminal\n @{list} {\n position: relative;\n // Prevent columns from collapsing when empty\n min-height: 1px;\n // Inner gutter via padding\n padding-right: floor((@grid-gutter-width / 2));\n padding-left: ceil((@grid-gutter-width / 2));\n }\n }\n .col(1); // kickstart it\n}\n\n.float-grid-columns(@class) {\n .col(@index) { // initial\n @item: ~\".col-@{class}-@{index}\";\n .col((@index + 1), @item);\n }\n .col(@index, @list) when (@index =< @grid-columns) { // general\n @item: ~\".col-@{class}-@{index}\";\n .col((@index + 1), ~\"@{list}, @{item}\");\n }\n .col(@index, @list) when (@index > @grid-columns) { // terminal\n @{list} {\n float: left;\n }\n }\n .col(1); // kickstart it\n}\n\n.calc-grid-column(@index, @class, @type) when (@type = width) and (@index > 0) {\n .col-@{class}-@{index} {\n width: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index > 0) {\n .col-@{class}-push-@{index} {\n left: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = push) and (@index = 0) {\n .col-@{class}-push-0 {\n left: auto;\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index > 0) {\n .col-@{class}-pull-@{index} {\n right: percentage((@index / @grid-columns));\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = pull) and (@index = 0) {\n .col-@{class}-pull-0 {\n right: auto;\n }\n}\n.calc-grid-column(@index, @class, @type) when (@type = offset) {\n .col-@{class}-offset-@{index} {\n margin-left: percentage((@index / @grid-columns));\n }\n}\n\n// Basic looping in LESS\n.loop-grid-columns(@index, @class, @type) when (@index >= 0) {\n .calc-grid-column(@index, @class, @type);\n // next iteration\n .loop-grid-columns((@index - 1), @class, @type);\n}\n\n// Create grid for specific class\n.make-grid(@class) {\n .float-grid-columns(@class);\n .loop-grid-columns(@grid-columns, @class, width);\n .loop-grid-columns(@grid-columns, @class, pull);\n .loop-grid-columns(@grid-columns, @class, push);\n .loop-grid-columns(@grid-columns, @class, offset);\n}\n","// stylelint-disable selector-max-type, selector-max-compound-selectors, selector-no-qualifying-type\n\n//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n background-color: @table-bg;\n\n // Table cell sizing\n //\n // Reset default table behavior\n\n col[class*=\"col-\"] {\n position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n display: table-column;\n float: none;\n }\n\n td,\n th {\n &[class*=\"col-\"] {\n position: static; // Prevent border hiding in Firefox and IE9-11 (see https://github.com/twbs/bootstrap/issues/11623)\n display: table-cell;\n float: none;\n }\n }\n}\n\ncaption {\n padding-top: @table-cell-padding;\n padding-bottom: @table-cell-padding;\n color: @text-muted;\n text-align: left;\n}\n\nth {\n text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n width: 100%;\n 
max-width: 100%;\n margin-bottom: @line-height-computed;\n // Cells\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n padding: @table-cell-padding;\n line-height: @line-height-base;\n vertical-align: top;\n border-top: 1px solid @table-border-color;\n }\n }\n }\n // Bottom align for column headings\n > thead > tr > th {\n vertical-align: bottom;\n border-bottom: 2px solid @table-border-color;\n }\n // Remove top border from thead by default\n > caption + thead,\n > colgroup + thead,\n > thead:first-child {\n > tr:first-child {\n > th,\n > td {\n border-top: 0;\n }\n }\n }\n // Account for multiple tbody instances\n > tbody + tbody {\n border-top: 2px solid @table-border-color;\n }\n\n // Nesting\n .table {\n background-color: @body-bg;\n }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n padding: @table-condensed-cell-padding;\n }\n }\n }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n border: 1px solid @table-border-color;\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n border: 1px solid @table-border-color;\n }\n }\n }\n > thead > tr {\n > th,\n > td {\n border-bottom-width: 2px;\n }\n }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n > tbody > tr:nth-of-type(odd) {\n background-color: @table-bg-accent;\n }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n > tbody > tr:hover {\n background-color: @table-bg-hover;\n }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. 
Everything above that\n// will display normally.\n\n.table-responsive {\n min-height: .01%; // Workaround for IE9 bug (see https://github.com/twbs/bootstrap/issues/14837)\n overflow-x: auto;\n\n @media screen and (max-width: @screen-xs-max) {\n width: 100%;\n margin-bottom: (@line-height-computed * .75);\n overflow-y: hidden;\n -ms-overflow-style: -ms-autohiding-scrollbar;\n border: 1px solid @table-border-color;\n\n // Tighten up spacing\n > .table {\n margin-bottom: 0;\n\n // Ensure the content doesn't wrap\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th,\n > td {\n white-space: nowrap;\n }\n }\n }\n }\n\n // Special overrides for the bordered tables\n > .table-bordered {\n border: 0;\n\n // Nuke the appropriate borders so that the parent can handle them\n > thead,\n > tbody,\n > tfoot {\n > tr {\n > th:first-child,\n > td:first-child {\n border-left: 0;\n }\n > th:last-child,\n > td:last-child {\n border-right: 0;\n }\n }\n }\n\n // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n // chances are there will be only one `tr` in a `thead` and that would\n // remove the border altogether.\n > tbody,\n > tfoot {\n > tr:last-child {\n > th,\n > td {\n border-bottom: 0;\n }\n }\n }\n\n }\n }\n}\n","// Tables\n\n.table-row-variant(@state; @background) {\n // Exact selectors below required to override `.table-striped` and prevent\n // inheritance to nested tables.\n .table > thead > tr,\n .table > tbody > tr,\n .table > tfoot > tr {\n > td.@{state},\n > th.@{state},\n &.@{state} > td,\n &.@{state} > th {\n background-color: @background;\n }\n }\n\n // Hover states for `.table-hover`\n // Note: this is not available for cells or rows within `thead` or `tfoot`.\n .table-hover > tbody > tr {\n > td.@{state}:hover,\n > th.@{state}:hover,\n &.@{state}:hover > td,\n &:hover > .@{state},\n &.@{state}:hover > th {\n background-color: darken(@background, 5%);\n }\n }\n}\n","// stylelint-disable selector-no-qualifying-type, property-no-vendor-prefix, media-feature-name-no-vendor-prefix\n\n//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n // Chrome and Firefox set a `min-width: min-content;` on fieldsets,\n // so we reset that to ensure it behaves more like a standard block element.\n // See https://github.com/twbs/bootstrap/issues/12359.\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\n\nlegend {\n display: block;\n width: 100%;\n padding: 0;\n margin-bottom: @line-height-computed;\n font-size: (@font-size-base * 1.5);\n line-height: inherit;\n color: @legend-color;\n border: 0;\n border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n display: inline-block;\n max-width: 100%; // Force IE8 to wrap long content (see https://github.com/twbs/bootstrap/issues/13141)\n margin-bottom: 5px;\n font-weight: 700;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser inconsistencies.\n\ninput[type=\"search\"] {\n // Override content-box in Normalize (* isn't specific enough)\n .box-sizing(border-box);\n\n // Search inputs in iOS\n //\n // This overrides the extra rounded corners on search inputs in iOS so that our\n // `.form-control` class can properly style them. Note that this cannot simply\n // be added to `.form-control` as it's not specific enough. 
For details, see\n // https://github.com/twbs/bootstrap/issues/11586.\n -webkit-appearance: none;\n appearance: none;\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n margin: 4px 0 0;\n margin-top: 1px \\9; // IE8-9\n line-height: normal;\n\n // Apply same disabled cursor tweak as for inputs\n // Some special care is needed because
' - }) - .component('pane', { - transclude: true, - require: { - tabsCtrl: '^tabs' - }, - bindings: { - title: '@' - }, - controller: function() { - this.$onInit = function() { - this.tabsCtrl.addPane(this); - }; - }, - template: '
' - }); - - angular.module('ozone').component('navmenu', { - bindings: { - metrics: '<' - }, - templateUrl: 'static/templates/menu.html', - controller: function($http) { - var ctrl = this; - ctrl.docs = false; - $http.head("docs/index.html") - .then(function(result) { - ctrl.docs = true; - }, function() { - ctrl.docs = false; - }); - } - }); - - angular.module('ozone').component('config', { - templateUrl: 'static/templates/config.html', - controller: function($scope, $http) { - var ctrl = this; - ctrl.selectedTags = []; - ctrl.configArray = []; - - $http.get("conf?cmd=getOzoneTags") - .then(function(response) { - ctrl.tags = response.data; - var excludedTags = ['CBLOCK', 'OM', 'SCM']; - for (var i = 0; i < excludedTags.length; i++) { - var idx = ctrl.tags.indexOf(excludedTags[i]); - // Remove CBLOCK related properties - if (idx > -1) { - ctrl.tags.splice(idx, 1); - } - } - ctrl.loadAll(); - }); - - ctrl.convertToArray = function(srcObj) { - ctrl.keyTagMap = {}; - for (var idx in srcObj) { - //console.log("Adding keys for "+idx) - for (var key in srcObj[idx]) { - - if (ctrl.keyTagMap.hasOwnProperty(key)) { - ctrl.keyTagMap[key]['tag'].push(idx); - } else { - var newProp = {}; - newProp['name'] = key; - newProp['value'] = srcObj[idx][key]; - newProp['tag'] = []; - newProp['tag'].push(idx); - ctrl.keyTagMap[key] = newProp; - } - } - } - } - - ctrl.loadAll = function() { - $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags) - .then(function(response) { - - ctrl.convertToArray(response.data); - ctrl.configs = Object.values(ctrl.keyTagMap); - ctrl.component = 'All'; - ctrl.sortBy('name'); - }); - }; - - ctrl.filterTags = function() { - if (!ctrl.selectedTags) { - return true; - } - - if (ctrl.selectedTags.length < 1 && ctrl.component == 'All') { - return true; - } - - ctrl.configs = ctrl.configs.filter(function(item) { - - if (ctrl.component != 'All' && (item['tag'].indexOf(ctrl - .component) < 0)) { - return false; - } - - if (ctrl.selectedTags.length < 1) { - return true; - } - for (var tag in item['tag']) { - tag = item['tag'][tag]; - if (ctrl.selectedTags.indexOf(tag) > -1) { - return true; - } - } - return false; - }); - - }; - ctrl.configFilter = function(config) { - return false; - }; - ctrl.selected = function(tag) { - return ctrl.selectedTags.includes(tag); - }; - - ctrl.switchto = function(tag) { - ctrl.component = tag; - ctrl.reloadConfig(); - }; - - ctrl.select = function(tag) { - var tagIdx = ctrl.selectedTags.indexOf(tag); - if (tagIdx > -1) { - ctrl.selectedTags.splice(tagIdx, 1); - } else { - ctrl.selectedTags.push(tag); - } - ctrl.reloadConfig(); - }; - - ctrl.reloadConfig = function() { - ctrl.configs = []; - ctrl.configs = Object.values(ctrl.keyTagMap); - ctrl.filterTags(); - }; - - ctrl.sortBy = function(field) { - ctrl.reverse = (ctrl.propertyName === field) ? !ctrl.reverse : false; - ctrl.propertyName = field; - }; - - ctrl.allSelected = function(comp) { - //console.log("Adding key for compo ->"+comp) - return ctrl.component == comp; - }; - - } - }); - -})(); \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html deleted file mode 100644 index b52f6533fc9..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html +++ /dev/null @@ -1,91 +0,0 @@ - - -
-
- -
-
-
- All - - OM - SCM -
-
-
-
-
- - - - - - - - - - - - - - - -
Tag
{{tag}}
-
-
- - - - - - - - - - - - - - - -
- Property - - - - Value - - - Description - -
{{config.name}}{{config.value}}{{config.description}}
-
-
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html deleted file mode 100644 index c1f7d16aefa..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - -
JVM:{{$ctrl.jmx.SystemProperties.java_vm_name}} {{$ctrl.jmx.SystemProperties.java_vm_version}}
Input arguments:{{$ctrl.jmx.InputArguments}}
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html deleted file mode 100644 index 95f1b4842f1..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html +++ /dev/null @@ -1,60 +0,0 @@ - - diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html deleted file mode 100644 index 30e2d26f56f..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html +++ /dev/null @@ -1,39 +0,0 @@ - -

Overview

- - - - - - - - - - - - - - - -
Started:{{$ctrl.jmx.StartedTimeInMillis | date : 'medium'}}
Version:{{$ctrl.jmx.Version}}
Compiled:{{$ctrl.jmx.CompileInfo}}
- -

JVM parameters

- - - -
\ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html deleted file mode 100644 index facb1520304..00000000000 --- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html +++ /dev/null @@ -1,87 +0,0 @@ - -
- Please set rpc.metrics.quantile.enable to true and define the - intervals in seconds by setting rpc.metrics.percentiles.intervals - (e.g. set to 60,300) in your hdfs-site.xml - to display Hadoop RPC-related graphs. -
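A minimal sketch of the same settings applied programmatically: the two key names and the 60,300 example come from the notice above, while using org.apache.hadoop.conf.Configuration in code instead of hdfs-site.xml is an assumption made purely for illustration.

    import org.apache.hadoop.conf.Configuration;

    public final class EnableRpcQuantiles {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Turn on quantile (percentile) collection for RPC metrics.
        conf.setBoolean("rpc.metrics.quantile.enable", true);
        // Rolling windows, in seconds, over which percentiles are computed;
        // 60 and 300 mirror the example values in the notice above.
        conf.set("rpc.metrics.percentiles.intervals", "60,300");
        // In a real deployment these two keys would normally live in
        // hdfs-site.xml rather than be set in code.
        System.out.println(conf.get("rpc.metrics.percentiles.intervals"));
      }
    }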
-
-

{{window}} window

-

Quantiles based on a fixed {{window}} window. Calculated once at every - {{window}}

- -
-
-

{{metric}}

-

{{percentiles.numOps}} sample

- -
-
- -
-
-
-

Number of ops / Averages

- - - - - - - - - - - - - - -
Metric nameNumber of opsAverage time (ms)
{{key}}{{metric.numOps | number}}{{metric.avgTime | number:2}}
-
-
-

Success / Failures

- - - - - - - - - - - - - - - -
Metric nameSuccessFailures
{{key}}{{metric.success}}{{metric.failures}}
-
-
-
-

Other JMX Metrics

- - - - - - -
{{metric.key}}{{metric.value}}
-
\ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java deleted file mode 100644 index c6eae0e5fa6..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestBaseHttpServer.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import org.apache.hadoop.conf.Configuration; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test Common ozone/hdds web methods. - */ -public class TestBaseHttpServer { - @Test - public void getBindAddress() throws Exception { - Configuration conf = new Configuration(); - conf.set("enabled", "false"); - - BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") { - @Override - protected String getHttpAddressKey() { - return null; - } - - @Override - protected String getHttpsAddressKey() { - return null; - } - - @Override - protected String getHttpBindHostKey() { - return null; - } - - @Override - protected String getHttpsBindHostKey() { - return null; - } - - @Override - protected String getBindHostDefault() { - return null; - } - - @Override - protected int getHttpBindPortDefault() { - return 0; - } - - @Override - protected int getHttpsBindPortDefault() { - return 0; - } - - @Override - protected String getKeytabFile() { - return null; - } - - @Override - protected String getSpnegoPrincipal() { - return null; - } - - @Override - protected String getEnabledKey() { - return "enabled"; - } - }; - - conf.set("addresskey", "0.0.0.0:1234"); - - Assert.assertEquals("/0.0.0.0:1234", baseHttpServer - .getBindAddress("bindhostkey", "addresskey", - "default", 65).toString()); - - conf.set("bindhostkey", "1.2.3.4"); - - Assert.assertEquals("/1.2.3.4:1234", baseHttpServer - .getBindAddress("bindhostkey", "addresskey", - "default", 65).toString()); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java deleted file mode 100644 index 1c4adf61aba..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestProfileServlet.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import java.io.IOException; - -import org.apache.hadoop.hdds.server.ProfileServlet.Event; -import org.apache.hadoop.hdds.server.ProfileServlet.Output; - -import org.junit.Test; - -/** - * Test prometheus Sink. - */ -public class TestProfileServlet { - - @Test - public void testNameValidation() throws IOException { - ProfileServlet.validateFileName( - ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC)); - - ProfileServlet.validateFileName( - ProfileServlet.generateFileName(23, Output.COLLAPSED, - Event.L1_DCACHE_LOAD_MISSES)); - } - - @Test(expected = IllegalArgumentException.class) - public void testNameValidationWithNewLine() throws IOException { - ProfileServlet.validateFileName( - "test\n" + ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC)); - } - - @Test(expected = IllegalArgumentException.class) - public void testNameValidationWithSlash() throws IOException { - ProfileServlet.validateFileName( - "../" + ProfileServlet.generateFileName(1, Output.SVG, Event.ALLOC)); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java deleted file mode 100644 index f2683b59a4b..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestPrometheusMetricsSink.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; - -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.MetricsTag; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -import static java.nio.charset.StandardCharsets.UTF_8; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test prometheus Sink. 
- */ -public class TestPrometheusMetricsSink { - - @Test - public void testPublish() throws IOException { - //GIVEN - MetricsSystem metrics = DefaultMetricsSystem.instance(); - - metrics.init("test"); - PrometheusMetricsSink sink = new PrometheusMetricsSink(); - metrics.register("Prometheus", "Prometheus", sink); - TestMetrics testMetrics = metrics - .register("TestMetrics", "Testing metrics", new TestMetrics()); - - metrics.start(); - testMetrics.numBucketCreateFails.incr(); - metrics.publishMetricsNow(); - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8); - - //WHEN - sink.writeMetrics(writer); - writer.flush(); - - //THEN - String writtenMetrics = stream.toString(UTF_8.name()); - Assert.assertTrue( - "The expected metric line is missing from prometheus metrics output", - writtenMetrics.contains( - "test_metrics_num_bucket_create_fails{context=\"dfs\"") - ); - - metrics.stop(); - metrics.shutdown(); - } - - @Test - public void testPublishWithSameName() throws IOException { - //GIVEN - MetricsSystem metrics = DefaultMetricsSystem.instance(); - - metrics.init("test"); - PrometheusMetricsSink sink = new PrometheusMetricsSink(); - metrics.register("Prometheus", "Prometheus", sink); - metrics.register("FooBar", "fooBar", (MetricsSource) (collector, all) -> { - collector.addRecord("RpcMetrics").add(new MetricsTag(PORT_INFO, "1234")) - .addGauge(COUNTER_INFO, 123).endRecord(); - - collector.addRecord("RpcMetrics").add(new MetricsTag( - PORT_INFO, "2345")).addGauge(COUNTER_INFO, 234).endRecord(); - }); - - metrics.start(); - metrics.publishMetricsNow(); - - ByteArrayOutputStream stream = new ByteArrayOutputStream(); - OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8); - - //WHEN - sink.writeMetrics(writer); - writer.flush(); - - //THEN - String writtenMetrics = stream.toString(UTF_8.name()); - Assert.assertTrue( - "The expected metric line is missing from prometheus metrics output", - writtenMetrics.contains( - "rpc_metrics_counter{port=\"2345\"")); - - Assert.assertTrue( - "The expected metric line is missing from prometheus metrics " - + "output", - writtenMetrics.contains( - "rpc_metrics_counter{port=\"1234\"")); - - metrics.stop(); - metrics.shutdown(); - } - - @Test - public void testNamingCamelCase() { - PrometheusMetricsSink sink = new PrometheusMetricsSink(); - - Assert.assertEquals("rpc_time_some_metrics", - sink.prometheusName("RpcTime", "SomeMetrics")); - - Assert.assertEquals("om_rpc_time_om_info_keys", - sink.prometheusName("OMRpcTime", "OMInfoKeys")); - - Assert.assertEquals("rpc_time_small", - sink.prometheusName("RpcTime", "small")); - } - - @Test - public void testNamingRocksDB() { - //RocksDB metrics are handled differently. 
- PrometheusMetricsSink sink = new PrometheusMetricsSink(); - Assert.assertEquals("rocksdb_om.db_num_open_connections", - sink.prometheusName("Rocksdb_om.db", "num_open_connections")); - } - - @Test - public void testNamingPipeline() { - PrometheusMetricsSink sink = new PrometheusMetricsSink(); - - String recordName = "SCMPipelineMetrics"; - String metricName = "NumBlocksAllocated-" - + "RATIS-THREE-47659e3d-40c9-43b3-9792-4982fc279aba"; - Assert.assertEquals( - "scm_pipeline_metrics_" - + "num_blocks_allocated_" - + "ratis_three_47659e3d_40c9_43b3_9792_4982fc279aba", - sink.prometheusName(recordName, metricName)); - } - - @Test - public void testNamingSpaces() { - PrometheusMetricsSink sink = new PrometheusMetricsSink(); - - String recordName = "JvmMetrics"; - String metricName = "GcTimeMillisG1 Young Generation"; - Assert.assertEquals( - "jvm_metrics_gc_time_millis_g1_young_generation", - sink.prometheusName(recordName, metricName)); - } - - /** - * Example metric pojo. - */ - @Metrics(about = "Test Metrics", context = "dfs") - private static class TestMetrics { - - @Metric - private MutableCounterLong numBucketCreateFails; - } - - public static final MetricsInfo PORT_INFO = new MetricsInfo() { - @Override - public String name() { - return "PORT"; - } - - @Override - public String description() { - return "port"; - } - }; - - public static final MetricsInfo COUNTER_INFO = new MetricsInfo() { - @Override - public String name() { - return "COUNTER"; - } - - @Override - public String description() { - return "counter"; - } - }; - -} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java deleted file mode 100644 index 9735d2cab97..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.test.PathUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Unit tests for {@link ServerUtils}. - */ -public class TestServerUtils { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - /** - * Test {@link ServerUtils#getScmDbDir}. 
- */ - @Test - public void testGetScmDbDir() { - final File testDir = PathUtils.getTestDir(TestServerUtils.class); - final File dbDir = new File(testDir, "scmDbDir"); - final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - - try { - assertFalse(metaDir.exists()); - assertFalse(dbDir.exists()); - assertEquals(dbDir, ServerUtils.getScmDbDir(conf)); - assertTrue(dbDir.exists()); - assertFalse(metaDir.exists()); - } finally { - FileUtils.deleteQuietly(dbDir); - } - } - - /** - * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS - * when OZONE_SCM_DB_DIRS is undefined. - */ - @Test - public void testGetScmDbDirWithFallback() { - final File testDir = PathUtils.getTestDir(TestServerUtils.class); - final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - try { - assertFalse(metaDir.exists()); - assertEquals(metaDir, ServerUtils.getScmDbDir(conf)); - assertTrue(metaDir.exists()); - } finally { - FileUtils.deleteQuietly(metaDir); - } - } - - @Test - public void testNoScmDbDirConfigured() { - thrown.expect(IllegalArgumentException.class); - ServerUtils.getScmDbDir(new OzoneConfiguration()); - } - - @Test - public void ozoneMetadataDirIsMandatory() { - thrown.expect(IllegalArgumentException.class); - ServerUtils.getOzoneMetaDirPath(new OzoneConfiguration()); - } - - @Test - public void ozoneMetadataDirAcceptsSingleItem() { - final File testDir = PathUtils.getTestDir(TestServerUtils.class); - final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - - try { - assertFalse(metaDir.exists()); - assertEquals(metaDir, ServerUtils.getOzoneMetaDirPath(conf)); - assertTrue(metaDir.exists()); - } finally { - FileUtils.deleteQuietly(metaDir); - } - } - - @Test - public void ozoneMetadataDirRejectsList() { - final Configuration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/data/meta1,/data/meta2"); - thrown.expect(IllegalArgumentException.class); - - ServerUtils.getOzoneMetaDirPath(conf); - } - -} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java deleted file mode 100644 index 3f34a70e6e7..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/EventHandlerStub.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import java.util.ArrayList; -import java.util.List; - -/** - * Dummy class for testing to collect all the received events. - */ -public class EventHandlerStub implements EventHandler { - - private List receivedEvents = new ArrayList<>(); - - @Override - public void onMessage(PAYLOAD payload, EventPublisher publisher) { - receivedEvents.add(payload); - } - - public List getReceivedEvents() { - return receivedEvents; - } -} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java deleted file mode 100644 index 0c1200f6d14..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; - -/** - * Testing the basic functionality of the event queue. - */ -public class TestEventQueue { - - private static final Event EVENT1 = - new TypedEvent<>(Long.class, "SCM_EVENT1"); - private static final Event EVENT2 = - new TypedEvent<>(Long.class, "SCM_EVENT2"); - - private static final Event EVENT3 = - new TypedEvent<>(Long.class, "SCM_EVENT3"); - private static final Event EVENT4 = - new TypedEvent<>(Long.class, "SCM_EVENT4"); - - private EventQueue queue; - - @Before - public void startEventQueue() { - DefaultMetricsSystem.initialize(getClass().getSimpleName()); - queue = new EventQueue(); - } - - @After - public void stopEventQueue() { - DefaultMetricsSystem.shutdown(); - queue.close(); - } - - @Test - public void simpleEvent() { - - final long[] result = new long[2]; - - queue.addHandler(EVENT1, (payload, publisher) -> result[0] = payload); - - queue.fireEvent(EVENT1, 11L); - queue.processAll(1000); - Assert.assertEquals(11, result[0]); - - } - - @Test - public void multipleSubscriber() { - final long[] result = new long[2]; - queue.addHandler(EVENT2, (payload, publisher) -> result[0] = payload); - - queue.addHandler(EVENT2, (payload, publisher) -> result[1] = payload); - - queue.fireEvent(EVENT2, 23L); - queue.processAll(1000); - Assert.assertEquals(23, result[0]); - Assert.assertEquals(23, result[1]); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java deleted file mode 100644 index bb05ef453e6..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueueChain.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.junit.Test; - -/** - * More realistic event test with sending event from one listener. 
- */ -public class TestEventQueueChain { - - private static final Event DECOMMISSION = - new TypedEvent<>(FailedNode.class); - - private static final Event DECOMMISSION_START = - new TypedEvent<>(FailedNode.class); - - @Test - public void simpleEvent() { - EventQueue queue = new EventQueue(); - - queue.addHandler(DECOMMISSION, new PipelineManager()); - queue.addHandler(DECOMMISSION_START, new NodeWatcher()); - - queue.fireEvent(DECOMMISSION, new FailedNode("node1")); - - queue.processAll(5000); - } - - - static class FailedNode { - private final String nodeId; - - FailedNode(String nodeId) { - this.nodeId = nodeId; - } - - String getNodeId() { - return nodeId; - } - } - - private static class PipelineManager implements EventHandler { - - @Override - public void onMessage(FailedNode message, EventPublisher publisher) { - - System.out.println( - "Closing pipelines for all pipelines including node: " + message - .getNodeId()); - - publisher.fireEvent(DECOMMISSION_START, message); - } - - } - - private static class NodeWatcher implements EventHandler { - - @Override - public void onMessage(FailedNode message, EventPublisher publisher) { - System.out.println("Clear timer"); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java deleted file mode 100644 index dcbcdb03f96..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventWatcher.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.server.events; - -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.List; -import java.util.Objects; - -/** - * Test the basic functionality of event watcher. - */ -public class TestEventWatcher { - - private static final TypedEvent WATCH_UNDER_REPLICATED = - new TypedEvent<>(UnderreplicatedEvent.class); - - private static final TypedEvent UNDER_REPLICATED = - new TypedEvent<>(UnderreplicatedEvent.class); - - private static final TypedEvent - REPLICATION_COMPLETED = new TypedEvent<>(ReplicationCompletedEvent.class); - - private LeaseManager leaseManager; - - @Before - public void startLeaseManager() { - DefaultMetricsSystem.instance(); - leaseManager = new LeaseManager<>("Test", 2000L); - leaseManager.start(); - } - - @After - public void stopLeaseManager() { - leaseManager.shutdown(); - DefaultMetricsSystem.shutdown(); - } - - @Test - public void testEventHandling() throws InterruptedException { - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - long id1 = HddsIdFactory.getLongId(); - long id2 = HddsIdFactory.getLongId(); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(id1, "C1")); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(id2, "C2")); - - Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); - - Thread.sleep(1000); - - queue.fireEvent(REPLICATION_COMPLETED, - new ReplicationCompletedEvent(id1, "C2", "D1")); - - Assert.assertEquals(0, underReplicatedEvents.getReceivedEvents().size()); - - Thread.sleep(1500); - - queue.processAll(1000L); - - Assert.assertEquals(1, underReplicatedEvents.getReceivedEvents().size()); - Assert.assertEquals(id2, - underReplicatedEvents.getReceivedEvents().get(0).id); - - } - - @Test - public void testInprogressFilter() throws InterruptedException { - - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - UnderreplicatedEvent event1 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event1); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2")); - - queue.fireEvent(WATCH_UNDER_REPLICATED, - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1")); - - queue.processAll(1000L); - Thread.sleep(1000L); - List c1todo = replicationWatcher - .getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); - - Assert.assertEquals(2, c1todo.size()); - Assert.assertTrue(replicationWatcher.contains(event1)); - Thread.sleep(1500L); - - c1todo = replicationWatcher - 
.getTimeoutEvents(e -> e.containerId.equalsIgnoreCase("C1")); - Assert.assertEquals(0, c1todo.size()); - Assert.assertFalse(replicationWatcher.contains(event1)); - - } - - @Test - public void testMetrics() throws InterruptedException { - - DefaultMetricsSystem.initialize("test"); - - EventQueue queue = new EventQueue(); - - EventWatcher - replicationWatcher = createEventWatcher(); - - EventHandlerStub underReplicatedEvents = - new EventHandlerStub<>(); - - queue.addHandler(UNDER_REPLICATED, underReplicatedEvents); - - replicationWatcher.start(queue); - - //send 3 event to track 3 in-progress activity - UnderreplicatedEvent event1 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - UnderreplicatedEvent event2 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C2"); - - UnderreplicatedEvent event3 = - new UnderreplicatedEvent(HddsIdFactory.getLongId(), "C1"); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event1); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event2); - - queue.fireEvent(WATCH_UNDER_REPLICATED, event3); - - //1st event is completed, don't need to track any more - ReplicationCompletedEvent event1Completed = - new ReplicationCompletedEvent(event1.id, "C1", "D1"); - - queue.fireEvent(REPLICATION_COMPLETED, event1Completed); - - //lease manager timeout = 2000L - Thread.sleep(3 * 2000L); - - queue.processAll(2000L); - - //until now: 3 in-progress activities are tracked with three - // UnderreplicatedEvents. The first one is completed, the remaining two - // are timed out (as the timeout -- defined in the lease manager -- is - // 2000ms). - - EventWatcherMetrics metrics = replicationWatcher.getMetrics(); - - //3 events are received - Assert.assertEquals(3, metrics.getTrackedEvents().value()); - - //completed + timed out = all messages - Assert.assertEquals( - "number of timed out and completed messages should be the same as the" - + " all messages", - metrics.getTrackedEvents().value(), - metrics.getCompletedEvents().value() + metrics.getTimedOutEvents() - .value()); - - //_at least_ two are timed out. - Assert.assertTrue("At least two events should be timed out.", - metrics.getTimedOutEvents().value() >= 2); - - DefaultMetricsSystem.shutdown(); - } - - private EventWatcher - createEventWatcher() { - return new CommandWatcherExample(WATCH_UNDER_REPLICATED, - REPLICATION_COMPLETED, leaseManager); - } - - private static class CommandWatcherExample - extends EventWatcher { - - CommandWatcherExample(Event startEvent, - Event completionEvent, - LeaseManager leaseManager) { - super("TestCommandWatcher", startEvent, completionEvent, leaseManager); - } - - @Override - protected void onTimeout(EventPublisher publisher, - UnderreplicatedEvent payload) { - publisher.fireEvent(UNDER_REPLICATED, payload); - } - - @Override - protected void onFinished(EventPublisher publisher, - UnderreplicatedEvent payload) { - //Good job. We did it. 
- } - - @Override - public EventWatcherMetrics getMetrics() { - return super.getMetrics(); - } - } - - private static class ReplicationCompletedEvent - implements IdentifiableEventPayload { - - private final long id; - - private final String containerId; - - private final String datanodeId; - - ReplicationCompletedEvent(long id, String containerId, - String datanodeId) { - this.id = id; - this.containerId = containerId; - this.datanodeId = datanodeId; - } - - @Override - public long getId() { - return id; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ReplicationCompletedEvent that = (ReplicationCompletedEvent) o; - return Objects.equals(containerId, that.containerId) && Objects - .equals(datanodeId, that.datanodeId); - } - - @Override - public int hashCode() { - - return Objects.hash(containerId, datanodeId); - } - } - - private static class UnderreplicatedEvent - - implements IdentifiableEventPayload { - - private final long id; - - private final String containerId; - - UnderreplicatedEvent(long id, String containerId) { - this.containerId = containerId; - this.id = id; - } - - @Override - public long getId() { - return id; - } - } - -} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java deleted file mode 100644 index 720dd6fa4d1..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for Event Watcher. - */ -package org.apache.hadoop.hdds.server.events; \ No newline at end of file diff --git a/hadoop-hdds/framework/src/test/resources/ozone-site.xml b/hadoop-hdds/framework/src/test/resources/ozone-site.xml deleted file mode 100644 index 77dd7ef9940..00000000000 --- a/hadoop-hdds/framework/src/test/resources/ozone-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml deleted file mode 100644 index a17433732d4..00000000000 --- a/hadoop-hdds/pom.xml +++ /dev/null @@ -1,406 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-main-ozone - 0.5.0-SNAPSHOT - ../pom.ozone.xml - - - hadoop-hdds - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Project - Apache Hadoop HDDS - pom - - - client - common - framework - container-service - server-scm - tools - docs - config - - - - - 0.5.0-SNAPSHOT - - - 0.5.0-201fc85-SNAPSHOT - - 1.60 - - 0.5.1 - 1.5.0.Final - - 3.0.0-M1 - - 5.3.1 - - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - - - - - - org.apache.hadoop - hadoop-hdds-common - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-client - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-tools - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-server-framework - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-server-scm - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-container-service - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-docs - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-config - ${hdds.version} - - - - org.apache.hadoop - hadoop-hdds-container-service - ${hdds.version} - test-jar - - - - org.apache.hadoop - hadoop-hdds-server-scm - test-jar - ${hdds.version} - - - - org.openjdk.jmh - jmh-core - 1.19 - - - - org.openjdk.jmh - jmh-generator-annprocess - 1.19 - - - - org.apache.ratis - ratis-proto-shaded - ${ratis.version} - - - ratis-common - org.apache.ratis - ${ratis.version} - - - ratis-client - org.apache.ratis - ${ratis.version} - - - ratis-server - org.apache.ratis - ${ratis.version} - - - ratis-netty - org.apache.ratis - ${ratis.version} - - - ratis-grpc - org.apache.ratis - ${ratis.version} - - - - org.bouncycastle - bcprov-jdk15on - ${bouncycastle.version} - - - org.bouncycastle - bcpkix-jdk15on - ${bouncycastle.version} - - - - org.junit.jupiter - junit-jupiter-api - ${junit.jupiter.version} - test - - - - - - org.apache.hadoop - hadoop-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-hdfs - ${hadoop.version} - - - org.apache.hadoop - hadoop-hdfs-client - ${hadoop.version} - - - com.squareup.okhttp - okhttp - - - - - org.apache.hadoop - hadoop-common - ${hadoop.version} - test - test-jar - - - org.apache.hadoop - hadoop-hdfs - ${hadoop.version} - test - test-jar - - - info.picocli - picocli - 3.9.6 - - - com.google.protobuf - protobuf-java - compile - - - com.google.guava - guava - compile - - - junit - junit - test - - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - depcheck - - - - false - - - - - - - - - org.apache.rat - 
apache-rat-plugin - - - **/hs_err*.log - **/target/** - .gitattributes - .idea/** - src/main/resources/webapps/static/angular-1.6.4.min.js - src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js - src/main/resources/webapps/static/angular-route-1.6.4.min.js - src/main/resources/webapps/static/d3-3.5.17.min.js - src/main/resources/webapps/static/nvd3-1.8.5.min.css.map - src/main/resources/webapps/static/nvd3-1.8.5.min.css - src/main/resources/webapps/static/nvd3-1.8.5.min.js.map - src/main/resources/webapps/static/nvd3-1.8.5.min.js - src/main/resources/webapps/static/jquery-3.4.1.min.js - src/main/resources/webapps/static/bootstrap-3.4.1/** - src/test/resources/additionalfields.container - src/test/resources/incorrect.checksum.container - src/test/resources/incorrect.container - src/test/resources/test.db.ini - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - package - - build-classpath - - - ${project.build.directory}/classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - attach-classpath-artifact - package - - attach-artifact - - - - - ${project.build.directory}/classpath - cp - classpath - - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - - - - - parallel-tests - - - - org.apache.hadoop - hadoop-maven-plugins - - - parallel-tests-createdir - - parallel-tests-createdir - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testsThreadCount} - false - ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true - - ${testsThreadCount} - ${test.build.data}/${surefire.forkNumber} - ${test.build.dir}/${surefire.forkNumber} - ${hadoop.tmp.dir}/${surefire.forkNumber} - - - - - - ${test.build.data} - - - - - - fork-${surefire.forkNumber} - - - - - - - - diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml deleted file mode 100644 index 68a5cd8061b..00000000000 --- a/hadoop-hdds/server-scm/pom.xml +++ /dev/null @@ -1,153 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - hadoop-hdds-server-scm - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Storage Container Manager Server - Apache Hadoop HDDS SCM Server - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - - org.apache.hadoop - hadoop-hdds-container-service - - - - org.apache.hadoop - hadoop-hdds-client - - - - org.apache.hadoop - hadoop-hdds-server-framework - - - - org.apache.hadoop - hadoop-hdds-docs - - - - org.apache.hadoop - hadoop-hdds-container-service - test - test-jar - - - - org.hamcrest - hamcrest-core - 1.3 - test - - - io.dropwizard.metrics - metrics-core - - - org.assertj - assertj-core - test - - - org.openjdk.jmh - jmh-core - test - - - org.openjdk.jmh - jmh-generator-annprocess - test - - - org.mockito - mockito-all - test - - - org.hamcrest - hamcrest-all - 1.3 - - - org.bouncycastle - bcprov-jdk15on - - - com.github.spotbugs - spotbugs - provided - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-common-html - prepare-package - - unpack - - - - - org.apache.hadoop - hadoop-hdds-server-framework - ${project.build.outputDirectory} - - webapps/static/**/*.* - - - org.apache.hadoop - hadoop-hdds-docs - ${project.build.outputDirectory}/webapps/scm - - docs/**/*.* - - - true - - - - - - - - ${basedir}/../../hadoop-hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - - - diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java deleted file mode 100644 index 426341a32f4..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.safemode.Precheck; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * SCM utility class. - */ -public final class ScmUtils { - private static final Logger LOG = LoggerFactory - .getLogger(ScmUtils.class); - - private ScmUtils() { - } - - /** - * Perform all prechecks for given scm operation. - * - * @param operation - * @param preChecks prechecks to be performed - */ - public static void preCheck(ScmOps operation, Precheck... preChecks) - throws SCMException { - for (Precheck preCheck : preChecks) { - preCheck.check(operation); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java deleted file mode 100644 index 0bdbeb894bc..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -/** - * - * Block APIs. - * Container is transparent to these APIs. 
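The BlockManager interface that begins in this hunk is the allocation entry point the rest of the SCM block code below builds on. A caller-side sketch of that API, with the wiring (blockManager, blockSize, the key's block list) and the "ozone" owner string assumed for illustration rather than taken from these sources:

  import java.io.IOException;
  import java.util.List;

  import org.apache.hadoop.hdds.client.BlockID;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
  import org.apache.hadoop.hdds.scm.block.BlockManager;
  import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
  import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;

  final class BlockManagerUsageSketch {
    /** Allocate one block of the requested size with Ratis/THREE replication. */
    static AllocatedBlock allocateOne(BlockManager blockManager, long blockSize)
        throws IOException {
      return blockManager.allocateBlock(blockSize,
          HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE,
          "ozone",              // owner; illustrative value
          new ExcludeList());   // nothing excluded on the first attempt
    }

    /** Delete all blocks of one object key in a single atomic log transaction. */
    static void deleteKeyBlocks(BlockManager blockManager, List<BlockID> keyBlocks)
        throws IOException {
      blockManager.deleteBlocks(keyBlocks);
    }
  }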
- */ -public interface BlockManager extends Closeable { - /** - * Allocates a new block for a given size. - * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return AllocatedBlock - * @throws IOException - */ - AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, String owner, - ExcludeList excludeList) throws IOException; - - /** - * Deletes a list of blocks in an atomic operation. Internally, SCM - * writes these blocks into a {@link DeletedBlockLog} and deletes them - * from SCM DB. If this is successful, given blocks are entering pending - * deletion state and becomes invisible from SCM namespace. - * - * @param blockIDs block IDs. This is often the list of blocks of - * a particular object key. - * @throws IOException if exception happens, non of the blocks is deleted. - */ - void deleteBlocks(List blockIDs) throws IOException; - - /** - * @return the block deletion transaction log maintained by SCM. - */ - DeletedBlockLog getDeletedBlockLog(); - - /** - * Start block manager background services. - * @throws IOException - */ - void start() throws IOException; - - /** - * Shutdown block manager background services. - * @throws IOException - */ - void stop() throws IOException; - - /** - * @return the block deleting service executed in SCM. - */ - SCMBlockDeletingService getSCMBlockDeletingService(); - - /** - * Set SafeMode status. - * - * @param safeModeStatus - */ - void setSafeModeStatus(boolean safeModeStatus); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java deleted file mode 100644 index 4c182c355b9..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ /dev/null @@ -1,362 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import javax.management.ObjectName; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.hdds.utils.UniqueId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .INVALID_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; - - -/** Block Manager manages the block access for SCM. */ -public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { - private static final Logger LOG = - LoggerFactory.getLogger(BlockManagerImpl.class); - // TODO : FIX ME : Hard coding the owner. - // Currently only user of the block service is Ozone, CBlock manages blocks - // by itself and does not rely on the Block service offered by SCM. - - private final PipelineManager pipelineManager; - private final ContainerManager containerManager; - - private final long containerSize; - - private final DeletedBlockLog deletedBlockLog; - private final SCMBlockDeletingService blockDeletingService; - - private ObjectName mxBean; - private SafeModePrecheck safeModePrecheck; - - /** - * Constructor. - * - * @param conf - configuration. 
- * @param scm - * @throws IOException - */ - public BlockManagerImpl(final Configuration conf, - final StorageContainerManager scm) { - Objects.requireNonNull(scm, "SCM cannot be null"); - this.pipelineManager = scm.getPipelineManager(); - this.containerManager = scm.getContainerManager(); - - this.containerSize = (long)conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - - mxBean = MBeans.register("BlockManager", "BlockManagerImpl", this); - - // SCM block deleting transaction log and deleting service. - deletedBlockLog = new DeletedBlockLogImpl(conf, scm.getContainerManager(), - scm.getScmMetadataStore()); - long svcInterval = - conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = - conf.getTimeDuration( - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - blockDeletingService = - new SCMBlockDeletingService(deletedBlockLog, containerManager, - scm.getScmNodeManager(), scm.getEventQueue(), svcInterval, - serviceTimeout, conf); - safeModePrecheck = new SafeModePrecheck(conf); - } - - /** - * Start block manager services. - * - * @throws IOException - */ - public void start() throws IOException { - this.blockDeletingService.start(); - } - - /** - * Shutdown block manager services. - * - * @throws IOException - */ - public void stop() throws IOException { - this.blockDeletingService.shutdown(); - this.close(); - } - - /** - * Allocates a block in a container and returns that info. - * - * @param size - Block Size - * @param type Replication Type - * @param factor - Replication Factor - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return Allocated block - * @throws IOException on failure. - */ - @Override - public AllocatedBlock allocateBlock(final long size, ReplicationType type, - ReplicationFactor factor, String owner, ExcludeList excludeList) - throws IOException { - if (LOG.isTraceEnabled()) { - LOG.trace("Size;{} , type : {}, factor : {} ", size, type, factor); - } - ScmUtils.preCheck(ScmOps.allocateBlock, safeModePrecheck); - if (size < 0 || size > containerSize) { - LOG.warn("Invalid block size requested : {}", size); - throw new SCMException("Unsupported block size: " + size, - INVALID_BLOCK_SIZE); - } - - /* - Here is the high level logic. - - 1. We try to find pipelines in open state. - - 2. If there are no pipelines in OPEN state, then we try to create one. - - 3. We allocate a block from the available containers in the selected - pipeline. - - TODO : #CLUTIL Support random picking of two containers from the list. - So we can use different kind of policies. - */ - - ContainerInfo containerInfo; - - while (true) { - List availablePipelines = - pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN, - excludeList.getDatanodes(), excludeList.getPipelineIds()); - Pipeline pipeline = null; - if (availablePipelines.size() == 0) { - try { - // TODO: #CLUTIL Remove creation logic when all replication types and - // factors are handled by pipeline creator - pipeline = pipelineManager.createPipeline(type, factor); - } catch (IOException e) { - LOG.warn("Pipeline creation failed for type:{} factor:{}. 
Retrying " + - "get pipelines call once.", type, factor, e); - availablePipelines = pipelineManager - .getPipelines(type, factor, Pipeline.PipelineState.OPEN, - excludeList.getDatanodes(), excludeList.getPipelineIds()); - if (availablePipelines.size() == 0) { - LOG.info("Could not find available pipeline of type:{} and " + - "factor:{} even after retrying", type, factor); - break; - } - } - } - - if (null == pipeline) { - // TODO: #CLUTIL Make the selection policy driven. - pipeline = availablePipelines - .get((int) (Math.random() * availablePipelines.size())); - } - - // look for OPEN containers that match the criteria. - containerInfo = containerManager.getMatchingContainer(size, owner, - pipeline, excludeList.getContainerIds()); - - if (containerInfo != null) { - return newBlock(containerInfo); - } - } - - // we have tried all strategies we know and but somehow we are not able - // to get a container for this block. Log that info and return a null. - LOG.error( - "Unable to allocate a block for the size: {}, type: {}, factor: {}", - size, type, factor); - return null; - } - - /** - * newBlock - returns a new block assigned to a container. - * - * @param containerInfo - Container Info. - * @return AllocatedBlock - */ - private AllocatedBlock newBlock(ContainerInfo containerInfo) { - try { - final Pipeline pipeline = pipelineManager - .getPipeline(containerInfo.getPipelineID()); - // TODO : Revisit this local ID allocation when HA is added. - long localID = UniqueId.next(); - long containerID = containerInfo.getContainerID(); - AllocatedBlock.Builder abb = new AllocatedBlock.Builder() - .setContainerBlockID(new ContainerBlockID(containerID, localID)) - .setPipeline(pipeline); - if (LOG.isTraceEnabled()) { - LOG.trace("New block allocated : {} Container ID: {}", localID, - containerID); - } - pipelineManager.incNumBlocksAllocatedMetric(pipeline.getId()); - return abb.build(); - } catch (PipelineNotFoundException ex) { - LOG.error("Pipeline Machine count is zero.", ex); - return null; - } - } - - /** - * Deletes a list of blocks in an atomic operation. Internally, SCM writes - * these blocks into a - * {@link DeletedBlockLog} and deletes them from SCM DB. If this is - * successful, given blocks are - * entering pending deletion state and becomes invisible from SCM namespace. - * - * @param blockIDs block IDs. This is often the list of blocks of a - * particular object key. - * @throws IOException if exception happens, non of the blocks is deleted. - */ - @Override - public void deleteBlocks(List blockIDs) throws IOException { - ScmUtils.preCheck(ScmOps.deleteBlock, safeModePrecheck); - - LOG.info("Deleting blocks {}", StringUtils.join(",", blockIDs)); - Map> containerBlocks = new HashMap<>(); - // TODO: track the block size info so that we can reclaim the container - // TODO: used space when the block is deleted. - for (BlockID block : blockIDs) { - // Merge blocks to a container to blocks mapping, - // prepare to persist this info to the deletedBlocksLog. - long containerID = block.getContainerID(); - if (containerBlocks.containsKey(containerID)) { - containerBlocks.get(containerID).add(block.getLocalID()); - } else { - List item = new ArrayList<>(); - item.add(block.getLocalID()); - containerBlocks.put(containerID, item); - } - } - - try { - deletedBlockLog.addTransactions(containerBlocks); - } catch (IOException e) { - throw new IOException( - "Skip writing the deleted blocks info to" - + " the delLog because addTransaction fails. 
Batch skipped: " - + StringUtils.join(",", blockIDs), e); - } - // TODO: Container report handling of the deleted blocks: - // Remove tombstone and update open container usage. - // We will revisit this when the closed container replication is done. - } - - @Override - public DeletedBlockLog getDeletedBlockLog() { - return this.deletedBlockLog; - } - - /** - * Close the resources for BlockManager. - * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (deletedBlockLog != null) { - deletedBlockLog.close(); - } - blockDeletingService.shutdown(); - if (mxBean != null) { - MBeans.unregister(mxBean); - mxBean = null; - } - } - - @Override - public int getOpenContainersNo() { - return 0; - // TODO : FIX ME : The open container being a single number does not make - // sense. - // We have to get open containers by Replication Type and Replication - // factor. Hence returning 0 for now. - // containers.get(HddsProtos.LifeCycleState.OPEN).size(); - } - - @Override - public SCMBlockDeletingService getSCMBlockDeletingService() { - return this.blockDeletingService; - } - - @Override - public void setSafeModeStatus(boolean safeModeStatus) { - this.safeModePrecheck.setInSafeMode(safeModeStatus); - } - - /** - * Returns status of scm safe mode determined by SAFE_MODE_STATUS event. - * */ - public boolean isScmInSafeMode() { - return this.safeModePrecheck.isInSafeMode(); - } - - /** - * Get class logger. - * */ - public static Logger getLogger() { - return LOG; - } - - /** - * This class uses system current time milliseconds to generate unique id. - */ -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java deleted file mode 100644 index 23c6983083e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - - -/** - * JMX interface for the block manager. - */ -public interface BlockmanagerMXBean { - - /** - * Number of open containers manager by the block manager. 
- */ - int getOpenContainersNo(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java deleted file mode 100644 index ce65a70e168..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.collect.ArrayListMultimap; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.scm.container.ContainerReplica; - -/** - * A wrapper class to hold info about datanode and all deleted block - * transactions that will be sent to this datanode. - */ -public class DatanodeDeletedBlockTransactions { - private int nodeNum; - // The throttle size for each datanode. - private int maximumAllowedTXNum; - // Current counter of inserted TX. - private int currentTXNum; - private ContainerManager containerManager; - // A list of TXs mapped to a certain datanode ID. 
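The fields above (maximumAllowedTXNum, currentTXNum and the per-datanode transaction multimap) implement a per-datanode throttle for staged delete transactions. The same bookkeeping as a self-contained sketch in plain java.util types, with illustrative names rather than the removed class:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import java.util.UUID;

  final class DeleteTxThrottleSketch {
    private final Map<UUID, List<Long>> txsPerNode = new HashMap<>();
    private final int maxPerNode;   // throttle per datanode
    private final int nodeCount;    // number of healthy datanodes in this scan
    private int total;              // counter of staged transactions

    DeleteTxThrottleSketch(int maxPerNode, int nodeCount) {
      this.maxPerNode = maxPerNode;
      this.nodeCount = nodeCount;
    }

    /** Stage one transaction for a datanode unless that node already holds its quota. */
    boolean add(UUID datanode, long txId) {
      List<Long> txs = txsPerNode.computeIfAbsent(datanode, k -> new ArrayList<>());
      if (txs.size() >= maxPerNode) {
        return false;
      }
      txs.add(txId);
      total++;
      return true;
    }

    /** The scan is full once every node could be filled to its limit. */
    boolean isFull() {
      return total >= maxPerNode * nodeCount;
    }
  }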
- private final ArrayListMultimap - transactions; - - DatanodeDeletedBlockTransactions(ContainerManager containerManager, - int maximumAllowedTXNum, int nodeNum) { - this.transactions = ArrayListMultimap.create(); - this.containerManager = containerManager; - this.maximumAllowedTXNum = maximumAllowedTXNum; - this.nodeNum = nodeNum; - } - - public boolean addTransaction(DeletedBlocksTransaction tx, - Set dnsWithTransactionCommitted) { - try { - boolean success = false; - final ContainerID id = ContainerID.valueof(tx.getContainerID()); - final ContainerInfo container = containerManager.getContainer(id); - final Set replicas = containerManager - .getContainerReplicas(id); - if (!container.isOpen()) { - for (ContainerReplica replica : replicas) { - UUID dnID = replica.getDatanodeDetails().getUuid(); - if (dnsWithTransactionCommitted == null || - !dnsWithTransactionCommitted.contains(dnID)) { - // Transaction need not be sent to dns which have - // already committed it - success = addTransactionToDN(dnID, tx); - } - } - } - return success; - } catch (IOException e) { - SCMBlockDeletingService.LOG.warn("Got container info error.", e); - return false; - } - } - - private boolean addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { - if (transactions.containsKey(dnID)) { - List txs = transactions.get(dnID); - if (txs != null && txs.size() < maximumAllowedTXNum) { - boolean hasContained = false; - for (DeletedBlocksTransaction t : txs) { - if (t.getContainerID() == tx.getContainerID()) { - hasContained = true; - break; - } - } - - if (!hasContained) { - txs.add(tx); - currentTXNum++; - return true; - } - } - } else { - currentTXNum++; - transactions.put(dnID, tx); - return true; - } - SCMBlockDeletingService.LOG - .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID()); - return false; - } - - Set getDatanodeIDs() { - return transactions.keySet(); - } - - boolean isEmpty() { - return transactions.isEmpty(); - } - - boolean hasTransactions(UUID dnId) { - return transactions.containsKey(dnId) && - !transactions.get(dnId).isEmpty(); - } - - List getDatanodeTransactions(UUID dnId) { - return transactions.get(dnId); - } - - List getTransactionIDList(UUID dnId) { - if (hasTransactions(dnId)) { - return transactions.get(dnId).stream() - .map(DeletedBlocksTransaction::getTxID).map(String::valueOf) - .collect(Collectors.toList()); - } else { - return Collections.emptyList(); - } - } - - boolean isFull() { - return currentTXNum >= maximumAllowedTXNum * nodeNum; - } - - int getTXNum() { - return currentTXNum; - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java deleted file mode 100644 index db6c1c5dda2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -/** - * The DeletedBlockLog is a persisted log in SCM to keep tracking - * container blocks which are under deletion. It maintains info - * about under-deletion container blocks that notified by OM, - * and the state how it is processed. - */ -public interface DeletedBlockLog extends Closeable { - - /** - * Scan entire log once and returns TXs to DatanodeDeletedBlockTransactions. - * Once DatanodeDeletedBlockTransactions is full, the scan behavior will - * stop. - * @param transactions a list of TXs will be set into. - * @return Mapping from containerId to latest transactionId for the container. - * @throws IOException - */ - Map getTransactions(DatanodeDeletedBlockTransactions transactions) - throws IOException; - - /** - * Return all failed transactions in the log. A transaction is considered - * to be failed if it has been sent more than MAX_RETRY limit and its - * count is reset to -1. - * - * @return a list of failed deleted block transactions. - * @throws IOException - */ - List getFailedTransactions() - throws IOException; - - /** - * Increments count for given list of transactions by 1. - * The log maintains a valid range of counts for each transaction - * [0, MAX_RETRY]. If exceed this range, resets it to -1 to indicate - * the transaction is no longer valid. - * - * @param txIDs - transaction ID. - */ - void incrementCount(List txIDs) - throws IOException; - - /** - * Commits a transaction means to delete all footprints of a transaction - * from the log. This method doesn't guarantee all transactions can be - * successfully deleted, it tolerate failures and tries best efforts to. - * @param transactionResults - delete block transaction results. - * @param dnID - ID of datanode which acknowledges the delete block command. - */ - void commitTransactions(List transactionResults, - UUID dnID); - - /** - * Creates a block deletion transaction and adds that into the log. - * - * @param containerID - container ID. - * @param blocks - blocks that belong to the same container. - * - * @throws IOException - */ - void addTransaction(long containerID, List blocks) - throws IOException; - - /** - * Creates block deletion transactions for a set of containers, - * add into the log and persist them atomically. An object key - * might be stored in multiple containers and multiple blocks, - * this API ensures that these updates are done in atomic manner - * so if any of them fails, the entire operation fails without - * any updates to the log. Note, this doesn't mean to create only - * one transaction, it creates multiple transactions (depends on the - * number of containers) together (on success) or non (on failure). - * - * @param containerBlocksMap a map of containerBlocks. 
- * @throws IOException - */ - void addTransactions(Map> containerBlocksMap) - throws IOException; - - /** - * Returns the total number of valid transactions. A transaction is - * considered to be valid as long as its count is in range [0, MAX_RETRY]. - * - * @return number of a valid transactions. - * @throws IOException - */ - int getNumOfValidTransactions() throws IOException; -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java deleted file mode 100644 index 7c920ba6578..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ /dev/null @@ -1,366 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.collect.Lists; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.scm.command - .CommandStatusReportHandler.DeleteBlockStatus; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.eclipse.jetty.util.ConcurrentHashSet; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static java.lang.Math.min; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT; - -/** - * A implement class of {@link DeletedBlockLog}, and it uses - * K/V db to maintain block deletion transactions between scm and datanode. - * This is a very basic implementation, it simply scans the log and - * memorize the position that scanned by last time, and uses this to - * determine where the next scan starts. It has no notion about weight - * of each transaction so as long as transaction is still valid, they get - * equally same chance to be retrieved which only depends on the nature - * order of the transaction ID. 
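The DeletedBlockLog contract earlier in this patch keeps a retry count per transaction: it stays in [0, MAX_RETRY] while the transaction is retriable, incrementCount resets it to -1 once the limit is exceeded, and getFailedTransactions then reports it. That convention as a tiny standalone sketch (hypothetical names, not the removed implementation):

  final class DeleteTxRetryPolicy {
    private final int maxRetry;

    DeleteTxRetryPolicy(int maxRetry) {
      this.maxRetry = maxRetry;
    }

    /** New count after one more unsuccessful delivery attempt. */
    int onRetry(int currentCount) {
      if (currentCount < 0) {
        return -1;                          // already marked failed, stays failed
      }
      int next = currentCount + 1;
      return next > maxRetry ? -1 : next;   // exceeding the limit marks it failed
    }

    boolean isFailed(int count) {
      return count == -1;                   // what getFailedTransactions collects
    }

    boolean isValid(int count) {
      return count >= 0 && count <= maxRetry; // the "valid" range from the contract
    }
  }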
- */ -public class DeletedBlockLogImpl - implements DeletedBlockLog, EventHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(DeletedBlockLogImpl.class); - - private final int maxRetry; - private final ContainerManager containerManager; - private final SCMMetadataStore scmMetadataStore; - private final Lock lock; - // Maps txId to set of DNs which are successful in committing the transaction - private Map> transactionToDNsCommitMap; - - public DeletedBlockLogImpl(Configuration conf, - ContainerManager containerManager, - SCMMetadataStore scmMetadataStore) { - maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, - OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT); - this.containerManager = containerManager; - this.scmMetadataStore = scmMetadataStore; - this.lock = new ReentrantLock(); - - // transactionToDNsCommitMap is updated only when - // transaction is added to the log and when it is removed. - - // maps transaction to dns which have committed it. - transactionToDNsCommitMap = new ConcurrentHashMap<>(); - } - - - @Override - public List getFailedTransactions() - throws IOException { - lock.lock(); - try { - final List failedTXs = Lists.newArrayList(); - try (TableIterator> iter = - scmMetadataStore.getDeletedBlocksTXTable().iterator()) { - while (iter.hasNext()) { - DeletedBlocksTransaction delTX = iter.next().getValue(); - if (delTX.getCount() == -1) { - failedTXs.add(delTX); - } - } - } - return failedTXs; - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - * - * @param txIDs - transaction ID. - * @throws IOException - */ - @Override - public void incrementCount(List txIDs) throws IOException { - for (Long txID : txIDs) { - lock.lock(); - try { - DeletedBlocksTransaction block = - scmMetadataStore.getDeletedBlocksTXTable().get(txID); - if (block == null) { - // Should we make this an error ? How can we not find the deleted - // TXID? - LOG.warn("Deleted TXID not found."); - continue; - } - DeletedBlocksTransaction.Builder builder = block.toBuilder(); - int currentCount = block.getCount(); - if (currentCount > -1) { - builder.setCount(++currentCount); - } - // if the retry time exceeds the maxRetry value - // then set the retry value to -1, stop retrying, admins can - // analyze those blocks and purge them manually by SCMCli. - if (currentCount > maxRetry) { - builder.setCount(-1); - } - scmMetadataStore.getDeletedBlocksTXTable().put(txID, - builder.build()); - } catch (IOException ex) { - LOG.warn("Cannot increase count for txID " + txID, ex); - // We do not throw error here, since we don't want to abort the loop. - // Just log and continue processing the rest of txids. - } finally { - lock.unlock(); - } - } - } - - - private DeletedBlocksTransaction constructNewTransaction(long txID, - long containerID, - List blocks) { - return DeletedBlocksTransaction.newBuilder() - .setTxID(txID) - .setContainerID(containerID) - .addAllLocalID(blocks) - .setCount(0) - .build(); - } - - /** - * {@inheritDoc} - * - * @param transactionResults - transaction IDs. - * @param dnID - Id of Datanode which has acknowledged - * a delete block command. - * @throws IOException - */ - @Override - public void commitTransactions( - List transactionResults, UUID dnID) { - lock.lock(); - try { - Set dnsWithCommittedTxn; - for (DeleteBlockTransactionResult transactionResult : - transactionResults) { - if (isTransactionFailed(transactionResult)) { - continue; - } - try { - long txID = transactionResult.getTxID(); - // set of dns which have successfully committed transaction txId. 
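In commitTransactions below, a log entry is purged only once enough datanodes have acknowledged the delete: at least replication-factor acks, and every datanode currently holding a replica must be among them. That purge condition as a small standalone predicate (hypothetical class and parameter names):

  import java.util.Set;
  import java.util.UUID;

  final class DeleteTxPurgeCheck {
    /** True once the transaction may be removed from the deleted-blocks log. */
    static boolean canPurge(Set<UUID> currentReplicaDns,
                            Set<UUID> committedDns,
                            int replicationFactor) {
      int acked = Math.min(currentReplicaDns.size(), committedDns.size());
      return acked >= replicationFactor
          && committedDns.containsAll(currentReplicaDns);
    }
  }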
- dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID); - final ContainerID containerId = ContainerID.valueof( - transactionResult.getContainerID()); - if (dnsWithCommittedTxn == null) { - LOG.warn("Transaction txId={} commit by dnId={} for containerID={} " - + "failed. Corresponding entry not found.", txID, dnID, - containerId); - return; - } - - dnsWithCommittedTxn.add(dnID); - final ContainerInfo container = - containerManager.getContainer(containerId); - final Set replicas = - containerManager.getContainerReplicas(containerId); - // The delete entry can be safely removed from the log if all the - // corresponding nodes commit the txn. It is required to check that - // the nodes returned in the pipeline match the replication factor. - if (min(replicas.size(), dnsWithCommittedTxn.size()) - >= container.getReplicationFactor().getNumber()) { - List containerDns = replicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .map(DatanodeDetails::getUuid) - .collect(Collectors.toList()); - if (dnsWithCommittedTxn.containsAll(containerDns)) { - transactionToDNsCommitMap.remove(txID); - LOG.debug("Purging txId={} from block deletion log", txID); - scmMetadataStore.getDeletedBlocksTXTable().delete(txID); - } - } - LOG.debug("Datanode txId={} containerId={} committed by dnId={}", - txID, containerId, dnID); - } catch (IOException e) { - LOG.warn("Could not commit delete block transaction: " + - transactionResult.getTxID(), e); - } - } - } finally { - lock.unlock(); - } - } - - private boolean isTransactionFailed(DeleteBlockTransactionResult result) { - if (LOG.isDebugEnabled()) { - LOG.debug( - "Got block deletion ACK from datanode, TXIDs={}, " + "success={}", - result.getTxID(), result.getSuccess()); - } - if (!result.getSuccess()) { - LOG.warn("Got failed ACK for TXID={}, prepare to resend the " - + "TX in next interval", result.getTxID()); - return true; - } - return false; - } - - /** - * {@inheritDoc} - * - * @param containerID - container ID. - * @param blocks - blocks that belong to the same container. - * @throws IOException - */ - @Override - public void addTransaction(long containerID, List blocks) - throws IOException { - lock.lock(); - try { - Long nextTXID = scmMetadataStore.getNextDeleteBlockTXID(); - DeletedBlocksTransaction tx = - constructNewTransaction(nextTXID, containerID, blocks); - scmMetadataStore.getDeletedBlocksTXTable().put(nextTXID, tx); - } finally { - lock.unlock(); - } - } - - @Override - public int getNumOfValidTransactions() throws IOException { - lock.lock(); - try { - final AtomicInteger num = new AtomicInteger(0); - try (TableIterator> iter = - scmMetadataStore.getDeletedBlocksTXTable().iterator()) { - while (iter.hasNext()) { - DeletedBlocksTransaction delTX = iter.next().getValue(); - if (delTX.getCount() > -1) { - num.incrementAndGet(); - } - } - } - return num.get(); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} - * - * @param containerBlocksMap a map of containerBlocks. 
- * @throws IOException - */ - @Override - public void addTransactions(Map> containerBlocksMap) - throws IOException { - lock.lock(); - try { - BatchOperation batch = scmMetadataStore.getStore().initBatchOperation(); - for (Map.Entry> entry : containerBlocksMap.entrySet()) { - long nextTXID = scmMetadataStore.getNextDeleteBlockTXID(); - DeletedBlocksTransaction tx = constructNewTransaction(nextTXID, - entry.getKey(), entry.getValue()); - scmMetadataStore.getDeletedBlocksTXTable().putWithBatch(batch, - nextTXID, tx); - } - scmMetadataStore.getStore().commitBatchOperation(batch); - } finally { - lock.unlock(); - } - } - - @Override - public void close() throws IOException { - } - - @Override - public Map getTransactions( - DatanodeDeletedBlockTransactions transactions) throws IOException { - lock.lock(); - try { - Map deleteTransactionMap = new HashMap<>(); - try (TableIterator> iter = - scmMetadataStore.getDeletedBlocksTXTable().iterator()) { - while (iter.hasNext()) { - Table.KeyValue keyValue = - iter.next(); - DeletedBlocksTransaction block = keyValue.getValue(); - if (block.getCount() > -1 && block.getCount() <= maxRetry) { - if (transactions.addTransaction(block, - transactionToDNsCommitMap.get(block.getTxID()))) { - deleteTransactionMap.put(block.getContainerID(), - block.getTxID()); - transactionToDNsCommitMap - .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>()); - } - } - } - } - return deleteTransactionMap; - } finally { - lock.unlock(); - } - } - - @Override - public void onMessage(DeleteBlockStatus deleteBlockStatus, - EventPublisher publisher) { - ContainerBlocksDeletionACKProto ackProto = - deleteBlockStatus.getCmdStatus().getBlockDeletionAck(); - commitTransactions(ackProto.getResultsList(), - UUID.fromString(ackProto.getDnId())); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java deleted file mode 100644 index 4090f6bb873..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteHandler.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -/** - * Event handler for PedingDeleteStatuList events. - */ -public class PendingDeleteHandler implements - EventHandler { - - private SCMBlockDeletingService scmBlockDeletingService; - - public PendingDeleteHandler( - SCMBlockDeletingService scmBlockDeletingService) { - this.scmBlockDeletingService = scmBlockDeletingService; - } - - @Override - public void onMessage(PendingDeleteStatusList pendingDeleteStatusList, - EventPublisher publisher) { - scmBlockDeletingService.handlePendingDeletes(pendingDeleteStatusList); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java deleted file mode 100644 index ee64c488cd3..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/PendingDeleteStatusList.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; - -import java.util.ArrayList; -import java.util.List; - -/** - * Pending Deletes in the block space. - */ -public class PendingDeleteStatusList { - - private List pendingDeleteStatuses; - private DatanodeDetails datanodeDetails; - - public PendingDeleteStatusList(DatanodeDetails datanodeDetails) { - this.datanodeDetails = datanodeDetails; - pendingDeleteStatuses = new ArrayList<>(); - } - - public void addPendingDeleteStatus(long dnDeleteTransactionId, - long scmDeleteTransactionId, long containerId) { - pendingDeleteStatuses.add( - new PendingDeleteStatus(dnDeleteTransactionId, scmDeleteTransactionId, - containerId)); - } - - /** - * Status of pending deletes. - */ - public static class PendingDeleteStatus { - private long dnDeleteTransactionId; - private long scmDeleteTransactionId; - private long containerId; - - public PendingDeleteStatus(long dnDeleteTransactionId, - long scmDeleteTransactionId, long containerId) { - this.dnDeleteTransactionId = dnDeleteTransactionId; - this.scmDeleteTransactionId = scmDeleteTransactionId; - this.containerId = containerId; - } - - public long getDnDeleteTransactionId() { - return dnDeleteTransactionId; - } - - public long getScmDeleteTransactionId() { - return scmDeleteTransactionId; - } - - public long getContainerId() { - return containerId; - } - - } - - public List getPendingDeleteStatuses() { - return pendingDeleteStatuses; - } - - public int getNumPendingDeletes() { - return pendingDeleteStatuses.size(); - } - - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java deleted file mode 100644 index 74db22d6652..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.scm.block; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; - -/** - * A background service running in SCM to delete blocks. This service scans - * block deletion log in certain interval and caches block deletion commands - * in {@link org.apache.hadoop.hdds.scm.node.CommandQueue}, asynchronously - * SCM HB thread polls cached commands and sends them to datanode for physical - * processing. - */ -public class SCMBlockDeletingService extends BackgroundService { - - public static final Logger LOG = - LoggerFactory.getLogger(SCMBlockDeletingService.class); - - // ThreadPoolSize=2, 1 for scheduler and the other for the scanner. - private final static int BLOCK_DELETING_SERVICE_CORE_POOL_SIZE = 2; - private final DeletedBlockLog deletedBlockLog; - private final ContainerManager containerManager; - private final NodeManager nodeManager; - private final EventPublisher eventPublisher; - - // Block delete limit size is dynamically calculated based on container - // delete limit size (ozone.block.deleting.container.limit.per.interval) - // that configured for datanode. To ensure DN not wait for - // delete commands, we use this value multiply by a factor 2 as the final - // limit TX size for each node. - // Currently we implement a throttle algorithm that throttling delete blocks - // for each datanode. Each node is limited by the calculation size. Firstly - // current node info is fetched from nodemanager, then scan entire delLog - // from the beginning to end. If one node reaches maximum value, its records - // will be skipped. If not, keep scanning until it reaches maximum value. - // Once all node are full, the scan behavior will stop. 
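The sizing comment above can be made concrete with a worked example: the datanode-side container delete limit (ozone.block.deleting.container.limit.per.interval) is doubled to get the per-datanode transaction limit, and a scan stops once every healthy datanode could be filled to that limit. With illustrative numbers:

  public final class BlockDeleteLimitExample {
    public static void main(String[] args) {
      int containerLimitPerInterval = 10;                 // ozone.block.deleting.container.limit.per.interval
      int perNodeTxLimit = containerLimitPerInterval * 2; // doubled so datanodes never sit idle
      int healthyDatanodes = 5;                           // example cluster size
      int scanCap = perNodeTxLimit * healthyDatanodes;    // point at which the scan stops

      System.out.println("per-datanode limit = " + perNodeTxLimit); // 20
      System.out.println("per-scan cap       = " + scanCap);        // 100
    }
  }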
- private int blockDeleteLimitSize; - - public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog, - ContainerManager containerManager, NodeManager nodeManager, - EventPublisher eventPublisher, long interval, long serviceTimeout, - Configuration conf) { - super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS, - BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); - this.deletedBlockLog = deletedBlockLog; - this.containerManager = containerManager; - this.nodeManager = nodeManager; - this.eventPublisher = eventPublisher; - - int containerLimit = conf.getInt( - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, - OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT); - Preconditions.checkArgument(containerLimit > 0, - "Container limit size should be " + "positive."); - // Use container limit value multiply by a factor 2 to ensure DN - // not wait for orders. - this.blockDeleteLimitSize = containerLimit * 2; - } - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new DeletedBlockTransactionScanner()); - return queue; - } - - public void handlePendingDeletes(PendingDeleteStatusList deletionStatusList) { - DatanodeDetails dnDetails = deletionStatusList.getDatanodeDetails(); - for (PendingDeleteStatusList.PendingDeleteStatus deletionStatus : - deletionStatusList.getPendingDeleteStatuses()) { - LOG.info( - "Block deletion txnID mismatch in datanode {} for containerID {}." - + " Datanode delete txnID: {}, SCM txnID: {}", - dnDetails.getUuid(), deletionStatus.getContainerId(), - deletionStatus.getDnDeleteTransactionId(), - deletionStatus.getScmDeleteTransactionId()); - } - } - - private class DeletedBlockTransactionScanner - implements BackgroundTask { - - @Override - public int getPriority() { - return 1; - } - - @Override - public EmptyTaskResult call() throws Exception { - int dnTxCount = 0; - long startTime = Time.monotonicNow(); - // Scan SCM DB in HB interval and collect a throttled list of - // to delete blocks. - LOG.debug("Running DeletedBlockTransactionScanner"); - DatanodeDeletedBlockTransactions transactions = null; - List datanodes = nodeManager.getNodes(NodeState.HEALTHY); - Map transactionMap = null; - if (datanodes != null) { - transactions = new DatanodeDeletedBlockTransactions(containerManager, - blockDeleteLimitSize, datanodes.size()); - try { - transactionMap = deletedBlockLog.getTransactions(transactions); - } catch (IOException e) { - // We may tolerant a number of failures for sometime - // but if it continues to fail, at some point we need to raise - // an exception and probably fail the SCM ? At present, it simply - // continues to retry the scanning. - LOG.error("Failed to get block deletion transactions from delTX log", - e); - } - LOG.debug("Scanned deleted blocks log and got {} delTX to process.", - transactions.getTXNum()); - } - - if (transactions != null && !transactions.isEmpty()) { - for (UUID dnId : transactions.getDatanodeIDs()) { - List dnTXs = transactions - .getDatanodeTransactions(dnId); - if (dnTXs != null && !dnTXs.isEmpty()) { - dnTxCount += dnTXs.size(); - // TODO commandQueue needs a cap. - // We should stop caching new commands if num of un-processed - // command is bigger than a limit, e.g 50. In case datanode goes - // offline for sometime, the cached commands be flooded. 
- eventPublisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, - new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs))); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Added delete block command for datanode {} in the queue," + - " number of delete block transactions: {}, TxID list: {}", - dnId, dnTXs.size(), String.join(",", - transactions.getTransactionIDList(dnId))); - } - } - } - containerManager.updateDeleteTransactionId(transactionMap); - } - - if (dnTxCount > 0) { - LOG.info( - "Totally added {} delete blocks command for" - + " {} datanodes, task elapsed time: {}ms", - dnTxCount, transactions.getDatanodeIDs().size(), - Time.monotonicNow() - startTime); - } - - return EmptyTaskResult.newResult(); - } - } - - @VisibleForTesting - public void setBlockDeleteTXNum(int numTXs) { - blockDeleteLimitSize = numTXs; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java deleted file mode 100644 index e1bfdff5063..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; -/** - * This package contains routines to manage the block location and - * mapping inside SCM - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java deleted file mode 100644 index e9098650893..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/CommandStatusReportHandler.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.command; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Handles CommandStatusReports from datanode. - */ -public class CommandStatusReportHandler implements - EventHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(CommandStatusReportHandler.class); - - @Override - public void onMessage(CommandStatusReportFromDatanode report, - EventPublisher publisher) { - Preconditions.checkNotNull(report); - List cmdStatusList = report.getReport().getCmdStatusList(); - Preconditions.checkNotNull(cmdStatusList); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Processing command status report for dn: {}", report - .getDatanodeDetails()); - } - - // Route command status to its watchers. - cmdStatusList.forEach(cmdStatus -> { - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Emitting command status for id:{} type: {}", cmdStatus - .getCmdId(), cmdStatus.getType()); - } - if (cmdStatus.getType() == SCMCommandProto.Type.deleteBlocksCommand) { - if (cmdStatus.getStatus() == CommandStatus.Status.EXECUTED) { - publisher.fireEvent(SCMEvents.DELETE_BLOCK_STATUS, - new DeleteBlockStatus(cmdStatus)); - } - } else { - LOGGER.debug("CommandStatus of type:{} not handled in " + - "CommandStatusReportHandler.", cmdStatus.getType()); - } - }); - } - - /** - * Wrapper event for CommandStatus. - */ - public static class CommandStatusEvent implements IdentifiableEventPayload { - private CommandStatus cmdStatus; - - CommandStatusEvent(CommandStatus cmdStatus) { - this.cmdStatus = cmdStatus; - } - - public CommandStatus getCmdStatus() { - return cmdStatus; - } - - @Override - public String toString() { - return "CommandStatusEvent:" + cmdStatus.toString(); - } - - @Override - public long getId() { - return cmdStatus.getCmdId(); - } - } - - /** - * Wrapper event for DeleteBlock Command. - */ - public static class DeleteBlockStatus extends CommandStatusEvent { - public DeleteBlockStatus(CommandStatus cmdStatus) { - super(cmdStatus); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java deleted file mode 100644 index ba17fb9eeaa..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/command/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - *

- * This package contains HDDS protocol related classes. - */ - -/** - * This package contains HDDS protocol related classes. - */ -package org.apache.hadoop.hdds.scm.command; -/* - * Classes related to commands issued from SCM to DataNode. - * */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java deleted file mode 100644 index 59be36b0d2b..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.slf4j.Logger; - -import java.io.IOException; -import java.util.UUID; -import java.util.function.Supplier; - -/** - * Base class for all the container report handlers. - */ -public class AbstractContainerReportHandler { - - private final ContainerManager containerManager; - private final Logger logger; - - /** - * Constructs AbstractContainerReportHandler instance with the - * given ContainerManager instance. - * - * @param containerManager ContainerManager - * @param logger Logger to be used for logging - */ - AbstractContainerReportHandler(final ContainerManager containerManager, - final Logger logger) { - Preconditions.checkNotNull(containerManager); - Preconditions.checkNotNull(logger); - this.containerManager = containerManager; - this.logger = logger; - } - - /** - * Process the given ContainerReplica received from specified datanode. 
- * - * @param datanodeDetails DatanodeDetails of the node which reported - * this replica - * @param replicaProto ContainerReplica - * - * @throws IOException In case of any Exception while processing the report - */ - void processContainerReplica(final DatanodeDetails datanodeDetails, - final ContainerReplicaProto replicaProto) - throws IOException { - final ContainerID containerId = ContainerID - .valueof(replicaProto.getContainerID()); - final ContainerReplica replica = ContainerReplica.newBuilder() - .setContainerID(containerId) - .setContainerState(replicaProto.getState()) - .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(UUID.fromString(replicaProto.getOriginNodeId())) - .setSequenceId(replicaProto.getBlockCommitSequenceId()) - .build(); - - if (logger.isDebugEnabled()) { - logger.debug("Processing replica of container {} from datanode {}", - containerId, datanodeDetails); - } - // Synchronized block should be replaced by container lock, - // once we have introduced lock inside ContainerInfo. - synchronized (containerManager.getContainer(containerId)) { - updateContainerStats(containerId, replicaProto); - updateContainerState(datanodeDetails, containerId, replica); - containerManager.updateContainerReplica(containerId, replica); - } - } - - /** - * Update the container stats if it's lagging behind the stats in reported - * replica. - * - * @param containerId ID of the container - * @param replicaProto Container Replica information - * @throws ContainerNotFoundException If the container is not present - */ - private void updateContainerStats(final ContainerID containerId, - final ContainerReplicaProto replicaProto) - throws ContainerNotFoundException { - - if (!isUnhealthy(replicaProto::getState)) { - final ContainerInfo containerInfo = containerManager - .getContainer(containerId); - - if (containerInfo.getSequenceId() < - replicaProto.getBlockCommitSequenceId()) { - containerInfo.updateSequenceId( - replicaProto.getBlockCommitSequenceId()); - } - if (containerInfo.getUsedBytes() < replicaProto.getUsed()) { - containerInfo.setUsedBytes(replicaProto.getUsed()); - } - if (containerInfo.getNumberOfKeys() < replicaProto.getKeyCount()) { - containerInfo.setNumberOfKeys(replicaProto.getKeyCount()); - } - } - } - - /** - * Updates the container state based on the given replica state. - * - * @param datanode Datanode from which the report is received - * @param containerId ID of the container - * @param replica ContainerReplica - * @throws IOException In case of Exception - */ - private void updateContainerState(final DatanodeDetails datanode, - final ContainerID containerId, - final ContainerReplica replica) - throws IOException { - - final ContainerInfo container = containerManager - .getContainer(containerId); - - switch (container.getState()) { - case OPEN: - /* - * If the state of a container is OPEN, datanodes cannot report - * any other state. - */ - if (replica.getState() != State.OPEN) { - logger.warn("Container {} is in OPEN state, but the datanode {} " + - "reports an {} replica.", containerId, - datanode, replica.getState()); - // Should we take some action? - } - break; - case CLOSING: - /* - * When the container is in CLOSING state the replicas can be in any - * of the following states: - * - * - OPEN - * - CLOSING - * - QUASI_CLOSED - * - CLOSED - * - * If all the replica are either in OPEN or CLOSING state, do nothing. - * - * If the replica is in QUASI_CLOSED state, move the container to - * QUASI_CLOSED state. 
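A side note on the stat update performed in updateContainerStats above: it is deliberately monotonic, raising SCM's recorded sequence id, used bytes and key count toward the largest value any healthy replica has reported, and never lowering them on a stale report. A minimal standalone sketch of that merge rule (placeholder names, not the real ContainerInfo API):

    // Simplified max-merge of reported replica stats; placeholder types only.
    final class ContainerStats {
      long sequenceId;
      long usedBytes;
      long keyCount;

      /** Raise local stats toward the values reported by a healthy replica. */
      void mergeFromReplica(long reportedSeqId, long reportedUsed, long reportedKeys) {
        sequenceId = Math.max(sequenceId, reportedSeqId);
        usedBytes = Math.max(usedBytes, reportedUsed);
        keyCount = Math.max(keyCount, reportedKeys);
      }
    }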
- * - * If the replica is in CLOSED state, mark the container as CLOSED. - * - */ - - if (replica.getState() == State.QUASI_CLOSED) { - logger.info("Moving container {} to QUASI_CLOSED state, datanode {} " + - "reported QUASI_CLOSED replica.", containerId, datanode); - containerManager.updateContainerState(containerId, - LifeCycleEvent.QUASI_CLOSE); - } - - if (replica.getState() == State.CLOSED) { - logger.info("Moving container {} to CLOSED state, datanode {} " + - "reported CLOSED replica.", containerId, datanode); - Preconditions.checkArgument(replica.getSequenceId() - == container.getSequenceId()); - containerManager.updateContainerState(containerId, - LifeCycleEvent.CLOSE); - } - - break; - case QUASI_CLOSED: - /* - * The container is in QUASI_CLOSED state, this means that at least - * one of the replica was QUASI_CLOSED. - * - * Now replicas can be in any of the following state. - * - * 1. OPEN - * 2. CLOSING - * 3. QUASI_CLOSED - * 4. CLOSED - * - * If at least one of the replica is in CLOSED state, mark the - * container as CLOSED. - * - */ - if (replica.getState() == State.CLOSED) { - logger.info("Moving container {} to CLOSED state, datanode {} " + - "reported CLOSED replica.", containerId, datanode); - Preconditions.checkArgument(replica.getSequenceId() - == container.getSequenceId()); - containerManager.updateContainerState(containerId, - LifeCycleEvent.FORCE_CLOSE); - } - break; - case CLOSED: - /* - * The container is already in closed state. do nothing. - */ - break; - case DELETING: - throw new UnsupportedOperationException( - "Unsupported container state 'DELETING'."); - case DELETED: - throw new UnsupportedOperationException( - "Unsupported container state 'DELETED'."); - default: - break; - } - } - - /** - * Returns true if the container replica is not marked as UNHEALTHY. - * - * @param replicaState State of the container replica. - * @return true if unhealthy, false otherwise - */ - private boolean isUnhealthy(final Supplier replicaState) { - return replicaState.get() == ContainerReplicaProto.State.UNHEALTHY; - } - - /** - * Return ContainerManager. - * @return {@link ContainerManager} - */ - protected ContainerManager getContainerManager() { - return containerManager; - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java deleted file mode 100644 index fd73711003b..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; - -/** - * In case of a node failure, volume failure, volume out of spapce, node - * out of space etc, CLOSE_CONTAINER will be triggered. - * CloseContainerEventHandler is the handler for CLOSE_CONTAINER. - * When a close container event is fired, a close command for the container - * should be sent to all the datanodes in the pipeline and containerStateManager - * needs to update the container state to Closing. - */ -public class CloseContainerEventHandler implements EventHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(CloseContainerEventHandler.class); - - private final PipelineManager pipelineManager; - private final ContainerManager containerManager; - - public CloseContainerEventHandler(final PipelineManager pipelineManager, - final ContainerManager containerManager) { - this.pipelineManager = pipelineManager; - this.containerManager = containerManager; - } - - @Override - public void onMessage(ContainerID containerID, EventPublisher publisher) { - LOG.info("Close container Event triggered for container : {}", containerID); - try { - // If the container is in OPEN state, FINALIZE it. - if (containerManager.getContainer(containerID).getState() - == LifeCycleState.OPEN) { - containerManager.updateContainerState( - containerID, LifeCycleEvent.FINALIZE); - } - - // ContainerInfo has to read again after the above state change. - final ContainerInfo container = containerManager - .getContainer(containerID); - // Send close command to datanodes, if the container is in CLOSING state - if (container.getState() == LifeCycleState.CLOSING) { - - final CloseContainerCommand closeContainerCommand = - new CloseContainerCommand( - containerID.getId(), container.getPipelineID()); - - getNodes(container).forEach(node -> publisher.fireEvent( - DATANODE_COMMAND, - new CommandForDatanode<>(node.getUuid(), closeContainerCommand))); - } else { - LOG.warn("Cannot close container {}, which is in {} state.", - containerID, container.getState()); - } - - } catch (IOException ex) { - LOG.error("Failed to close the container {}.", containerID, ex); - } - } - - /** - * Returns the list of Datanodes where this container lives. 
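The close flow in onMessage above is two-phased: an OPEN container is first FINALIZEd into CLOSING, and only a container that is actually in CLOSING has close commands fanned out to its datanodes. A rough, self-contained sketch of that decision, using simplified stand-in interfaces rather than the real SCM types:

    // Hypothetical two-phase close, mirroring the handler above: finalize
    // first, then send close commands only once the container is CLOSING.
    final class CloseFlowSketch {
      enum State { OPEN, CLOSING, CLOSED }

      interface ContainerStore {
        State getState(long containerId);
        void finalizeContainer(long containerId);        // OPEN -> CLOSING
        java.util.List<String> getDatanodes(long containerId);
      }

      interface CommandSender {
        void sendCloseCommand(String datanode, long containerId);
      }

      static void requestClose(long id, ContainerStore store, CommandSender sender) {
        if (store.getState(id) == State.OPEN) {
          store.finalizeContainer(id);
        }
        if (store.getState(id) == State.CLOSING) {
          for (String dn : store.getDatanodes(id)) {
            sender.sendCloseCommand(dn, id);             // one command per replica
          }
        }
        // Any other state: nothing to send, the container is already closed.
      }
    }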
- * - * @param container ContainerInfo - * @return list of DatanodeDetails - * @throws ContainerNotFoundException - */ - private List getNodes(final ContainerInfo container) - throws ContainerNotFoundException { - try { - return pipelineManager.getPipeline(container.getPipelineID()).getNodes(); - } catch (PipelineNotFoundException ex) { - // Use container replica if the pipeline is not available. - return containerManager.getContainerReplicas(container.containerID()) - .stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java deleted file mode 100644 index e79f268974c..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles container reports from datanode. - */ -public class ContainerActionsHandler implements - EventHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - ContainerActionsHandler.class); - - @Override - public void onMessage( - ContainerActionsFromDatanode containerReportFromDatanode, - EventPublisher publisher) { - DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails(); - for (ContainerAction action : containerReportFromDatanode.getReport() - .getContainerActionsList()) { - ContainerID containerId = ContainerID.valueof(action.getContainerID()); - switch (action.getAction()) { - case CLOSE: - if (LOG.isDebugEnabled()) { - LOG.debug("Closing container {} in datanode {} because the" + - " container is {}.", containerId, dd, action.getReason()); - } - publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId); - break; - default: - LOG.warn("Invalid action {} with reason {}, from datanode {}. 
", - action.getAction(), action.getReason(), dd); } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java deleted file mode 100644 index f9488e222eb..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; - -// TODO: Write extensive java doc. -// This is the main interface of ContainerManager. -/** - * ContainerManager class contains the mapping from a name to a pipeline - * mapping. This is used by SCM when allocating new locations and when - * looking up a key. - */ -public interface ContainerManager extends Closeable { - - - /** - * Returns all the container Ids managed by ContainerManager. - * - * @return Set of ContainerID - */ - Set getContainerIDs(); - - /** - * Returns all the containers managed by ContainerManager. - * - * @return List of ContainerInfo - */ - List getContainers(); - - /** - * Returns all the containers which are in the specified state. - * - * @return List of ContainerInfo - */ - List getContainers(HddsProtos.LifeCycleState state); - - /** - * Returns number of containers in the given, - * {@link org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState}. - * - * @return Number of containers - */ - Integer getContainerCountByState(HddsProtos.LifeCycleState state); - - /** - * Returns the ContainerInfo from the container ID. - * - * @param containerID - ID of container. - * @return - ContainerInfo such as creation state and the pipeline. - * @throws IOException - */ - ContainerInfo getContainer(ContainerID containerID) - throws ContainerNotFoundException; - - /** - * Returns containers under certain conditions. - * Search container IDs from start ID(exclusive), - * The max size of the searching range cannot exceed the - * value of count. - * - * @param startContainerID start containerID, >=0, - * start searching at the head if 0. - * @param count count must be >= 0 - * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big. - * - * @return a list of container. - * @throws IOException - */ - List listContainer(ContainerID startContainerID, int count); - - /** - * Allocates a new container for a given keyName and replication factor. - * - * @param replicationFactor - replication factor of the container. - * @param owner - * @return - ContainerInfo. - * @throws IOException - */ - ContainerInfo allocateContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, String owner) - throws IOException; - - /** - * Deletes a container from SCM. - * - * @param containerID - Container ID - * @throws IOException - */ - void deleteContainer(ContainerID containerID) throws IOException; - - /** - * Update container state. - * @param containerID - Container ID - * @param event - container life cycle event - * @return - new container state - * @throws IOException - */ - HddsProtos.LifeCycleState updateContainerState(ContainerID containerID, - HddsProtos.LifeCycleEvent event) throws IOException; - - /** - * Returns the latest list of replicas for given containerId. 
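For orientation, here is a sketch of one way a caller might drive the interface above to allocate a container and later finalize it. The RATIS/THREE replication settings and the "ozone" owner string are illustrative values only, and the sketch assumes it lives alongside the HDDS classes from this module:

    package org.apache.hadoop.hdds.scm.container;

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    /** Sketch of one way a caller might drive ContainerManager. */
    final class ContainerManagerUsage {
      static void allocateAndFinalize(ContainerManager manager) throws IOException {
        // Ask SCM for a new container; replication settings are examples only.
        ContainerInfo info = manager.allocateContainer(
            HddsProtos.ReplicationType.RATIS,
            HddsProtos.ReplicationFactor.THREE,
            "ozone");

        ContainerID id = ContainerID.valueof(info.getContainerID());

        // Later, move the container out of OPEN by firing the FINALIZE event.
        manager.updateContainerState(id, HddsProtos.LifeCycleEvent.FINALIZE);
      }
    }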
- * - * @param containerID Container ID - * @return Set of ContainerReplica - */ - Set getContainerReplicas(ContainerID containerID) - throws ContainerNotFoundException; - - /** - * Adds a container Replica for the given Container. - * - * @param containerID Container ID - * @param replica ContainerReplica - */ - void updateContainerReplica(ContainerID containerID, ContainerReplica replica) - throws ContainerNotFoundException; - - /** - * Remove a container Replica form a given Container. - * - * @param containerID Container ID - * @param replica ContainerReplica - * @return True of dataNode is removed successfully else false. - */ - void removeContainerReplica(ContainerID containerID, ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException; - - /** - * Update deleteTransactionId according to deleteTransactionMap. - * - * @param deleteTransactionMap Maps the containerId to latest delete - * transaction id for the container. - * @throws IOException - */ - void updateDeleteTransactionId(Map deleteTransactionMap) - throws IOException; - - /** - * Returns ContainerInfo which matches the requirements. - * @param size - the amount of space required in the container - * @param owner - the user which requires space in its owned container - * @param pipeline - pipeline to which the container should belong - * @return ContainerInfo for the matching container. - */ - ContainerInfo getMatchingContainer(long size, String owner, - Pipeline pipeline); - - /** - * Returns ContainerInfo which matches the requirements. - * @param size - the amount of space required in the container - * @param owner - the user which requires space in its owned container - * @param pipeline - pipeline to which the container should belong. - * @param excludedContainerIDS - containerIds to be excluded. - * @return ContainerInfo for the matching container. - */ - ContainerInfo getMatchingContainer(long size, String owner, - Pipeline pipeline, List excludedContainerIDS); - - /** - * Once after report processor handler completes, call this to notify - * container manager to increment metrics. - * @param isFullReport - * @param success - */ - void notifyContainerReportProcessing(boolean isFullReport, boolean success); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java deleted file mode 100644 index 8bfcb848ecb..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplica.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.builder.CompareToBuilder; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; - -import java.util.Optional; -import java.util.UUID; - -/** - * In-memory state of a container replica. - */ -public final class ContainerReplica implements Comparable { - - final private ContainerID containerID; - final private ContainerReplicaProto.State state; - final private DatanodeDetails datanodeDetails; - final private UUID placeOfBirth; - - private Long sequenceId; - - - private ContainerReplica(final ContainerID containerID, - final ContainerReplicaProto.State state, final DatanodeDetails datanode, - final UUID originNodeId) { - this.containerID = containerID; - this.state = state; - this.datanodeDetails = datanode; - this.placeOfBirth = originNodeId; - } - - private void setSequenceId(Long seqId) { - sequenceId = seqId; - } - - /** - * Returns the DatanodeDetails to which this replica belongs. - * - * @return DatanodeDetails - */ - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - /** - * Returns the UUID of Datanode where this replica originated. - * - * @return UUID - */ - public UUID getOriginDatanodeId() { - return placeOfBirth; - } - - /** - * Returns the state of this replica. - * - * @return replica state - */ - public ContainerReplicaProto.State getState() { - return state; - } - - /** - * Returns the Sequence Id of this replica. - * - * @return Sequence Id - */ - public Long getSequenceId() { - return sequenceId; - } - - @Override - public int hashCode() { - return new HashCodeBuilder(61, 71) - .append(containerID) - .append(datanodeDetails) - .toHashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - final ContainerReplica that = (ContainerReplica) o; - - return new EqualsBuilder() - .append(containerID, that.containerID) - .append(datanodeDetails, that.datanodeDetails) - .isEquals(); - } - - @Override - public int compareTo(ContainerReplica that) { - Preconditions.checkNotNull(that); - return new CompareToBuilder() - .append(this.containerID, that.containerID) - .append(this.datanodeDetails, that.datanodeDetails) - .build(); - } - - /** - * Returns a new Builder to construct ContainerReplica. - * - * @return ContainerReplicaBuilder - */ - public static ContainerReplicaBuilder newBuilder() { - return new ContainerReplicaBuilder(); - } - - @Override - public String toString() { - return "ContainerReplica{" + - "containerID=" + containerID + - ", datanodeDetails=" + datanodeDetails + - ", placeOfBirth=" + placeOfBirth + - ", sequenceId=" + sequenceId + - '}'; - } - - /** - * Used for building ContainerReplica instance. - */ - public static class ContainerReplicaBuilder { - - private ContainerID containerID; - private ContainerReplicaProto.State state; - private DatanodeDetails datanode; - private UUID placeOfBirth; - private Long sequenceId; - - /** - * Set Container Id. 
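As a usage note, a replica record is assembled through the builder shown here; the origin node id defaults to the reporting datanode when it is not set. A hedged example, in which the datanodeDetails variable is assumed to be in scope (for instance taken from a report):

    // Illustrative builder usage; datanodeDetails is assumed to come from the
    // surrounding report-handling context.
    ContainerReplica replica = ContainerReplica.newBuilder()
        .setContainerID(ContainerID.valueof(1L))
        .setContainerState(ContainerReplicaProto.State.CLOSED)
        .setDatanodeDetails(datanodeDetails)
        .setOriginNodeId(datanodeDetails.getUuid())   // optional; defaults to this
        .setSequenceId(42L)
        .build();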
- * - * @param cID ContainerID - * @return ContainerReplicaBuilder - */ - public ContainerReplicaBuilder setContainerID( - final ContainerID cID) { - this.containerID = cID; - return this; - } - - public ContainerReplicaBuilder setContainerState( - final ContainerReplicaProto.State containerState) { - state = containerState; - return this; - } - - /** - * Set DatanodeDetails. - * - * @param datanodeDetails DatanodeDetails - * @return ContainerReplicaBuilder - */ - public ContainerReplicaBuilder setDatanodeDetails( - DatanodeDetails datanodeDetails) { - datanode = datanodeDetails; - return this; - } - - /** - * Set replica origin node id. - * - * @param originNodeId origin node UUID - * @return ContainerReplicaBuilder - */ - public ContainerReplicaBuilder setOriginNodeId(UUID originNodeId) { - placeOfBirth = originNodeId; - return this; - } - - /** - * Set sequence Id of the replica. - * - * @param seqId container sequence Id - * @return ContainerReplicaBuilder - */ - public ContainerReplicaBuilder setSequenceId(long seqId) { - sequenceId = seqId; - return this; - } - - /** - * Constructs new ContainerReplicaBuilder. - * - * @return ContainerReplicaBuilder - */ - public ContainerReplica build() { - Preconditions.checkNotNull(containerID, - "Container Id can't be null"); - Preconditions.checkNotNull(state, - "Container state can't be null"); - Preconditions.checkNotNull(datanode, - "DatanodeDetails can't be null"); - ContainerReplica replica = new ContainerReplica( - containerID, state, datanode, - Optional.ofNullable(placeOfBirth).orElse(datanode.getUuid())); - Optional.ofNullable(sequenceId).ifPresent(replica::setSequenceId); - return replica; - } - } - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java deleted file mode 100644 index 2227df61563..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ /dev/null @@ -1,201 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Handles container reports from datanode. - */ -public class ContainerReportHandler extends AbstractContainerReportHandler - implements EventHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerReportHandler.class); - - private final NodeManager nodeManager; - private final ContainerManager containerManager; - - /** - * Constructs ContainerReportHandler instance with the - * given NodeManager and ContainerManager instance. - * - * @param nodeManager NodeManager instance - * @param containerManager ContainerManager instance - */ - public ContainerReportHandler(final NodeManager nodeManager, - final ContainerManager containerManager) { - super(containerManager, LOG); - this.nodeManager = nodeManager; - this.containerManager = containerManager; - } - - /** - * Process the container reports from datanodes. - * - * @param reportFromDatanode Container Report - * @param publisher EventPublisher reference - */ - @Override - public void onMessage(final ContainerReportFromDatanode reportFromDatanode, - final EventPublisher publisher) { - - final DatanodeDetails datanodeDetails = - reportFromDatanode.getDatanodeDetails(); - final ContainerReportsProto containerReport = - reportFromDatanode.getReport(); - - try { - final List replicas = - containerReport.getReportsList(); - final Set containersInSCM = - nodeManager.getContainers(datanodeDetails); - - final Set containersInDn = replicas.parallelStream() - .map(ContainerReplicaProto::getContainerID) - .map(ContainerID::valueof).collect(Collectors.toSet()); - - final Set missingReplicas = new HashSet<>(containersInSCM); - missingReplicas.removeAll(containersInDn); - - processContainerReplicas(datanodeDetails, replicas); - processMissingReplicas(datanodeDetails, missingReplicas); - updateDeleteTransaction(datanodeDetails, replicas, publisher); - - /* - * Update the latest set of containers for this datanode in - * NodeManager - */ - nodeManager.setContainers(datanodeDetails, containersInDn); - - containerManager.notifyContainerReportProcessing(true, true); - } catch (NodeNotFoundException ex) { - containerManager.notifyContainerReportProcessing(true, false); - LOG.error("Received container report from unknown datanode {} {}", - datanodeDetails, ex); - } - - } - - /** - * Processes the ContainerReport. 
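The reconciliation step in onMessage above reduces to a set difference: the containers SCM expects on the datanode, minus the containers the datanode actually reported, are treated as missing replicas. A standalone sketch with plain long ids instead of ContainerID:

    // Self-contained sketch of the missing-replica computation; the real code
    // works on ContainerID sets from NodeManager and the datanode report.
    import java.util.HashSet;
    import java.util.Set;

    final class ReportReconciliation {
      static Set<Long> findMissingReplicas(Set<Long> expectedOnNode,
          Set<Long> reportedByNode) {
        Set<Long> missing = new HashSet<>(expectedOnNode);
        missing.removeAll(reportedByNode);
        return missing;
      }
    }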
- * - * @param datanodeDetails Datanode from which this report was received - * @param replicas list of ContainerReplicaProto - */ - private void processContainerReplicas(final DatanodeDetails datanodeDetails, - final List replicas) { - for (ContainerReplicaProto replicaProto : replicas) { - try { - processContainerReplica(datanodeDetails, replicaProto); - } catch (ContainerNotFoundException e) { - LOG.error("Received container report for an unknown container" + - " {} from datanode {}.", replicaProto.getContainerID(), - datanodeDetails, e); - } catch (IOException e) { - LOG.error("Exception while processing container report for container" + - " {} from datanode {}.", replicaProto.getContainerID(), - datanodeDetails, e); - } - } - } - - /** - * Process the missing replica on the given datanode. - * - * @param datanodeDetails DatanodeDetails - * @param missingReplicas ContainerID which are missing on the given datanode - */ - private void processMissingReplicas(final DatanodeDetails datanodeDetails, - final Set missingReplicas) { - for (ContainerID id : missingReplicas) { - try { - containerManager.getContainerReplicas(id).stream() - .filter(replica -> replica.getDatanodeDetails() - .equals(datanodeDetails)).findFirst() - .ifPresent(replica -> { - try { - containerManager.removeContainerReplica(id, replica); - } catch (ContainerNotFoundException | - ContainerReplicaNotFoundException ignored) { - // This should not happen, but even if it happens, not an issue - } - }); - } catch (ContainerNotFoundException e) { - LOG.warn("Cannot remove container replica, container {} not found.", - id, e); - } - } - } - - /** - * Updates the Delete Transaction Id for the given datanode. - * - * @param datanodeDetails DatanodeDetails - * @param replicas List of ContainerReplicaProto - * @param publisher EventPublisher reference - */ - private void updateDeleteTransaction(final DatanodeDetails datanodeDetails, - final List replicas, - final EventPublisher publisher) { - final PendingDeleteStatusList pendingDeleteStatusList = - new PendingDeleteStatusList(datanodeDetails); - for (ContainerReplicaProto replica : replicas) { - try { - final ContainerInfo containerInfo = containerManager.getContainer( - ContainerID.valueof(replica.getContainerID())); - if (containerInfo.getDeleteTransactionId() > - replica.getDeleteTransactionId()) { - pendingDeleteStatusList.addPendingDeleteStatus( - replica.getDeleteTransactionId(), - containerInfo.getDeleteTransactionId(), - containerInfo.getContainerID()); - } - } catch (ContainerNotFoundException cnfe) { - LOG.warn("Cannot update pending delete transaction for " + - "container #{}. Reason: container missing.", - replica.getContainerID()); - } - } - if (pendingDeleteStatusList.getNumPendingDeletes() > 0) { - publisher.fireEvent(SCMEvents.PENDING_DELETE_STATUS, - pendingDeleteStatusList); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java deleted file mode 100644 index 7dde8d75f94..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.states.ContainerState; -import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.AtomicLongMap; - -/** - * A container state manager keeps track of container states and returns - * containers that match various queries. - *

- * This state machine is driven by a combination of server and client actions.
- *
- * This is how a create container happens: 1. When a container is created, the
- * Server (or SCM) marks that Container as being in the ALLOCATED state. In this
- * state, SCM has chosen a pipeline for the container to live on. However, the
- * container is not created yet. This container along with the pipeline is
- * returned to the client.
- *
- * 2. The client, when it sees the Container state as ALLOCATED, understands
- * that the container needs to be created on the specified pipeline. The client
- * lets the SCM know that it saw this flag and is initiating the create on the
- * data nodes.
- *
- * This is done by calling notifyObjectCreation(ContainerName, BEGIN_CREATE).
- * When SCM gets this call, SCM puts the container state into CREATING. All
- * this state means is that SCM told the Client to create a container and the
- * client saw that request.
- *
- * 3. Then the client makes calls to the datanodes directly, asking the
- * datanodes to create the container. This is done with the help of the
- * pipeline that supports this container.
- *
- * 4. Once the creation of the container is complete, the client will make
- * another call to the SCM, this time specifying the containerName and
- * COMPLETE_CREATE as the Event.
- *
- * 5. With the COMPLETE_CREATE event, the container moves to an Open state.
- * This is the state in which clients can write to a container.
- *
- * 6. If the client does not respond with the COMPLETE_CREATE event within a
- * certain time, the state machine times out and triggers a delete operation
- * of the container.
- *
- * Please see the function initializeStateMachine below to see how this looks
- * in code.
- *
- * Reusing existing containers:
- *
- * The create container call is not made all the time; the system tries to use
- * open containers as much as possible. So in those cases, it looks through the
- * list of open containers and returns containers that match the specific
- * signature.
- *
- * Please note: Logically there are 3 separate state machines in the case of
- * containers.
- *
- * The Create State Machine -- Commented extensively above.
- *
- * Open/Close State Machine - Once the container is in the Open state, it will
- * eventually be closed, once sufficient data has been written to it.
- *

- * TimeOut Delete Container State Machine - if the container creating times out, - * then Container State manager decides to delete the container. - */ -public class ContainerStateManager { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerStateManager.class); - - private final StateMachine stateMachine; - - private final long containerSize; - private final ConcurrentHashMap lastUsedMap; - private final ContainerStateMap containers; - private final AtomicLong containerCount; - private final AtomicLongMap containerStateCount = - AtomicLongMap.create(); - - /** - * Constructs a Container State Manager that tracks all containers owned by - * SCM for the purpose of allocation of blocks. - *

- * TODO : Add Container Tags so we know which containers are owned by SCM. - */ - @SuppressWarnings("unchecked") - public ContainerStateManager(final Configuration configuration) { - - // Initialize the container state machine. - final Set finalStates = new HashSet(); - - // These are the steady states of a container. - finalStates.add(LifeCycleState.OPEN); - finalStates.add(LifeCycleState.CLOSED); - finalStates.add(LifeCycleState.DELETED); - - this.stateMachine = new StateMachine<>(LifeCycleState.OPEN, - finalStates); - initializeStateMachine(); - - this.containerSize = (long) configuration.getStorageSize( - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.BYTES); - - this.lastUsedMap = new ConcurrentHashMap<>(); - this.containerCount = new AtomicLong(0); - this.containers = new ContainerStateMap(); - } - - /* - * - * Event and State Transition Mapping: - * - * State: OPEN ----------------> CLOSING - * Event: FINALIZE - * - * State: CLOSING ----------------> QUASI_CLOSED - * Event: QUASI_CLOSE - * - * State: CLOSING ----------------> CLOSED - * Event: CLOSE - * - * State: QUASI_CLOSED ----------------> CLOSED - * Event: FORCE_CLOSE - * - * State: CLOSED ----------------> DELETING - * Event: DELETE - * - * State: DELETING ----------------> DELETED - * Event: CLEANUP - * - * - * Container State Flow: - * - * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED] - * (FINALIZE) | (QUASI_CLOSE) | - * | | - * | | - * (CLOSE) | (FORCE_CLOSE) | - * | | - * | | - * +--------->[CLOSED]<--------+ - * | - * (DELETE)| - * | - * | - * [DELETING] - * | - * (CLEANUP) | - * | - * V - * [DELETED] - * - */ - private void initializeStateMachine() { - stateMachine.addTransition(LifeCycleState.OPEN, - LifeCycleState.CLOSING, - LifeCycleEvent.FINALIZE); - - stateMachine.addTransition(LifeCycleState.CLOSING, - LifeCycleState.QUASI_CLOSED, - LifeCycleEvent.QUASI_CLOSE); - - stateMachine.addTransition(LifeCycleState.CLOSING, - LifeCycleState.CLOSED, - LifeCycleEvent.CLOSE); - - stateMachine.addTransition(LifeCycleState.QUASI_CLOSED, - LifeCycleState.CLOSED, - LifeCycleEvent.FORCE_CLOSE); - - stateMachine.addTransition(LifeCycleState.CLOSED, - LifeCycleState.DELETING, - LifeCycleEvent.DELETE); - - stateMachine.addTransition(LifeCycleState.DELETING, - LifeCycleState.DELETED, - LifeCycleEvent.CLEANUP); - } - - - void loadContainer(final ContainerInfo containerInfo) throws SCMException { - containers.addContainer(containerInfo); - containerCount.set(Long.max( - containerInfo.getContainerID(), containerCount.get())); - containerStateCount.incrementAndGet(containerInfo.getState()); - } - - /** - * Allocates a new container based on the type, replication etc. - * - * @param pipelineManager -- Pipeline Manager class. - * @param type -- Replication type. - * @param replicationFactor - Replication replicationFactor. - * @return ContainerWithPipeline - * @throws IOException on Failure. - */ - ContainerInfo allocateContainer(final PipelineManager pipelineManager, - final HddsProtos.ReplicationType type, - final HddsProtos.ReplicationFactor replicationFactor, final String owner) - throws IOException { - - Pipeline pipeline; - try { - // TODO: #CLUTIL remove creation logic when all replication types and - // factors are handled by pipeline creator job. 
- pipeline = pipelineManager.createPipeline(type, replicationFactor); - } catch (IOException e) { - final List pipelines = pipelineManager - .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN); - if (pipelines.isEmpty()) { - throw new IOException("Could not allocate container. Cannot get any" + - " matching pipeline for Type:" + type + - ", Factor:" + replicationFactor + ", State:PipelineState.OPEN"); - } - pipeline = pipelines.get((int) containerCount.get() % pipelines.size()); - } - synchronized (pipeline) { - return allocateContainer(pipelineManager, owner, pipeline); - } - } - - /** - * Allocates a new container based on the type, replication etc. - * This method should be called only after the lock on the pipeline is held - * on which the container will be allocated. - * - * @param pipelineManager - Pipeline Manager class. - * @param owner - Owner of the container. - * @param pipeline - Pipeline to which the container needs to be - * allocated. - * @return ContainerWithPipeline - * @throws IOException on Failure. - */ - ContainerInfo allocateContainer( - final PipelineManager pipelineManager, final String owner, - Pipeline pipeline) throws IOException { - Preconditions.checkNotNull(pipeline, - "Pipeline couldn't be found for the new container. " - + "Do you have enough nodes?"); - - final long containerID = containerCount.incrementAndGet(); - final ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(LifeCycleState.OPEN) - .setPipelineID(pipeline.getId()) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.monotonicNow()) - .setOwner(owner) - .setContainerID(containerID) - .setDeleteTransactionId(0) - .setReplicationFactor(pipeline.getFactor()) - .setReplicationType(pipeline.getType()) - .build(); - Preconditions.checkNotNull(containerInfo); - containers.addContainer(containerInfo); - pipelineManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); - containerStateCount.incrementAndGet(containerInfo.getState()); - if (LOG.isTraceEnabled()) { - LOG.trace("New container allocated: {}", containerInfo); - } - return containerInfo; - } - - /** - * Update the Container State to the next state. - * - * @param containerID - ContainerID - * @param event - LifeCycle Event - * @throws SCMException on Failure. - */ - void updateContainerState(final ContainerID containerID, - final HddsProtos.LifeCycleEvent event) - throws SCMException, ContainerNotFoundException { - final ContainerInfo info = containers.getContainerInfo(containerID); - try { - final LifeCycleState oldState = info.getState(); - final LifeCycleState newState = stateMachine.getNextState( - info.getState(), event); - containers.updateState(containerID, info.getState(), newState); - containerStateCount.incrementAndGet(newState); - containerStateCount.decrementAndGet(oldState); - } catch (InvalidStateTransitionException ex) { - String error = String.format("Failed to update container state %s, " + - "reason: invalid state transition from state: %s upon " + - "event: %s.", - containerID, info.getState(), event); - LOG.error(error); - throw new SCMException(error, FAILED_TO_CHANGE_CONTAINER_STATE); - } - } - - /** - * Update deleteTransactionId for a container. 
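updateContainerState above delegates the legality check to the generic StateMachine built in initializeStateMachine. As a compact restatement, the allowed transitions can be written as a plain lookup table; any pair not listed is an invalid transition (here raised as an unchecked exception, purely for illustration):

    // Minimal rendering of the lifecycle transitions wired up in
    // initializeStateMachine; not the real StateMachine class.
    final class LifeCycleTable {
      enum LifeCycle { OPEN, CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED }
      enum Event { FINALIZE, QUASI_CLOSE, CLOSE, FORCE_CLOSE, DELETE, CLEANUP }

      static LifeCycle next(LifeCycle state, Event event) {
        if (state == LifeCycle.OPEN && event == Event.FINALIZE) {
          return LifeCycle.CLOSING;
        }
        if (state == LifeCycle.CLOSING && event == Event.QUASI_CLOSE) {
          return LifeCycle.QUASI_CLOSED;
        }
        if (state == LifeCycle.CLOSING && event == Event.CLOSE) {
          return LifeCycle.CLOSED;
        }
        if (state == LifeCycle.QUASI_CLOSED && event == Event.FORCE_CLOSE) {
          return LifeCycle.CLOSED;
        }
        if (state == LifeCycle.CLOSED && event == Event.DELETE) {
          return LifeCycle.DELETING;
        }
        if (state == LifeCycle.DELETING && event == Event.CLEANUP) {
          return LifeCycle.DELETED;
        }
        throw new IllegalStateException(
            "Invalid transition: " + state + " on " + event);
      }
    }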
- * - * @param deleteTransactionMap maps containerId to its new - * deleteTransactionID - */ - void updateDeleteTransactionId( - final Map deleteTransactionMap) { - deleteTransactionMap.forEach((k, v) -> { - try { - containers.getContainerInfo(ContainerID.valueof(k)) - .updateDeleteTransactionId(v); - } catch (ContainerNotFoundException e) { - LOG.warn("Exception while updating delete transaction id.", e); - } - }); - } - - - /** - * Return a container matching the attributes specified. - * - * @param size - Space needed in the Container. - * @param owner - Owner of the container - A specific nameservice. - * @param pipelineID - ID of the pipeline - * @param containerIDs - Set of containerIDs to choose from - * @return ContainerInfo, null if there is no match found. - */ - ContainerInfo getMatchingContainer(final long size, String owner, - PipelineID pipelineID, NavigableSet containerIDs) { - if (containerIDs.isEmpty()) { - return null; - } - - // Get the last used container and find container above the last used - // container ID. - final ContainerState key = new ContainerState(owner, pipelineID); - final ContainerID lastID = - lastUsedMap.getOrDefault(key, containerIDs.first()); - - // There is a small issue here. The first time, we will skip the first - // container. But in most cases it will not matter. - NavigableSet resultSet = containerIDs.tailSet(lastID, false); - if (resultSet.size() == 0) { - resultSet = containerIDs; - } - - ContainerInfo selectedContainer = - findContainerWithSpace(size, resultSet, owner, pipelineID); - if (selectedContainer == null) { - - // If we did not find any space in the tailSet, we need to look for - // space in the headset, we need to pass true to deal with the - // situation that we have a lone container that has space. That is we - // ignored the last used container under the assumption we can find - // other containers with space, but if have a single container that is - // not true. Hence we need to include the last used container as the - // last element in the sorted set. - - resultSet = containerIDs.headSet(lastID, true); - selectedContainer = - findContainerWithSpace(size, resultSet, owner, pipelineID); - } - - return selectedContainer; - } - - private ContainerInfo findContainerWithSpace(final long size, - final NavigableSet searchSet, final String owner, - final PipelineID pipelineID) { - try { - // Get the container with space to meet our request. - for (ContainerID id : searchSet) { - final ContainerInfo containerInfo = containers.getContainerInfo(id); - if (containerInfo.getUsedBytes() + size <= this.containerSize) { - containerInfo.updateLastUsedTime(); - return containerInfo; - } - } - } catch (ContainerNotFoundException e) { - // This should not happen! - LOG.warn("Exception while finding container with space", e); - } - return null; - } - - Set getAllContainerIDs() { - return containers.getAllContainerIDs(); - } - - /** - * Returns Containers by State. - * - * @param state - State - Open, Closed etc. - * @return List of containers by state. - */ - Set getContainerIDsByState(final LifeCycleState state) { - return containers.getContainerIDsByState(state); - } - - /** - * Get count of containers in the current {@link LifeCycleState}. - * - * @param state {@link LifeCycleState} - * @return Count of containers - */ - Integer getContainerCountByState(final LifeCycleState state) { - return Long.valueOf(containerStateCount.get(state)).intValue(); - } - - /** - * Returns a set of ContainerIDs that match the Container. 
- * - * @param owner Owner of the Containers. - * @param type - Replication Type of the containers - * @param factor - Replication factor of the containers. - * @param state - Current State, like Open, Close etc. - * @return Set of containers that match the specific query parameters. - */ - NavigableSet getMatchingContainerIDs(final String owner, - final ReplicationType type, final ReplicationFactor factor, - final LifeCycleState state) { - return containers.getMatchingContainerIDs(state, owner, - factor, type); - } - - /** - * Returns the containerInfo for the given container id. - * @param containerID id of the container - * @return ContainerInfo containerInfo - * @throws IOException - */ - ContainerInfo getContainer(final ContainerID containerID) - throws ContainerNotFoundException { - return containers.getContainerInfo(containerID); - } - - void close() throws IOException { - } - - /** - * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. - * - * @param containerID - * @return Set - */ - Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { - return containers.getContainerReplicas(containerID); - } - - /** - * Add a container Replica for given DataNode. - * - * @param containerID - * @param replica - */ - void updateContainerReplica(final ContainerID containerID, - final ContainerReplica replica) throws ContainerNotFoundException { - containers.updateContainerReplica(containerID, replica); - } - - /** - * Remove a container Replica for given DataNode. - * - * @param containerID - * @param replica - * @return True of dataNode is removed successfully else false. - */ - void removeContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException { - containers.removeContainerReplica(containerID, replica); - } - - void removeContainer(final ContainerID containerID) - throws ContainerNotFoundException { - containers.removeContainer(containerID); - } - - /** - * Update the lastUsedmap to update with ContainerState and containerID. - * @param pipelineID - * @param containerID - * @param owner - */ - public synchronized void updateLastUsedMap(PipelineID pipelineID, - ContainerID containerID, String owner) { - lastUsedMap.put(new ContainerState(owner, pipelineID), - containerID); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java deleted file mode 100644 index b58100066a3..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos - .ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .IncrementalContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles incremental container reports from datanode. - */ -public class IncrementalContainerReportHandler extends - AbstractContainerReportHandler - implements EventHandler { - - private static final Logger LOG = LoggerFactory.getLogger( - IncrementalContainerReportHandler.class); - - private final NodeManager nodeManager; - - public IncrementalContainerReportHandler( - final NodeManager nodeManager, - final ContainerManager containerManager) { - super(containerManager, LOG); - this.nodeManager = nodeManager; - } - - @Override - public void onMessage(final IncrementalContainerReportFromDatanode report, - final EventPublisher publisher) { - if (LOG.isDebugEnabled()) { - LOG.debug("Processing incremental container report from data node {}", - report.getDatanodeDetails().getUuid()); - } - - boolean success = true; - for (ContainerReplicaProto replicaProto : - report.getReport().getReportList()) { - try { - final DatanodeDetails dd = report.getDatanodeDetails(); - final ContainerID id = ContainerID.valueof( - replicaProto.getContainerID()); - nodeManager.addContainer(dd, id); - processContainerReplica(dd, replicaProto); - } catch (ContainerNotFoundException e) { - success = false; - LOG.warn("Container {} not found!", replicaProto.getContainerID()); - } catch (NodeNotFoundException ex) { - success = false; - LOG.error("Received ICR from unknown datanode {} {}", - report.getDatanodeDetails(), ex); - } catch (IOException e) { - success = false; - LOG.error("Exception while processing ICR for container {}", - replicaProto.getContainerID()); - } - } - - if (success) { - getContainerManager().notifyContainerReportProcessing(false, true); - } else { - getContainerManager().notifyContainerReportProcessing(false, false); - } - - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java deleted file mode 100644 index 5540d737cb9..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ /dev/null @@ -1,875 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.StringJoiner; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Consumer; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigType; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.lock.LockManager; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.GeneratedMessage; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; -import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; -import org.apache.ratis.util.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Replication Manager (RM) is the one which is responsible for making sure - * that the containers are properly replicated. Replication Manager deals only - * with Quasi Closed / Closed container. - */ -public class ReplicationManager implements MetricsSource { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManager.class); - - public static final String METRICS_SOURCE_NAME = "SCMReplicationManager"; - - /** - * Reference to the ContainerManager. - */ - private final ContainerManager containerManager; - - /** - * PlacementPolicy which is used to identify where a container - * should be replicated. - */ - private final ContainerPlacementPolicy containerPlacement; - - /** - * EventPublisher to fire Replicate and Delete container events. - */ - private final EventPublisher eventPublisher; - - /** - * Used for locking a container using its ID while processing it. 
- */ - private final LockManager lockManager; - - /** - * This is used for tracking container replication commands which are issued - * by ReplicationManager and not yet complete. - */ - private final Map> inflightReplication; - - /** - * This is used for tracking container deletion commands which are issued - * by ReplicationManager and not yet complete. - */ - private final Map> inflightDeletion; - - /** - * ReplicationManager specific configuration. - */ - private final ReplicationManagerConfiguration conf; - - /** - * ReplicationMonitor thread is the one which wakes up at configured - * interval and processes all the containers. - */ - private Thread replicationMonitor; - - /** - * Flag used for checking if the ReplicationMonitor thread is running or - * not. - */ - private volatile boolean running; - - /** - * Constructs ReplicationManager instance with the given configuration. - * - * @param conf OzoneConfiguration - * @param containerManager ContainerManager - * @param containerPlacement ContainerPlacementPolicy - * @param eventPublisher EventPublisher - */ - public ReplicationManager(final ReplicationManagerConfiguration conf, - final ContainerManager containerManager, - final ContainerPlacementPolicy containerPlacement, - final EventPublisher eventPublisher, - final LockManager lockManager) { - this.containerManager = containerManager; - this.containerPlacement = containerPlacement; - this.eventPublisher = eventPublisher; - this.lockManager = lockManager; - this.conf = conf; - this.running = false; - this.inflightReplication = new ConcurrentHashMap<>(); - this.inflightDeletion = new ConcurrentHashMap<>(); - } - - /** - * Starts Replication Monitor thread. - */ - public synchronized void start() { - - if (!isRunning()) { - DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, - "SCM Replication manager (closed container replication) related " - + "metrics", - this); - LOG.info("Starting Replication Monitor Thread."); - running = true; - replicationMonitor = new Thread(this::run); - replicationMonitor.setName("ReplicationMonitor"); - replicationMonitor.setDaemon(true); - replicationMonitor.start(); - } else { - LOG.info("Replication Monitor Thread is already running."); - } - } - - /** - * Returns true if the Replication Monitor Thread is running. - * - * @return true if running, false otherwise - */ - public boolean isRunning() { - if (!running) { - synchronized (this) { - return replicationMonitor != null - && replicationMonitor.isAlive(); - } - } - return true; - } - - /** - * Process all the containers immediately. - */ - @VisibleForTesting - @SuppressFBWarnings(value="NN_NAKED_NOTIFY", - justification="Used only for testing") - public synchronized void processContainersNow() { - notify(); - } - - /** - * Stops Replication Monitor thread. - */ - public synchronized void stop() { - if (running) { - LOG.info("Stopping Replication Monitor Thread."); - inflightReplication.clear(); - inflightDeletion.clear(); - running = false; - notify(); - } else { - LOG.info("Replication Monitor Thread is not running."); - } - } - - /** - * ReplicationMonitor thread runnable. This wakes up at configured - * interval and processes all the containers in the system. 
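For illustration: start, stop, processContainersNow and run above implement a classic wait/notify monitor loop, where a daemon thread processes everything, sleeps for the configured interval, and can be woken early or told to exit. A minimal, dependency-free sketch of that lifecycle follows; the class and method names are invented for the example and are not part of the deleted code.

public class PeriodicMonitor {
  private final long intervalMs;
  private volatile boolean running;
  private Thread worker;

  public PeriodicMonitor(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  public synchronized void start() {
    if (running) {
      return;                      // already started
    }
    running = true;
    worker = new Thread(this::run);
    worker.setName("PeriodicMonitor");
    worker.setDaemon(true);        // do not block JVM shutdown
    worker.start();
  }

  public synchronized void stop() {
    running = false;
    notify();                      // wake the worker so it can exit promptly
  }

  public synchronized void triggerNow() {
    notify();                      // wake the worker for an immediate pass
  }

  private synchronized void run() {
    try {
      while (running) {
        processAll();              // one pass over all tracked items
        wait(intervalMs);          // sleep until the next interval or an early wake-up
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  protected void processAll() {
    // placeholder: the real manager iterates every container ID here
  }
}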
- */ - private synchronized void run() { - try { - while (running) { - final long start = Time.monotonicNow(); - final Set containerIds = - containerManager.getContainerIDs(); - containerIds.forEach(this::processContainer); - - LOG.info("Replication Monitor Thread took {} milliseconds for" + - " processing {} containers.", Time.monotonicNow() - start, - containerIds.size()); - - wait(conf.getInterval()); - } - } catch (Throwable t) { - // When we get runtime exception, we should terminate SCM. - LOG.error("Exception in Replication Monitor Thread.", t); - ExitUtil.terminate(1, t); - } - } - - /** - * Process the given container. - * - * @param id ContainerID - */ - private void processContainer(ContainerID id) { - lockManager.lock(id); - try { - final ContainerInfo container = containerManager.getContainer(id); - final Set replicas = containerManager - .getContainerReplicas(container.containerID()); - final LifeCycleState state = container.getState(); - - /* - * We don't take any action if the container is in OPEN state. - */ - if (state == LifeCycleState.OPEN) { - return; - } - - /* - * If the container is in CLOSING state, the replicas can either - * be in OPEN or in CLOSING state. In both of this cases - * we have to resend close container command to the datanodes. - */ - if (state == LifeCycleState.CLOSING) { - replicas.forEach(replica -> sendCloseCommand( - container, replica.getDatanodeDetails(), false)); - return; - } - - /* - * If the container is in QUASI_CLOSED state, check and close the - * container if possible. - */ - if (state == LifeCycleState.QUASI_CLOSED && - canForceCloseContainer(container, replicas)) { - forceCloseContainer(container, replicas); - return; - } - - /* - * Before processing the container we have to reconcile the - * inflightReplication and inflightDeletion actions. - * - * We remove the entry from inflightReplication and inflightDeletion - * list, if the operation is completed or if it has timed out. - */ - updateInflightAction(container, inflightReplication, - action -> replicas.stream() - .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode))); - - updateInflightAction(container, inflightDeletion, - action -> replicas.stream() - .noneMatch(r -> r.getDatanodeDetails().equals(action.datanode))); - - - /* - * We don't have to take any action if the container is healthy. - * - * According to ReplicationMonitor container is considered healthy if - * the container is either in QUASI_CLOSED or in CLOSED state and has - * exact number of replicas in the same state. - */ - if (isContainerHealthy(container, replicas)) { - return; - } - - /* - * Check if the container is under replicated and take appropriate - * action. - */ - if (isContainerUnderReplicated(container, replicas)) { - handleUnderReplicatedContainer(container, replicas); - return; - } - - /* - * Check if the container is over replicated and take appropriate - * action. - */ - if (isContainerOverReplicated(container, replicas)) { - handleOverReplicatedContainer(container, replicas); - return; - } - - /* - * The container is neither under nor over replicated and the container - * is not healthy. This means that the container has unhealthy/corrupted - * replica. - */ - handleUnstableContainer(container, replicas); - - } catch (ContainerNotFoundException ex) { - LOG.warn("Missing container {}.", id); - } finally { - lockManager.unlock(id); - } - } - - /** - * Reconciles the InflightActions for a given container. 
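For illustration: processContainer applies its checks in a strict order, leaving OPEN containers alone, re-sending close commands for CLOSING ones, force-closing an eligible QUASI_CLOSED container, reconciling inflight work, and only then falling through to the healthy / under-replicated / over-replicated / unstable branches. Below is a condensed sketch of that decision ladder with placeholder enums and action names; it is not the deleted implementation.

enum ContainerState { OPEN, CLOSING, QUASI_CLOSED, CLOSED }

final class ReplicationDecision {

  /** Returns the single action chosen for one container in one monitor pass. */
  static String decide(ContainerState state,
                       boolean closeQuorumReached,
                       boolean healthy,
                       boolean underReplicated,
                       boolean overReplicated) {
    if (state == ContainerState.OPEN) {
      return "NONE";                       // open containers are left alone
    }
    if (state == ContainerState.CLOSING) {
      return "RESEND_CLOSE";               // nudge replicas towards CLOSED
    }
    if (state == ContainerState.QUASI_CLOSED && closeQuorumReached) {
      return "FORCE_CLOSE";                // majority of unique origins agree
    }
    // inflight replication/deletion would be reconciled here, before the
    // health checks below, so completed or timed-out work is not re-issued
    if (healthy) {
      return "NONE";
    }
    if (underReplicated) {
      return "REPLICATE";
    }
    if (overReplicated) {
      return "DELETE_EXCESS";
    }
    return "HANDLE_UNSTABLE";              // replica states disagree with container state
  }
}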
- * - * @param container Container to update - * @param inflightActions inflightReplication (or) inflightDeletion - * @param filter filter to check if the operation is completed - */ - private void updateInflightAction(final ContainerInfo container, - final Map> inflightActions, - final Predicate filter) { - final ContainerID id = container.containerID(); - final long deadline = Time.monotonicNow() - conf.getEventTimeout(); - if (inflightActions.containsKey(id)) { - final List actions = inflightActions.get(id); - actions.removeIf(action -> action.time < deadline); - actions.removeIf(filter); - if (actions.isEmpty()) { - inflightActions.remove(id); - } - } - } - - /** - * Returns true if the container is healthy according to ReplicationMonitor. - * - * According to ReplicationMonitor container is considered healthy if - * it has exact number of replicas in the same state as the container. - * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if the container is healthy, false otherwise - */ - private boolean isContainerHealthy(final ContainerInfo container, - final Set replicas) { - return container.getReplicationFactor().getNumber() == replicas.size() && - replicas.stream().allMatch( - r -> compareState(container.getState(), r.getState())); - } - - /** - * Checks if the container is under replicated or not. - * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if the container is under replicated, false otherwise - */ - private boolean isContainerUnderReplicated(final ContainerInfo container, - final Set replicas) { - return container.getReplicationFactor().getNumber() > - getReplicaCount(container.containerID(), replicas); - } - - /** - * Checks if the container is over replicated or not. - * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if the container if over replicated, false otherwise - */ - private boolean isContainerOverReplicated(final ContainerInfo container, - final Set replicas) { - return container.getReplicationFactor().getNumber() < - getReplicaCount(container.containerID(), replicas); - } - - /** - * Returns the replication count of the given container. This also - * considers inflight replication and deletion. - * - * @param id ContainerID - * @param replicas Set of existing replicas - * @return number of estimated replicas for this container - */ - private int getReplicaCount(final ContainerID id, - final Set replicas) { - return replicas.size() - + inflightReplication.getOrDefault(id, Collections.emptyList()).size() - - inflightDeletion.getOrDefault(id, Collections.emptyList()).size(); - } - - /** - * Returns true if more than 50% of the container replicas with unique - * originNodeId are in QUASI_CLOSED state. 
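For illustration: getReplicaCount and updateInflightAction together define the effective replica count as observed replicas plus inflight replications minus inflight deletions, after dropping inflight entries that have passed the event-timeout deadline. A small stand-alone sketch of that arithmetic over plain JDK collections; the names are illustrative.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ReplicaAccounting {
  // containerId -> issue timestamps of commands that have not completed yet
  private final Map<Long, List<Long>> inflightAdds = new HashMap<>();
  private final Map<Long, List<Long>> inflightDeletes = new HashMap<>();

  /** Drops inflight entries issued before the deadline, i.e. timed-out commands. */
  void expire(long containerId, long deadlineMillis) {
    for (Map<Long, List<Long>> m : List.of(inflightAdds, inflightDeletes)) {
      List<Long> times = m.getOrDefault(containerId, new ArrayList<>());
      times.removeIf(t -> t < deadlineMillis);
      if (times.isEmpty()) {
        m.remove(containerId);
      }
    }
  }

  /** Observed replicas plus pending adds minus pending deletes. */
  int effectiveReplicaCount(long containerId, int observedReplicas) {
    return observedReplicas
        + inflightAdds.getOrDefault(containerId, List.of()).size()
        - inflightDeletes.getOrDefault(containerId, List.of()).size();
  }

  boolean isUnderReplicated(long id, int observed, int factor) {
    return factor > effectiveReplicaCount(id, observed);
  }

  boolean isOverReplicated(long id, int observed, int factor) {
    return factor < effectiveReplicaCount(id, observed);
  }
}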
- * - * @param container Container to check - * @param replicas Set of ContainerReplicas - * @return true if we can force close the container, false otherwise - */ - private boolean canForceCloseContainer(final ContainerInfo container, - final Set replicas) { - Preconditions.assertTrue(container.getState() == - LifeCycleState.QUASI_CLOSED); - final int replicationFactor = container.getReplicationFactor().getNumber(); - final long uniqueQuasiClosedReplicaCount = replicas.stream() - .filter(r -> r.getState() == State.QUASI_CLOSED) - .map(ContainerReplica::getOriginDatanodeId) - .distinct() - .count(); - return uniqueQuasiClosedReplicaCount > (replicationFactor / 2); - } - - /** - * Force close the container replica(s) with highest sequence Id. - * - *

- * Note: We should force close the container only if >50% (quorum) - * of replicas with unique originNodeId are in QUASI_CLOSED state. - *
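For illustration: the quorum rule in canForceCloseContainer counts QUASI_CLOSED replicas with distinct origin datanode IDs and allows a force close only when that count exceeds half the replication factor. A self-contained illustration of the same counting; the Replica record and node names are made up for the example.

import java.util.List;

final class QuorumCheck {

  record Replica(String originNodeId, boolean quasiClosed, long sequenceId) { }

  /** True when more than half of the factor is covered by unique QUASI_CLOSED origins. */
  static boolean canForceClose(List<Replica> replicas, int replicationFactor) {
    long uniqueQuasiClosedOrigins = replicas.stream()
        .filter(Replica::quasiClosed)
        .map(Replica::originNodeId)
        .distinct()
        .count();
    return uniqueQuasiClosedOrigins > (replicationFactor / 2);
  }

  public static void main(String[] args) {
    List<Replica> replicas = List.of(
        new Replica("dn-1", true, 105L),
        new Replica("dn-2", true, 105L),
        new Replica("dn-3", false, 99L));
    // factor 3: 2 unique QUASI_CLOSED origins > 3/2 (integer 1), so quorum is reached
    System.out.println(canForceClose(replicas, 3));   // prints true
  }
}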

- * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void forceCloseContainer(final ContainerInfo container, - final Set replicas) { - Preconditions.assertTrue(container.getState() == - LifeCycleState.QUASI_CLOSED); - - final List quasiClosedReplicas = replicas.stream() - .filter(r -> r.getState() == State.QUASI_CLOSED) - .collect(Collectors.toList()); - - final Long sequenceId = quasiClosedReplicas.stream() - .map(ContainerReplica::getSequenceId) - .max(Long::compare) - .orElse(-1L); - - LOG.info("Force closing container {} with BCSID {}," + - " which is in QUASI_CLOSED state.", - container.containerID(), sequenceId); - - quasiClosedReplicas.stream() - .filter(r -> sequenceId != -1L) - .filter(replica -> replica.getSequenceId().equals(sequenceId)) - .forEach(replica -> sendCloseCommand( - container, replica.getDatanodeDetails(), true)); - } - - /** - * If the given container is under replicated, identify a new set of - * datanode(s) to replicate the container using ContainerPlacementPolicy - * and send replicate container command to the identified datanode(s). - * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void handleUnderReplicatedContainer(final ContainerInfo container, - final Set replicas) { - LOG.debug("Handling underreplicated container: {}", - container.getContainerID()); - try { - final ContainerID id = container.containerID(); - final List deletionInFlight = inflightDeletion - .getOrDefault(id, Collections.emptyList()) - .stream() - .map(action -> action.datanode) - .collect(Collectors.toList()); - final List source = replicas.stream() - .filter(r -> - r.getState() == State.QUASI_CLOSED || - r.getState() == State.CLOSED) - .filter(r -> !deletionInFlight.contains(r.getDatanodeDetails())) - .sorted((r1, r2) -> r2.getSequenceId().compareTo(r1.getSequenceId())) - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - if (source.size() > 0) { - final int replicationFactor = container - .getReplicationFactor().getNumber(); - final int delta = replicationFactor - getReplicaCount(id, replicas); - final List excludeList = replicas.stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList()); - List actionList = inflightReplication.get(id); - if (actionList != null) { - actionList.stream().map(r -> r.datanode) - .forEach(excludeList::add); - } - final List selectedDatanodes = containerPlacement - .chooseDatanodes(excludeList, null, delta, - container.getUsedBytes()); - - LOG.info("Container {} is under replicated. Expected replica count" + - " is {}, but found {}.", id, replicationFactor, - replicationFactor - delta); - - for (DatanodeDetails datanode : selectedDatanodes) { - sendReplicateCommand(container, datanode, source); - } - } else { - LOG.warn("Cannot replicate container {}, no healthy replica found.", - container.containerID()); - } - } catch (IOException ex) { - LOG.warn("Exception while replicating container {}.", - container.getContainerID(), ex); - } - } - - /** - * If the given container is over replicated, identify the datanode(s) - * to delete the container and send delete container command to the - * identified datanode(s). 
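For illustration: handleUnderReplicatedContainer picks source replicas (QUASI_CLOSED or CLOSED and not already scheduled for deletion), prefers the highest sequence ID, and asks the placement policy for as many new nodes as the deficit. A reduced sketch of the source selection and deficit arithmetic, with hypothetical names and without the placement-policy call:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class UnderReplicationPlan {

  record Replica(String datanode, long sequenceId, boolean closedOrQuasiClosed) { }

  /**
   * Picks source datanodes (usable replicas, newest data first).
   * Returns an empty list when no usable source exists.
   */
  static List<String> chooseSources(List<Replica> replicas,
                                    List<String> deletionInFlight) {
    List<String> sources = new ArrayList<>();
    replicas.stream()
        .filter(Replica::closedOrQuasiClosed)               // only usable replica states
        .filter(r -> !deletionInFlight.contains(r.datanode()))
        .sorted(Comparator.comparingLong(Replica::sequenceId).reversed())
        .forEach(r -> sources.add(r.datanode()));
    return sources;
  }

  /** Number of additional copies the placement policy should be asked for. */
  static int additionalCopiesNeeded(int replicationFactor, int effectiveReplicaCount) {
    return Math.max(0, replicationFactor - effectiveReplicaCount);
  }
}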
- * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void handleOverReplicatedContainer(final ContainerInfo container, - final Set replicas) { - - final ContainerID id = container.containerID(); - final int replicationFactor = container.getReplicationFactor().getNumber(); - // Dont consider inflight replication while calculating excess here. - final int excess = replicas.size() - replicationFactor - - inflightDeletion.getOrDefault(id, Collections.emptyList()).size(); - - if (excess > 0) { - - LOG.info("Container {} is over replicated. Expected replica count" + - " is {}, but found {}.", id, replicationFactor, - replicationFactor + excess); - - final Map uniqueReplicas = - new LinkedHashMap<>(); - - replicas.stream() - .filter(r -> compareState(container.getState(), r.getState())) - .forEach(r -> uniqueReplicas - .putIfAbsent(r.getOriginDatanodeId(), r)); - - // Retain one healthy replica per origin node Id. - final List eligibleReplicas = new ArrayList<>(replicas); - eligibleReplicas.removeAll(uniqueReplicas.values()); - - final List unhealthyReplicas = eligibleReplicas - .stream() - .filter(r -> !compareState(container.getState(), r.getState())) - .collect(Collectors.toList()); - - //Move the unhealthy replicas to the front of eligible replicas to delete - eligibleReplicas.removeAll(unhealthyReplicas); - eligibleReplicas.addAll(0, unhealthyReplicas); - - for (int i = 0; i < excess; i++) { - sendDeleteCommand(container, - eligibleReplicas.get(i).getDatanodeDetails(), true); - } - } - } - - /** - * Handles unstable container. - * A container is inconsistent if any of the replica state doesn't - * match the container state. We have to take appropriate action - * based on state of the replica. - * - * @param container ContainerInfo - * @param replicas Set of ContainerReplicas - */ - private void handleUnstableContainer(final ContainerInfo container, - final Set replicas) { - // Find unhealthy replicas - List unhealthyReplicas = replicas.stream() - .filter(r -> !compareState(container.getState(), r.getState())) - .collect(Collectors.toList()); - - Iterator iterator = unhealthyReplicas.iterator(); - while (iterator.hasNext()) { - final ContainerReplica replica = iterator.next(); - final State state = replica.getState(); - if (state == State.OPEN || state == State.CLOSING) { - sendCloseCommand(container, replica.getDatanodeDetails(), false); - iterator.remove(); - } - - if (state == State.QUASI_CLOSED) { - // Send force close command if the BCSID matches - if (container.getSequenceId() == replica.getSequenceId()) { - sendCloseCommand(container, replica.getDatanodeDetails(), true); - iterator.remove(); - } - } - } - - // Now we are left with the replicas which are either unhealthy or - // the BCSID doesn't match. These replicas should be deleted. - - /* - * If we have unhealthy replicas we go under replicated and then - * replicate the healthy copy. - * - * We also make sure that we delete only one unhealthy replica at a time. - * - * If there are two unhealthy replica: - * - Delete first unhealthy replica - * - Re-replicate the healthy copy - * - Delete second unhealthy replica - * - Re-replicate the healthy copy - * - * Note: Only one action will be executed in a single ReplicationMonitor - * iteration. So to complete all the above actions we need four - * ReplicationMonitor iterations. 
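For illustration: handleOverReplicatedContainer computes the excess as observed replicas minus the factor minus pending deletes, keeps one healthy replica per origin node, and prefers deleting unhealthy replicas before healthy duplicates. The same bookkeeping as a stand-alone sketch; the names are illustrative.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class OverReplicationPlan {

  record Replica(String datanode, String originNodeId, boolean healthy) { }

  /** Chooses which replicas to delete when a container has too many copies. */
  static List<String> replicasToDelete(List<Replica> replicas,
                                       int replicationFactor,
                                       int pendingDeletes) {
    int excess = replicas.size() - replicationFactor - pendingDeletes;
    List<String> victims = new ArrayList<>();
    if (excess <= 0) {
      return victims;
    }

    // Retain one healthy replica per origin node id.
    Map<String, Replica> retained = new LinkedHashMap<>();
    replicas.stream()
        .filter(Replica::healthy)
        .forEach(r -> retained.putIfAbsent(r.originNodeId(), r));

    List<Replica> eligible = new ArrayList<>(replicas);
    eligible.removeAll(retained.values());

    // Unhealthy replicas go to the front of the deletion order.
    List<Replica> unhealthy = eligible.stream().filter(r -> !r.healthy()).toList();
    eligible.removeAll(unhealthy);
    eligible.addAll(0, unhealthy);

    for (int i = 0; i < excess && i < eligible.size(); i++) {
      victims.add(eligible.get(i).datanode());
    }
    return victims;
  }
}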
- */ - - unhealthyReplicas.stream().findFirst().ifPresent(replica -> - sendDeleteCommand(container, replica.getDatanodeDetails(), false)); - - } - - /** - * Sends close container command for the given container to the given - * datanode. - * - * @param container Container to be closed - * @param datanode The datanode on which the container - * has to be closed - * @param force Should be set to true if we want to close a - * QUASI_CLOSED container - */ - private void sendCloseCommand(final ContainerInfo container, - final DatanodeDetails datanode, - final boolean force) { - - LOG.info("Sending close container command for container {}" + - " to datanode {}.", container.containerID(), datanode); - - CloseContainerCommand closeContainerCommand = - new CloseContainerCommand(container.getContainerID(), - container.getPipelineID(), force); - eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, - new CommandForDatanode<>(datanode.getUuid(), closeContainerCommand)); - } - - /** - * Sends replicate container command for the given container to the given - * datanode. - * - * @param container Container to be replicated - * @param datanode The destination datanode to replicate - * @param sources List of source nodes from where we can replicate - */ - private void sendReplicateCommand(final ContainerInfo container, - final DatanodeDetails datanode, - final List sources) { - - LOG.info("Sending replicate container command for container {}" + - " to datanode {}", container.containerID(), datanode); - - final ContainerID id = container.containerID(); - final ReplicateContainerCommand replicateCommand = - new ReplicateContainerCommand(id.getId(), sources); - inflightReplication.computeIfAbsent(id, k -> new ArrayList<>()); - sendAndTrackDatanodeCommand(datanode, replicateCommand, - action -> inflightReplication.get(id).add(action)); - } - - /** - * Sends delete container command for the given container to the given - * datanode. - * - * @param container Container to be deleted - * @param datanode The datanode on which the replica should be deleted - * @param force Should be set to true to delete an OPEN replica - */ - private void sendDeleteCommand(final ContainerInfo container, - final DatanodeDetails datanode, - final boolean force) { - - LOG.info("Sending delete container command for container {}" + - " to datanode {}", container.containerID(), datanode); - - final ContainerID id = container.containerID(); - final DeleteContainerCommand deleteCommand = - new DeleteContainerCommand(id.getId(), force); - inflightDeletion.computeIfAbsent(id, k -> new ArrayList<>()); - sendAndTrackDatanodeCommand(datanode, deleteCommand, - action -> inflightDeletion.get(id).add(action)); - } - - /** - * Creates CommandForDatanode with the given SCMCommand and fires - * DATANODE_COMMAND event to event queue. - * - * Tracks the command using the given tracker. - * - * @param datanode Datanode to which the command has to be sent - * @param command SCMCommand to be sent - * @param tracker Tracker which tracks the inflight actions - * @param Type of SCMCommand - */ - private void sendAndTrackDatanodeCommand( - final DatanodeDetails datanode, - final SCMCommand command, - final Consumer tracker) { - final CommandForDatanode datanodeCommand = - new CommandForDatanode<>(datanode.getUuid(), command); - eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand); - tracker.accept(new InflightAction(datanode, Time.monotonicNow())); - } - - /** - * Compares the container state with the replica state. 
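For illustration: sendReplicateCommand and sendDeleteCommand both delegate to a fire-and-track helper that publishes the datanode command and then records an InflightAction with the issue time through a Consumer supplied by the caller. A minimal stand-in for that pattern; the EventBus interface and all names here are assumptions, not the deleted API.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class CommandDispatch {

  record InflightAction(String datanode, long issuedAtMillis) { }

  interface EventBus {
    void fire(String datanode, String command);
  }

  /** Fires the command and records an inflight entry through the supplied tracker. */
  static void sendAndTrack(EventBus bus, String datanode, String command,
                           Consumer<InflightAction> tracker) {
    bus.fire(datanode, command);
    tracker.accept(new InflightAction(datanode, System.currentTimeMillis()));
  }

  public static void main(String[] args) {
    List<InflightAction> inflight = new ArrayList<>();
    EventBus bus = (dn, cmd) -> System.out.println("fired " + cmd + " to " + dn);
    sendAndTrack(bus, "dn-7", "REPLICATE container #12", inflight::add);
    System.out.println(inflight);   // one tracked action with its issue time
  }
}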
- * - * @param containerState ContainerState - * @param replicaState ReplicaState - * @return true if the state matches, false otherwise - */ - private static boolean compareState(final LifeCycleState containerState, - final State replicaState) { - switch (containerState) { - case OPEN: - return replicaState == State.OPEN; - case CLOSING: - return replicaState == State.CLOSING; - case QUASI_CLOSED: - return replicaState == State.QUASI_CLOSED; - case CLOSED: - return replicaState == State.CLOSED; - case DELETING: - return false; - case DELETED: - return false; - default: - return false; - } - } - - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - collector.addRecord(ReplicationManager.class.getSimpleName()) - .addGauge(ReplicationManagerMetrics.INFLIGHT_REPLICATION, - inflightReplication.size()) - .addGauge(ReplicationManagerMetrics.INFLIGHT_DELETION, - inflightDeletion.size()) - .endRecord(); - } - - /** - * Wrapper class to hold the InflightAction with its start time. - */ - private static final class InflightAction { - - private final DatanodeDetails datanode; - private final long time; - - private InflightAction(final DatanodeDetails datanode, - final long time) { - this.datanode = datanode; - this.time = time; - } - } - - /** - * Configuration used by the Replication Manager. - */ - @ConfigGroup(prefix = "hdds.scm.replication") - public static class ReplicationManagerConfiguration { - /** - * The frequency in which ReplicationMonitor thread should run. - */ - private long interval = 5 * 60 * 1000; - - /** - * Timeout for container replication & deletion command issued by - * ReplicationManager. - */ - private long eventTimeout = 10 * 60 * 1000; - - @Config(key = "thread.interval", - type = ConfigType.TIME, - defaultValue = "300s", - tags = {SCM, OZONE}, - description = "When a heartbeat from the data node arrives on SCM, " - + "It is queued for processing with the time stamp of when the " - + "heartbeat arrived. There is a heartbeat processing thread " - + "inside " - + "SCM that runs at a specified interval. This value controls how " - + "frequently this thread is run.\n\n" - + "There are some assumptions build into SCM such as this " - + "value should allow the heartbeat processing thread to run at " - + "least three times more frequently than heartbeats and at least " - + "five times more than stale node detection time. " - + "If you specify a wrong value, SCM will gracefully refuse to " - + "run. " - + "For more info look at the node manager tests in SCM.\n" - + "\n" - + "In short, you don't need to change this." - ) - public void setInterval(long interval) { - this.interval = interval; - } - - @Config(key = "event.timeout", - type = ConfigType.TIME, - defaultValue = "10m", - tags = {SCM, OZONE}, - description = "Timeout for the container replication/deletion commands " - + "sent to datanodes. After this timeout the command will be " - + "retried.") - public void setEventTimeout(long eventTimeout) { - this.eventTimeout = eventTimeout; - } - - public long getInterval() { - return interval; - } - - public long getEventTimeout() { - return eventTimeout; - } - } - - /** - * Metric name definitions for Replication manager. 
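For illustration: compareState above maps each container LifeCycleState to the single replica state considered consistent with it. The same rule can be expressed as a lookup table; this table-driven sketch uses simplified local enums rather than the protobuf types used by the deleted code.

import java.util.EnumMap;
import java.util.Map;

final class StateMatch {

  enum ContainerState { OPEN, CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED }
  enum ReplicaState { OPEN, CLOSING, QUASI_CLOSED, CLOSED, UNHEALTHY }

  // Expected replica state for each container state; DELETING/DELETED match nothing.
  private static final Map<ContainerState, ReplicaState> EXPECTED =
      new EnumMap<>(Map.of(
          ContainerState.OPEN, ReplicaState.OPEN,
          ContainerState.CLOSING, ReplicaState.CLOSING,
          ContainerState.QUASI_CLOSED, ReplicaState.QUASI_CLOSED,
          ContainerState.CLOSED, ReplicaState.CLOSED));

  static boolean matches(ContainerState container, ReplicaState replica) {
    return EXPECTED.get(container) == replica;   // null (no mapping) never matches
  }
}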
- */ - public enum ReplicationManagerMetrics implements MetricsInfo { - - INFLIGHT_REPLICATION("Tracked inflight container replication requests."), - INFLIGHT_DELETION("Tracked inflight container deletion requests."); - - private final String desc; - - ReplicationManagerMetrics(String desc) { - this.desc = desc; - } - - @Override - public String description() { - return desc; - } - - @Override - public String toString() { - return new StringJoiner(", ", this.getClass().getSimpleName() + "{", "}") - .add("name=" + name()) - .add("description=" + desc) - .toString(); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java deleted file mode 100644 index 470d4eb1c48..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ /dev/null @@ -1,592 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- *

Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; - -/** - * ContainerManager class contains the mapping from a name to a pipeline - * mapping. This is used by SCM when allocating new locations and when - * looking up a key. - */ -public class SCMContainerManager implements ContainerManager { - private static final Logger LOG = LoggerFactory.getLogger(SCMContainerManager - .class); - - private final Lock lock; - private final MetadataStore containerStore; - private final PipelineManager pipelineManager; - private final ContainerStateManager containerStateManager; - private final int numContainerPerOwnerInPipeline; - - private final SCMContainerManagerMetrics scmContainerManagerMetrics; - - /** - * Constructs a mapping class that creates mapping between container names - * and pipelines. - * - * @param nodeManager - NodeManager so that we can get the nodes that are - * healthy to place new - * containers. - * passed to LevelDB and this memory is allocated in Native code space. - * CacheSize is specified - * in MB. - * @param pipelineManager - PipelineManager - * @throws IOException on Failure. 
- */ - @SuppressWarnings("unchecked") - public SCMContainerManager(final Configuration conf, - final NodeManager nodeManager, PipelineManager pipelineManager, - final EventPublisher eventPublisher) throws IOException { - - final File metaDir = ServerUtils.getScmDbDir(conf); - final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB); - final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - - this.containerStore = MetadataStoreBuilder.newBuilder() - .setConf(conf) - .setDbFile(containerDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - - this.lock = new ReentrantLock(); - this.pipelineManager = pipelineManager; - this.containerStateManager = new ContainerStateManager(conf); - this.numContainerPerOwnerInPipeline = conf - .getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); - - loadExistingContainers(); - - scmContainerManagerMetrics = SCMContainerManagerMetrics.create(); - } - - private void loadExistingContainers() throws IOException { - List> range = containerStore - .getSequentialRangeKVs(null, Integer.MAX_VALUE, null); - for (Map.Entry entry : range) { - ContainerInfo container = ContainerInfo.fromProtobuf( - ContainerInfoProto.PARSER.parseFrom(entry.getValue())); - Preconditions.checkNotNull(container); - containerStateManager.loadContainer(container); - if (container.getState() == LifeCycleState.OPEN) { - pipelineManager.addContainerToPipeline(container.getPipelineID(), - ContainerID.valueof(container.getContainerID())); - } - } - } - - @VisibleForTesting - // TODO: remove this later. - public ContainerStateManager getContainerStateManager() { - return containerStateManager; - } - - @Override - public Set getContainerIDs() { - lock.lock(); - try { - return containerStateManager.getAllContainerIDs(); - } finally { - lock.unlock(); - } - } - - @Override - public List getContainers() { - lock.lock(); - try { - return containerStateManager.getAllContainerIDs().stream().map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException e) { - // How can this happen? - return null; - } - }).filter(Objects::nonNull).collect(Collectors.toList()); - } finally { - lock.unlock(); - } - } - - @Override - public List getContainers(LifeCycleState state) { - lock.lock(); - try { - return containerStateManager.getContainerIDsByState(state).stream() - .map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException e) { - // How can this happen? - return null; - } - }).filter(Objects::nonNull).collect(Collectors.toList()); - } finally { - lock.unlock(); - } - } - - /** - * Get number of containers in the given state. - * - * @param state {@link LifeCycleState} - * @return Count - */ - public Integer getContainerCountByState(LifeCycleState state) { - return containerStateManager.getContainerCountByState(state); - } - - /** - * {@inheritDoc} - */ - @Override - public ContainerInfo getContainer(final ContainerID containerID) - throws ContainerNotFoundException { - return containerStateManager.getContainer(containerID); - } - - /** - * {@inheritDoc} - */ - @Override - public List listContainer(ContainerID startContainerID, - int count) { - lock.lock(); - try { - scmContainerManagerMetrics.incNumListContainersOps(); - final long startId = startContainerID == null ? 
- 0 : startContainerID.getId(); - final List containersIds = - new ArrayList<>(containerStateManager.getAllContainerIDs()); - Collections.sort(containersIds); - - return containersIds.stream() - .filter(id -> id.getId() > startId) - .limit(count) - .map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException ex) { - // This can never happen, as we hold lock no one else can remove - // the container after we got the container ids. - LOG.warn("Container Missing.", ex); - return null; - } - }).collect(Collectors.toList()); - } finally { - lock.unlock(); - } - } - - - /** - * Allocates a new container. - * - * @param replicationFactor - replication factor of the container. - * @param owner - The string name of the Service that owns this container. - * @return - Pipeline that makes up this container. - * @throws IOException - Exception - */ - @Override - public ContainerInfo allocateContainer(final ReplicationType type, - final ReplicationFactor replicationFactor, final String owner) - throws IOException { - try { - lock.lock(); - ContainerInfo containerInfo = null; - try { - containerInfo = - containerStateManager.allocateContainer(pipelineManager, type, - replicationFactor, owner); - } catch (IOException ex) { - scmContainerManagerMetrics.incNumFailureCreateContainers(); - throw ex; - } - // Add container to DB. - try { - addContainerToDB(containerInfo); - } catch (IOException ex) { - // When adding to DB failed, we are removing from containerStateMap. - // We should also remove from pipeline2Container Map in - // PipelineStateManager. - pipelineManager.removeContainerFromPipeline( - containerInfo.getPipelineID(), - new ContainerID(containerInfo.getContainerID())); - throw ex; - } - return containerInfo; - } finally { - lock.unlock(); - } - } - - /** - * Deletes a container from SCM. - * - * @param containerID - Container ID - * @throws IOException if container doesn't exist or container store failed - * to delete the - * specified key. - */ - @Override - public void deleteContainer(ContainerID containerID) throws IOException { - lock.lock(); - try { - containerStateManager.removeContainer(containerID); - final byte[] dbKey = Longs.toByteArray(containerID.getId()); - final byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes != null) { - containerStore.delete(dbKey); - } else { - // Where did the container go? o_O - LOG.warn("Unable to remove the container {} from container store," + - " it's missing!", containerID); - } - scmContainerManagerMetrics.incNumSuccessfulDeleteContainers(); - } catch (ContainerNotFoundException cnfe) { - scmContainerManagerMetrics.incNumFailureDeleteContainers(); - throw new SCMException( - "Failed to delete container " + containerID + ", reason : " + - "container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } finally { - lock.unlock(); - } - } - - /** - * {@inheritDoc} Used by client to update container state on SCM. - */ - @Override - public HddsProtos.LifeCycleState updateContainerState( - ContainerID containerID, HddsProtos.LifeCycleEvent event) - throws IOException { - // Should we return the updated ContainerInfo instead of LifeCycleState? 
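For illustration: listContainer pages through the known container IDs by sorting them, keeping only IDs strictly greater than the requested start ID, and truncating to the requested count. The same paging pattern over plain longs; illustrative only.

import java.util.List;
import java.util.stream.Collectors;

final class ContainerPaging {

  /** Returns up to {@code count} IDs strictly greater than {@code startId}, in order. */
  static List<Long> page(List<Long> allIds, long startId, int count) {
    return allIds.stream()
        .sorted()
        .filter(id -> id > startId)
        .limit(count)
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    List<Long> ids = List.of(14L, 3L, 7L, 21L, 9L);
    System.out.println(page(ids, 7L, 2));   // [9, 14]
  }
}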
- lock.lock(); - try { - final ContainerInfo container = containerStateManager - .getContainer(containerID); - final LifeCycleState oldState = container.getState(); - containerStateManager.updateContainerState(containerID, event); - final LifeCycleState newState = container.getState(); - - if (oldState == LifeCycleState.OPEN && newState != LifeCycleState.OPEN) { - pipelineManager - .removeContainerFromPipeline(container.getPipelineID(), - containerID); - } - final byte[] dbKey = Longs.toByteArray(containerID.getId()); - containerStore.put(dbKey, container.getProtobuf().toByteArray()); - return newState; - } catch (ContainerNotFoundException cnfe) { - throw new SCMException( - "Failed to update container state" - + containerID - + ", reason : container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } finally { - lock.unlock(); - } - } - - - /** - * Update deleteTransactionId according to deleteTransactionMap. - * - * @param deleteTransactionMap Maps the containerId to latest delete - * transaction id for the container. - * @throws IOException - */ - public void updateDeleteTransactionId(Map deleteTransactionMap) - throws IOException { - if (deleteTransactionMap == null) { - return; - } - - lock.lock(); - try { - BatchOperation batch = new BatchOperation(); - for (Map.Entry entry : deleteTransactionMap.entrySet()) { - long containerID = entry.getKey(); - byte[] dbKey = Longs.toByteArray(containerID); - byte[] containerBytes = containerStore.get(dbKey); - if (containerBytes == null) { - throw new SCMException( - "Failed to increment number of deleted blocks for container " - + containerID + ", reason : " + "container doesn't exist.", - SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); - } - ContainerInfo containerInfo = ContainerInfo.fromProtobuf( - HddsProtos.ContainerInfoProto.parseFrom(containerBytes)); - containerInfo.updateDeleteTransactionId(entry.getValue()); - batch.put(dbKey, containerInfo.getProtobuf().toByteArray()); - } - containerStore.writeBatch(batch); - containerStateManager - .updateDeleteTransactionId(deleteTransactionMap); - } finally { - lock.unlock(); - } - } - - /** - * Return a container matching the attributes specified. - * - * @param sizeRequired - Space needed in the Container. - * @param owner - Owner of the container - A specific nameservice. - * @param pipeline - Pipeline to which the container should belong. - * @return ContainerInfo, null if there is no match found. 
- */ - public ContainerInfo getMatchingContainer(final long sizeRequired, - String owner, Pipeline pipeline) { - return getMatchingContainer(sizeRequired, owner, pipeline, Collections - .emptyList()); - } - - public ContainerInfo getMatchingContainer(final long sizeRequired, - String owner, Pipeline pipeline, List excludedContainers) { - NavigableSet containerIDs; - try { - synchronized (pipeline) { - //TODO: #CLUTIL See if lock is required here - containerIDs = - pipelineManager.getContainersInPipeline(pipeline.getId()); - - containerIDs = getContainersForOwner(containerIDs, owner); - if (containerIDs.size() < numContainerPerOwnerInPipeline) { - // TODO: #CLUTIL Maybe we can add selection logic inside synchronized - // as well - if (containerIDs.size() < numContainerPerOwnerInPipeline) { - ContainerInfo containerInfo = - containerStateManager.allocateContainer(pipelineManager, owner, - pipeline); - // Add to DB - addContainerToDB(containerInfo); - containerStateManager.updateLastUsedMap(pipeline.getId(), - containerInfo.containerID(), owner); - return containerInfo; - } - } - } - - containerIDs.removeAll(excludedContainers); - ContainerInfo containerInfo = - containerStateManager.getMatchingContainer(sizeRequired, owner, - pipeline.getId(), containerIDs); - if (containerInfo == null) { - synchronized (pipeline) { - containerInfo = - containerStateManager.allocateContainer(pipelineManager, owner, - pipeline); - // Add to DB - addContainerToDB(containerInfo); - } - } - containerStateManager.updateLastUsedMap(pipeline.getId(), - containerInfo.containerID(), owner); - // TODO: #CLUTIL cleanup entries in lastUsedMap - return containerInfo; - } catch (Exception e) { - LOG.warn("Container allocation failed for pipeline={} requiredSize={} {}", - pipeline, sizeRequired, e); - return null; - } - } - - /** - * Add newly allocated container to container DB. - * @param containerInfo - * @throws IOException - */ - private void addContainerToDB(ContainerInfo containerInfo) - throws IOException { - try { - final byte[] containerIDBytes = Longs.toByteArray( - containerInfo.getContainerID()); - containerStore.put(containerIDBytes, - containerInfo.getProtobuf().toByteArray()); - // Incrementing here, as allocateBlock to create a container calls - // getMatchingContainer() and finally calls this API to add newly - // created container to DB. - // Even allocateContainer calls this API to add newly allocated - // container to DB. So we need to increment metrics here. - scmContainerManagerMetrics.incNumSuccessfulCreateContainers(); - } catch (IOException ex) { - // If adding to containerStore fails, we should remove the container - // from in-memory map. - scmContainerManagerMetrics.incNumFailureCreateContainers(); - LOG.error("Add Container to DB failed for ContainerID #{}", - containerInfo.getContainerID()); - try { - containerStateManager.removeContainer(containerInfo.containerID()); - } catch (ContainerNotFoundException cnfe) { - // This should not happen, as we are removing after adding in to - // container state cmap. - } - throw ex; - } - } - - /** - * Returns the container ID's matching with specified owner. 
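For illustration: addContainerToDB follows a write-through-with-compensation pattern, where the container is already registered in the in-memory state and, if persisting it to the container store fails, the in-memory entry is removed again before the exception is rethrown. A reduced sketch of that pattern using in-memory maps in place of the LevelDB-backed store; the names are hypothetical.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

final class WriteThroughExample {

  private final Map<Long, byte[]> inMemory = new HashMap<>();
  private final Map<Long, byte[]> store = new HashMap<>();   // stands in for the container DB
  private boolean failPersist;                               // toggle to simulate a store error

  /** Registers in memory first, then persists; rolls the memory entry back on failure. */
  void addContainer(long id, byte[] record) throws IOException {
    inMemory.put(id, record);
    try {
      persist(id, record);
    } catch (IOException e) {
      inMemory.remove(id);    // keep memory and store consistent
      throw e;
    }
  }

  private void persist(long id, byte[] record) throws IOException {
    if (failPersist) {
      throw new IOException("simulated store failure");
    }
    store.put(id, record);
  }
}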
- * @param containerIDs - * @param owner - * @return NavigableSet - */ - private NavigableSet getContainersForOwner( - NavigableSet containerIDs, String owner) { - Iterator containerIDIterator = containerIDs.iterator(); - while (containerIDIterator.hasNext()) { - ContainerID cid = containerIDIterator.next(); - try { - if (!getContainer(cid).getOwner().equals(owner)) { - containerIDIterator.remove(); - } - } catch (ContainerNotFoundException e) { - LOG.error("Could not find container info for container id={} {}", cid, - e); - containerIDIterator.remove(); - } - } - return containerIDs; - } - - - - /** - * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. - * - * @param containerID - * @return Set - */ - public Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { - return containerStateManager.getContainerReplicas(containerID); - } - - /** - * Add a container Replica for given DataNode. - * - * @param containerID - * @param replica - */ - public void updateContainerReplica(final ContainerID containerID, - final ContainerReplica replica) throws ContainerNotFoundException { - containerStateManager.updateContainerReplica(containerID, replica); - } - - /** - * Remove a container Replica for given DataNode. - * - * @param containerID - * @param replica - * @return True of dataNode is removed successfully else false. - */ - public void removeContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException { - containerStateManager.removeContainerReplica(containerID, replica); - } - - /** - * Closes this stream and releases any system resources associated with it. - * If the stream is - * already closed then invoking this method has no effect. - *

- *

As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful - * attention. It is strongly advised to relinquish the underlying resources - * and to internally - * mark the {@code Closeable} as closed, prior to throwing the - * {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - if (containerStateManager != null) { - containerStateManager.close(); - } - if (containerStore != null) { - containerStore.close(); - } - - if (scmContainerManagerMetrics != null) { - this.scmContainerManagerMetrics.unRegister(); - } - } - - public void notifyContainerReportProcessing(boolean isFullReport, - boolean success) { - if (isFullReport) { - if (success) { - scmContainerManagerMetrics.incNumContainerReportsProcessedSuccessful(); - } else { - scmContainerManagerMetrics.incNumContainerReportsProcessedFailed(); - } - } else { - if (success) { - scmContainerManagerMetrics.incNumICRReportsProcessedSuccessful(); - } else { - scmContainerManagerMetrics.incNumICRReportsProcessedFailed(); - } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java deleted file mode 100644 index ee02bbd88f2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * This package has class that close a container. That is move a container from - * open state to close state. - */ -package org.apache.hadoop.hdds.scm.container.closer; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java deleted file mode 100644 index e9a2579a2f0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/SCMContainerManagerMetrics.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.metrics; - - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Class contains metrics related to ContainerManager. - */ -@Metrics(about = "SCM ContainerManager metrics", context = "ozone") -public final class SCMContainerManagerMetrics { - - private static final String SOURCE_NAME = - SCMContainerManagerMetrics.class.getSimpleName(); - - // These are the metrics which will be reset to zero after restart. - // These metrics capture count of number of successful/failure operations - // of create/delete containers in SCM. - - private @Metric MutableCounterLong numSuccessfulCreateContainers; - private @Metric MutableCounterLong numFailureCreateContainers; - private @Metric MutableCounterLong numSuccessfulDeleteContainers; - private @Metric MutableCounterLong numFailureDeleteContainers; - private @Metric MutableCounterLong numListContainerOps; - - - private @Metric MutableCounterLong numContainerReportsProcessedSuccessful; - private @Metric MutableCounterLong numContainerReportsProcessedFailed; - private @Metric MutableCounterLong numICRReportsProcessedSuccessful; - private @Metric MutableCounterLong numICRReportsProcessedFailed; - - private SCMContainerManagerMetrics() { - } - - /** - * Create and return metrics instance. - * @return SCMContainerManagerMetrics - */ - public static SCMContainerManagerMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "SCM ContainerManager Metrics", - new SCMContainerManagerMetrics()); - } - - /** - * Unregister metrics. 
- */ - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - public void incNumSuccessfulCreateContainers() { - this.numSuccessfulCreateContainers.incr(); - } - - public void incNumFailureCreateContainers() { - this.numFailureCreateContainers.incr(); - } - - public void incNumSuccessfulDeleteContainers() { - this.numSuccessfulDeleteContainers.incr(); - } - - public void incNumFailureDeleteContainers() { - this.numFailureDeleteContainers.incr(); - } - - public void incNumListContainersOps() { - this.numListContainerOps.incr(); - } - - public void incNumContainerReportsProcessedSuccessful() { - this.numContainerReportsProcessedSuccessful.incr(); - } - - public void incNumContainerReportsProcessedFailed() { - this.numContainerReportsProcessedFailed.incr(); - } - - public void incNumICRReportsProcessedSuccessful() { - this.numICRReportsProcessedSuccessful.incr(); - } - - public void incNumICRReportsProcessedFailed() { - this.numICRReportsProcessedFailed.incr(); - } - - public long getNumContainerReportsProcessedSuccessful() { - return numContainerReportsProcessedSuccessful.value(); - } - - public long getNumContainerReportsProcessedFailed() { - return numContainerReportsProcessedFailed.value(); - } - - public long getNumICRReportsProcessedSuccessful() { - return numICRReportsProcessedSuccessful.value(); - } - - public long getNumICRReportsProcessedFailed() { - return numICRReportsProcessedFailed.value(); - } - - public long getNumSuccessfulCreateContainers() { - return numSuccessfulCreateContainers.value(); - } - - public long getNumFailureCreateContainers() { - return numFailureCreateContainers.value(); - } - - public long getNumSuccessfulDeleteContainers() { - return numSuccessfulDeleteContainers.value(); - } - - public long getNumFailureDeleteContainers() { - return numFailureDeleteContainers.value(); - } - - public long getNumListContainersOps() { - return numListContainerOps.value(); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java deleted file mode 100644 index 3198de13e44..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.metrics; - -/* - * This package contains StorageContainerManager metric classes. 
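For illustration: the counters above are driven from SCMContainerManager#notifyContainerReportProcessing, which picks one of four counters depending on whether the report was full or incremental and whether processing succeeded. A dependency-free sketch of that routing with LongAdder counters instead of the Hadoop metrics2 types; illustrative names only.

import java.util.concurrent.atomic.LongAdder;

final class ReportProcessingCounters {

  private final LongAdder fullReportOk = new LongAdder();
  private final LongAdder fullReportFailed = new LongAdder();
  private final LongAdder incrementalOk = new LongAdder();
  private final LongAdder incrementalFailed = new LongAdder();

  /** Routes a processed report to one of four counters, full vs. incremental, success vs. failure. */
  void reportProcessed(boolean isFullReport, boolean success) {
    if (isFullReport) {
      (success ? fullReportOk : fullReportFailed).increment();
    } else {
      (success ? incrementalOk : incrementalFailed).increment();
    }
  }

  long fullReportOk()      { return fullReportOk.sum(); }
  long fullReportFailed()  { return fullReportFailed.sum(); }
  long incrementalOk()     { return incrementalOk.sum(); }
  long incrementalFailed() { return incrementalFailed.sum(); }
}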
- */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java deleted file mode 100644 index 3f8d05681bd..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; -/** - * This package contains routines to manage the container location and - * mapping inside SCM - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java deleted file mode 100644 index 18ec2c385b0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.Constructor; - -/** - * A factory to create container placement instance based on configuration - * property ozone.scm.container.placement.classname. 
- */ -public final class ContainerPlacementPolicyFactory { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerPlacementPolicyFactory.class); - - private static final Class - OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT = - SCMContainerPlacementRandom.class; - - private ContainerPlacementPolicyFactory() { - } - - public static ContainerPlacementPolicy getPolicy(Configuration conf, - final NodeManager nodeManager, NetworkTopology clusterMap, - final boolean fallback, SCMContainerPlacementMetrics metrics) - throws SCMException{ - final Class placementClass = conf - .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT, - ContainerPlacementPolicy.class); - Constructor constructor; - try { - constructor = placementClass.getDeclaredConstructor(NodeManager.class, - Configuration.class, NetworkTopology.class, boolean.class, - SCMContainerPlacementMetrics.class); - LOG.info("Create container placement policy of type " + - placementClass.getCanonicalName()); - } catch (NoSuchMethodException e) { - String msg = "Failed to find constructor(NodeManager, Configuration, " + - "NetworkTopology, boolean) for class " + - placementClass.getCanonicalName(); - LOG.error(msg); - throw new SCMException(msg, - SCMException.ResultCodes.FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY); - } - - try { - return constructor.newInstance(nodeManager, conf, clusterMap, fallback, - metrics); - } catch (Exception e) { - throw new RuntimeException("Failed to instantiate class " + - placementClass.getCanonicalName() + " for " + e.getMessage()); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java deleted file mode 100644 index 77cdd83f793..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java +++ /dev/null @@ -1,201 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.stream.Collectors; - -/** - * SCM CommonPolicy implements a set of invariants which are common - * for all container placement policies, acts as the repository of helper - * functions which are common to placement policies. - */ -public abstract class SCMCommonPolicy implements ContainerPlacementPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMCommonPolicy.class); - private final NodeManager nodeManager; - private final Random rand; - private final Configuration conf; - - /** - * Constructs SCM Common Policy Class. - * - * @param nodeManager NodeManager - * @param conf Configuration class. - */ - public SCMCommonPolicy(NodeManager nodeManager, Configuration conf) { - this.nodeManager = nodeManager; - this.rand = new Random(); - this.conf = conf; - } - - /** - * Return node manager. - * - * @return node manager - */ - public NodeManager getNodeManager() { - return nodeManager; - } - - /** - * Returns the Random Object. - * - * @return rand - */ - public Random getRand() { - return rand; - } - - /** - * Get Config. - * - * @return Configuration - */ - public Configuration getConf() { - return conf; - } - - /** - * Given the replication factor and size required, return set of datanodes - * that satisfy the nodes and size requirement. - *

- * Here are some invariants of container placement. - *

- * 1. We place containers only on healthy nodes. - * 2. We place containers on nodes with enough space for that container. - * 3. if a set of containers are requested, we either meet the required - * number of nodes or we fail that request. - * - * - * @param excludedNodes - datanodes with existing replicas - * @param favoredNodes - list of nodes preferred. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return list of datanodes chosen. - * @throws SCMException SCM exception. - */ - @Override - public List chooseDatanodes( - List excludedNodes, List favoredNodes, - int nodesRequired, final long sizeRequired) throws SCMException { - List healthyNodes = - nodeManager.getNodes(HddsProtos.NodeState.HEALTHY); - if (excludedNodes != null) { - healthyNodes.removeAll(excludedNodes); - } - String msg; - if (healthyNodes.size() == 0) { - msg = "No healthy node found to allocate container."; - LOG.error(msg); - throw new SCMException(msg, SCMException.ResultCodes - .FAILED_TO_FIND_HEALTHY_NODES); - } - - if (healthyNodes.size() < nodesRequired) { - msg = String.format("Not enough healthy nodes to allocate container. %d " - + " datanodes required. Found %d", - nodesRequired, healthyNodes.size()); - LOG.error(msg); - throw new SCMException(msg, - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - } - List healthyList = healthyNodes.stream().filter(d -> - hasEnoughSpace(d, sizeRequired)).collect(Collectors.toList()); - - if (healthyList.size() < nodesRequired) { - msg = String.format("Unable to find enough nodes that meet the space " + - "requirement of %d bytes in healthy node set." + - " Nodes required: %d Found: %d", - sizeRequired, nodesRequired, healthyList.size()); - LOG.error(msg); - throw new SCMException(msg, - SCMException.ResultCodes.FAILED_TO_FIND_NODES_WITH_SPACE); - } - return healthyList; - } - - /** - * Returns true if this node has enough space to meet our requirement. - * - * @param datanodeDetails DatanodeDetails - * @return true if we have enough space. - */ - boolean hasEnoughSpace(DatanodeDetails datanodeDetails, - long sizeRequired) { - SCMNodeMetric nodeMetric = nodeManager.getNodeStat(datanodeDetails); - return (nodeMetric != null) && (nodeMetric.get() != null) - && nodeMetric.get().getRemaining().hasResources(sizeRequired); - } - - /** - * This function invokes the derived classes chooseNode Function to build a - * list of nodes. Then it verifies that invoked policy was able to return - * expected number of nodes. - * - * @param nodesRequired - Nodes Required - * @param healthyNodes - List of Nodes in the result set. - * @return List of Datanodes that can be used for placement. - * @throws SCMException - */ - public List getResultSet( - int nodesRequired, List healthyNodes) - throws SCMException { - List results = new ArrayList<>(); - for (int x = 0; x < nodesRequired; x++) { - // invoke the choose function defined in the derived classes. - DatanodeDetails nodeId = chooseNode(healthyNodes); - if (nodeId != null) { - results.add(nodeId); - } - } - - if (results.size() < nodesRequired) { - LOG.error("Unable to find the required number of healthy nodes that " + - "meet the criteria. 
Required nodes: {}, Found nodes: {}", - nodesRequired, results.size()); - throw new SCMException("Unable to find required number of nodes.", - SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); - } - return results; - } - - /** - * Choose a datanode according to the policy, this function is implemented - * by the actual policy class. For example, PlacementCapacity or - * PlacementRandom. - * - * @param healthyNodes - Set of healthy nodes we can choose from. - * @return DatanodeDetails - */ - public abstract DatanodeDetails chooseNode( - List healthyNodes); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java deleted file mode 100644 index 85d281cf6dc..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Container placement policy that randomly choose datanodes with remaining - * space to satisfy the size constraints. - *

- * The Algorithm is as follows, Pick 2 random nodes from a given pool of nodes - * and then pick the node which lower utilization. This leads to a higher - * probability of nodes with lower utilization to be picked. - *

- * For those wondering why we choose two nodes randomly and choose the node - * with lower utilization. There are links to this original papers in - * HDFS-11564. - *

- * A brief summary -- We treat the nodes from a scale of lowest utilized to - * highest utilized, there are (s * ( s + 1)) / 2 possibilities to build - * distinct pairs of nodes. There are s - k pairs of nodes in which the rank - * k node is less than the couple. So probability of a picking a node is - * (2 * (s -k)) / (s * (s - 1)). - *
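As a worked example of the formula above: with s = 10 nodes, the least utilized node (k = 1) is picked with probability 2 * (10 - 1) / (10 * (10 - 1)) = 18 / 90 = 0.2, while the node ranked k = 9 is picked with probability 2 * (10 - 9) / 90, roughly 0.022, so the lightly utilized end of the ranking is about nine times more likely to be chosen than the heavily utilized end.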

- * In English, There is a much higher probability of picking less utilized nodes - * as compared to nodes with higher utilization since we pick 2 nodes and - * then pick the node with lower utilization. - *
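A small, self-contained simulation of this two-random-choices selection (a hypothetical demo, not part of the code being removed; like chooseNode below, it allows both random picks to land on the same node) reproduces the skew empirically:

    import java.util.Random;

    public final class TwoChoiceDemo {
      public static void main(String[] args) {
        final int s = 10;            // nodes ranked 0 (least utilized) .. s-1 (most utilized)
        final long trials = 1_000_000L;
        long[] picks = new long[s];
        Random rand = new Random();
        for (long i = 0; i < trials; i++) {
          int first = rand.nextInt(s);
          int second = rand.nextInt(s);
          picks[Math.min(first, second)]++;   // the less utilized of the two wins
        }
        for (int k = 0; k < s; k++) {
          System.out.printf("rank %d chosen %.3f of the time%n",
              k, (double) picks[k] / trials);
        }
      }
    }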

- * This avoids the issue of users adding new nodes into the cluster and HDFS - * sending all traffic to those nodes if we only use a capacity based - * allocation scheme. Unless those nodes are part of the set of the first 2 - * nodes then newer nodes will not be in the running to get the container. - *

- * This leads to an I/O pattern where the lower utilized nodes are favoured - * more than higher utilized nodes, but part of the I/O will still go to the - * older higher utilized nodes. - *

- * With this algorithm in place, our hope is that balancer tool needs to do - * little or no work and the cluster will achieve a balanced distribution - * over time. - */ -public final class SCMContainerPlacementCapacity extends SCMCommonPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMContainerPlacementCapacity.class); - - /** - * Constructs a Container Placement with considering only capacity. - * That is this policy tries to place containers based on node weight. - * - * @param nodeManager Node Manager - * @param conf Configuration - */ - public SCMContainerPlacementCapacity(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, - final boolean fallback, final SCMContainerPlacementMetrics metrics) { - super(nodeManager, conf); - } - - /** - * Called by SCM to choose datanodes. - * - * - * @param excludedNodes - list of the datanodes to exclude. - * @param favoredNodes - list of nodes preferred. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return List of datanodes. - * @throws SCMException SCMException - */ - @Override - public List chooseDatanodes( - List excludedNodes, List favoredNodes, - final int nodesRequired, final long sizeRequired) throws SCMException { - List healthyNodes = super.chooseDatanodes(excludedNodes, - favoredNodes, nodesRequired, sizeRequired); - if (healthyNodes.size() == nodesRequired) { - return healthyNodes; - } - return getResultSet(nodesRequired, healthyNodes); - } - - /** - * Find a node from the healthy list and return it after removing it from the - * list that we are operating on. - * - * @param healthyNodes - List of healthy nodes that meet the size - * requirement. - * @return DatanodeDetails that is chosen. - */ - @Override - public DatanodeDetails chooseNode(List healthyNodes) { - int firstNodeNdx = getRand().nextInt(healthyNodes.size()); - int secondNodeNdx = getRand().nextInt(healthyNodes.size()); - - DatanodeDetails datanodeDetails; - // There is a possibility that both numbers will be same. - // if that is so, we just return the node. - if (firstNodeNdx == secondNodeNdx) { - datanodeDetails = healthyNodes.get(firstNodeNdx); - } else { - DatanodeDetails firstNodeDetails = healthyNodes.get(firstNodeNdx); - DatanodeDetails secondNodeDetails = healthyNodes.get(secondNodeNdx); - SCMNodeMetric firstNodeMetric = - getNodeManager().getNodeStat(firstNodeDetails); - SCMNodeMetric secondNodeMetric = - getNodeManager().getNodeStat(secondNodeDetails); - datanodeDetails = firstNodeMetric.isGreater(secondNodeMetric.get()) - ? firstNodeDetails : secondNodeDetails; - } - healthyNodes.remove(datanodeDetails); - return datanodeDetails; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java deleted file mode 100644 index fb709b146be..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * This class is for maintaining Topology aware container placement statistics. - */ -@Metrics(about="SCM Container Placement Metrics", context = "ozone") -public class SCMContainerPlacementMetrics implements MetricsSource { - public static final String SOURCE_NAME = - SCMContainerPlacementMetrics.class.getSimpleName(); - private static final MetricsInfo RECORD_INFO = Interns.info(SOURCE_NAME, - "SCM Container Placement Metrics"); - private static MetricsRegistry registry; - - // total datanode allocation request count - @Metric private MutableCounterLong datanodeRequestCount; - // datanode allocation attempt count, including success, fallback and failed - @Metric private MutableCounterLong datanodeChooseAttemptCount; - // datanode successful allocation count - @Metric private MutableCounterLong datanodeChooseSuccessCount; - // datanode allocated with some allocation constrains compromised - @Metric private MutableCounterLong datanodeChooseFallbackCount; - - public SCMContainerPlacementMetrics() { - } - - public static SCMContainerPlacementMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - registry = new MetricsRegistry(RECORD_INFO); - return ms.register(SOURCE_NAME, "SCM Container Placement Metrics", - new SCMContainerPlacementMetrics()); - } - - public void incrDatanodeRequestCount(long count) { - this.datanodeRequestCount.incr(count); - } - - public void incrDatanodeChooseSuccessCount() { - this.datanodeChooseSuccessCount.incr(1); - } - - public void incrDatanodeChooseFallbackCount() { - this.datanodeChooseFallbackCount.incr(1); - } - - public void incrDatanodeChooseAttemptCount() { - this.datanodeChooseAttemptCount.incr(1); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - @VisibleForTesting - public long getDatanodeRequestCount() { - return this.datanodeRequestCount.value(); - } - - @VisibleForTesting - public long getDatanodeChooseSuccessCount() { - return this.datanodeChooseSuccessCount.value(); - } - - @VisibleForTesting - public long getDatanodeChooseFallbackCount() { - return this.datanodeChooseFallbackCount.value(); - } - - @VisibleForTesting - public long getDatanodeChooseAttemptCount() { - return 
this.datanodeChooseAttemptCount.value(); - } - - @Override - public void getMetrics(MetricsCollector collector, boolean all) { - registry.snapshot(collector.addRecord(registry.info().name()), true); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java deleted file mode 100644 index 6d49459b739..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetConstants; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.Node; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Container placement policy that choose datanodes with network topology - * awareness, together with the space to satisfy the size constraints. - *

- * This placement policy complies with the algorithm used in HDFS. With default - * 3 replica, two replica will be on the same rack, the third one will on a - * different rack. - *

- * This implementation applies to network topology like "/rack/node". Don't - * recommend to use this if the network topology has more layers. - *
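A sketch of how SCM might invoke this policy for a new 3-replica pipeline (illustrative only: nodeManager, conf and clusterMap are assumed to exist, the policy would normally be obtained through ContainerPlacementPolicyFactory, and the 5 GB size is an arbitrary example):

    SCMContainerPlacementRackAware policy = new SCMContainerPlacementRackAware(
        nodeManager, conf, clusterMap, false /* fallback off for open containers */,
        SCMContainerPlacementMetrics.create());
    // With no excluded or favored nodes, the algorithm described above places the
    // first two replicas on one rack and the third on a different rack.
    List<DatanodeDetails> targets = policy.chooseDatanodes(
        null, null, 3, 5L * 1024 * 1024 * 1024);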

- */ -public final class SCMContainerPlacementRackAware extends SCMCommonPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMContainerPlacementRackAware.class); - private final NetworkTopology networkTopology; - private boolean fallback; - private static final int RACK_LEVEL = 1; - private static final int MAX_RETRY= 3; - private final SCMContainerPlacementMetrics metrics; - - /** - * Constructs a Container Placement with rack awareness. - * - * @param nodeManager Node Manager - * @param conf Configuration - * @param fallback Whether reducing constrains to choose a data node when - * there is no node which satisfy all constrains. - * Basically, false for open container placement, and true - * for closed container placement. - */ - public SCMContainerPlacementRackAware(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, - final boolean fallback, final SCMContainerPlacementMetrics metrics) { - super(nodeManager, conf); - this.networkTopology = networkTopology; - this.fallback = fallback; - this.metrics = metrics; - } - - /** - * Called by SCM to choose datanodes. - * There are two scenarios, one is choosing all nodes for a new pipeline. - * Another is choosing node to meet replication requirement. - * - * - * @param excludedNodes - list of the datanodes to exclude. - * @param favoredNodes - list of nodes preferred. This is a hint to the - * allocator, whether the favored nodes will be used - * depends on whether the nodes meets the allocator's - * requirement. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return List of datanodes. - * @throws SCMException SCMException - */ - @Override - public List chooseDatanodes( - List excludedNodes, List favoredNodes, - int nodesRequired, final long sizeRequired) throws SCMException { - Preconditions.checkArgument(nodesRequired > 0); - metrics.incrDatanodeRequestCount(nodesRequired); - int datanodeCount = networkTopology.getNumOfLeafNode(NetConstants.ROOT); - int excludedNodesCount = excludedNodes == null ? 0 : excludedNodes.size(); - if (datanodeCount < nodesRequired + excludedNodesCount) { - throw new SCMException("No enough datanodes to choose. " + - "TotalNode = " + datanodeCount + - "RequiredNode = " + nodesRequired + - "ExcludedNode = " + excludedNodesCount, null); - } - List mutableFavoredNodes = favoredNodes; - // sanity check of favoredNodes - if (mutableFavoredNodes != null && excludedNodes != null) { - mutableFavoredNodes = new ArrayList<>(); - mutableFavoredNodes.addAll(favoredNodes); - mutableFavoredNodes.removeAll(excludedNodes); - } - int favoredNodeNum = mutableFavoredNodes == null? 0 : - mutableFavoredNodes.size(); - - List chosenNodes = new ArrayList<>(); - int favorIndex = 0; - if (excludedNodes == null || excludedNodes.isEmpty()) { - // choose all nodes for a new pipeline case - // choose first datanode from scope ROOT or from favoredNodes if not null - Node favoredNode = favoredNodeNum > favorIndex ? - mutableFavoredNodes.get(favorIndex) : null; - Node firstNode; - if (favoredNode != null) { - firstNode = favoredNode; - favorIndex++; - } else { - firstNode = chooseNode(null, null, sizeRequired); - } - chosenNodes.add(firstNode); - nodesRequired--; - if (nodesRequired == 0) { - return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); - } - - // choose second datanode on the same rack as first one - favoredNode = favoredNodeNum > favorIndex ? 
- mutableFavoredNodes.get(favorIndex) : null; - Node secondNode; - if (favoredNode != null && - networkTopology.isSameParent(firstNode, favoredNode)) { - secondNode = favoredNode; - favorIndex++; - } else { - secondNode = chooseNode(chosenNodes, firstNode, sizeRequired); - } - chosenNodes.add(secondNode); - nodesRequired--; - if (nodesRequired == 0) { - return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); - } - - // choose remaining datanodes on different rack as first and second - return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex, - nodesRequired, sizeRequired); - } else { - List mutableExcludedNodes = new ArrayList<>(); - mutableExcludedNodes.addAll(excludedNodes); - // choose node to meet replication requirement - // case 1: one excluded node, choose one on the same rack as the excluded - // node, choose others on different racks. - Node favoredNode; - if (excludedNodes.size() == 1) { - favoredNode = favoredNodeNum > favorIndex ? - mutableFavoredNodes.get(favorIndex) : null; - Node firstNode; - if (favoredNode != null && - networkTopology.isSameParent(excludedNodes.get(0), favoredNode)) { - firstNode = favoredNode; - favorIndex++; - } else { - firstNode = chooseNode(mutableExcludedNodes, excludedNodes.get(0), - sizeRequired); - } - chosenNodes.add(firstNode); - nodesRequired--; - if (nodesRequired == 0) { - return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); - } - // choose remaining nodes on different racks - return chooseNodes(null, chosenNodes, mutableFavoredNodes, favorIndex, - nodesRequired, sizeRequired); - } - // case 2: two or more excluded nodes, if these two nodes are - // in the same rack, then choose nodes on different racks, otherwise, - // choose one on the same rack as one of excluded nodes, remaining chosen - // are on different racks. - for(int i = 0; i < excludedNodesCount; i++) { - for (int j = i + 1; j < excludedNodesCount; j++) { - if (networkTopology.isSameParent( - excludedNodes.get(i), excludedNodes.get(j))) { - // choose remaining nodes on different racks - return chooseNodes(mutableExcludedNodes, chosenNodes, - mutableFavoredNodes, favorIndex, nodesRequired, sizeRequired); - } - } - } - // choose one data on the same rack with one excluded node - favoredNode = favoredNodeNum > favorIndex ? - mutableFavoredNodes.get(favorIndex) : null; - Node secondNode; - if (favoredNode != null && networkTopology.isSameParent( - mutableExcludedNodes.get(0), favoredNode)) { - secondNode = favoredNode; - favorIndex++; - } else { - secondNode = - chooseNode(chosenNodes, mutableExcludedNodes.get(0), sizeRequired); - } - chosenNodes.add(secondNode); - mutableExcludedNodes.add(secondNode); - nodesRequired--; - if (nodesRequired == 0) { - return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); - } - // choose remaining nodes on different racks - return chooseNodes(mutableExcludedNodes, chosenNodes, mutableFavoredNodes, - favorIndex, nodesRequired, sizeRequired); - } - } - - @Override - public DatanodeDetails chooseNode(List healthyNodes) { - return null; - } - - /** - * Choose a datanode which meets the requirements. If there is no node which - * meets all the requirements, there is fallback chosen process depending on - * whether fallback is allowed when this class is instantiated. - * - * - * @param excludedNodes - list of the datanodes to excluded. Can be null. - * @param affinityNode - the chosen nodes should be on the same rack as - * affinityNode. Can be null. 
- * @param sizeRequired - size required for the container or block. - * @return List of chosen datanodes. - * @throws SCMException SCMException - */ - private Node chooseNode(List excludedNodes, Node affinityNode, - long sizeRequired) throws SCMException { - int ancestorGen = RACK_LEVEL; - int maxRetry = MAX_RETRY; - List excludedNodesForCapacity = null; - boolean isFallbacked = false; - while(true) { - metrics.incrDatanodeChooseAttemptCount(); - Node node = networkTopology.chooseRandom(NetConstants.ROOT, - excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen); - if (node == null) { - // cannot find the node which meets all constrains - LOG.warn("Failed to find the datanode for container. excludedNodes:" + - (excludedNodes == null ? "" : excludedNodes.toString()) + - ", affinityNode:" + - (affinityNode == null ? "" : affinityNode.getNetworkFullPath())); - if (fallback) { - isFallbacked = true; - // fallback, don't consider the affinity node - if (affinityNode != null) { - affinityNode = null; - continue; - } - // fallback, don't consider cross rack - if (ancestorGen == RACK_LEVEL) { - ancestorGen--; - continue; - } - } - // there is no constrains to reduce or fallback is true - throw new SCMException("No satisfied datanode to meet the" + - " excludedNodes and affinityNode constrains.", null); - } - if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Datanode {} is chosen for container. Required size is {}", - node.toString(), sizeRequired); - } - metrics.incrDatanodeChooseSuccessCount(); - if (isFallbacked) { - metrics.incrDatanodeChooseFallbackCount(); - } - return node; - } else { - maxRetry--; - if (maxRetry == 0) { - // avoid the infinite loop - String errMsg = "No satisfied datanode to meet the space constrains. " - + " sizeRequired: " + sizeRequired; - LOG.info(errMsg); - throw new SCMException(errMsg, null); - } - if (excludedNodesForCapacity == null) { - excludedNodesForCapacity = new ArrayList<>(); - } - excludedNodesForCapacity.add(node.getNetworkFullPath()); - } - } - } - - /** - * Choose a batch of datanodes on different rack than excludedNodes or - * chosenNodes. - * - * - * @param excludedNodes - list of the datanodes to excluded. Can be null. - * @param chosenNodes - list of nodes already chosen. These nodes should also - * be excluded. Cannot be null. - * @param favoredNodes - list of favoredNodes. It's a hint. Whether the nodes - * are chosen depends on whether they meet the constrains. - * Can be null. - * @param favorIndex - the node index of favoredNodes which is not chosen yet. - * @param sizeRequired - size required for the container or block. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return List of chosen datanodes. - * @throws SCMException SCMException - */ - private List chooseNodes(List excludedNodes, - List chosenNodes, List favoredNodes, - int favorIndex, int nodesRequired, long sizeRequired) - throws SCMException { - Preconditions.checkArgument(chosenNodes != null); - List excludedNodeList = excludedNodes != null ? - excludedNodes : chosenNodes; - int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size(); - while(true) { - Node favoredNode = favoredNodeNum > favorIndex ? 
- favoredNodes.get(favorIndex) : null; - Node chosenNode; - if (favoredNode != null && networkTopology.isSameParent( - excludedNodeList.get(excludedNodeList.size() - 1), favoredNode)) { - chosenNode = favoredNode; - favorIndex++; - } else { - chosenNode = chooseNode(excludedNodeList, null, sizeRequired); - } - excludedNodeList.add(chosenNode); - if (excludedNodeList != chosenNodes) { - chosenNodes.add(chosenNode); - } - nodesRequired--; - if (nodesRequired == 0) { - return Arrays.asList(chosenNodes.toArray(new DatanodeDetails[0])); - } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java deleted file mode 100644 index 6b1a5c8c6cb..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Container placement policy that randomly chooses healthy datanodes. - * This is very similar to current HDFS placement. That is we - * just randomly place containers without any considerations of utilization. - *

- * That means we rely on balancer to achieve even distribution of data. - * Balancer will need to support containers as a feature before this class - * can be practically used. - */ -public final class SCMContainerPlacementRandom extends SCMCommonPolicy - implements ContainerPlacementPolicy { - @VisibleForTesting - static final Logger LOG = - LoggerFactory.getLogger(SCMContainerPlacementRandom.class); - - /** - * Construct a random Block Placement policy. - * - * @param nodeManager nodeManager - * @param conf Config - */ - public SCMContainerPlacementRandom(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, - final boolean fallback, final SCMContainerPlacementMetrics metrics) { - super(nodeManager, conf); - } - - /** - * Choose datanodes called by the SCM to choose the datanode. - * - * - * @param excludedNodes - list of the datanodes to exclude. - * @param favoredNodes - list of nodes preferred. - * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. - * @return List of Datanodes. - * @throws SCMException SCMException - */ - @Override - public List chooseDatanodes( - List excludedNodes, List favoredNodes, - final int nodesRequired, final long sizeRequired) throws SCMException { - List healthyNodes = - super.chooseDatanodes(excludedNodes, favoredNodes, nodesRequired, - sizeRequired); - - if (healthyNodes.size() == nodesRequired) { - return healthyNodes; - } - return getResultSet(nodesRequired, healthyNodes); - } - - /** - * Just chose a node randomly and remove it from the set of nodes we can - * chose from. - * - * @param healthyNodes - all healthy datanodes. - * @return one randomly chosen datanode that from two randomly chosen datanode - */ - @Override - public DatanodeDetails chooseNode(final List healthyNodes) { - DatanodeDetails selectedNode = - healthyNodes.get(getRand().nextInt(healthyNodes.size())); - healthyNodes.remove(selectedNode); - return selectedNode; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java deleted file mode 100644 index 1cb810dd0e5..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; -// Various placement algorithms. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java deleted file mode 100644 index b8e89987638..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import java.io.IOException; - -/** - * This class represents the SCM container stat. - */ -public class ContainerStat { - /** - * The maximum container size. - */ - @JsonProperty("Size") - private LongMetric size; - - /** - * The number of bytes used by the container. - */ - @JsonProperty("Used") - private LongMetric used; - - /** - * The number of keys in the container. - */ - @JsonProperty("KeyCount") - private LongMetric keyCount; - - /** - * The number of bytes read from the container. - */ - @JsonProperty("ReadBytes") - private LongMetric readBytes; - - /** - * The number of bytes write into the container. - */ - @JsonProperty("WriteBytes") - private LongMetric writeBytes; - - /** - * The number of times the container is read. - */ - @JsonProperty("ReadCount") - private LongMetric readCount; - - /** - * The number of times the container is written into. 
- */ - @JsonProperty("WriteCount") - private LongMetric writeCount; - - public ContainerStat() { - this(0L, 0L, 0L, 0L, 0L, 0L, 0L); - } - - public ContainerStat(long size, long used, long keyCount, long readBytes, - long writeBytes, long readCount, long writeCount) { - Preconditions.checkArgument(size >= 0, - "Container size cannot be " + "negative."); - Preconditions.checkArgument(used >= 0, - "Used space cannot be " + "negative."); - Preconditions.checkArgument(keyCount >= 0, - "Key count cannot be " + "negative"); - Preconditions.checkArgument(readBytes >= 0, - "Read bytes read cannot be " + "negative."); - Preconditions.checkArgument(readBytes >= 0, - "Write bytes cannot be " + "negative."); - Preconditions.checkArgument(readCount >= 0, - "Read count cannot be " + "negative."); - Preconditions.checkArgument(writeCount >= 0, - "Write count cannot be " + "negative"); - - this.size = new LongMetric(size); - this.used = new LongMetric(used); - this.keyCount = new LongMetric(keyCount); - this.readBytes = new LongMetric(readBytes); - this.writeBytes = new LongMetric(writeBytes); - this.readCount = new LongMetric(readCount); - this.writeCount = new LongMetric(writeCount); - } - - public LongMetric getSize() { - return size; - } - - public LongMetric getUsed() { - return used; - } - - public LongMetric getKeyCount() { - return keyCount; - } - - public LongMetric getReadBytes() { - return readBytes; - } - - public LongMetric getWriteBytes() { - return writeBytes; - } - - public LongMetric getReadCount() { - return readCount; - } - - public LongMetric getWriteCount() { - return writeCount; - } - - public void add(ContainerStat stat) { - if (stat == null) { - return; - } - - this.size.add(stat.getSize().get()); - this.used.add(stat.getUsed().get()); - this.keyCount.add(stat.getKeyCount().get()); - this.readBytes.add(stat.getReadBytes().get()); - this.writeBytes.add(stat.getWriteBytes().get()); - this.readCount.add(stat.getReadCount().get()); - this.writeCount.add(stat.getWriteCount().get()); - } - - public void subtract(ContainerStat stat) { - if (stat == null) { - return; - } - - this.size.subtract(stat.getSize().get()); - this.used.subtract(stat.getUsed().get()); - this.keyCount.subtract(stat.getKeyCount().get()); - this.readBytes.subtract(stat.getReadBytes().get()); - this.writeBytes.subtract(stat.getWriteBytes().get()); - this.readCount.subtract(stat.getReadCount().get()); - this.writeCount.subtract(stat.getWriteCount().get()); - } - - public String toJsonString() { - try { - return JsonUtils.toJsonString(this); - } catch (IOException ignored) { - return null; - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java deleted file mode 100644 index 530594258ad..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -/** - * DatanodeMetric acts as the basis for all the metric that is used in - * comparing 2 datanodes. - */ -public interface DatanodeMetric { - - /** - * Some syntactic sugar over Comparable interface. This makes code easier to - * read. - * - * @param o - Other Object - * @return - True if *this* object is greater than argument. - */ - boolean isGreater(T o); - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is Lesser than argument. - */ - boolean isLess(T o); - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - */ - boolean isEqual(T o); - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - boolean hasResources(S resourceNeeded) throws SCMException; - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - T get(); - - /** - * Sets the value of this metric. - * - * @param value - value of the metric. - */ - void set(T value); - - /** - * Adds a value of to the base. - * @param value - value - */ - void add(T value); - - /** - * subtract a value. - * @param value value - */ - void subtract(T value); - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java deleted file mode 100644 index e1c8f87d6a0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; - -/** - * An helper class for all metrics based on Longs. - */ -@JsonAutoDetect(fieldVisibility = Visibility.ANY) -public class LongMetric implements DatanodeMetric { - private Long value; - - /** - * Constructs a long Metric. - * - * @param value Value for this metric. - */ - public LongMetric(Long value) { - this.value = value; - } - - /** - * Some syntactic sugar over Comparable interface. This makes code easier to - * read. - * - * @param o - Other Object - * @return - True if *this* object is greater than argument. - */ - @Override - public boolean isGreater(Long o) { - return compareTo(o) > 0; - } - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is Lesser than argument. - */ - @Override - public boolean isLess(Long o) { - return compareTo(o) < 0; - } - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - */ - @Override - public boolean isEqual(Long o) { - return compareTo(o) == 0; - } - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - @Override - public boolean hasResources(Long resourceNeeded) { - return isGreater(resourceNeeded); - } - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - @Override - public Long get() { - return this.value; - } - - /** - * Sets the value of this metric. - * - * @param setValue - value of the metric. - */ - @Override - public void set(Long setValue) { - this.value = setValue; - - } - - /** - * Adds a value of to the base. - * - * @param addValue - value - */ - @Override - public void add(Long addValue) { - this.value += addValue; - } - - /** - * subtract a value. - * - * @param subValue value - */ - @Override - public void subtract(Long subValue) { - this.value -= subValue; - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. 
- */ - public int compareTo(Long o) { - return Long.compare(this.value, o); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - LongMetric that = (LongMetric) o; - - return value != null ? value.equals(that.value) : that.value == null; - } - - @Override - public int hashCode() { - return value != null ? value.hashCode() : 0; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java deleted file mode 100644 index d6857d395cf..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Interface that defines Node Stats. - */ -interface NodeStat { - /** - * Get capacity of the node. - * @return capacity of the node. - */ - LongMetric getCapacity(); - - /** - * Get the used space of the node. - * @return the used space of the node. - */ - LongMetric getScmUsed(); - - /** - * Get the remaining space of the node. - * @return the remaining space of the node. - */ - LongMetric getRemaining(); - - /** - * Set the total/used/remaining space. - * @param capacity - total space. - * @param used - used space. - * @param remain - remaining space. - */ - @VisibleForTesting - void set(long capacity, long used, long remain); - - /** - * Adding of the stat. - * @param stat - stat to be added. - * @return updated node stat. - */ - NodeStat add(NodeStat stat); - - /** - * Subtract of the stat. - * @param stat - stat to be subtracted. - * @return updated nodestat. - */ - NodeStat subtract(NodeStat stat); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java deleted file mode 100644 index e4dd9aa37ef..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -/** - * This class is for maintaining StorageContainerManager statistics. - */ -@Metrics(about="Storage Container Manager Metrics", context="dfs") -public class SCMMetrics { - public static final String SOURCE_NAME = - SCMMetrics.class.getSimpleName(); - - /** - * Container stat metrics, the meaning of following metrics - * can be found in {@link ContainerStat}. - */ - @Metric private MutableGaugeLong lastContainerReportSize; - @Metric private MutableGaugeLong lastContainerReportUsed; - @Metric private MutableGaugeLong lastContainerReportKeyCount; - @Metric private MutableGaugeLong lastContainerReportReadBytes; - @Metric private MutableGaugeLong lastContainerReportWriteBytes; - @Metric private MutableGaugeLong lastContainerReportReadCount; - @Metric private MutableGaugeLong lastContainerReportWriteCount; - - @Metric private MutableCounterLong containerReportSize; - @Metric private MutableCounterLong containerReportUsed; - @Metric private MutableCounterLong containerReportKeyCount; - @Metric private MutableCounterLong containerReportReadBytes; - @Metric private MutableCounterLong containerReportWriteBytes; - @Metric private MutableCounterLong containerReportReadCount; - @Metric private MutableCounterLong containerReportWriteCount; - - public SCMMetrics() { - } - - public static SCMMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "Storage Container Manager Metrics", - new SCMMetrics()); - } - - public void setLastContainerReportSize(long size) { - this.lastContainerReportSize.set(size); - } - - public void setLastContainerReportUsed(long used) { - this.lastContainerReportUsed.set(used); - } - - public void setLastContainerReportKeyCount(long keyCount) { - this.lastContainerReportKeyCount.set(keyCount); - } - - public void setLastContainerReportReadBytes(long readBytes) { - this.lastContainerReportReadBytes.set(readBytes); - } - - public void setLastContainerReportWriteBytes(long writeBytes) { - this.lastContainerReportWriteBytes.set(writeBytes); - } - - public void setLastContainerReportReadCount(long readCount) { - this.lastContainerReportReadCount.set(readCount); - } - - public void setLastContainerReportWriteCount(long writeCount) { - this.lastContainerReportWriteCount.set(writeCount); - } - - public void incrContainerReportSize(long size) { - this.containerReportSize.incr(size); - } - - public void incrContainerReportUsed(long used) { - this.containerReportUsed.incr(used); - } - - public void incrContainerReportKeyCount(long keyCount) { - 
this.containerReportKeyCount.incr(keyCount); - } - - public void incrContainerReportReadBytes(long readBytes) { - this.containerReportReadBytes.incr(readBytes); - } - - public void incrContainerReportWriteBytes(long writeBytes) { - this.containerReportWriteBytes.incr(writeBytes); - } - - public void incrContainerReportReadCount(long readCount) { - this.containerReportReadCount.incr(readCount); - } - - public void incrContainerReportWriteCount(long writeCount) { - this.containerReportWriteCount.incr(writeCount); - } - - public void setLastContainerStat(ContainerStat newStat) { - this.lastContainerReportSize.set(newStat.getSize().get()); - this.lastContainerReportUsed.set(newStat.getUsed().get()); - this.lastContainerReportKeyCount.set(newStat.getKeyCount().get()); - this.lastContainerReportReadBytes.set(newStat.getReadBytes().get()); - this.lastContainerReportWriteBytes.set(newStat.getWriteBytes().get()); - this.lastContainerReportReadCount.set(newStat.getReadCount().get()); - this.lastContainerReportWriteCount.set(newStat.getWriteCount().get()); - } - - public void incrContainerStat(ContainerStat deltaStat) { - this.containerReportSize.incr(deltaStat.getSize().get()); - this.containerReportUsed.incr(deltaStat.getUsed().get()); - this.containerReportKeyCount.incr(deltaStat.getKeyCount().get()); - this.containerReportReadBytes.incr(deltaStat.getReadBytes().get()); - this.containerReportWriteBytes.incr(deltaStat.getWriteBytes().get()); - this.containerReportReadCount.incr(deltaStat.getReadCount().get()); - this.containerReportWriteCount.incr(deltaStat.getWriteCount().get()); - } - - public void decrContainerStat(ContainerStat deltaStat) { - this.containerReportSize.incr(-1 * deltaStat.getSize().get()); - this.containerReportUsed.incr(-1 * deltaStat.getUsed().get()); - this.containerReportKeyCount.incr(-1 * deltaStat.getKeyCount().get()); - this.containerReportReadBytes.incr(-1 * deltaStat.getReadBytes().get()); - this.containerReportWriteBytes.incr(-1 * deltaStat.getWriteBytes().get()); - this.containerReportReadCount.incr(-1 * deltaStat.getReadCount().get()); - this.containerReportWriteCount.incr(-1 * deltaStat.getWriteCount().get()); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java deleted file mode 100644 index a886084b984..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * SCM Node Metric that is used in the placement classes. - */ -public class SCMNodeMetric implements DatanodeMetric { - private SCMNodeStat stat; - - /** - * Constructs an SCMNode Metric. - * - * @param stat - SCMNodeStat. - */ - public SCMNodeMetric(SCMNodeStat stat) { - this.stat = stat; - } - - /** - * Set the capacity, used and remaining space on a datanode. - * - * @param capacity in bytes - * @param used in bytes - * @param remaining in bytes - */ - @VisibleForTesting - public SCMNodeMetric(long capacity, long used, long remaining) { - this.stat = new SCMNodeStat(); - this.stat.set(capacity, used, remaining); - } - - /** - * - * @param o - Other Object - * @return - True if *this* object is greater than argument. - */ - @Override - public boolean isGreater(SCMNodeStat o) { - Preconditions.checkNotNull(this.stat, "Argument cannot be null"); - Preconditions.checkNotNull(o, "Argument cannot be null"); - - // if zero, replace with 1 for the division to work. - long thisDenominator = (this.stat.getCapacity().get() == 0) - ? 1 : this.stat.getCapacity().get(); - long otherDenominator = (o.getCapacity().get() == 0) - ? 1 : o.getCapacity().get(); - - float thisNodeWeight = - stat.getScmUsed().get() / (float) thisDenominator; - - float oNodeWeight = - o.getScmUsed().get() / (float) otherDenominator; - - if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) { - return thisNodeWeight > oNodeWeight; - } - // if these nodes are have similar weight then return the node with more - // free space as the greater node. - return stat.getRemaining().isGreater(o.getRemaining().get()); - } - - /** - * Inverse of isGreater. - * - * @param o - other object. - * @return True if *this* object is Lesser than argument. - */ - @Override - public boolean isLess(SCMNodeStat o) { - Preconditions.checkNotNull(o, "Argument cannot be null"); - - // if zero, replace with 1 for the division to work. - long thisDenominator = (this.stat.getCapacity().get() == 0) - ? 1 : this.stat.getCapacity().get(); - long otherDenominator = (o.getCapacity().get() == 0) - ? 1 : o.getCapacity().get(); - - float thisNodeWeight = - stat.getScmUsed().get() / (float) thisDenominator; - - float oNodeWeight = - o.getScmUsed().get() / (float) otherDenominator; - - if (Math.abs(thisNodeWeight - oNodeWeight) > 0.000001) { - return thisNodeWeight < oNodeWeight; - } - - // if these nodes are have similar weight then return the node with less - // free space as the lesser node. - return stat.getRemaining().isLess(o.getRemaining().get()); - } - - /** - * Returns true if the object has same values. Because of issues with - * equals, and loss of type information this interface supports isEqual. - * - * @param o object to compare. - * @return True, if the values match. - * TODO : Consider if it makes sense to add remaining to this equation. 
- */ - @Override - public boolean isEqual(SCMNodeStat o) { - float thisNodeWeight = stat.getScmUsed().get() / (float) - stat.getCapacity().get(); - float oNodeWeight = o.getScmUsed().get() / (float) o.getCapacity().get(); - return Math.abs(thisNodeWeight - oNodeWeight) < 0.000001; - } - - /** - * A resourceCheck, defined by resourceNeeded. - * For example, S could be bytes required - * and DatanodeMetric can reply by saying it can be met or not. - * - * @param resourceNeeded - ResourceNeeded in its own metric. - * @return boolean, True if this resource requirement can be met. - */ - @Override - public boolean hasResources(Long resourceNeeded) { - return false; - } - - /** - * Returns the metric. - * - * @return T, the object that represents this metric. - */ - @Override - public SCMNodeStat get() { - return stat; - } - - /** - * Sets the value of this metric. - * - * @param value - value of the metric. - */ - @Override - public void set(SCMNodeStat value) { - stat.set(value.getCapacity().get(), value.getScmUsed().get(), - value.getRemaining().get()); - } - - /** - * Adds a value of to the base. - * - * @param value - value - */ - @Override - public void add(SCMNodeStat value) { - stat.add(value); - } - - /** - * subtract a value. - * - * @param value value - */ - @Override - public void subtract(SCMNodeStat value) { - stat.subtract(value); - } - - /** - * Compares this object with the specified object for order. Returns a - * negative integer, zero, or a positive integer as this object is less - * than, equal to, or greater than the specified object. - * - * @param o the object to be compared. - * @return a negative integer, zero, or a positive integer as this object is - * less than, equal to, or greater than the specified object. - * @throws NullPointerException if the specified object is null - * @throws ClassCastException if the specified object's type prevents it - * from being compared to this object. - */ - //@Override - public int compareTo(SCMNodeStat o) { - if (isEqual(o)) { - return 0; - } - if (isGreater(o)) { - return 1; - } else { - return -1; - } - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - - SCMNodeMetric that = (SCMNodeMetric) o; - - return stat != null ? stat.equals(that.stat) : that.stat == null; - } - - @Override - public int hashCode() { - return stat != null ? stat.hashCode() : 0; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java deleted file mode 100644 index 962bbb464ec..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * This class represents the SCM node stat. - */ -public class SCMNodeStat implements NodeStat { - private LongMetric capacity; - private LongMetric scmUsed; - private LongMetric remaining; - - public SCMNodeStat() { - this(0L, 0L, 0L); - } - - public SCMNodeStat(SCMNodeStat other) { - this(other.capacity.get(), other.scmUsed.get(), other.remaining.get()); - } - - public SCMNodeStat(long capacity, long used, long remaining) { - Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " + - "negative."); - Preconditions.checkArgument(used >= 0, "used space cannot be " + - "negative."); - Preconditions.checkArgument(remaining >= 0, "remaining cannot be " + - "negative"); - this.capacity = new LongMetric(capacity); - this.scmUsed = new LongMetric(used); - this.remaining = new LongMetric(remaining); - } - - /** - * @return the total configured capacity of the node. - */ - @Override - public LongMetric getCapacity() { - return capacity; - } - - /** - * @return the total SCM used space on the node. - */ - @Override - public LongMetric getScmUsed() { - return scmUsed; - } - - /** - * @return the total remaining space available on the node. - */ - @Override - public LongMetric getRemaining() { - return remaining; - } - - /** - * Set the capacity, used and remaining space on a datanode. - * - * @param newCapacity in bytes - * @param newUsed in bytes - * @param newRemaining in bytes - */ - @Override - @VisibleForTesting - public void set(long newCapacity, long newUsed, long newRemaining) { - Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " + - "negative."); - Preconditions.checkArgument(newUsed >= 0, "used space cannot be " + - "negative."); - Preconditions.checkArgument(newRemaining >= 0, "remaining cannot be " + - "negative"); - - this.capacity = new LongMetric(newCapacity); - this.scmUsed = new LongMetric(newUsed); - this.remaining = new LongMetric(newRemaining); - } - - /** - * Adds a new nodestat to existing values of the node. - * - * @param stat Nodestat. - * @return SCMNodeStat - */ - @Override - public SCMNodeStat add(NodeStat stat) { - this.capacity.set(this.getCapacity().get() + stat.getCapacity().get()); - this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get()); - this.remaining.set(this.getRemaining().get() + stat.getRemaining().get()); - return this; - } - - /** - * Subtracts the stat values from the existing NodeStat. - * - * @param stat SCMNodeStat. 
- * @return Modified SCMNodeStat - */ - @Override - public SCMNodeStat subtract(NodeStat stat) { - this.capacity.set(this.getCapacity().get() - stat.getCapacity().get()); - this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get()); - this.remaining.set(this.getRemaining().get() - stat.getRemaining().get()); - return this; - } - - @Override - public boolean equals(Object to) { - if (to instanceof SCMNodeStat) { - SCMNodeStat tempStat = (SCMNodeStat) to; - return capacity.isEqual(tempStat.getCapacity().get()) && - scmUsed.isEqual(tempStat.getScmUsed().get()) && - remaining.isEqual(tempStat.getRemaining().get()); - } - return false; - } - - @Override - public int hashCode() { - return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java deleted file mode 100644 index 4a81d692168..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.metrics; - -// Various metrics supported by Datanode and used by SCM in the placement -// strategy. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java deleted file mode 100644 index dc54d9bd912..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.placement; -// Classes related to container placement. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java deleted file mode 100644 index 92a30d5c265..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatus.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -import javax.management.ObjectName; -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.metrics2.util.MBeans; - - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.utils.Scheduler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Event listener to track the current state of replication. - */ -public class ReplicationActivityStatus implements - ReplicationActivityStatusMXBean, Closeable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationActivityStatus.class); - - private Scheduler scheduler; - private AtomicBoolean replicationEnabled = new AtomicBoolean(); - private ObjectName jmxObjectName; - - public ReplicationActivityStatus(Scheduler scheduler) { - this.scheduler = scheduler; - } - - @Override - public boolean isReplicationEnabled() { - return replicationEnabled.get(); - } - - @VisibleForTesting - @Override - public void setReplicationEnabled(boolean enabled) { - replicationEnabled.set(enabled); - } - - @VisibleForTesting - public void enableReplication() { - replicationEnabled.set(true); - } - - - public void start() { - try { - this.jmxObjectName = - MBeans.register( - "StorageContainerManager", "ReplicationActivityStatus", this); - } catch (Exception ex) { - LOG.error("JMX bean for ReplicationActivityStatus can't be registered", - ex); - } - } - - @Override - public void close() throws IOException { - if (this.jmxObjectName != null) { - MBeans.unregister(jmxObjectName); - } - } - - /** - * Waits for - * {@link HddsConfigKeys#HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT} and set - * replicationEnabled to start replication monitor thread. - */ - public void fireReplicationStart(boolean safeModeStatus, - long waitTime) { - if (!safeModeStatus) { - scheduler.schedule(() -> { - setReplicationEnabled(true); - LOG.info("Replication Timer sleep for {} ms completed. Enable " - + "Replication", waitTime); - }, waitTime, TimeUnit.MILLISECONDS); - } - } - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java deleted file mode 100644 index 164bd247efb..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationActivityStatusMXBean.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container.replication; - -/** - * JMX interface to monitor replication status. - */ -public interface ReplicationActivityStatusMXBean { - - boolean isReplicationEnabled(); - - void setReplicationEnabled(boolean enabled); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 934b01e6231..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container.replication; - -/** - * HDDS (Closed) Container replicaton related classes. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java deleted file mode 100644 index af44a8a043e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- * - */ -package org.apache.hadoop.hdds.scm.container.states; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeSet; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - -/** - * Each Attribute that we manage for a container is maintained as a map. - *
- * Currently we manage the following attributes for a container. - *
- * 1. StateMap - LifeCycleState -> Set of ContainerIDs - * 2. TypeMap - ReplicationType -> Set of ContainerIDs - * 3. OwnerMap - OwnerNames -> Set of ContainerIDs - * 4. FactorMap - ReplicationFactor -> Set of ContainerIDs - *
- * This means that for a cluster size of 750 PB we will have around 150 - * million containers, if we assume a 5 GB average container size. - *
- * That implies that these maps will take around 2/3 GB of RAM, which will be - * pinned down in the SCM. This is deemed acceptable since we can tune the - * container size -- say we make it a 10 GB average -- and then we can deal with a - * cluster size of 1.5 exabytes with the same metadata in the SCM's memory. - *
- * Please note: **This class is not thread safe**. This used to be thread safe, - * while bench marking we found that ContainerStateMap would be taking 5 - * locks for a single container insert. If we remove locks in this class, - * then we are able to perform about 540K operations per second, with the - * locks in this class it goes down to 246K operations per second. Hence we - * are going to rely on ContainerStateMap locks to maintain consistency of - * data in these classes too, since ContainerAttribute is only used by - * ContainerStateMap class. - */ -public class ContainerAttribute { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerAttribute.class); - - private final Map> attributeMap; - private static final NavigableSet EMPTY_SET = Collections - .unmodifiableNavigableSet(new TreeSet<>()); - - /** - * Creates a Container Attribute map from an existing Map. - * - * @param attributeMap - AttributeMap - */ - public ContainerAttribute(Map> attributeMap) { - this.attributeMap = attributeMap; - } - - /** - * Create an empty Container Attribute map. - */ - public ContainerAttribute() { - this.attributeMap = new HashMap<>(); - } - - /** - * Insert or update the value in the Attribute map. - * - * @param key - The key to the set where the ContainerID should exist. - * @param value - Actual Container ID. - * @throws SCMException - on Error - */ - public boolean insert(T key, ContainerID value) throws SCMException { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(value); - - if (attributeMap.containsKey(key)) { - if (attributeMap.get(key).add(value)) { - return true; //we inserted the value as it doesn’t exist in the set. - } else { // Failure indicates that this ContainerID exists in the Set - if (!attributeMap.get(key).remove(value)) { - LOG.error("Failure to remove the object from the Map.Key:{}, " + - "ContainerID: {}", key, value); - throw new SCMException("Failure to remove the object from the Map", - FAILED_TO_CHANGE_CONTAINER_STATE); - } - attributeMap.get(key).add(value); - return true; - } - } else { - // This key does not exist, we need to allocate this key in the map. - // TODO: Replace TreeSet with FoldedTreeSet from HDFS Utils. - // Skipping for now, since FoldedTreeSet does not have implementations - // for headSet and TailSet. We need those calls. - this.attributeMap.put(key, new TreeSet<>()); - // This should not fail, we just allocated this object. - attributeMap.get(key).add(value); - return true; - } - } - - /** - * Returns true if have this bucket in the attribute map. - * - * @param key - Key to lookup - * @return true if we have the key - */ - public boolean hasKey(T key) { - Preconditions.checkNotNull(key); - return this.attributeMap.containsKey(key); - } - - /** - * Returns true if we have the key and the containerID in the bucket. - * - * @param key - Key to the bucket - * @param id - container ID that we want to lookup - * @return true or false - */ - public boolean hasContainerID(T key, ContainerID id) { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(id); - - return this.attributeMap.containsKey(key) && - this.attributeMap.get(key).contains(id); - } - - /** - * Returns true if we have the key and the containerID in the bucket. - * - * @param key - Key to the bucket - * @param id - container ID that we want to lookup - * @return true or false - */ - public boolean hasContainerID(T key, int id) { - return hasContainerID(key, ContainerID.valueof(id)); - } - - /** - * Clears all entries for this key type. 
- * - * @param key - Key that identifies the Set. - */ - public void clearSet(T key) { - Preconditions.checkNotNull(key); - - if (attributeMap.containsKey(key)) { - attributeMap.get(key).clear(); - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("key: {} does not exist in the attributeMap", key); - } - } - } - - /** - * Removes a container ID from the set pointed by the key. - * - * @param key - key to identify the set. - * @param value - Container ID - */ - public boolean remove(T key, ContainerID value) { - Preconditions.checkNotNull(key); - Preconditions.checkNotNull(value); - - if (attributeMap.containsKey(key)) { - if (!attributeMap.get(key).remove(value)) { - if (LOG.isDebugEnabled()) { - LOG.debug("ContainerID: {} does not exist in the set pointed by " + - "key:{}", value, key); - } - return false; - } - return true; - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("key: {} does not exist in the attributeMap", key); - } - return false; - } - } - - /** - * Returns the collection that maps to the given key. - * - * @param key - Key to the bucket. - * @return Underlying Set in immutable form. - */ - public NavigableSet getCollection(T key) { - Preconditions.checkNotNull(key); - - if (this.attributeMap.containsKey(key)) { - return Collections.unmodifiableNavigableSet(this.attributeMap.get(key)); - } - if (LOG.isDebugEnabled()) { - LOG.debug("No such Key. Key {}", key); - } - return EMPTY_SET; - } - - /** - * Moves a ContainerID from one bucket to another. - * - * @param currentKey - Current Key - * @param newKey - newKey - * @param value - ContainerID - * @throws SCMException on Error - */ - public void update(T currentKey, T newKey, ContainerID value) - throws SCMException { - Preconditions.checkNotNull(currentKey); - Preconditions.checkNotNull(newKey); - - boolean removed = false; - try { - removed = remove(currentKey, value); - if (!removed) { - throw new SCMException("Unable to find key in the current key bucket", - FAILED_TO_CHANGE_CONTAINER_STATE); - } - insert(newKey, value); - } catch (SCMException ex) { - // if we removed the key, insert it back to original bucket, since the - // next insert failed. - LOG.error("error in update.", ex); - if (removed) { - insert(currentKey, value); - if (LOG.isTraceEnabled()) { - LOG.trace("reinserted the removed key. {}", currentKey); - } - } - throw ex; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java deleted file mode 100644 index cd491154291..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerQueryKey.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; - -/** - * Key for the Caching layer for Container Query. - */ -public class ContainerQueryKey { - private final HddsProtos.LifeCycleState state; - private final String owner; - private final HddsProtos.ReplicationFactor factor; - private final HddsProtos.ReplicationType type; - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerQueryKey that = (ContainerQueryKey) o; - - return new EqualsBuilder() - .append(getState(), that.getState()) - .append(getOwner(), that.getOwner()) - .append(getFactor(), that.getFactor()) - .append(getType(), that.getType()) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(61, 71) - .append(getState()) - .append(getOwner()) - .append(getFactor()) - .append(getType()) - .toHashCode(); - } - - /** - * Constructor for ContainerQueryKey. - * @param state LifeCycleState - * @param owner - Name of the Owner. - * @param factor Replication Factor. - * @param type - Replication Type. - */ - public ContainerQueryKey(HddsProtos.LifeCycleState state, String owner, - HddsProtos.ReplicationFactor factor, HddsProtos.ReplicationType type) { - this.state = state; - this.owner = owner; - this.factor = factor; - this.type = type; - } - - /** - * Returns the state of containers which this key represents. - * @return LifeCycleState - */ - public HddsProtos.LifeCycleState getState() { - return state; - } - - /** - * Returns the owner of containers which this key represents. - * @return Owner - */ - public String getOwner() { - return owner; - } - - /** - * Returns the replication factor of containers which this key represents. - * @return ReplicationFactor - */ - public HddsProtos.ReplicationFactor getFactor() { - return factor; - } - - /** - * Returns the replication type of containers which this key represents. - * @return ReplicationType - */ - public HddsProtos.ReplicationType getType() { - return type; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java deleted file mode 100644 index e4e8ed379f5..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -/** - * Class that acts as the container state. - */ -public class ContainerState { - private final String owner; - private final PipelineID pipelineID; - - /** - * Constructs a Container Key. - * - * @param owner - Container Owners - * @param pipelineID - ID of the pipeline - */ - public ContainerState(String owner, PipelineID pipelineID) { - this.pipelineID = pipelineID; - this.owner = owner; - } - - public String getOwner() { - return owner; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - ContainerState that = (ContainerState) o; - - return new EqualsBuilder() - .append(owner, that.owner) - .append(pipelineID, that.pipelineID) - .isEquals(); - } - - @Override - public int hashCode() { - return new HashCodeBuilder(137, 757) - .append(owner) - .append(pipelineID) - .toHashCode(); - } - - @Override - public String toString() { - return "ContainerKey{" + - ", owner=" + owner + - ", pipelineID=" + pipelineID + - '}'; - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java deleted file mode 100644 index 5fc94008ff3..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ /dev/null @@ -1,545 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import com.google.common.base.Preconditions; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Set; -import java.util.Collections; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeSet; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .CONTAINER_EXISTS; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - -/** - * Container State Map acts like a unified map for various attributes that are - * used to select containers when we need allocated blocks. - *
- * This class provides the ability to query 5 classes of attributes. They are - *
- * 1. LifeCycleStates - LifeCycle States of a container describe which state - * a container is in. For example, a container needs to be in Open State for a - * client to be able to write to it. - *
- * 2. Owners - Each instance of a Name service, for example, the Namenode of HDFS or - * the Ozone Manager (OM) of Ozone or CBlockServer -- is an owner. It is - * possible to have many OMs for an Ozone cluster and only one SCM. But SCM - * keeps the data from each OM in a separate bucket, never mixing them. To - * write data, we often have to find all open containers for a specific owner. - *
- * 3. ReplicationType - Clients are allowed to specify what kind of - * replication pipeline they want to use. Each Container exists on top of a - * pipeline, so we need to get the ReplicationType specified by the user. - *
- * 4. ReplicationFactor - The replication factor represents how many copies - * of data should be made. Right now we support two different factors, ONE - * Replica and THREE Replicas. Users can specify how many copies should be made - * for an Ozone key. - *
- * The most common access pattern of this class is to select a container based - * on all these parameters, for example, when allocating a block we will - * select a container that belongs to user1, with Ratis replication which can - * make 3 copies of data. The fact that we will look for open containers by - * default and if we cannot find them we will add new containers. - */ -public class ContainerStateMap { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerStateMap.class); - - private final static NavigableSet EMPTY_SET = - Collections.unmodifiableNavigableSet(new TreeSet<>()); - - private final ContainerAttribute lifeCycleStateMap; - private final ContainerAttribute ownerMap; - private final ContainerAttribute factorMap; - private final ContainerAttribute typeMap; - private final Map containerMap; - private final Map> replicaMap; - private final Map> resultCache; - - // Container State Map lock should be held before calling into - // Update ContainerAttributes. The consistency of ContainerAttributes is - // protected by this lock. - private final ReadWriteLock lock; - - /** - * Create a ContainerStateMap. - */ - public ContainerStateMap() { - this.lifeCycleStateMap = new ContainerAttribute<>(); - this.ownerMap = new ContainerAttribute<>(); - this.factorMap = new ContainerAttribute<>(); - this.typeMap = new ContainerAttribute<>(); - this.containerMap = new ConcurrentHashMap<>(); - this.lock = new ReentrantReadWriteLock(); - this.replicaMap = new ConcurrentHashMap<>(); - this.resultCache = new ConcurrentHashMap<>(); - } - - /** - * Adds a ContainerInfo Entry in the ContainerStateMap. - * - * @param info - container info - * @throws SCMException - throws if create failed. - */ - public void addContainer(final ContainerInfo info) - throws SCMException { - Preconditions.checkNotNull(info, "Container Info cannot be null"); - Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0, - "ExpectedReplicaCount should be greater than 0"); - - lock.writeLock().lock(); - try { - final ContainerID id = info.containerID(); - if (containerMap.putIfAbsent(id, info) != null) { - LOG.debug("Duplicate container ID detected. {}", id); - throw new - SCMException("Duplicate container ID detected.", - CONTAINER_EXISTS); - } - - lifeCycleStateMap.insert(info.getState(), id); - ownerMap.insert(info.getOwner(), id); - factorMap.insert(info.getReplicationFactor(), id); - typeMap.insert(info.getReplicationType(), id); - replicaMap.put(id, ConcurrentHashMap.newKeySet()); - - // Flush the cache of this container type, will be added later when - // get container queries are executed. - flushCache(info); - LOG.trace("Created container with {} successfully.", id); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Removes a Container Entry from ContainerStateMap. - * - * @param containerID - ContainerID - * @throws SCMException - throws if create failed. - */ - public void removeContainer(final ContainerID containerID) - throws ContainerNotFoundException { - Preconditions.checkNotNull(containerID, "ContainerID cannot be null"); - lock.writeLock().lock(); - try { - checkIfContainerExist(containerID); - // Should we revert back to the original state if any of the below - // remove operation fails? 
- final ContainerInfo info = containerMap.remove(containerID); - lifeCycleStateMap.remove(info.getState(), containerID); - ownerMap.remove(info.getOwner(), containerID); - factorMap.remove(info.getReplicationFactor(), containerID); - typeMap.remove(info.getReplicationType(), containerID); - // Flush the cache of this container type. - flushCache(info); - LOG.trace("Removed container with {} successfully.", containerID); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the latest state of Container from SCM's Container State Map. - * - * @param containerID - ContainerID - * @return container info, if found. - */ - public ContainerInfo getContainerInfo(final ContainerID containerID) - throws ContainerNotFoundException { - lock.readLock().lock(); - try { - checkIfContainerExist(containerID); - return containerMap.get(containerID); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. - * - * @param containerID - * @return Set - */ - public Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { - Preconditions.checkNotNull(containerID); - lock.readLock().lock(); - try { - checkIfContainerExist(containerID); - return Collections - .unmodifiableSet(replicaMap.get(containerID)); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Adds given datanodes as nodes where replica for given containerId exist. - * Logs a debug entry if a datanode is already added as replica for given - * ContainerId. - * - * @param containerID - * @param replica - */ - public void updateContainerReplica(final ContainerID containerID, - final ContainerReplica replica) throws ContainerNotFoundException { - Preconditions.checkNotNull(containerID); - lock.writeLock().lock(); - try { - checkIfContainerExist(containerID); - Set replicas = replicaMap.get(containerID); - replicas.remove(replica); - replicas.add(replica); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Remove a container Replica for given DataNode. - * - * @param containerID - * @param replica - * @return True of dataNode is removed successfully else false. - */ - public void removeContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException { - Preconditions.checkNotNull(containerID); - Preconditions.checkNotNull(replica); - - lock.writeLock().lock(); - try { - checkIfContainerExist(containerID); - if(!replicaMap.get(containerID).remove(replica)) { - throw new ContainerReplicaNotFoundException( - "Container #" - + containerID.getId() + ", replica: " + replica); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Just update the container State. - * @param info ContainerInfo. - */ - public void updateContainerInfo(final ContainerInfo info) - throws ContainerNotFoundException { - lock.writeLock().lock(); - try { - Preconditions.checkNotNull(info); - checkIfContainerExist(info.containerID()); - final ContainerInfo currentInfo = containerMap.get(info.containerID()); - flushCache(info, currentInfo); - containerMap.put(info.containerID(), info); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Update the State of a container. - * - * @param containerID - ContainerID - * @param currentState - CurrentState - * @param newState - NewState. - * @throws SCMException - in case of failure. 
- */ - public void updateState(ContainerID containerID, LifeCycleState currentState, - LifeCycleState newState) throws SCMException, ContainerNotFoundException { - Preconditions.checkNotNull(currentState); - Preconditions.checkNotNull(newState); - lock.writeLock().lock(); - try { - checkIfContainerExist(containerID); - final ContainerInfo currentInfo = containerMap.get(containerID); - try { - currentInfo.setState(newState); - - // We are updating two places before this update is done, these can - // fail independently, since the code needs to handle it. - - // We update the attribute map, if that fails it will throw an - // exception, so no issues, if we are successful, we keep track of the - // fact that we have updated the lifecycle state in the map, and update - // the container state. If this second update fails, we will attempt to - // roll back the earlier change we did. If the rollback fails, we can - // be in an inconsistent state, - - lifeCycleStateMap.update(currentState, newState, containerID); - if (LOG.isTraceEnabled()) { - LOG.trace("Updated the container {} to new state. Old = {}, new = " + - "{}", containerID, currentState, newState); - } - - // Just flush both old and new data sets from the result cache. - flushCache(currentInfo); - } catch (SCMException ex) { - LOG.error("Unable to update the container state. {}", ex); - // we need to revert the change in this attribute since we are not - // able to update the hash table. - LOG.info("Reverting the update to lifecycle state. Moving back to " + - "old state. Old = {}, Attempted state = {}", currentState, - newState); - - currentInfo.setState(currentState); - - // if this line throws, the state map can be in an inconsistent - // state, since we will have modified the attribute by the - // container state will not in sync since we were not able to put - // that into the hash table. - lifeCycleStateMap.update(newState, currentState, containerID); - - throw new SCMException("Updating the container map failed.", ex, - FAILED_TO_CHANGE_CONTAINER_STATE); - } - } finally { - lock.writeLock().unlock(); - } - } - - public Set getAllContainerIDs() { - return Collections.unmodifiableSet(containerMap.keySet()); - } - - /** - * Returns A list of containers owned by a name service. - * - * @param ownerName - Name of the NameService. - * @return - NavigableSet of ContainerIDs. - */ - NavigableSet getContainerIDsByOwner(final String ownerName) { - Preconditions.checkNotNull(ownerName); - lock.readLock().lock(); - try { - return ownerMap.getCollection(ownerName); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers in the System by the Type. - * - * @param type - Replication type -- StandAlone, Ratis etc. - * @return NavigableSet - */ - NavigableSet getContainerIDsByType(final ReplicationType type) { - Preconditions.checkNotNull(type); - lock.readLock().lock(); - try { - return typeMap.getCollection(type); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers by replication factor. - * - * @param factor - Replication Factor. - * @return NavigableSet. - */ - NavigableSet getContainerIDsByFactor( - final ReplicationFactor factor) { - Preconditions.checkNotNull(factor); - lock.readLock().lock(); - try { - return factorMap.getCollection(factor); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns Containers by State. - * - * @param state - State - Open, Closed etc. - * @return List of containers by state. 
- */ - public NavigableSet getContainerIDsByState( - final LifeCycleState state) { - Preconditions.checkNotNull(state); - lock.readLock().lock(); - try { - return lifeCycleStateMap.getCollection(state); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Gets the containers that matches the following filters. - * - * @param state - LifeCycleState - * @param owner - Owner - * @param factor - Replication Factor - * @param type - Replication Type - * @return ContainerInfo or Null if not container satisfies the criteria. - */ - public NavigableSet getMatchingContainerIDs( - final LifeCycleState state, final String owner, - final ReplicationFactor factor, final ReplicationType type) { - - Preconditions.checkNotNull(state, "State cannot be null"); - Preconditions.checkNotNull(owner, "Owner cannot be null"); - Preconditions.checkNotNull(factor, "Factor cannot be null"); - Preconditions.checkNotNull(type, "Type cannot be null"); - - lock.readLock().lock(); - try { - final ContainerQueryKey queryKey = - new ContainerQueryKey(state, owner, factor, type); - if(resultCache.containsKey(queryKey)){ - return resultCache.get(queryKey); - } - - // If we cannot meet any one condition we return EMPTY_SET immediately. - // Since when we intersect these sets, the result will be empty if any - // one is empty. - final NavigableSet stateSet = - lifeCycleStateMap.getCollection(state); - if (stateSet.size() == 0) { - return EMPTY_SET; - } - - final NavigableSet ownerSet = - ownerMap.getCollection(owner); - if (ownerSet.size() == 0) { - return EMPTY_SET; - } - - final NavigableSet factorSet = - factorMap.getCollection(factor); - if (factorSet.size() == 0) { - return EMPTY_SET; - } - - final NavigableSet typeSet = - typeMap.getCollection(type); - if (typeSet.size() == 0) { - return EMPTY_SET; - } - - - // if we add more constraints we will just add those sets here.. - final NavigableSet[] sets = sortBySize(stateSet, - ownerSet, factorSet, typeSet); - - NavigableSet currentSet = sets[0]; - // We take the smallest set and intersect against the larger sets. This - // allows us to reduce the lookups to the least possible number. - for (int x = 1; x < sets.length; x++) { - currentSet = intersectSets(currentSet, sets[x]); - } - resultCache.put(queryKey, currentSet); - return currentSet; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Calculates the intersection between sets and returns a new set. - * - * @param smaller - First Set - * @param bigger - Second Set - * @return resultSet which is the intersection of these two sets. - */ - private NavigableSet intersectSets( - final NavigableSet smaller, - final NavigableSet bigger) { - Preconditions.checkState(smaller.size() <= bigger.size(), - "This function assumes the first set is lesser or equal to second " + - "set"); - final NavigableSet resultSet = new TreeSet<>(); - for (ContainerID id : smaller) { - if (bigger.contains(id)) { - resultSet.add(id); - } - } - return resultSet; - } - - /** - * Sorts a list of Sets based on Size. This is useful when we are - * intersecting the sets. - * - * @param sets - varagrs of sets - * @return Returns a sorted array of sets based on the size of the set. - */ - @SuppressWarnings("unchecked") - private NavigableSet[] sortBySize( - final NavigableSet... 
sets) { - for (int x = 0; x < sets.length - 1; x++) { - for (int y = 0; y < sets.length - x - 1; y++) { - if (sets[y].size() > sets[y + 1].size()) { - final NavigableSet temp = sets[y]; - sets[y] = sets[y + 1]; - sets[y + 1] = temp; - } - } - } - return sets; - } - - private void flushCache(final ContainerInfo... containerInfos) { - for (ContainerInfo containerInfo : containerInfos) { - final ContainerQueryKey key = new ContainerQueryKey( - containerInfo.getState(), - containerInfo.getOwner(), - containerInfo.getReplicationFactor(), - containerInfo.getReplicationType()); - resultCache.remove(key); - } - } - - private void checkIfContainerExist(ContainerID containerID) - throws ContainerNotFoundException { - if (!containerMap.containsKey(containerID)) { - throw new ContainerNotFoundException("#" + containerID.getId()); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java deleted file mode 100644 index 8ad1c8b842f..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Container States package. - */ -package org.apache.hadoop.hdds.scm.container.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java deleted file mode 100644 index 43d396e0cb1..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.scm.events; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .IncrementalContainerReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.TypedEvent; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; - -/** - * Class that acts as the namespace for all SCM Events. - */ -public final class SCMEvents { - - /** - * NodeReports are sent out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and NodeReport Event is generated. - */ - public static final TypedEvent NODE_REPORT = - new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report"); - - /** - * Event generated on DataNode registration. - */ - public static final TypedEvent - NODE_REGISTRATION_CONT_REPORT = new TypedEvent<>( - NodeRegistrationContainerReport.class, - "Node_Registration_Container_Report"); - - /** - * ContainerReports are send out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated. - */ - public static final TypedEvent CONTAINER_REPORT = - new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report"); - - /** - * IncrementalContainerReports are send out by Datanodes. - * This report is received by SCMDatanodeHeartbeatDispatcher and - * Incremental_Container_Report Event is generated. - */ - public static final TypedEvent - INCREMENTAL_CONTAINER_REPORT = new TypedEvent<>( - IncrementalContainerReportFromDatanode.class, - "Incremental_Container_Report"); - - /** - * ContainerActions are sent by Datanode. This event is received by - * SCMDatanodeHeartbeatDispatcher and CONTAINER_ACTIONS event is generated. - */ - public static final TypedEvent - CONTAINER_ACTIONS = new TypedEvent<>(ContainerActionsFromDatanode.class, - "Container_Actions"); - - /** - * PipelineReports are send out by Datanodes. This report is received by - * SCMDatanodeHeartbeatDispatcher and Pipeline_Report Event is generated. - */ - public static final TypedEvent PIPELINE_REPORT = - new TypedEvent<>(PipelineReportFromDatanode.class, "Pipeline_Report"); - - /** - * PipelineReport processed by pipeline report handler. This event is - * received by HealthyPipelineSafeModeRule. 
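The event constants removed in this hunk pair a payload class with an event name (TypedEvent) so that reporters can publish strongly typed events and handlers can subscribe to exactly the payloads they understand. As a rough illustration of that publish/subscribe shape, here is a tiny stand-alone dispatcher; it is not the hadoop-hdds EventQueue API, and every name in it is made up for the example.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

/** Simplified stand-in for the typed publish/subscribe pattern used by SCMEvents. */
public final class TinyEventBus {

  /** A typed event key, analogous in spirit to TypedEvent<PAYLOAD>. */
  public static final class EventKey<P> {
    private final String name;
    public EventKey(String name) { this.name = name; }
    public String getName() { return name; }
  }

  private final Map<EventKey<?>, List<Consumer<?>>> handlers = new HashMap<>();

  /** Register a handler for one event key; several handlers may share a key. */
  public <P> void subscribe(EventKey<P> key, Consumer<P> handler) {
    handlers.computeIfAbsent(key, k -> new ArrayList<>()).add(handler);
  }

  /** Deliver a payload to every handler registered for the key. */
  @SuppressWarnings("unchecked")
  public <P> void fire(EventKey<P> key, P payload) {
    for (Consumer<?> handler : handlers.getOrDefault(key, Collections.emptyList())) {
      ((Consumer<P>) handler).accept(payload);
    }
  }

  public static void main(String[] args) {
    TinyEventBus bus = new TinyEventBus();
    EventKey<String> nodeReport = new EventKey<>("Node_Report");
    bus.subscribe(nodeReport, report -> System.out.println("handled: " + report));
    bus.fire(nodeReport, "report-from-datanode-1");
    System.out.println("fired " + nodeReport.getName());
  }
}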
- */ - public static final TypedEvent - PROCESSED_PIPELINE_REPORT = new TypedEvent<>( - PipelineReportFromDatanode.class, "Processed_Pipeline_Report"); - - /** - * PipelineActions are sent by Datanode. This event is received by - * SCMDatanodeHeartbeatDispatcher and PIPELINE_ACTIONS event is generated. - */ - public static final TypedEvent - PIPELINE_ACTIONS = new TypedEvent<>(PipelineActionsFromDatanode.class, - "Pipeline_Actions"); - - /** - * A Command status report will be sent by datanodes. This repoort is received - * by SCMDatanodeHeartbeatDispatcher and CommandReport event is generated. - */ - public static final TypedEvent - CMD_STATUS_REPORT = - new TypedEvent<>(CommandStatusReportFromDatanode.class, - "Cmd_Status_Report"); - - /** - * When ever a command for the Datanode needs to be issued by any component - * inside SCM, a Datanode_Command event is generated. NodeManager listens to - * these events and dispatches them to Datanode for further processing. - */ - public static final Event DATANODE_COMMAND = - new TypedEvent<>(CommandForDatanode.class, "Datanode_Command"); - - public static final TypedEvent - RETRIABLE_DATANODE_COMMAND = - new TypedEvent<>(CommandForDatanode.class, "Retriable_Datanode_Command"); - - /** - * A Close Container Event can be triggered under many condition. Some of them - * are: 1. A Container is full, then we stop writing further information to - * that container. DN's let SCM know that current state and sends a - * informational message that allows SCM to close the container. - *

- * 2. If a pipeline is open; for example Ratis; if a single node fails, we - * will proactively close these containers. - *

- * Once a command is dispatched to DN, we will also listen to updates from the - * datanode which lets us know that this command completed or timed out. - */ - public static final TypedEvent CLOSE_CONTAINER = - new TypedEvent<>(ContainerID.class, "Close_Container"); - - /** - * This event will be triggered whenever a new datanode is registered with - * SCM. - */ - public static final TypedEvent NEW_NODE = - new TypedEvent<>(DatanodeDetails.class, "New_Node"); - - /** - * This event will be triggered whenever a datanode is moved from healthy to - * stale state. - */ - public static final TypedEvent STALE_NODE = - new TypedEvent<>(DatanodeDetails.class, "Stale_Node"); - - /** - * This event will be triggered whenever a datanode is moved from stale to - * dead state. - */ - public static final TypedEvent DEAD_NODE = - new TypedEvent<>(DatanodeDetails.class, "Dead_Node"); - - /** - * This event will be triggered whenever a datanode is moved from non-healthy - * state to healthy state. - */ - public static final TypedEvent NON_HEALTHY_TO_HEALTHY_NODE = - new TypedEvent<>(DatanodeDetails.class, "NON_HEALTHY_TO_HEALTHY_NODE"); - - /** - * This event will be triggered by CommandStatusReportHandler whenever a - * status for DeleteBlock SCMCommand is received. - */ - public static final TypedEvent - DELETE_BLOCK_STATUS = - new TypedEvent<>(CommandStatusReportHandler.DeleteBlockStatus.class, - "Delete_Block_Status"); - - /** - * This event will be triggered while processing container reports from DN - * when deleteTransactionID of container in report mismatches with the - * deleteTransactionID on SCM. - */ - public static final Event PENDING_DELETE_STATUS = - new TypedEvent<>(PendingDeleteStatusList.class, "Pending_Delete_Status"); - - public static final TypedEvent SAFE_MODE_STATUS = - new TypedEvent<>(SafeModeStatus.class); - - /** - * Private Ctor. Never Constructed. - */ - private SCMEvents() { - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java deleted file mode 100644 index 46181a3eb5f..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Events Package contains all the Events used by SCM internally to - * communicate between different sub-systems that make up SCM. 
- */ -package org.apache.hadoop.hdds.scm.events; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java deleted file mode 100644 index 62eb0f25689..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/BigIntegerCodec.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.metadata; - -import java.io.IOException; -import java.math.BigInteger; -import org.apache.hadoop.hdds.utils.db.Codec; - -/** - * Encode and decode BigInteger. - */ -public class BigIntegerCodec implements Codec { - @Override - public byte[] toPersistedFormat(BigInteger object) throws IOException { - return object.toByteArray(); - } - - @Override - public BigInteger fromPersistedFormat(byte[] rawData) throws IOException { - return new BigInteger(rawData); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java deleted file mode 100644 index f825025f701..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.metadata; - - -import com.google.protobuf.InvalidProtocolBufferException; -import java.io.IOException; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.utils.db.Codec; - -/** - * Codec for Persisting the DeletedBlocks. 
- */ -public class DeletedBlocksTransactionCodec - implements Codec { - @Override - public byte[] toPersistedFormat(DeletedBlocksTransaction object) - throws IOException { - return object.toByteArray(); - } - - @Override - public DeletedBlocksTransaction fromPersistedFormat(byte[] rawData) - throws IOException { - try { - return DeletedBlocksTransaction.parseFrom(rawData); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't convert rawBytes to DeletedBlocksTransaction.", e); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java deleted file mode 100644 index 16923203d7e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/LongCodec.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.metadata; - -import com.google.common.primitives.Longs; -import java.io.IOException; -import org.apache.hadoop.hdds.utils.db.Codec; - -/** - * Codec for Persisting the DeletedBlocks. - */ -public class LongCodec implements Codec { - - @Override - public byte[] toPersistedFormat(Long object) throws IOException { - return Longs.toByteArray(object); - } - - @Override - public Long fromPersistedFormat(byte[] rawData) throws IOException { - return Longs.fromByteArray(rawData); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java deleted file mode 100644 index 11503169ce2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStore.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
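BigIntegerCodec, DeletedBlocksTransactionCodec and LongCodec, all removed above, implement the same two-method contract: toPersistedFormat() turns a value into the bytes stored in RocksDB and fromPersistedFormat() restores it, and the pair must round-trip. Below is a hedged, self-contained sketch of that contract; the Codec interface is re-declared locally so the example compiles on its own and does not depend on the removed hadoop-hdds classes.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/** Self-contained illustration of the two-method codec contract. */
public final class CodecRoundTrip {

  /** Local stand-in mirroring the shape of the removed Codec<T> interface. */
  interface Codec<T> {
    byte[] toPersistedFormat(T object) throws IOException;
    T fromPersistedFormat(byte[] rawData) throws IOException;
  }

  /** Trivial String codec used only to demonstrate the round trip. */
  static final class StringCodec implements Codec<String> {
    @Override
    public byte[] toPersistedFormat(String object) {
      return object.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public String fromPersistedFormat(byte[] rawData) {
      return new String(rawData, StandardCharsets.UTF_8);
    }
  }

  public static void main(String[] args) throws IOException {
    Codec<String> codec = new StringCodec();
    byte[] persisted = codec.toPersistedFormat("tx-42");
    // A codec must round-trip: decode(encode(x)) equals x.
    System.out.println(codec.fromPersistedFormat(persisted).equals("tx-42"));
  }
}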
- */ -package org.apache.hadoop.hdds.scm.metadata; - -import java.math.BigInteger; -import java.security.cert.X509Certificate; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.IOException; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.utils.db.TableIterator; - -/** - * Generic interface for data stores for SCM. - * This is similar to the OMMetadataStore class, - * where we write classes into some underlying storage system. - */ -public interface SCMMetadataStore { - /** - * Start metadata manager. - * - * @param configuration - Configuration - * @throws IOException - Unable to start metadata store. - */ - void start(OzoneConfiguration configuration) throws IOException; - - /** - * Stop metadata manager. - */ - void stop() throws Exception; - - /** - * Get metadata store. - * - * @return metadata store. - */ - @VisibleForTesting - DBStore getStore(); - - /** - * A Table that keeps the deleted blocks lists and transactions. - * - * @return Table - */ - Table getDeletedBlocksTXTable(); - - /** - * Returns the current TXID for the deleted blocks. - * - * @return Long - */ - Long getCurrentTXID(); - - /** - * Returns the next TXID for the Deleted Blocks. - * - * @return Long. - */ - Long getNextDeleteBlockTXID(); - - /** - * A table that maintains all the valid certificates issued by the SCM CA. - * - * @return Table - */ - Table getValidCertsTable(); - - /** - * A Table that maintains all revoked certificates until they expire. - * - * @return Table. - */ - Table getRevokedCertsTable(); - - /** - * Returns the list of Certificates of a specific type. - * - * @param certType - CertType. - * @return Iterator - */ - TableIterator getAllCerts(CertificateStore.CertType certType); - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java deleted file mode 100644 index eff7a985241..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
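The SCMMetadataStore interface removed above couples its typed tables with a delete-block transaction counter: getCurrentTXID() reads the last issued id and getNextDeleteBlockTXID() hands out the next one, so every deleted-block transaction gets a unique, increasing key. A minimal in-memory sketch of that counter contract follows, seeded the way the implementation does it, from the largest id already persisted; it is an illustration only, not the RocksDB-backed implementation.

import java.util.concurrent.atomic.AtomicLong;

/** In-memory sketch of the TXID contract; a hypothetical stand-in for illustration. */
public final class TxIdCounter {

  private final AtomicLong txId;

  /** Seed with the largest TXID already persisted, or 0 for an empty store. */
  public TxIdCounter(long largestRecordedTxId) {
    this.txId = new AtomicLong(largestRecordedTxId);
  }

  /** Mirrors getCurrentTXID(): read the last issued id without advancing it. */
  public long getCurrentTxId() {
    return txId.get();
  }

  /** Mirrors getNextDeleteBlockTXID(): every caller gets a unique, increasing id. */
  public long getNextDeleteBlockTxId() {
    return txId.incrementAndGet();
  }
}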
- */ -package org.apache.hadoop.hdds.scm.metadata; - -import java.io.File; -import java.math.BigInteger; -import java.nio.file.Paths; -import java.security.cert.X509Certificate; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.IOException; -import org.apache.hadoop.hdds.security.x509.certificate.authority - .CertificateStore; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.OzoneConsts.SCM_DB_NAME; - -/** - * A RocksDB based implementation of SCM Metadata Store. - *

- *

- * +---------------+------------------+-------------------------+ - * | Column Family | Key | Value | - * +---------------+------------------+-------------------------+ - * | DeletedBlocks | TXID(Long) | DeletedBlockTransaction | - * +---------------+------------------+-------------------------+ - * | ValidCerts | Serial (BigInt) | X509Certificate | - * +---------------+------------------+-------------------------+ - * |RevokedCerts | Serial (BigInt) | X509Certificate | - * +---------------+------------------+-------------------------+ - */ -public class SCMMetadataStoreRDBImpl implements SCMMetadataStore { - - private static final String DELETED_BLOCKS_TABLE = "deletedBlocks"; - private Table deletedBlocksTable; - - private static final String VALID_CERTS_TABLE = "validCerts"; - private Table validCertsTable; - - private static final String REVOKED_CERTS_TABLE = "revokedCerts"; - private Table revokedCertsTable; - - - - private static final Logger LOG = - LoggerFactory.getLogger(SCMMetadataStoreRDBImpl.class); - private DBStore store; - private final OzoneConfiguration configuration; - private final AtomicLong txID; - - /** - * Constructs the metadata store and starts the DB Services. - * - * @param config - Ozone Configuration. - * @throws IOException - on Failure. - */ - public SCMMetadataStoreRDBImpl(OzoneConfiguration config) - throws IOException { - this.configuration = config; - start(this.configuration); - this.txID = new AtomicLong(this.getLargestRecordedTXID()); - } - - @Override - public void start(OzoneConfiguration config) - throws IOException { - if (this.store == null) { - File metaDir = ServerUtils.getScmDbDir(configuration); - - this.store = DBStoreBuilder.newBuilder(configuration) - .setName(SCM_DB_NAME) - .setPath(Paths.get(metaDir.getPath())) - .addTable(DELETED_BLOCKS_TABLE) - .addTable(VALID_CERTS_TABLE) - .addTable(REVOKED_CERTS_TABLE) - .addCodec(DeletedBlocksTransaction.class, - new DeletedBlocksTransactionCodec()) - .addCodec(Long.class, new LongCodec()) - .addCodec(BigInteger.class, new BigIntegerCodec()) - .addCodec(X509Certificate.class, new X509CertificateCodec()) - .build(); - - deletedBlocksTable = this.store.getTable(DELETED_BLOCKS_TABLE, - Long.class, DeletedBlocksTransaction.class); - checkTableStatus(deletedBlocksTable, DELETED_BLOCKS_TABLE); - - validCertsTable = this.store.getTable(VALID_CERTS_TABLE, - BigInteger.class, X509Certificate.class); - checkTableStatus(validCertsTable, VALID_CERTS_TABLE); - - revokedCertsTable = this.store.getTable(REVOKED_CERTS_TABLE, - BigInteger.class, X509Certificate.class); - checkTableStatus(revokedCertsTable, REVOKED_CERTS_TABLE); - } - } - - @Override - public void stop() throws Exception { - if (store != null) { - store.close(); - store = null; - } - } - - @Override - public DBStore getStore() { - return this.store; - } - - @Override - public Table getDeletedBlocksTXTable() { - return deletedBlocksTable; - } - - @Override - public Long getNextDeleteBlockTXID() { - return this.txID.incrementAndGet(); - } - - @Override - public Table getValidCertsTable() { - return validCertsTable; - } - - @Override - public Table getRevokedCertsTable() { - return revokedCertsTable; - } - - @Override - public TableIterator getAllCerts(CertificateStore.CertType certType) { - if(certType == CertificateStore.CertType.VALID_CERTS) { - return validCertsTable.iterator(); - } - - if(certType == CertificateStore.CertType.REVOKED_CERTS) { - return revokedCertsTable.iterator(); - } - - return null; - } - - @Override - public Long 
getCurrentTXID() { - return this.txID.get(); - } - - /** - * Returns the largest recorded TXID from the DB. - * - * @return Long - * @throws IOException - */ - private Long getLargestRecordedTXID() throws IOException { - try (TableIterator txIter = - deletedBlocksTable.iterator()) { - txIter.seekToLast(); - Long txid = txIter.key(); - if (txid != null) { - return txid; - } - } - return 0L; - } - - - private void checkTableStatus(Table table, String name) throws IOException { - String logMessage = "Unable to get a reference to %s table. Cannot " + - "continue."; - String errMsg = "Inconsistent DB state, Table - %s. Please check the" + - " logs for more info."; - if (table == null) { - LOG.error(String.format(logMessage, name)); - throw new IOException(String.format(errMsg, name)); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java deleted file mode 100644 index b21103ecbfa..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.metadata; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.utils.db.Codec; - -/** - * Encodes and Decodes X509Certificate Class. 
- */ -public class X509CertificateCodec implements Codec { - @Override - public byte[] toPersistedFormat(X509Certificate object) throws IOException { - try { - return CertificateCodec.getPEMEncodedString(object) - .getBytes(Charset.forName("UTF-8")); - } catch (SCMSecurityException exp) { - throw new IOException(exp); - } - } - - @Override - public X509Certificate fromPersistedFormat(byte[] rawData) - throws IOException { - try{ - String s = new String(rawData, Charset.forName("UTF-8")); - return CertificateCodec.getX509Certificate(s); - } catch (CertificateException exp) { - throw new IOException(exp); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java deleted file mode 100644 index 23e8aaaa4e2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Metadata layer for SCM. - */ -package org.apache.hadoop.hdds.scm.metadata; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java deleted file mode 100644 index eb6dc0d424f..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
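X509CertificateCodec, removed above, persists certificates as UTF-8 PEM text and parses them back, delegating the PEM handling to a CertificateCodec helper. The sketch below shows the same round trip using only JDK classes, Base64 MIME encoding for the PEM body and CertificateFactory for parsing (which accepts PEM input); it approximates the idea and is not the removed helper class.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;

/** JDK-only sketch of a PEM round trip for X.509 certificates. */
public final class PemCertCodec {

  /** Encode a certificate as a PEM string: Base64 DER between BEGIN/END markers. */
  public static String toPem(X509Certificate cert) throws CertificateException {
    String base64 = Base64.getMimeEncoder(64, "\n".getBytes(StandardCharsets.US_ASCII))
        .encodeToString(cert.getEncoded());
    return "-----BEGIN CERTIFICATE-----\n" + base64 + "\n-----END CERTIFICATE-----\n";
  }

  /** Decode a PEM string back into an X509Certificate. */
  public static X509Certificate fromPem(String pem) throws CertificateException {
    CertificateFactory factory = CertificateFactory.getInstance("X.509");
    return (X509Certificate) factory.generateCertificate(
        new ByteArrayInputStream(pem.getBytes(StandardCharsets.US_ASCII)));
  }
}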
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.Time; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * Command Queue is queue of commands for the datanode. - *

- * Node manager, container Manager and Ozone managers can queue commands for - * datanodes into this queue. These commands will be send in the order in which - * there where queued. - */ -public class CommandQueue { - // This list is used as default return value. - private static final List DEFAULT_LIST = new ArrayList<>(); - private final Map commandMap; - private final Lock lock; - private long commandsInQueue; - - /** - * Returns number of commands in queue. - * @return Command Count. - */ - public long getCommandsInQueue() { - return commandsInQueue; - } - - /** - * Constructs a Command Queue. - * TODO : Add a flusher thread that throws away commands older than a certain - * time period. - */ - public CommandQueue() { - commandMap = new HashMap<>(); - lock = new ReentrantLock(); - commandsInQueue = 0; - } - - /** - * This function is used only for test purposes. - */ - @VisibleForTesting - public void clear() { - lock.lock(); - try { - commandMap.clear(); - commandsInQueue = 0; - } finally { - lock.unlock(); - } - } - - /** - * Returns a list of Commands for the datanode to execute, if we have no - * commands returns a empty list otherwise the current set of - * commands are returned and command map set to empty list again. - * - * @param datanodeUuid Datanode UUID - * @return List of SCM Commands. - */ - @SuppressWarnings("unchecked") - List getCommand(final UUID datanodeUuid) { - lock.lock(); - try { - Commands cmds = commandMap.remove(datanodeUuid); - List cmdList = null; - if(cmds != null) { - cmdList = cmds.getCommands(); - commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0; - // A post condition really. - Preconditions.checkState(commandsInQueue >= 0); - } - return cmds == null ? DEFAULT_LIST : cmdList; - } finally { - lock.unlock(); - } - } - - /** - * Adds a Command to the SCM Queue to send the command to container. - * - * @param datanodeUuid DatanodeDetails.Uuid - * @param command - Command - */ - public void addCommand(final UUID datanodeUuid, final SCMCommand - command) { - lock.lock(); - try { - if (commandMap.containsKey(datanodeUuid)) { - commandMap.get(datanodeUuid).add(command); - } else { - commandMap.put(datanodeUuid, new Commands(command)); - } - commandsInQueue++; - } finally { - lock.unlock(); - } - } - - /** - * Class that stores commands for a datanode. - */ - private static class Commands { - private long updateTime; - private long readTime; - private List commands; - - /** - * Constructs a Commands class. - */ - Commands() { - commands = new ArrayList<>(); - updateTime = 0; - readTime = 0; - } - - /** - * Creates the object and populates with the command. - * @param command command to add to queue. - */ - Commands(SCMCommand command) { - this(); - this.add(command); - } - - /** - * Gets the last time the commands for this node was updated. - * @return Time stamp - */ - public long getUpdateTime() { - return updateTime; - } - - /** - * Gets the last read time. - * @return last time when these commands were read from this queue. - */ - public long getReadTime() { - return readTime; - } - - /** - * Adds a command to the list. - * - * @param command SCMCommand - */ - public void add(SCMCommand command) { - this.commands.add(command); - updateTime = Time.monotonicNow(); - } - - /** - * Returns the commands for this datanode. - * @return command list. 
- */ - public List getCommands() { - List temp = this.commands; - this.commands = new ArrayList<>(); - readTime = Time.monotonicNow(); - return temp; - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java deleted file mode 100644 index d06ea2a3b3f..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.util.Time; - -import java.util.Collections; -import java.util.List; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * This class extends the primary identifier of a Datanode with ephemeral - * state, eg last reported time, usage information etc. - */ -public class DatanodeInfo extends DatanodeDetails { - - private final ReadWriteLock lock; - - private volatile long lastHeartbeatTime; - private long lastStatsUpdatedTime; - - private List storageReports; - - /** - * Constructs DatanodeInfo from DatanodeDetails. - * - * @param datanodeDetails Details about the datanode - */ - public DatanodeInfo(DatanodeDetails datanodeDetails) { - super(datanodeDetails); - this.lock = new ReentrantReadWriteLock(); - this.lastHeartbeatTime = Time.monotonicNow(); - this.storageReports = Collections.emptyList(); - } - - /** - * Updates the last heartbeat time with current time. - */ - public void updateLastHeartbeatTime() { - try { - lock.writeLock().lock(); - lastHeartbeatTime = Time.monotonicNow(); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the last heartbeat time. - * - * @return last heartbeat time. - */ - public long getLastHeartbeatTime() { - try { - lock.readLock().lock(); - return lastHeartbeatTime; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Updates the datanode storage reports. - * - * @param reports list of storage report - */ - public void updateStorageReports(List reports) { - try { - lock.writeLock().lock(); - lastStatsUpdatedTime = Time.monotonicNow(); - storageReports = reports; - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns the storage reports associated with this datanode. 
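CommandQueue, removed just above, accumulates SCMCommands per datanode UUID and hands the whole batch out, clearing it, when the heartbeat path next calls getCommand(). Below is a simplified stand-in for that drain-on-read behaviour; the names are illustrative, and the real class additionally tracks a global command count plus read and update timestamps.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

/** Simplified stand-in for a drain-on-read, per-datanode command queue. */
public final class PerNodeCommandQueue<C> {

  private final Map<UUID, List<C>> pending = new HashMap<>();

  /** Queue a command for a datanode; it stays queued until the next poll. */
  public synchronized void add(UUID datanode, C command) {
    pending.computeIfAbsent(datanode, id -> new ArrayList<>()).add(command);
  }

  /** Return every queued command for the datanode and clear them, as getCommand() does. */
  public synchronized List<C> poll(UUID datanode) {
    List<C> commands = pending.remove(datanode);
    return commands == null ? Collections.emptyList() : commands;
  }
}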
- * - * @return list of storage report - */ - public List getStorageReports() { - try { - lock.readLock().lock(); - return storageReports; - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the last updated time of datanode info. - * @return the last updated time of datanode info. - */ - public long getLastStatsUpdatedTime() { - return lastStatsUpdatedTime; - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - @Override - public boolean equals(Object obj) { - return super.equals(obj); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java deleted file mode 100644 index 17e1fedd952..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
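DatanodeInfo, removed above, guards its ephemeral per-node state with a ReentrantReadWriteLock so heartbeat updates and storage-report reads do not race. The fragment below sketches that pattern in isolation, with hypothetical field names standing in for the real report types.

import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Isolated sketch of the read/write-lock pattern used for ephemeral node state. */
public final class EphemeralNodeState {

  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private long lastHeartbeatMillis;
  private List<String> reports = Collections.emptyList(); // stand-in for storage reports

  /** Writers take the exclusive lock so readers never see a half-updated pair. */
  public void recordHeartbeat(long nowMillis, List<String> newReports) {
    lock.writeLock().lock();
    try {
      lastHeartbeatMillis = nowMillis;
      reports = newReports;
    } finally {
      lock.writeLock().unlock();
    }
  }

  /** Readers share the read lock and can proceed concurrently with each other. */
  public long getLastHeartbeatMillis() {
    lock.readLock().lock();
    try {
      return lastHeartbeatMillis;
    } finally {
      lock.readLock().unlock();
    }
  }

  /** Reads the last reports published by a heartbeat, under the shared lock. */
  public List<String> getReports() {
    lock.readLock().lock();
    try {
      return reports;
    } finally {
      lock.readLock().unlock();
    }
  }
}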
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import java.io.IOException; -import java.util.Optional; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerException; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; - -/** - * Handles Dead Node event. - */ -public class DeadNodeHandler implements EventHandler { - - private final NodeManager nodeManager; - private final PipelineManager pipelineManager; - private final ContainerManager containerManager; - - private static final Logger LOG = - LoggerFactory.getLogger(DeadNodeHandler.class); - - public DeadNodeHandler(final NodeManager nodeManager, - final PipelineManager pipelineManager, - final ContainerManager containerManager) { - this.nodeManager = nodeManager; - this.pipelineManager = pipelineManager; - this.containerManager = containerManager; - } - - @Override - public void onMessage(final DatanodeDetails datanodeDetails, - final EventPublisher publisher) { - - try { - - /* - * We should have already destroyed all the pipelines on this datanode - * when it was marked as stale. Destroy pipeline should also have closed - * all the containers on this datanode. - * - * Ideally we should not have any pipeline or OPEN containers now. - * - * To be on a safer side, we double check here and take appropriate - * action. - */ - - destroyPipelines(datanodeDetails); - closeContainers(datanodeDetails, publisher); - - // Remove the container replicas associated with the dead node. - removeContainerReplicas(datanodeDetails); - - } catch (NodeNotFoundException ex) { - // This should not happen, we cannot get a dead node event for an - // unregistered datanode! - LOG.error("DeadNode event for a unregistered node: {}!", datanodeDetails); - } - } - - /** - * Destroys all the pipelines on the given datanode if there are any. - * - * @param datanodeDetails DatanodeDetails - */ - private void destroyPipelines(final DatanodeDetails datanodeDetails) { - Optional.ofNullable(nodeManager.getPipelines(datanodeDetails)) - .ifPresent(pipelines -> - pipelines.forEach(id -> { - try { - pipelineManager.finalizeAndDestroyPipeline( - pipelineManager.getPipeline(id), false); - } catch (PipelineNotFoundException ignore) { - // Pipeline is not there in pipeline manager, - // should we care? - } catch (IOException ex) { - LOG.warn("Exception while finalizing pipeline {}", - id, ex); - } - })); - } - - /** - * Sends CloseContainerCommand to all the open containers on the - * given datanode. 
- * - * @param datanodeDetails DatanodeDetails - * @param publisher EventPublisher - * @throws NodeNotFoundException - */ - private void closeContainers(final DatanodeDetails datanodeDetails, - final EventPublisher publisher) - throws NodeNotFoundException { - nodeManager.getContainers(datanodeDetails) - .forEach(id -> { - try { - final ContainerInfo container = containerManager.getContainer(id); - if (container.getState() == HddsProtos.LifeCycleState.OPEN) { - publisher.fireEvent(CLOSE_CONTAINER, id); - } - } catch (ContainerNotFoundException cnfe) { - LOG.warn("Container {} is not managed by ContainerManager.", - id, cnfe); - } - }); - } - - /** - * Removes the ContainerReplica of the dead datanode from the containers - * which are hosted by that datanode. - * - * @param datanodeDetails DatanodeDetails - * @throws NodeNotFoundException - */ - private void removeContainerReplicas(final DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - nodeManager.getContainers(datanodeDetails) - .forEach(id -> { - try { - final ContainerInfo container = containerManager.getContainer(id); - // Identify and remove the ContainerReplica of dead node - containerManager.getContainerReplicas(id) - .stream() - .filter(r -> r.getDatanodeDetails().equals(datanodeDetails)) - .findFirst() - .ifPresent(replica -> { - try { - containerManager.removeContainerReplica(id, replica); - } catch (ContainerException ex) { - LOG.warn("Exception while removing container replica #{} " + - "of container {}.", replica, container, ex); - } - }); - } catch (ContainerNotFoundException cnfe) { - LOG.warn("Container {} is not managed by ContainerManager.", - id, cnfe); - } - }); - } - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java deleted file mode 100644 index 1dc924b2575..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -/** - * Handles New Node event. 
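DeadNodeHandler, removed above, reacts to a DEAD_NODE event in three steps: destroy any pipelines still registered for the datanode, fire CLOSE_CONTAINER for its containers that are still OPEN, and drop the dead node's replica from each container's replica set. The real handler depends on the SCM pipeline and container managers, so the self-contained sketch below restates only the last step, the stream-and-Optional idiom used to find and remove the dead node's replica; the Replica type here is a made-up stand-in.

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.UUID;

/** Illustration of removing the dead datanode's replica from a container's replica set. */
public final class ReplicaCleanup {

  /** Minimal stand-in for a container replica; the real type carries much more state. */
  static final class Replica {
    final UUID datanode;
    Replica(UUID datanode) { this.datanode = datanode; }
  }

  /** Removes the replica hosted by the dead datanode, if one is present. */
  static void removeReplicaOf(UUID deadDatanode, List<Replica> replicas) {
    Optional<Replica> victim = replicas.stream()
        .filter(replica -> replica.datanode.equals(deadDatanode))
        .findFirst();
    victim.ifPresent(replicas::remove);
  }

  public static void main(String[] args) {
    UUID dead = UUID.randomUUID();
    List<Replica> replicas = new ArrayList<>();
    replicas.add(new Replica(dead));
    replicas.add(new Replica(UUID.randomUUID()));
    removeReplicaOf(dead, replicas);
    System.out.println(replicas.size()); // prints 1
  }
}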
- */ -public class NewNodeHandler implements EventHandler { - - private final PipelineManager pipelineManager; - private final Configuration conf; - - public NewNodeHandler(PipelineManager pipelineManager, Configuration conf) { - this.pipelineManager = pipelineManager; - this.conf = conf; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - pipelineManager.triggerPipelineCreation(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java deleted file mode 100644 index fd8bb87ceb1..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.io.Closeable; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -/** - * A node manager supports a simple interface for managing a datanode. - *

- * 1. A datanode registers with the NodeManager. - *

- * 2. If the node is allowed to register, we add that to the nodes that we need - * to keep track of. - *

- * 3. A heartbeat is made by the node at a fixed frequency. - *

- * 4. A node can be in any of these 4 states: {HEALTHY, STALE, DEAD, - * DECOMMISSIONED} - *

- * HEALTHY - It is a datanode that is regularly heartbeating us. - * - * STALE - A datanode for which we have missed few heart beats. - * - * DEAD - A datanode that we have not heard from for a while. - * - * DECOMMISSIONED - Someone told us to remove this node from the tracking - * list, by calling removeNode. We will throw away this nodes info soon. - */ -public interface NodeManager extends StorageContainerNodeProtocol, - EventHandler, NodeManagerMXBean, Closeable { - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * @param nodeState - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - List getNodes(NodeState nodeState); - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * @param nodeState - State of the node - * @return int -- count - */ - int getNodeCount(NodeState nodeState); - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - List getAllNodes(); - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - SCMNodeStat getStats(); - - /** - * Return a map of node stats. - * @return a map of individual node stats (live/stale but not dead). - */ - Map getNodeStats(); - - /** - * Return the node stat of the specified datanode. - * @param datanodeDetails DatanodeDetails. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails); - - /** - * Returns the node state of a specific node. - * @param datanodeDetails DatanodeDetails - * @return Healthy/Stale/Dead. - */ - NodeState getNodeState(DatanodeDetails datanodeDetails); - - /** - * Get set of pipelines a datanode is part of. - * @param datanodeDetails DatanodeDetails - * @return Set of PipelineID - */ - Set getPipelines(DatanodeDetails datanodeDetails); - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - void addPipeline(Pipeline pipeline); - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - void removePipeline(Pipeline pipeline); - - /** - * Adds the given container to the specified datanode. - * - * @param datanodeDetails - DatanodeDetails - * @param containerId - containerID - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - void addContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) throws NodeNotFoundException; - - /** - * Remaps datanode to containers mapping to the new set of containers. - * @param datanodeDetails - DatanodeDetails - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - void setContainers(DatanodeDetails datanodeDetails, - Set containerIds) throws NodeNotFoundException; - - /** - * Return set of containerIDs available on a datanode. - * @param datanodeDetails DatanodeDetails - * @return set of containerIDs - */ - Set getContainers(DatanodeDetails datanodeDetails) - throws NodeNotFoundException; - - /** - * Add a {@link SCMCommand} to the command queue, which are - * handled by HB thread asynchronously. - * @param dnId datanode uuid - * @param command - */ - void addDatanodeCommand(UUID dnId, SCMCommand command); - - /** - * Process node report. 
- * - * @param datanodeDetails - * @param nodeReport - */ - void processNodeReport(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport); - - /** - * Get list of SCMCommands in the Command Queue for a particular Datanode. - * @param dnID - Datanode uuid. - * @return list of commands - */ - // TODO: We can give better name to this method! - List getCommandQueue(UUID dnID); - - /** - * Given datanode uuid, returns the DatanodeDetails for the node. - * - * @param uuid datanode uuid - * @return the given datanode, or null if not found - */ - DatanodeDetails getNodeByUuid(String uuid); - - /** - * Given datanode address(Ipaddress or hostname), returns a list of - * DatanodeDetails for the datanodes running at that address. - * - * @param address datanode address - * @return the given datanode, or empty list if none found - */ - List getNodesByAddress(String address); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java deleted file mode 100644 index e1b51efc34c..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.classification.InterfaceAudience; - -import java.util.Map; - -/** - * - * This is the JMX management interface for node manager information. - */ -@InterfaceAudience.Private -public interface NodeManagerMXBean { - - /** - * Get the number of data nodes that in all states. - * - * @return A state to number of nodes that in this state mapping - */ - Map getNodeCount(); - - /** - * Get the disk metrics like capacity, usage and remaining based on the - * storage type. - */ - Map getNodeInfo(); - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java deleted file mode 100644 index 71e1b077713..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
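The NodeManager interface removed above tracks, per datanode, the set of containers it hosts: addContainer() attaches one container, setContainers() replaces the node's container set wholesale, getContainers() reads it back, and all three fail for unknown nodes rather than creating them implicitly. A small in-memory sketch of that bookkeeping follows; it is illustrative only and uses plain UUID and Long keys instead of the removed DatanodeDetails and ContainerID types, and IllegalStateException instead of NodeNotFoundException.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

/** Illustrative in-memory datanode-to-container bookkeeping. */
public final class NodeContainerMap {

  private final Map<UUID, Set<Long>> containers = new HashMap<>();

  /** A node must register before any container can be attached to it. */
  public synchronized void registerNode(UUID datanode) {
    containers.putIfAbsent(datanode, new HashSet<>());
  }

  /** Mirrors addContainer(): unknown nodes are an error, not auto-created. */
  public synchronized void addContainer(UUID datanode, long containerId) {
    requireKnown(datanode).add(containerId);
  }

  /** Mirrors setContainers(): replace the node's mapping wholesale. */
  public synchronized void setContainers(UUID datanode, Set<Long> ids) {
    requireKnown(datanode);
    containers.put(datanode, new HashSet<>(ids));
  }

  /** Mirrors getContainers(): return a defensive copy of the node's container ids. */
  public synchronized Set<Long> getContainers(UUID datanode) {
    return new HashSet<>(requireKnown(datanode));
  }

  private Set<Long> requireKnown(UUID datanode) {
    Set<Long> ids = containers.get(datanode);
    if (ids == null) {
      throw new IllegalStateException("Unknown datanode: " + datanode);
    }
    return ids;
  }
}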
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handles Node Reports from datanode. - */ -public class NodeReportHandler implements EventHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(NodeReportHandler.class); - private final NodeManager nodeManager; - - public NodeReportHandler(NodeManager nodeManager) { - Preconditions.checkNotNull(nodeManager); - this.nodeManager = nodeManager; - } - - @Override - public void onMessage(NodeReportFromDatanode nodeReportFromDatanode, - EventPublisher publisher) { - Preconditions.checkNotNull(nodeReportFromDatanode); - DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails(); - Preconditions.checkNotNull(dn, "NodeReport is " - + "missing DatanodeDetails."); - nodeManager - .processNodeReport(dn, nodeReportFromDatanode.getReport()); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java deleted file mode 100644 index 954cb0e8ea4..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ /dev/null @@ -1,765 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.states.*; -import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; -import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.util.*; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; - -/** - * NodeStateManager maintains the state of all the datanodes in the cluster. All - * the node state change should happen only via NodeStateManager. It also - * runs a heartbeat thread which periodically updates the node state. - *

- * The getNode(byState) functions make copy of node maps and then creates a list - * based on that. It should be assumed that these get functions always report - * *stale* information. For example, getting the deadNodeCount followed by - * getNodes(DEAD) could very well produce totally different count. Also - * getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCode(STALE), is not - * guaranteed to add up to the total nodes that we know off. Please treat all - * get functions in this file as a snap-shot of information that is inconsistent - * as soon as you read it. - */ -public class NodeStateManager implements Runnable, Closeable { - - /** - * Node's life cycle events. - */ - private enum NodeLifeCycleEvent { - TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED - } - - private static final Logger LOG = LoggerFactory - .getLogger(NodeStateManager.class); - - /** - * StateMachine for node lifecycle. - */ - private final StateMachine stateMachine; - /** - * This is the map which maintains the current state of all datanodes. - */ - private final NodeStateMap nodeStateMap; - /** - * Maintains the mapping from node to pipelines a node is part of. - */ - private final Node2PipelineMap node2PipelineMap; - /** - * Used for publishing node state change events. - */ - private final EventPublisher eventPublisher; - /** - * Maps the event to be triggered when a node state us updated. - */ - private final Map> state2EventMap; - /** - * ExecutorService used for scheduling heartbeat processing thread. - */ - private final ScheduledExecutorService executorService; - /** - * The frequency in which we have run the heartbeat processing thread. - */ - private final long heartbeatCheckerIntervalMs; - /** - * The timeout value which will be used for marking a datanode as stale. - */ - private final long staleNodeIntervalMs; - /** - * The timeout value which will be used for marking a datanode as dead. - */ - private final long deadNodeIntervalMs; - - /** - * The future is used to pause/unpause the scheduled checks. - */ - private ScheduledFuture healthCheckFuture; - - /** - * Test utility - tracks if health check has been paused (unit tests). - */ - private boolean checkPaused; - - /** - * timestamp of the latest heartbeat check process. - */ - private long lastHealthCheck; - - /** - * number of times the heart beat check was skipped. - */ - private long skippedHealthChecks; - - /** - * Constructs a NodeStateManager instance with the given configuration. 
- * - * @param conf Configuration - */ - public NodeStateManager(Configuration conf, EventPublisher eventPublisher) { - this.nodeStateMap = new NodeStateMap(); - this.node2PipelineMap = new Node2PipelineMap(); - this.eventPublisher = eventPublisher; - this.state2EventMap = new HashMap<>(); - initialiseState2EventMap(); - Set finalStates = new HashSet<>(); - finalStates.add(NodeState.DECOMMISSIONED); - this.stateMachine = new StateMachine<>(NodeState.HEALTHY, finalStates); - initializeStateMachine(); - heartbeatCheckerIntervalMs = HddsServerUtil - .getScmheartbeatCheckerInterval(conf); - staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf); - deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf); - Preconditions.checkState(heartbeatCheckerIntervalMs > 0, - OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL + " should be greater than 0."); - Preconditions.checkState(staleNodeIntervalMs < deadNodeIntervalMs, - OZONE_SCM_STALENODE_INTERVAL + " should be less than" + - OZONE_SCM_DEADNODE_INTERVAL); - executorService = HadoopExecutors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("SCM Heartbeat Processing Thread - %d").build()); - - skippedHealthChecks = 0; - checkPaused = false; // accessed only from test functions - - scheduleNextHealthCheck(); - } - - /** - * Populates state2event map. - */ - private void initialiseState2EventMap() { - state2EventMap.put(NodeState.STALE, SCMEvents.STALE_NODE); - state2EventMap.put(NodeState.DEAD, SCMEvents.DEAD_NODE); - state2EventMap - .put(NodeState.HEALTHY, SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE); - } - - /* - * - * Node and State Transition Mapping: - * - * State: HEALTHY -------------------> STALE - * Event: TIMEOUT - * - * State: STALE -------------------> DEAD - * Event: TIMEOUT - * - * State: STALE -------------------> HEALTHY - * Event: RESTORE - * - * State: DEAD -------------------> HEALTHY - * Event: RESURRECT - * - * State: HEALTHY -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: STALE -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: DEAD -------------------> DECOMMISSIONING - * Event: DECOMMISSION - * - * State: DECOMMISSIONING -------------------> DECOMMISSIONED - * Event: DECOMMISSIONED - * - * Node State Flow - * - * +--------------------------------------------------------+ - * | (RESURRECT) | - * | +--------------------------+ | - * | | (RESTORE) | | - * | | | | - * V V | | - * [HEALTHY]------------------->[STALE]------------------->[DEAD] - * | (TIMEOUT) | (TIMEOUT) | - * | | | - * | | | - * | | | - * | | | - * | (DECOMMISSION) | (DECOMMISSION) | (DECOMMISSION) - * | V | - * +------------------->[DECOMMISSIONING]<----------------+ - * | - * | (DECOMMISSIONED) - * | - * V - * [DECOMMISSIONED] - * - */ - - /** - * Initializes the lifecycle of node state machine. 
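A minimal sketch, not taken from the removed file, of how the transition table above is exercised at runtime. It assumes the StateMachine API this class already relies on (addTransition and getNextState, the latter throwing InvalidStateTransitionException for unregistered transitions); the initializeStateMachine() method that follows registers these same transitions on the real instance.

    Set<NodeState> finalStates = new HashSet<>();
    finalStates.add(NodeState.DECOMMISSIONED);
    StateMachine<NodeState, NodeLifeCycleEvent> machine =
        new StateMachine<>(NodeState.HEALTHY, finalStates);
    machine.addTransition(NodeState.HEALTHY, NodeState.STALE,
        NodeLifeCycleEvent.TIMEOUT);
    machine.addTransition(NodeState.STALE, NodeState.HEALTHY,
        NodeLifeCycleEvent.RESTORE);
    try {
      // A missed heartbeat window fires TIMEOUT: HEALTHY -> STALE.
      NodeState afterTimeout =
          machine.getNextState(NodeState.HEALTHY, NodeLifeCycleEvent.TIMEOUT);
      // A fresh heartbeat fires RESTORE: STALE -> HEALTHY.
      NodeState afterRestore =
          machine.getNextState(NodeState.STALE, NodeLifeCycleEvent.RESTORE);
    } catch (InvalidStateTransitionException e) {
      // Raised only when an event is fired from a state with no registered transition.
    }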
- */ - private void initializeStateMachine() { - stateMachine.addTransition( - NodeState.HEALTHY, NodeState.STALE, NodeLifeCycleEvent.TIMEOUT); - stateMachine.addTransition( - NodeState.STALE, NodeState.DEAD, NodeLifeCycleEvent.TIMEOUT); - stateMachine.addTransition( - NodeState.STALE, NodeState.HEALTHY, NodeLifeCycleEvent.RESTORE); - stateMachine.addTransition( - NodeState.DEAD, NodeState.HEALTHY, NodeLifeCycleEvent.RESURRECT); - stateMachine.addTransition( - NodeState.HEALTHY, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.STALE, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.DEAD, NodeState.DECOMMISSIONING, - NodeLifeCycleEvent.DECOMMISSION); - stateMachine.addTransition( - NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONED, - NodeLifeCycleEvent.DECOMMISSIONED); - - } - - /** - * Adds a new node to the state manager. - * - * @param datanodeDetails DatanodeDetails - * - * @throws NodeAlreadyExistsException if the node is already present - */ - public void addNode(DatanodeDetails datanodeDetails) - throws NodeAlreadyExistsException { - nodeStateMap.addNode(datanodeDetails, stateMachine.getInitialState()); - eventPublisher.fireEvent(SCMEvents.NEW_NODE, datanodeDetails); - } - - /** - * Adds a pipeline in the node2PipelineMap. - * @param pipeline - Pipeline to be added - */ - public void addPipeline(Pipeline pipeline) { - node2PipelineMap.addPipeline(pipeline); - } - - /** - * Get information about the node. - * - * @param datanodeDetails DatanodeDetails - * - * @return DatanodeInfo - * - * @throws NodeNotFoundException if the node is not present - */ - public DatanodeInfo getNode(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateMap.getNodeInfo(datanodeDetails.getUuid()); - } - - /** - * Updates the last heartbeat time of the node. - * - * @throws NodeNotFoundException if the node is not present - */ - public void updateLastHeartbeatTime(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - nodeStateMap.getNodeInfo(datanodeDetails.getUuid()) - .updateLastHeartbeatTime(); - } - - /** - * Returns the current state of the node. - * - * @param datanodeDetails DatanodeDetails - * - * @return NodeState - * - * @throws NodeNotFoundException if the node is not present - */ - public NodeState getNodeState(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateMap.getNodeState(datanodeDetails.getUuid()); - } - - /** - * Returns all the node which are in healthy state. - * - * @return list of healthy nodes - */ - public List getHealthyNodes() { - return getNodes(NodeState.HEALTHY); - } - - /** - * Returns all the node which are in stale state. - * - * @return list of stale nodes - */ - public List getStaleNodes() { - return getNodes(NodeState.STALE); - } - - /** - * Returns all the node which are in dead state. - * - * @return list of dead nodes - */ - public List getDeadNodes() { - return getNodes(NodeState.DEAD); - } - - /** - * Returns all the node which are in the specified state. 
- * - * @param state NodeState - * - * @return list of nodes - */ - public List getNodes(NodeState state) { - List nodes = new ArrayList<>(); - nodeStateMap.getNodes(state).forEach( - uuid -> { - try { - nodes.add(nodeStateMap.getNodeInfo(uuid)); - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - }); - return nodes; - } - - /** - * Returns all the nodes which have registered to NodeStateManager. - * - * @return all the managed nodes - */ - public List getAllNodes() { - List nodes = new ArrayList<>(); - nodeStateMap.getAllNodes().forEach( - uuid -> { - try { - nodes.add(nodeStateMap.getNodeInfo(uuid)); - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - }); - return nodes; - } - - /** - * Gets set of pipelineID a datanode belongs to. - * @param dnId - Datanode ID - * @return Set of PipelineID - */ - public Set getPipelineByDnID(UUID dnId) { - return node2PipelineMap.getPipelines(dnId); - } - - /** - * Returns the count of healthy nodes. - * - * @return healthy node count - */ - public int getHealthyNodeCount() { - return getNodeCount(NodeState.HEALTHY); - } - - /** - * Returns the count of stale nodes. - * - * @return stale node count - */ - public int getStaleNodeCount() { - return getNodeCount(NodeState.STALE); - } - - /** - * Returns the count of dead nodes. - * - * @return dead node count - */ - public int getDeadNodeCount() { - return getNodeCount(NodeState.DEAD); - } - - /** - * Returns the count of nodes in specified state. - * - * @param state NodeState - * - * @return node count - */ - public int getNodeCount(NodeState state) { - return nodeStateMap.getNodeCount(state); - } - - /** - * Returns the count of all nodes managed by NodeStateManager. - * - * @return node count - */ - public int getTotalNodeCount() { - return nodeStateMap.getTotalNodeCount(); - } - - /** - * Removes a pipeline from the node2PipelineMap. - * @param pipeline - Pipeline to be removed - */ - public void removePipeline(Pipeline pipeline) { - node2PipelineMap.removePipeline(pipeline); - } - - /** - * Adds the given container to the specified datanode. - * - * @param uuid - datanode uuid - * @param containerId - containerID - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - public void addContainer(final UUID uuid, - final ContainerID containerId) - throws NodeNotFoundException { - nodeStateMap.addContainer(uuid, containerId); - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. - */ - public void setContainers(UUID uuid, Set containerIds) - throws NodeNotFoundException { - nodeStateMap.setContainers(uuid, containerIds); - } - - /** - * Return set of containerIDs available on a datanode. 
- * @param uuid - DatanodeID - * @return - set of containerIDs - */ - public Set getContainers(UUID uuid) - throws NodeNotFoundException { - return nodeStateMap.getContainers(uuid); - } - - /** - * Move Stale or Dead node to healthy if we got a heartbeat from them. - * Move healthy nodes to stale nodes if it is needed. - * Move Stales node to dead if needed. - * - * @see Thread#run() - */ - @Override - public void run() { - - if (shouldSkipCheck()) { - skippedHealthChecks++; - LOG.info("Detected long delay in scheduling HB processing thread. " - + "Skipping heartbeat checks for one iteration."); - } else { - checkNodesHealth(); - } - - // we purposefully make this non-deterministic. Instead of using a - // scheduleAtFixedFrequency we will just go to sleep - // and wake up at the next rendezvous point, which is currentTime + - // heartbeatCheckerIntervalMs. This leads to the issue that we are now - // heart beating not at a fixed cadence, but clock tick + time taken to - // work. - // - // This time taken to work can skew the heartbeat processor thread. - // The reason why we don't care is because of the following reasons. - // - // 1. checkerInterval is general many magnitudes faster than datanode HB - // frequency. - // - // 2. if we have too much nodes, the SCM would be doing only HB - // processing, this could lead to SCM's CPU starvation. With this - // approach we always guarantee that HB thread sleeps for a little while. - // - // 3. It is possible that we will never finish processing the HB's in the - // thread. But that means we have a mis-configured system. We will warn - // the users by logging that information. - // - // 4. And the most important reason, heartbeats are not blocked even if - // this thread does not run, they will go into the processing queue. - scheduleNextHealthCheck(); - } - - private void checkNodesHealth() { - - /* - * - * staleNodeDeadline healthyNodeDeadline - * | | - * Dead | Stale | Healthy - * Node | Node | Node - * Window | Window | Window - * ----------------+----------------------------------+-------------------> - * >>-->> time-line >>-->> - * - * Here is the logic of computing the health of a node. -     * -     * 1. We get the current time and look back that the time -     *  when we got a heartbeat from a node. -     *  -     * 2. If the last heartbeat was within the window of healthy node we mark -     *  it as healthy. -     *  -     * 3. If the last HB Time stamp is longer and falls within the window of -     *  Stale Node time, we will mark it as Stale. -     *  -     * 4. If the last HB time is older than the Stale Window, then the node is -     * marked as dead. - * - * The Processing starts from current time and looks backwards in time. - */ - long processingStartTime = Time.monotonicNow(); - // After this time node is considered to be stale. - long healthyNodeDeadline = processingStartTime - staleNodeIntervalMs; - // After this time node is considered to be dead. 
- long staleNodeDeadline = processingStartTime - deadNodeIntervalMs; - - Predicate healthyNodeCondition = - (lastHbTime) -> lastHbTime >= healthyNodeDeadline; - // staleNodeCondition is superset of stale and dead node - Predicate staleNodeCondition = - (lastHbTime) -> lastHbTime < healthyNodeDeadline; - Predicate deadNodeCondition = - (lastHbTime) -> lastHbTime < staleNodeDeadline; - try { - for (NodeState state : NodeState.values()) { - List nodes = nodeStateMap.getNodes(state); - for (UUID id : nodes) { - DatanodeInfo node = nodeStateMap.getNodeInfo(id); - switch (state) { - case HEALTHY: - // Move the node to STALE if the last heartbeat time is less than - // configured stale-node interval. - updateNodeState(node, staleNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - break; - case STALE: - // Move the node to DEAD if the last heartbeat time is less than - // configured dead-node interval. - updateNodeState(node, deadNodeCondition, state, - NodeLifeCycleEvent.TIMEOUT); - // Restore the node if we have received heartbeat before configured - // stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESTORE); - break; - case DEAD: - // Resurrect the node if we have received heartbeat before - // configured stale-node interval. - updateNodeState(node, healthyNodeCondition, state, - NodeLifeCycleEvent.RESURRECT); - break; - // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in - // heartbeat processing. - case DECOMMISSIONING: - case DECOMMISSIONED: - default: - } - } - } - } catch (NodeNotFoundException e) { - // This should not happen unless someone else other than - // NodeStateManager is directly modifying NodeStateMap and removed - // the node entry after we got the list of UUIDs. - LOG.error("Inconsistent NodeStateMap! " + nodeStateMap); - } - long processingEndTime = Time.monotonicNow(); - //If we have taken too much time for HB processing, log that information. - if ((processingEndTime - processingStartTime) > - heartbeatCheckerIntervalMs) { - LOG.error("Total time spend processing datanode HB's is greater than " + - "configured values for datanode heartbeats. Please adjust the" + - " heartbeat configs. Time Spend on HB processing: {} seconds " + - "Datanode heartbeat Interval: {} seconds.", - TimeUnit.MILLISECONDS - .toSeconds(processingEndTime - processingStartTime), - heartbeatCheckerIntervalMs); - } - - } - - private void scheduleNextHealthCheck() { - - if (!Thread.currentThread().isInterrupted() && - !executorService.isShutdown()) { - //BUGBUG: The return future needs to checked here to make sure the - // exceptions are handled correctly. - healthCheckFuture = executorService.schedule(this, - heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); - } else { - LOG.warn("Current Thread is interrupted, shutting down HB processing " + - "thread for Node Manager."); - } - - lastHealthCheck = Time.monotonicNow(); - } - - /** - * if the time since last check exceeds the stale|dead node interval, skip. - * such long delays might be caused by a JVM pause. SCM cannot make reliable - * conclusions about datanode health in such situations. - * @return : true indicates skip HB checks - */ - private boolean shouldSkipCheck() { - - long currentTime = Time.monotonicNow(); - long minInterval = Math.min(staleNodeIntervalMs, deadNodeIntervalMs); - - return ((currentTime - lastHealthCheck) >= minInterval); - } - - /** - * Updates the node state if the condition satisfies. 
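A small worked example, added for illustration rather than taken from the removed file, of the window logic in checkNodesHealth() above; the interval values are made up and stand in for the configured stale and dead node intervals.

    // Illustrative values: stale after 90 s, dead after 10 min without a heartbeat.
    long staleNodeIntervalMs = 90_000L;
    long deadNodeIntervalMs = 600_000L;
    long now = Time.monotonicNow();

    long healthyNodeDeadline = now - staleNodeIntervalMs; // older than this => not healthy
    long staleNodeDeadline = now - deadNodeIntervalMs;    // older than this => dead

    Predicate<Long> healthyNodeCondition = lastHb -> lastHb >= healthyNodeDeadline;
    Predicate<Long> deadNodeCondition = lastHb -> lastHb < staleNodeDeadline;

    long lastHeartbeat = now - 120_000L;       // last heard from the node two minutes ago
    healthyNodeCondition.test(lastHeartbeat);  // false: outside the healthy window
    deadNodeCondition.test(lastHeartbeat);     // false: not yet past the dead deadline
    // The node therefore sits in the stale window, so TIMEOUT moves it HEALTHY -> STALE.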
- * - * @param node DatanodeInfo - * @param condition condition to check - * @param state current state of node - * @param lifeCycleEvent NodeLifeCycleEvent to be applied if condition - * matches - * - * @throws NodeNotFoundException if the node is not present - */ - private void updateNodeState(DatanodeInfo node, Predicate condition, - NodeState state, NodeLifeCycleEvent lifeCycleEvent) - throws NodeNotFoundException { - try { - if (condition.test(node.getLastHeartbeatTime())) { - NodeState newState = stateMachine.getNextState(state, lifeCycleEvent); - nodeStateMap.updateNodeState(node.getUuid(), state, newState); - if (state2EventMap.containsKey(newState)) { - eventPublisher.fireEvent(state2EventMap.get(newState), node); - } - } - } catch (InvalidStateTransitionException e) { - LOG.warn("Invalid state transition of node {}." + - " Current state: {}, life cycle event: {}", - node, state, lifeCycleEvent); - } - } - - @Override - public void close() { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - - if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) { - LOG.error("Unable to shutdown NodeStateManager properly."); - } - } catch (InterruptedException e) { - executorService.shutdownNow(); - Thread.currentThread().interrupt(); - } - } - - /** - * Test Utility : return number of times heartbeat check was skipped. - * @return : count of times HB process was skipped - */ - @VisibleForTesting - long getSkippedHealthChecks() { - return skippedHealthChecks; - } - - /** - * Test Utility : Pause the periodic node hb check. - * @return ScheduledFuture for the scheduled check that got cancelled. - */ - @VisibleForTesting - ScheduledFuture pause() { - - if (executorService.isShutdown() || checkPaused) { - return null; - } - - checkPaused = healthCheckFuture.cancel(false); - - return healthCheckFuture; - } - - /** - * Test utility : unpause the periodic node hb check. - * @return ScheduledFuture for the next scheduled check - */ - @VisibleForTesting - ScheduledFuture unpause() { - - if (executorService.isShutdown()) { - return null; - } - - if (checkPaused) { - Preconditions.checkState(((healthCheckFuture == null) - || healthCheckFuture.isCancelled() - || healthCheckFuture.isDone())); - - checkPaused = false; - /** - * We do not call scheduleNextHealthCheck because we are - * not updating the lastHealthCheck timestamp. - */ - healthCheckFuture = executorService.schedule(this, - heartbeatCheckerIntervalMs, TimeUnit.MILLISECONDS); - } - - return healthCheckFuture; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java deleted file mode 100644 index 5976c17a607..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -/** - * Handles Stale node event. - */ -public class NonHealthyToHealthyNodeHandler - implements EventHandler { - - private final PipelineManager pipelineManager; - private final Configuration conf; - - public NonHealthyToHealthyNodeHandler( - PipelineManager pipelineManager, OzoneConfiguration conf) { - this.pipelineManager = pipelineManager; - this.conf = conf; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - pipelineManager.triggerPipelineCreation(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java deleted file mode 100644 index e1e1d6cf3e6..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ /dev/null @@ -1,684 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.node; - -import javax.management.ObjectName; -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.LinkedList; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledFuture; -import java.util.stream.Collectors; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.CachedDNSToSwitchMapping; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.TableMapping; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.util.ReflectionUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Maintains information about the Datanodes on SCM side. - *

- * Heartbeat handling under SCM is very simple compared to the HDFS HeartbeatManager. - *

- * The getNode(byState) functions make copy of node maps and then creates a list - * based on that. It should be assumed that these get functions always report - * *stale* information. For example, getting the deadNodeCount followed by - * getNodes(DEAD) could very well produce totally different count. Also - * getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCode(STALE), is not - * guaranteed to add up to the total nodes that we know off. Please treat all - * get functions in this file as a snap-shot of information that is inconsistent - * as soon as you read it. - */ -public class SCMNodeManager implements NodeManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMNodeManager.class); - - private final NodeStateManager nodeStateManager; - private final VersionInfo version; - private final CommandQueue commandQueue; - private final SCMNodeMetrics metrics; - // Node manager MXBean - private ObjectName nmInfoBean; - private final SCMStorageConfig scmStorageConfig; - private final NetworkTopology clusterMap; - private final DNSToSwitchMapping dnsToSwitchMapping; - private final boolean useHostname; - private final ConcurrentHashMap> dnsToUuidMap = - new ConcurrentHashMap<>(); - - /** - * Constructs SCM machine Manager. - */ - public SCMNodeManager(OzoneConfiguration conf, - SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher, - NetworkTopology networkTopology) { - this.nodeStateManager = new NodeStateManager(conf, eventPublisher); - this.version = VersionInfo.getLatestVersion(); - this.commandQueue = new CommandQueue(); - this.scmStorageConfig = scmStorageConfig; - LOG.info("Entering startup safe mode."); - registerMXBean(); - this.metrics = SCMNodeMetrics.create(this); - this.clusterMap = networkTopology; - Class dnsToSwitchMappingClass = - conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - TableMapping.class, DNSToSwitchMapping.class); - DNSToSwitchMapping newInstance = ReflectionUtils.newInstance( - dnsToSwitchMappingClass, conf); - this.dnsToSwitchMapping = - ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance - : new CachedDNSToSwitchMapping(newInstance)); - this.useHostname = conf.getBoolean( - DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, - DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); - } - - private void registerMXBean() { - this.nmInfoBean = MBeans.register("SCMNodeManager", - "SCMNodeManagerInfo", this); - } - - private void unregisterMXBean() { - if (this.nmInfoBean != null) { - MBeans.unregister(this.nmInfoBean); - this.nmInfoBean = null; - } - } - - - /** - * Returns all datanode that are in the given state. This function works by - * taking a snapshot of the current collection and then returning the list - * from that collection. This means that real map might have changed by the - * time we return this list. - * - * @return List of Datanodes that are known to SCM in the requested state. - */ - @Override - public List getNodes(NodeState nodestate) { - return nodeStateManager.getNodes(nodestate).stream() - .map(node -> (DatanodeDetails)node).collect(Collectors.toList()); - } - - /** - * Returns all datanodes that are known to SCM. - * - * @return List of DatanodeDetails - */ - @Override - public List getAllNodes() { - return nodeStateManager.getAllNodes().stream() - .map(node -> (DatanodeDetails)node).collect(Collectors.toList()); - } - - /** - * Returns the Number of Datanodes by State they are in. 
- * - * @return count - */ - @Override - public int getNodeCount(NodeState nodestate) { - return nodeStateManager.getNodeCount(nodestate); - } - - /** - * Returns the node state of a specific node. - * - * @param datanodeDetails Datanode Details - * @return Healthy/Stale/Dead/Unknown. - */ - @Override - public NodeState getNodeState(DatanodeDetails datanodeDetails) { - try { - return nodeStateManager.getNodeState(datanodeDetails); - } catch (NodeNotFoundException e) { - // TODO: should we throw NodeNotFoundException? - return null; - } - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - unregisterMXBean(); - metrics.unRegister(); - nodeStateManager.close(); - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return VersionResponse.newBuilder() - .setVersion(this.version.getVersion()) - .addValue(OzoneConsts.SCM_ID, - this.scmStorageConfig.getScmId()) - .addValue(OzoneConsts.CLUSTER_ID, this.scmStorageConfig.getClusterID()) - .build(); - } - - /** - * Register the node if the node finds that it is not registered with any - * SCM. - * - * @param datanodeDetails - Send datanodeDetails with Node info. - * This function generates and assigns new datanode ID - * for the datanode. This allows SCM to be run independent - * of Namenode if required. - * @param nodeReport NodeReport. - * - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register( - DatanodeDetails datanodeDetails, NodeReportProto nodeReport, - PipelineReportsProto pipelineReportsProto) { - - InetAddress dnAddress = Server.getRemoteIp(); - if (dnAddress != null) { - // Mostly called inside an RPC, update ip and peer hostname - datanodeDetails.setHostName(dnAddress.getHostName()); - datanodeDetails.setIpAddress(dnAddress.getHostAddress()); - } - try { - String dnsName; - String networkLocation; - datanodeDetails.setNetworkName(datanodeDetails.getUuidString()); - if (useHostname) { - dnsName = datanodeDetails.getHostName(); - } else { - dnsName = datanodeDetails.getIpAddress(); - } - networkLocation = nodeResolve(dnsName); - if (networkLocation != null) { - datanodeDetails.setNetworkLocation(networkLocation); - } - nodeStateManager.addNode(datanodeDetails); - clusterMap.add(datanodeDetails); - addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString()); - // Updating Node Report, as registration is successful - processNodeReport(datanodeDetails, nodeReport); - LOG.info("Registered Data node : {}", datanodeDetails); - } catch (NodeAlreadyExistsException e) { - if (LOG.isTraceEnabled()) { - LOG.trace("Datanode is already registered. Datanode: {}", - datanodeDetails.toString()); - } - } - - return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success) - .setDatanode(datanodeDetails) - .setClusterID(this.scmStorageConfig.getClusterID()) - .build(); - } - - /** - * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs - * running on that host. As each address can have many DNs running on it, - * this is a one to many mapping. 
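As a sketch of the one-to-many mapping described above (illustrative, not from the removed file): the same idea expressed with computeIfAbsent; the class itself keeps the whole update inside the synchronized helper shown just below.

    // One hostname or IP can host several datanodes, so map each address to a set of UUIDs.
    ConcurrentHashMap<String, Set<String>> dnsToUuidMap = new ConcurrentHashMap<>();

    void addEntry(String dnsName, String uuid) {
      dnsToUuidMap.computeIfAbsent(dnsName, k -> ConcurrentHashMap.newKeySet())
          .add(uuid);
    }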
- * @param dnsName String representing the hostname or IP of the node - * @param uuid String representing the UUID of the registered node. - */ - @SuppressFBWarnings(value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", - justification="The method is synchronized and this is the only place "+ - "dnsToUuidMap is modified") - private synchronized void addEntryTodnsToUuidMap( - String dnsName, String uuid) { - Set dnList = dnsToUuidMap.get(dnsName); - if (dnList == null) { - dnList = ConcurrentHashMap.newKeySet(); - dnsToUuidMap.put(dnsName, dnList); - } - dnList.add(uuid); - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param datanodeDetails - DatanodeDetailsProto. - * @return SCMheartbeat response. - */ - @Override - public List processHeartbeat(DatanodeDetails datanodeDetails) { - Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " + - "DatanodeDetails."); - try { - nodeStateManager.updateLastHeartbeatTime(datanodeDetails); - metrics.incNumHBProcessed(); - } catch (NodeNotFoundException e) { - metrics.incNumHBProcessingFailed(); - LOG.error("SCM trying to process heartbeat from an " + - "unregistered node {}. Ignoring the heartbeat.", datanodeDetails); - } - return commandQueue.getCommand(datanodeDetails.getUuid()); - } - - @Override - public Boolean isNodeRegistered(DatanodeDetails datanodeDetails) { - try { - nodeStateManager.getNode(datanodeDetails); - return true; - } catch (NodeNotFoundException e) { - return false; - } - } - - /** - * Process node report. - * - * @param datanodeDetails - * @param nodeReport - */ - @Override - public void processNodeReport(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport) { - if (LOG.isDebugEnabled()) { - LOG.debug("Processing node report from [datanode={}]", - datanodeDetails.getHostName()); - } - if (LOG.isTraceEnabled()) { - LOG.trace("HB is received from [datanode={}]: {}", - datanodeDetails.getHostName(), - nodeReport.toString().replaceAll("\n", "\\\\n")); - } - try { - DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails); - if (nodeReport != null) { - datanodeInfo.updateStorageReports(nodeReport.getStorageReportList()); - metrics.incNumNodeReportProcessed(); - } - } catch (NodeNotFoundException e) { - metrics.incNumNodeReportProcessingFailed(); - LOG.warn("Got node report from unregistered datanode {}", - datanodeDetails); - } - } - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - long capacity = 0L; - long used = 0L; - long remaining = 0L; - - for (SCMNodeStat stat : getNodeStats().values()) { - capacity += stat.getCapacity().get(); - used += stat.getScmUsed().get(); - remaining += stat.getRemaining().get(); - } - return new SCMNodeStat(capacity, used, remaining); - } - - /** - * Return a map of node stats. - * @return a map of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - - final Map nodeStats = new HashMap<>(); - - final List healthyNodes = nodeStateManager - .getNodes(NodeState.HEALTHY); - final List staleNodes = nodeStateManager - .getNodes(NodeState.STALE); - final List datanodes = new ArrayList<>(healthyNodes); - datanodes.addAll(staleNodes); - - for (DatanodeInfo dnInfo : datanodes) { - SCMNodeStat nodeStat = getNodeStatInternal(dnInfo); - if (nodeStat != null) { - nodeStats.put(dnInfo, nodeStat); - } - } - return nodeStats; - } - - /** - * Return the node stat of the specified datanode. 
- * @param datanodeDetails - datanode ID. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { - final SCMNodeStat nodeStat = getNodeStatInternal(datanodeDetails); - return nodeStat != null ? new SCMNodeMetric(nodeStat) : null; - } - - private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) { - try { - long capacity = 0L; - long used = 0L; - long remaining = 0L; - - final DatanodeInfo datanodeInfo = nodeStateManager - .getNode(datanodeDetails); - final List storageReportProtos = datanodeInfo - .getStorageReports(); - for (StorageReportProto reportProto : storageReportProtos) { - capacity += reportProto.getCapacity(); - used += reportProto.getScmUsed(); - remaining += reportProto.getRemaining(); - } - return new SCMNodeStat(capacity, used, remaining); - } catch (NodeNotFoundException e) { - LOG.warn("Cannot generate NodeStat, datanode {} not found.", - datanodeDetails.getUuid()); - return null; - } - } - - @Override - public Map getNodeCount() { - Map nodeCountMap = new HashMap(); - for(NodeState state : NodeState.values()) { - nodeCountMap.put(state.toString(), getNodeCount(state)); - } - return nodeCountMap; - } - - // We should introduce DISK, SSD, etc., notion in - // SCMNodeStat and try to use it. - @Override - public Map getNodeInfo() { - long diskCapacity = 0L; - long diskUsed = 0L; - long diskRemaning = 0L; - - long ssdCapacity = 0L; - long ssdUsed = 0L; - long ssdRemaining = 0L; - - List healthyNodes = nodeStateManager - .getNodes(NodeState.HEALTHY); - List staleNodes = nodeStateManager - .getNodes(NodeState.STALE); - - List datanodes = new ArrayList<>(healthyNodes); - datanodes.addAll(staleNodes); - - for (DatanodeInfo dnInfo : datanodes) { - List storageReportProtos = dnInfo.getStorageReports(); - for (StorageReportProto reportProto : storageReportProtos) { - if (reportProto.getStorageType() == - StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) { - diskCapacity += reportProto.getCapacity(); - diskRemaning += reportProto.getRemaining(); - diskUsed += reportProto.getScmUsed(); - } else if (reportProto.getStorageType() == - StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) { - ssdCapacity += reportProto.getCapacity(); - ssdRemaining += reportProto.getRemaining(); - ssdUsed += reportProto.getScmUsed(); - } - } - } - - Map nodeInfo = new HashMap<>(); - nodeInfo.put("DISKCapacity", diskCapacity); - nodeInfo.put("DISKUsed", diskUsed); - nodeInfo.put("DISKRemaining", diskRemaning); - - nodeInfo.put("SSDCapacity", ssdCapacity); - nodeInfo.put("SSDUsed", ssdUsed); - nodeInfo.put("SSDRemaining", ssdRemaining); - return nodeInfo; - } - - - /** - * Get set of pipelines a datanode is part of. - * @param datanodeDetails - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelines(DatanodeDetails datanodeDetails) { - return nodeStateManager.getPipelineByDnID(datanodeDetails.getUuid()); - } - - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - nodeStateManager.addPipeline(pipeline); - } - - /** - * Remove a pipeline information from the NodeManager. 
- * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - nodeStateManager.removePipeline(pipeline); - } - - @Override - public void addContainer(final DatanodeDetails datanodeDetails, - final ContainerID containerId) - throws NodeNotFoundException { - nodeStateManager.addContainer(datanodeDetails.getUuid(), containerId); - } - - /** - * Update set of containers available on a datanode. - * @param datanodeDetails - DatanodeID - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - @Override - public void setContainers(DatanodeDetails datanodeDetails, - Set containerIds) throws NodeNotFoundException { - nodeStateManager.setContainers(datanodeDetails.getUuid(), - containerIds); - } - - /** - * Return set of containerIDs available on a datanode. - * @param datanodeDetails - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(DatanodeDetails datanodeDetails) - throws NodeNotFoundException { - return nodeStateManager.getContainers(datanodeDetails.getUuid()); - } - - // TODO: - // Since datanode commands are added through event queue, onMessage method - // should take care of adding commands to command queue. - // Refactor and remove all the usage of this method and delete this method. - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - this.commandQueue.addCommand(dnId, command); - } - - /** - * This method is called by EventQueue whenever someone adds a new - * DATANODE_COMMAND to the Queue. - * - * @param commandForDatanode DatanodeCommand - * @param ignored publisher - */ - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher ignored) { - addDatanodeCommand(commandForDatanode.getDatanodeId(), - commandForDatanode.getCommand()); - } - - @Override - public List getCommandQueue(UUID dnID) { - return commandQueue.getCommand(dnID); - } - - /** - * Given datanode uuid, returns the DatanodeDetails for the node. - * - * @param uuid node host address - * @return the given datanode, or null if not found - */ - @Override - public DatanodeDetails getNodeByUuid(String uuid) { - if (Strings.isNullOrEmpty(uuid)) { - LOG.warn("uuid is null"); - return null; - } - DatanodeDetails temp = DatanodeDetails.newBuilder().setUuid(uuid).build(); - try { - return nodeStateManager.getNode(temp); - } catch (NodeNotFoundException e) { - LOG.warn("Cannot find node for uuid {}", uuid); - return null; - } - } - - /** - * Given datanode address(Ipaddress or hostname), return a list of - * DatanodeDetails for the datanodes registered on that address. 
- * - * @param address datanode address - * @return the given datanode, or empty list if none found - */ - @Override - public List getNodesByAddress(String address) { - List results = new LinkedList<>(); - if (Strings.isNullOrEmpty(address)) { - LOG.warn("address is null"); - return results; - } - Set uuids = dnsToUuidMap.get(address); - if (uuids == null) { - LOG.warn("Cannot find node for address {}", address); - return results; - } - - for (String uuid : uuids) { - DatanodeDetails temp = DatanodeDetails.newBuilder().setUuid(uuid).build(); - try { - results.add(nodeStateManager.getNode(temp)); - } catch (NodeNotFoundException e) { - LOG.warn("Cannot find node for uuid {}", uuid); - } - } - return results; - } - - private String nodeResolve(String hostname) { - List hosts = new ArrayList<>(1); - hosts.add(hostname); - List resolvedHosts = dnsToSwitchMapping.resolve(hosts); - if (resolvedHosts != null && !resolvedHosts.isEmpty()) { - String location = resolvedHosts.get(0); - if (LOG.isDebugEnabled()) { - LOG.debug("Resolve datanode {} return location {}", hostname, location); - } - return location; - } else { - LOG.error("Node {} Resolution failed. Please make sure that DNS table " + - "mapping or configured mapping is functional.", hostname); - return null; - } - } - - /** - * Test utility to stop heartbeat check process. - * @return ScheduledFuture of next scheduled check that got cancelled. - */ - @VisibleForTesting - ScheduledFuture pauseHealthCheck() { - return nodeStateManager.pause(); - } - - /** - * Test utility to resume the paused heartbeat check process. - * @return ScheduledFuture of the next scheduled check - */ - @VisibleForTesting - ScheduledFuture unpauseHealthCheck() { - return nodeStateManager.unpause(); - } - - /** - * Test utility to get the count of skipped heartbeat check iterations. - * @return count of skipped heartbeat check iterations - */ - @VisibleForTesting - long getSkippedHealthChecks() { - return nodeStateManager.getSkippedHealthChecks(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java deleted file mode 100644 index 1596523bbcd..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; - -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * This class maintains Node related metrics. - */ -@InterfaceAudience.Private -@Metrics(about = "SCM NodeManager Metrics", context = "ozone") -public final class SCMNodeMetrics implements MetricsSource { - - private static final String SOURCE_NAME = - SCMNodeMetrics.class.getSimpleName(); - - private @Metric MutableCounterLong numHBProcessed; - private @Metric MutableCounterLong numHBProcessingFailed; - private @Metric MutableCounterLong numNodeReportProcessed; - private @Metric MutableCounterLong numNodeReportProcessingFailed; - - private final MetricsRegistry registry; - private final NodeManagerMXBean managerMXBean; - private final MetricsInfo recordInfo = Interns.info("SCMNodeManager", - "SCM NodeManager metrics"); - - /** Private constructor. */ - private SCMNodeMetrics(NodeManagerMXBean managerMXBean) { - this.managerMXBean = managerMXBean; - this.registry = new MetricsRegistry(recordInfo); - } - - /** - * Create and returns SCMNodeMetrics instance. - * - * @return SCMNodeMetrics - */ - public static SCMNodeMetrics create(NodeManagerMXBean managerMXBean) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "SCM NodeManager Metrics", - new SCMNodeMetrics(managerMXBean)); - } - - /** - * Unregister the metrics instance. - */ - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - /** - * Increments number of heartbeat processed count. - */ - void incNumHBProcessed() { - numHBProcessed.incr(); - } - - /** - * Increments number of heartbeat processing failed count. - */ - void incNumHBProcessingFailed() { - numHBProcessingFailed.incr(); - } - - /** - * Increments number of node report processed count. - */ - void incNumNodeReportProcessed() { - numNodeReportProcessed.incr(); - } - - /** - * Increments number of node report processing failed count. - */ - void incNumNodeReportProcessingFailed() { - numNodeReportProcessingFailed.incr(); - } - - /** - * Get aggregated counter and gauage metrics. 
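A brief usage sketch, mirroring how SCMNodeManager earlier in this patch drives these counters; nodeManagerMXBean stands for any NodeManagerMXBean implementation (SCMNodeManager passes itself) and is not a name from the removed sources.

    // Register the source once at startup.
    SCMNodeMetrics metrics = SCMNodeMetrics.create(nodeManagerMXBean);

    // Bump the counters as heartbeats and node reports are handled.
    metrics.incNumHBProcessed();
    metrics.incNumNodeReportProcessed();

    // Failure paths increment the matching *Failed counters instead.
    metrics.incNumHBProcessingFailed();

    // Unregister on shutdown so the metrics system drops the source.
    metrics.unRegister();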
- */ - @Override - @SuppressWarnings("SuspiciousMethodCalls") - public void getMetrics(MetricsCollector collector, boolean all) { - Map nodeCount = managerMXBean.getNodeCount(); - Map nodeInfo = managerMXBean.getNodeInfo(); - - registry.snapshot( - collector.addRecord(registry.info()) // Add annotated ones first - .addGauge(Interns.info( - "HealthyNodes", - "Number of healthy datanodes"), - nodeCount.get(HEALTHY.toString())) - .addGauge(Interns.info("StaleNodes", - "Number of stale datanodes"), - nodeCount.get(STALE.toString())) - .addGauge(Interns.info("DeadNodes", - "Number of dead datanodes"), - nodeCount.get(DEAD.toString())) - .addGauge(Interns.info("DecommissioningNodes", - "Number of decommissioning datanodes"), - nodeCount.get(DECOMMISSIONING.toString())) - .addGauge(Interns.info("DecommissionedNodes", - "Number of decommissioned datanodes"), - nodeCount.get(DECOMMISSIONED.toString())) - .addGauge(Interns.info("DiskCapacity", - "Total disk capacity"), - nodeInfo.get("DISKCapacity")) - .addGauge(Interns.info("DiskUsed", - "Total disk capacity used"), - nodeInfo.get("DISKUsed")) - .addGauge(Interns.info("DiskRemaining", - "Total disk capacity remaining"), - nodeInfo.get("DISKRemaining")) - .addGauge(Interns.info("SSDCapacity", - "Total ssd capacity"), - nodeInfo.get("SSDCapacity")) - .addGauge(Interns.info("SSDUsed", - "Total ssd capacity used"), - nodeInfo.get("SSDUsed")) - .addGauge(Interns.info("SSDRemaining", - "Total disk capacity remaining"), - nodeInfo.get("SSDRemaining")), - all); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java deleted file mode 100644 index 32ecbad50ab..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.util.Set; -import java.util.UUID; - -/** - * - * This is the JMX management interface for node manager information. - */ -@InterfaceAudience.Private -public interface SCMNodeStorageStatMXBean { - /** - * Get the capacity of the dataNode. - * @param datanodeID Datanode Id - * @return long - */ - long getCapacity(UUID datanodeID); - - /** - * Returns the remaining space of a Datanode. - * @param datanodeId Datanode Id - * @return long - */ - long getRemainingSpace(UUID datanodeId); - - - /** - * Returns used space in bytes of a Datanode. 
- * @return long - */ - long getUsedSpace(UUID datanodeId); - - /** - * Returns the total capacity of all dataNodes. - * @return long - */ - long getTotalCapacity(); - - /** - * Returns the total Used Space in all Datanodes. - * @return long - */ - long getTotalSpaceUsed(); - - /** - * Returns the total Remaining Space in all Datanodes. - * @return long - */ - long getTotalFreeSpace(); - - /** - * Returns the set of disks for a given Datanode. - * @return set of storage volumes - */ - Set getStorageVolumes(UUID datanodeId); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java deleted file mode 100644 index 1b0e5b56e77..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node; - - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE; - -/** - * This data structure maintains the disk space capacity, disk usage and free - * space availability per Datanode. - * This information is built from the DN node reports. - */ -public class SCMNodeStorageStatMap implements SCMNodeStorageStatMXBean { - static final Logger LOG = - LoggerFactory.getLogger(SCMNodeStorageStatMap.class); - - private final double warningUtilizationThreshold; - private final double criticalUtilizationThreshold; - - private final Map> scmNodeStorageReportMap; - // NodeStorageInfo MXBean - private ObjectName scmNodeStorageInfoBean; - /** - * constructs the scmNodeStorageReportMap object. 
- */ - public SCMNodeStorageStatMap(OzoneConfiguration conf) { - // scmNodeStorageReportMap = new ConcurrentHashMap<>(); - scmNodeStorageReportMap = new ConcurrentHashMap<>(); - warningUtilizationThreshold = conf.getDouble( - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD, - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT); - criticalUtilizationThreshold = conf.getDouble( - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD, - OzoneConfigKeys. - HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT); - } - - /** - * Enum that Describes what we should do at various thresholds. - */ - public enum UtilizationThreshold { - NORMAL, WARN, CRITICAL; - } - - /** - * Returns true if this a datanode that is already tracked by - * scmNodeStorageReportMap. - * - * @param datanodeID - UUID of the Datanode. - * @return True if this is tracked, false if this map does not know about it. - */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return scmNodeStorageReportMap.containsKey(datanodeID); - } - - public List getDatanodeList( - UtilizationThreshold threshold) { - return scmNodeStorageReportMap.entrySet().stream().filter( - entry -> (isThresholdReached(threshold, - getScmUsedratio(getUsedSpace(entry.getKey()), - getCapacity(entry.getKey()))))) - .map(Map.Entry::getKey) - .collect(Collectors.toList()); - } - - - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param report - set if StorageReports. - */ - public void insertNewDatanode(UUID datanodeID, - Set report) throws SCMException { - Preconditions.checkNotNull(report); - Preconditions.checkState(report.size() != 0); - Preconditions.checkNotNull(datanodeID); - synchronized (scmNodeStorageReportMap) { - if (isKnownDatanode(datanodeID)) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); - } - scmNodeStorageReportMap.putIfAbsent(datanodeID, report); - } - } - - //TODO: This should be called once SCMNodeManager gets Started. - private void registerMXBean() { - this.scmNodeStorageInfoBean = MBeans.register("StorageContainerManager", - "scmNodeStorageInfo", this); - } - - //TODO: Unregister call should happen as a part of SCMNodeManager shutdown. - private void unregisterMXBean() { - if(this.scmNodeStorageInfoBean != null) { - MBeans.unregister(this.scmNodeStorageInfoBean); - this.scmNodeStorageInfoBean = null; - } - } - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param report - set of Storage Reports for the Datanode. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. 
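The insert and update paths deleted in this hunk guard a per-datanode report map: a duplicate registration fails loudly, and an update for an unknown datanode is rejected instead of silently creating an entry. A JDK-only sketch of the same contract follows; the class name and the generic report type are placeholders, and IllegalStateException stands in for the SCMException used above.

import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch only: per-datanode report set with explicit insert vs. update. */
public class DatanodeReportMap<R> {

  private final Map<UUID, Set<R>> reports = new ConcurrentHashMap<>();

  public boolean isKnownDatanode(UUID datanodeId) {
    return reports.containsKey(datanodeId);
  }

  /** Registers a new datanode; fails if it is already tracked. */
  public void insertNewDatanode(UUID datanodeId, Set<R> report) {
    if (reports.putIfAbsent(datanodeId, report) != null) {
      throw new IllegalStateException("Node already exists: " + datanodeId);
    }
  }

  /** Replaces the report set of a known datanode; fails if it is unknown. */
  public void updateDatanode(UUID datanodeId, Set<R> report) {
    if (reports.computeIfPresent(datanodeId, (k, v) -> report) == null) {
      throw new IllegalStateException("No such datanode: " + datanodeId);
    }
  }
}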
- */ - public void updateDatanodeMap(UUID datanodeID, - Set report) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(report); - Preconditions.checkState(report.size() != 0); - synchronized (scmNodeStorageReportMap) { - if (!scmNodeStorageReportMap.containsKey(datanodeID)) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - scmNodeStorageReportMap.put(datanodeID, report); - } - } - - public StorageReportResult processNodeReport(UUID datanodeID, - StorageContainerDatanodeProtocolProtos.NodeReportProto nodeReport) - throws IOException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(nodeReport); - - long totalCapacity = 0; - long totalRemaining = 0; - long totalScmUsed = 0; - Set storagReportSet = new HashSet<>(); - Set fullVolumeSet = new HashSet<>(); - Set failedVolumeSet = new HashSet<>(); - List - storageReports = nodeReport.getStorageReportList(); - for (StorageReportProto report : storageReports) { - StorageLocationReport storageReport = - StorageLocationReport.getFromProtobuf(report); - storagReportSet.add(storageReport); - if (report.hasFailed() && report.getFailed()) { - failedVolumeSet.add(storageReport); - } else if (isThresholdReached(UtilizationThreshold.CRITICAL, - getScmUsedratio(report.getScmUsed(), report.getCapacity()))) { - fullVolumeSet.add(storageReport); - } - totalCapacity += report.getCapacity(); - totalRemaining += report.getRemaining(); - totalScmUsed += report.getScmUsed(); - } - - if (!isKnownDatanode(datanodeID)) { - insertNewDatanode(datanodeID, storagReportSet); - } else { - updateDatanodeMap(datanodeID, storagReportSet); - } - if (isThresholdReached(UtilizationThreshold.CRITICAL, - getScmUsedratio(totalScmUsed, totalCapacity))) { - LOG.warn("Datanode {} is out of storage space. Capacity: {}, Used: {}", - datanodeID, totalCapacity, totalScmUsed); - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.DATANODE_OUT_OF_SPACE) - .setFullVolumeSet(fullVolumeSet).setFailedVolumeSet(failedVolumeSet) - .build(); - } - if (isThresholdReached(UtilizationThreshold.WARN, - getScmUsedratio(totalScmUsed, totalCapacity))) { - LOG.warn("Datanode {} is low on storage space. 
Capacity: {}, Used: {}", - datanodeID, totalCapacity, totalScmUsed); - } - - if (failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.STORAGE_OUT_OF_SPACE) - .setFullVolumeSet(fullVolumeSet).build(); - } - - if (!failedVolumeSet.isEmpty() && fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.FAILED_STORAGE) - .setFailedVolumeSet(failedVolumeSet).build(); - } - if (!failedVolumeSet.isEmpty() && !fullVolumeSet.isEmpty()) { - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE) - .setFailedVolumeSet(failedVolumeSet).setFullVolumeSet(fullVolumeSet) - .build(); - } - return StorageReportResult.ReportResultBuilder.newBuilder() - .setStatus(ReportStatus.ALL_IS_WELL).build(); - } - - private boolean isThresholdReached(UtilizationThreshold threshold, - double scmUsedratio) { - switch (threshold) { - case NORMAL: - return scmUsedratio < warningUtilizationThreshold; - case WARN: - return scmUsedratio >= warningUtilizationThreshold - && scmUsedratio < criticalUtilizationThreshold; - case CRITICAL: - return scmUsedratio >= criticalUtilizationThreshold; - default: - throw new RuntimeException("Unknown UtilizationThreshold value"); - } - } - - @Override - public long getCapacity(UUID dnId) { - long capacity = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - capacity += report.getCapacity(); - } - return capacity; - } - - @Override - public long getRemainingSpace(UUID dnId) { - long remaining = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - remaining += report.getRemaining(); - } - return remaining; - } - - @Override - public long getUsedSpace(UUID dnId) { - long scmUsed = 0; - Set reportSet = scmNodeStorageReportMap.get(dnId); - for (StorageLocationReport report : reportSet) { - scmUsed += report.getScmUsed(); - } - return scmUsed; - } - - @Override - public long getTotalCapacity() { - long capacity = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - capacity += getCapacity(id); - } - return capacity; - } - - @Override - public long getTotalSpaceUsed() { - long scmUsed = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - scmUsed += getUsedSpace(id); - } - return scmUsed; - } - - @Override - public long getTotalFreeSpace() { - long remaining = 0; - Set dnIdSet = scmNodeStorageReportMap.keySet(); - for (UUID id : dnIdSet) { - remaining += getRemainingSpace(id); - } - return remaining; - } - - /** - * removes the dataNode from scmNodeStorageReportMap. - * @param datanodeID - * @throws SCMException in case the dataNode is not found in the map. - */ - public void removeDatanode(UUID datanodeID) throws SCMException { - Preconditions.checkNotNull(datanodeID); - synchronized (scmNodeStorageReportMap) { - if (!scmNodeStorageReportMap.containsKey(datanodeID)) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - scmNodeStorageReportMap.remove(datanodeID); - } - } - - /** - * Returns the set of storage volumes for a Datanode. - * @param datanodeID - * @return set of storage volumes. 
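The report handling deleted in this hunk reduces each report to a used-capacity ratio (truncated to four decimal places) and classifies it against warning and critical thresholds. A compact sketch of that classification follows; the two threshold constants are placeholders for the configuration values read in the constructor above.

/** Sketch only: classify storage utilization into NORMAL / WARN / CRITICAL. */
public final class UtilizationCheck {

  public enum Level { NORMAL, WARN, CRITICAL }

  // Stand-ins for the warning/critical threshold configuration values.
  private static final double WARN_THRESHOLD = 0.75;
  private static final double CRITICAL_THRESHOLD = 0.95;

  private UtilizationCheck() {
  }

  /** Used ratio, truncated to four decimals so tiny float noise is hidden. */
  public static double usedRatio(long used, long capacity) {
    double ratio = used / (double) capacity;
    return (double) ((long) (ratio * 10000)) / 10000;
  }

  public static Level classify(long used, long capacity) {
    double ratio = usedRatio(used, capacity);
    if (ratio >= CRITICAL_THRESHOLD) {
      return Level.CRITICAL;
    }
    return ratio >= WARN_THRESHOLD ? Level.WARN : Level.NORMAL;
  }

  public static void main(String[] args) {
    // Example: 960 units used out of 1000 -> CRITICAL.
    System.out.println(classify(960L, 1000L));
  }
}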
- */ - - @Override - public Set getStorageVolumes(UUID datanodeID) { - return scmNodeStorageReportMap.get(datanodeID); - } - - - /** - * Truncate to 4 digits since uncontrolled precision is some times - * counter intuitive to what users expect. - * @param value - double. - * @return double. - */ - private double truncateDecimals(double value) { - final int multiplier = 10000; - return (double) ((long) (value * multiplier)) / multiplier; - } - - /** - * get the scmUsed ratio. - */ - public double getScmUsedratio(long scmUsed, long capacity) { - double scmUsedRatio = - truncateDecimals(scmUsed / (double) capacity); - return scmUsedRatio; - } - /** - * Results possible from processing a Node report by - * Node2ContainerMapper. - */ - public enum ReportStatus { - ALL_IS_WELL, - DATANODE_OUT_OF_SPACE, - STORAGE_OUT_OF_SPACE, - FAILED_STORAGE, - FAILED_AND_OUT_OF_SPACE_STORAGE - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java deleted file mode 100644 index 26e8f5fb279..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Set; - -/** - * Handles Stale node event. - */ -public class StaleNodeHandler implements EventHandler { - private static final Logger LOG = - LoggerFactory.getLogger(StaleNodeHandler.class); - - private final NodeManager nodeManager; - private final PipelineManager pipelineManager; - private final Configuration conf; - - public StaleNodeHandler(NodeManager nodeManager, - PipelineManager pipelineManager, OzoneConfiguration conf) { - this.nodeManager = nodeManager; - this.pipelineManager = pipelineManager; - this.conf = conf; - } - - @Override - public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { - Set pipelineIds = - nodeManager.getPipelines(datanodeDetails); - LOG.info("Datanode {} moved to stale state. 
Finalizing its pipelines {}", - datanodeDetails, pipelineIds); - for (PipelineID pipelineID : pipelineIds) { - try { - Pipeline pipeline = pipelineManager.getPipeline(pipelineID); - pipelineManager.finalizeAndDestroyPipeline(pipeline, true); - } catch (IOException e) { - LOG.info("Could not finalize pipeline={} for dn={}", pipelineID, - datanodeDetails); - } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java deleted file mode 100644 index 0b63ceb5783..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StorageReportResult.java +++ /dev/null @@ -1,87 +0,0 @@ - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; - -import java.util.Set; - -/** - * A Container Report gets processsed by the Node2Container and returns the - * Report Result class. 
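The StorageReportResult class deleted in this hunk pairs a report status with the sets of full and failed volumes, assembled through a small builder whose unset fields default to empty sets. The sketch below shows the same shape with generic placeholder types instead of StorageLocationReport; it is an illustration of the pattern, not the removed class.

import java.util.Collections;
import java.util.Set;

/** Sketch only: immutable report result assembled through a builder. */
public final class ReportOutcome<V> {

  public enum Status { ALL_IS_WELL, DATANODE_OUT_OF_SPACE, FAILED_STORAGE }

  private final Status status;
  private final Set<V> fullVolumes;
  private final Set<V> failedVolumes;

  private ReportOutcome(Status status, Set<V> full, Set<V> failed) {
    this.status = status;
    // The builder may leave either set unset; default to empty, never null.
    this.fullVolumes = full == null ? Collections.emptySet() : full;
    this.failedVolumes = failed == null ? Collections.emptySet() : failed;
  }

  public Status getStatus() { return status; }
  public Set<V> getFullVolumes() { return fullVolumes; }
  public Set<V> getFailedVolumes() { return failedVolumes; }

  public static final class Builder<V> {
    private Status status;
    private Set<V> fullVolumes;
    private Set<V> failedVolumes;

    public Builder<V> setStatus(Status s) { this.status = s; return this; }
    public Builder<V> setFullVolumes(Set<V> v) { this.fullVolumes = v; return this; }
    public Builder<V> setFailedVolumes(Set<V> v) { this.failedVolumes = v; return this; }

    public ReportOutcome<V> build() {
      return new ReportOutcome<>(status, fullVolumes, failedVolumes);
    }
  }
}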
- */ -public class StorageReportResult { - private SCMNodeStorageStatMap.ReportStatus status; - private Set fullVolumes; - private Set failedVolumes; - - StorageReportResult(SCMNodeStorageStatMap.ReportStatus status, - Set fullVolumes, - Set failedVolumes) { - this.status = status; - this.fullVolumes = fullVolumes; - this.failedVolumes = failedVolumes; - } - - public SCMNodeStorageStatMap.ReportStatus getStatus() { - return status; - } - - public Set getFullVolumes() { - return fullVolumes; - } - - public Set getFailedVolumes() { - return failedVolumes; - } - - static class ReportResultBuilder { - private SCMNodeStorageStatMap.ReportStatus status; - private Set fullVolumes; - private Set failedVolumes; - - static ReportResultBuilder newBuilder() { - return new ReportResultBuilder(); - } - - public ReportResultBuilder setStatus( - SCMNodeStorageStatMap.ReportStatus newstatus) { - this.status = newstatus; - return this; - } - - public ReportResultBuilder setFullVolumeSet( - Set fullVolumesSet) { - this.fullVolumes = fullVolumesSet; - return this; - } - - public ReportResultBuilder setFailedVolumeSet( - Set failedVolumesSet) { - this.failedVolumes = failedVolumesSet; - return this; - } - - StorageReportResult build() { - return new StorageReportResult(status, fullVolumes, failedVolumes); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java deleted file mode 100644 index d6a8ad0394e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.node; - -/** - * The node package deals with node management. - *

- * The node manager takes care of node registrations, removal of nodes and - * handling of heartbeats. - *

- * The node manager maintains statistics that get sent as part of - * heartbeats. - *

- * The container manager polls the node manager to learn the state of - * datanodes that it is interested in. - *

- */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java deleted file mode 100644 index c0f46f15fe2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .NO_SUCH_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. - */ -public class Node2ContainerMap extends Node2ObjectsMap { - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ContainerMap() { - super(); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - public Set getContainers(UUID datanode) { - return getObjects(datanode); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - @Override - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - super.insertNewDatanode(datanodeID, containerIDs); - } - - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param containers - Set of Containers tht is present on DN. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. 
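Node2ContainerMap, deleted in this hunk, replaces a datanode's container set only when that datanode is already tracked, relying on ConcurrentHashMap.computeIfPresent so the existence check and the replacement form a single atomic step (the call appears just below). A minimal illustration of the idiom with placeholder types:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

public class ContainerMapSketch {

  private final Map<UUID, Set<Long>> dnToContainers = new ConcurrentHashMap<>();

  /** Atomically replaces the set for a known datanode, else fails. */
  public void setContainersForDatanode(UUID dn, Set<Long> containers) {
    // computeIfPresent returns null iff the key was absent, so the
    // existence check and the replacement happen in one atomic step.
    if (dnToContainers.computeIfPresent(dn,
        (k, old) -> new HashSet<>(containers)) == null) {
      throw new IllegalStateException("No such datanode: " + dn);
    }
  }
}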
- */ - public void setContainersForDatanode(UUID datanodeID, - Set containers) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(containers); - if (dn2ObjectMap - .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) - == null) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - } - - @VisibleForTesting - @Override - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java deleted file mode 100644 index 37525b0076e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import java.util.UUID; -import java.util.Set; -import java.util.Map; -import java.util.TreeSet; -import java.util.HashSet; -import java.util.Collections; - -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. - */ -public class Node2ObjectsMap { - - @SuppressWarnings("visibilitymodifier") - protected final Map> dn2ObjectMap; - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ObjectsMap() { - dn2ObjectMap = new ConcurrentHashMap<>(); - } - - /** - * Returns true if this a datanode that is already tracked by - * Node2ContainerMap. - * - * @param datanodeID - UUID of the Datanode. - * @return True if this is tracked, false if this map does not know about it. - */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return dn2ObjectMap.containsKey(datanodeID); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - Preconditions.checkNotNull(containerIDs); - Preconditions.checkNotNull(datanodeID); - if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) - != null) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); - } - } - - /** - * Removes datanode Entry from the map. 
- * - * @param datanodeID - Datanode ID. - */ - void removeDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - Set getObjects(UUID datanode) { - Preconditions.checkNotNull(datanode); - final Set s = dn2ObjectMap.get(datanode); - return s != null? Collections.unmodifiableSet(s): Collections.emptySet(); - } - - public ReportResult.ReportResultBuilder newBuilder() { - return new ReportResult.ReportResultBuilder<>(); - } - - public ReportResult processReport(UUID datanodeID, Set objects) { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(objects); - - if (!isKnownDatanode(datanodeID)) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND) - .setNewEntries(objects) - .build(); - } - - // Conditions like Zero length containers should be handled by removeAll. - Set currentSet = dn2ObjectMap.get(datanodeID); - TreeSet newObjects = new TreeSet<>(objects); - newObjects.removeAll(currentSet); - - TreeSet missingObjects = new TreeSet<>(currentSet); - missingObjects.removeAll(objects); - - if (newObjects.isEmpty() && missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.ALL_IS_WELL) - .build(); - } - - if (newObjects.isEmpty() && !missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.MISSING_ENTRIES) - .setMissingEntries(missingObjects) - .build(); - } - - if (!newObjects.isEmpty() && missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND) - .setNewEntries(newObjects) - .build(); - } - - if (!newObjects.isEmpty() && !missingObjects.isEmpty()) { - return newBuilder() - .setStatus(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND) - .setNewEntries(newObjects) - .setMissingEntries(missingObjects) - .build(); - } - - // default status & Make compiler happy - return newBuilder() - .setStatus(ReportResult.ReportStatus.ALL_IS_WELL) - .build(); - } - - @VisibleForTesting - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java deleted file mode 100644 index f8633f9fcbc..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
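The processReport method removed just above diffs the reported object set against the tracked set to find entries that are new and entries that have gone missing, then maps the two results onto a report status. A compact, type-agnostic sketch of that diffing:

import java.util.Set;
import java.util.TreeSet;

public final class ReportDiff {

  /** Describes new and missing entries between a tracked set and a report. */
  public static <T extends Comparable<T>> String describe(
      Set<T> tracked, Set<T> reported) {
    Set<T> added = new TreeSet<>(reported);
    added.removeAll(tracked);            // present now, unknown before

    Set<T> missing = new TreeSet<>(tracked);
    missing.removeAll(reported);         // known before, absent now

    if (added.isEmpty() && missing.isEmpty()) {
      return "ALL_IS_WELL";
    }
    if (added.isEmpty()) {
      return "MISSING_ENTRIES " + missing;
    }
    if (missing.isEmpty()) {
      return "NEW_ENTRIES_FOUND " + added;
    }
    return "MISSING_AND_NEW_ENTRIES_FOUND " + missing + " / " + added;
  }

  private ReportDiff() {
  }
}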
- * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; - -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -/** - * This data structure maintains the list of pipelines which the given - * datanode is a part of. This information will be added whenever a new - * pipeline allocation happens. - * - *

TODO: this information needs to be regenerated from pipeline reports - * on SCM restart - */ -public class Node2PipelineMap extends Node2ObjectsMap { - - /** Constructs a Node2PipelineMap Object. */ - public Node2PipelineMap() { - super(); - } - - /** - * Returns null if there no pipelines associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of pipelines or Null. - */ - public Set getPipelines(UUID datanode) { - return getObjects(datanode); - } - - /** - * Adds a pipeline entry to a given dataNode in the map. - * - * @param pipeline Pipeline to be added - */ - public synchronized void addPipeline(Pipeline pipeline) { - for (DatanodeDetails details : pipeline.getNodes()) { - UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) - .add(pipeline.getId()); - } - } - - public synchronized void removePipeline(Pipeline pipeline) { - for (DatanodeDetails details : pipeline.getNodes()) { - UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfPresent(dnId, - (k, v) -> { - v.remove(pipeline.getId()); - return v; - }); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java deleted file mode 100644 index aa5c382f426..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents that there is already a node added to NodeStateMap - * with same UUID. - */ -public class NodeAlreadyExistsException extends NodeException { - - /** - * Constructs an {@code NodeAlreadyExistsException} with {@code null} - * as its error detail message. - */ - public NodeAlreadyExistsException() { - super(); - } - - /** - * Constructs an {@code NodeAlreadyExistsException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeAlreadyExistsException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java deleted file mode 100644 index c67b55d9531..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents all node related exceptions in NodeStateMap. - */ -public class NodeException extends Exception { - - /** - * Constructs an {@code NodeException} with {@code null} - * as its error detail message. - */ - public NodeException() { - super(); - } - - /** - * Constructs an {@code NodeException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java deleted file mode 100644 index c44a08cf51e..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -/** - * This exception represents that the node that is being accessed does not - * exist in NodeStateMap. - */ -public class NodeNotFoundException extends NodeException { - - - /** - * Constructs an {@code NodeNotFoundException} with {@code null} - * as its error detail message. - */ - public NodeNotFoundException() { - super(); - } - - /** - * Constructs an {@code NodeNotFoundException} with the specified - * detail message. - * - * @param message - * The detail message (which is saved for later retrieval - * by the {@link #getMessage()} method) - */ - public NodeNotFoundException(String message) { - super(message); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java deleted file mode 100644 index 0c1ab2c3838..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ /dev/null @@ -1,312 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.node.DatanodeInfo; - -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * Maintains the state of datanodes in SCM. This class should only be used by - * NodeStateManager to maintain the state. If anyone wants to change the - * state of a node they should call NodeStateManager, do not directly use - * this class. - */ -public class NodeStateMap { - - /** - * Node id to node info map. - */ - private final ConcurrentHashMap nodeMap; - /** - * Represents the current state of node. - */ - private final ConcurrentHashMap> stateMap; - /** - * Node to set of containers on the node. - */ - private final ConcurrentHashMap> nodeToContainer; - - private final ReadWriteLock lock; - - /** - * Creates a new instance of NodeStateMap with no nodes. 
- */ - public NodeStateMap() { - lock = new ReentrantReadWriteLock(); - nodeMap = new ConcurrentHashMap<>(); - stateMap = new ConcurrentHashMap<>(); - nodeToContainer = new ConcurrentHashMap<>(); - initStateMap(); - } - - /** - * Initializes the state map with available states. - */ - private void initStateMap() { - for (NodeState state : NodeState.values()) { - stateMap.put(state, new HashSet<>()); - } - } - - /** - * Adds a node to NodeStateMap. - * - * @param datanodeDetails DatanodeDetails - * @param nodeState initial NodeState - * - * @throws NodeAlreadyExistsException if the node already exist - */ - public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState) - throws NodeAlreadyExistsException { - lock.writeLock().lock(); - try { - UUID id = datanodeDetails.getUuid(); - if (nodeMap.containsKey(id)) { - throw new NodeAlreadyExistsException("Node UUID: " + id); - } - nodeMap.put(id, new DatanodeInfo(datanodeDetails)); - nodeToContainer.put(id, Collections.emptySet()); - stateMap.get(nodeState).add(id); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Updates the node state. - * - * @param nodeId Node Id - * @param currentState current state - * @param newState new state - * - * @throws NodeNotFoundException if the node is not present - */ - public void updateNodeState(UUID nodeId, NodeState currentState, - NodeState newState)throws NodeNotFoundException { - lock.writeLock().lock(); - try { - checkIfNodeExist(nodeId); - if (stateMap.get(currentState).remove(nodeId)) { - stateMap.get(newState).add(nodeId); - } else { - throw new NodeNotFoundException("Node UUID: " + nodeId + - ", not found in state: " + currentState); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Returns DatanodeInfo for the given node id. - * - * @param uuid Node Id - * - * @return DatanodeInfo of the node - * - * @throws NodeNotFoundException if the node is not present - */ - public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException { - lock.readLock().lock(); - try { - checkIfNodeExist(uuid); - return nodeMap.get(uuid); - } finally { - lock.readLock().unlock(); - } - } - - - /** - * Returns the list of node ids which are in the specified state. - * - * @param state NodeState - * - * @return list of node ids - */ - public List getNodes(NodeState state) { - lock.readLock().lock(); - try { - return new ArrayList<>(stateMap.get(state)); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the list of all the node ids. - * - * @return list of all the node ids - */ - public List getAllNodes() { - lock.readLock().lock(); - try { - return new ArrayList<>(nodeMap.keySet()); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the count of nodes in the specified state. - * - * @param state NodeState - * - * @return Number of nodes in the specified state - */ - public int getNodeCount(NodeState state) { - lock.readLock().lock(); - try { - return stateMap.get(state).size(); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the total node count. - * - * @return node count - */ - public int getTotalNodeCount() { - lock.readLock().lock(); - try { - return nodeMap.size(); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Returns the current state of the node. 
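NodeStateMap, removed in this hunk, keeps a per-state set of node ids alongside the id-to-info map and moves ids between state sets under a ReentrantReadWriteLock, so reads stay cheap and transitions stay consistent. The JDK-only sketch below shows the transition logic; the NodeState enum here is a local placeholder rather than the HddsProtos type, and IllegalStateException stands in for NodeNotFoundException.

import java.util.EnumMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class NodeStateTable {

  public enum NodeState { HEALTHY, STALE, DEAD }

  private final Map<NodeState, Set<UUID>> stateMap = new EnumMap<>(NodeState.class);
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  public NodeStateTable() {
    // One id set per state, created up front so lookups never return null.
    for (NodeState s : NodeState.values()) {
      stateMap.put(s, new HashSet<>());
    }
  }

  public void addNode(UUID id, NodeState initial) {
    lock.writeLock().lock();
    try {
      stateMap.get(initial).add(id);
    } finally {
      lock.writeLock().unlock();
    }
  }

  /** Moves a node between state sets; fails if it was not in currentState. */
  public void updateNodeState(UUID id, NodeState current, NodeState next) {
    lock.writeLock().lock();
    try {
      if (!stateMap.get(current).remove(id)) {
        throw new IllegalStateException(id + " not found in state " + current);
      }
      stateMap.get(next).add(id);
    } finally {
      lock.writeLock().unlock();
    }
  }

  public int getNodeCount(NodeState state) {
    lock.readLock().lock();
    try {
      return stateMap.get(state).size();
    } finally {
      lock.readLock().unlock();
    }
  }
}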
- * - * @param uuid node id - * - * @return NodeState - * - * @throws NodeNotFoundException if the node is not found - */ - public NodeState getNodeState(UUID uuid) throws NodeNotFoundException { - lock.readLock().lock(); - try { - checkIfNodeExist(uuid); - for (Map.Entry> entry : stateMap.entrySet()) { - if (entry.getValue().contains(uuid)) { - return entry.getKey(); - } - } - throw new NodeNotFoundException("Node not found in node state map." + - " UUID: " + uuid); - } finally { - lock.readLock().unlock(); - } - } - - /** - * Adds the given container to the specified datanode. - * - * @param uuid - datanode uuid - * @param containerId - containerID - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - public void addContainer(final UUID uuid, - final ContainerID containerId) - throws NodeNotFoundException { - lock.writeLock().lock(); - try { - checkIfNodeExist(uuid); - nodeToContainer.get(uuid).add(containerId); - } finally { - lock.writeLock().unlock(); - } - } - - public void setContainers(UUID uuid, Set containers) - throws NodeNotFoundException{ - lock.writeLock().lock(); - try { - checkIfNodeExist(uuid); - nodeToContainer.put(uuid, containers); - } finally { - lock.writeLock().unlock(); - } - } - - public Set getContainers(UUID uuid) - throws NodeNotFoundException { - lock.readLock().lock(); - try { - checkIfNodeExist(uuid); - return Collections.unmodifiableSet(nodeToContainer.get(uuid)); - } finally { - lock.readLock().unlock(); - } - } - - public void removeContainer(UUID uuid, ContainerID containerID) throws - NodeNotFoundException { - lock.writeLock().lock(); - try { - checkIfNodeExist(uuid); - nodeToContainer.get(uuid).remove(containerID); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Since we don't hold a global lock while constructing this string, - * the result might be inconsistent. If someone has changed the state of node - * while we are constructing the string, the result will be inconsistent. - * This should only be used for logging. We should not parse this string and - * use it for any critical calculations. - * - * @return current state of NodeStateMap - */ - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("Total number of nodes: ").append(getTotalNodeCount()); - for (NodeState state : NodeState.values()) { - builder.append("Number of nodes in ").append(state).append(" state: ") - .append(getNodeCount(state)); - } - return builder.toString(); - } - - /** - * Throws NodeNotFoundException if the Node for given id doesn't exist. - * - * @param uuid Node UUID - * @throws NodeNotFoundException If the node is missing. - */ - private void checkIfNodeExist(UUID uuid) throws NodeNotFoundException { - if (!nodeToContainer.containsKey(uuid)) { - throw new NodeNotFoundException("Node UUID: " + uuid); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java deleted file mode 100644 index 0c7610fc7bd..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.Collections; -import java.util.Set; - -import com.google.common.base.Preconditions; - -/** - * A Container/Pipeline Report gets processed by the - * Node2Container/Node2Pipeline and returns Report Result class. - */ -public final class ReportResult { - private ReportStatus status; - private Set missingEntries; - private Set newEntries; - - private ReportResult(ReportStatus status, - Set missingEntries, - Set newEntries) { - this.status = status; - Preconditions.checkNotNull(missingEntries); - Preconditions.checkNotNull(newEntries); - this.missingEntries = missingEntries; - this.newEntries = newEntries; - } - - public ReportStatus getStatus() { - return status; - } - - public Set getMissingEntries() { - return missingEntries; - } - - public Set getNewEntries() { - return newEntries; - } - - /** - * Result after processing report for node2Object map. - * @param - */ - public static class ReportResultBuilder { - private ReportStatus status; - private Set missingEntries; - private Set newEntries; - - public ReportResultBuilder setStatus( - ReportStatus newStatus) { - this.status = newStatus; - return this; - } - - public ReportResultBuilder setMissingEntries( - Set missingEntriesList) { - this.missingEntries = missingEntriesList; - return this; - } - - public ReportResultBuilder setNewEntries( - Set newEntriesList) { - this.newEntries = newEntriesList; - return this; - } - - public ReportResult build() { - - Set nullSafeMissingEntries = this.missingEntries; - Set nullSafeNewEntries = this.newEntries; - if (nullSafeNewEntries == null) { - nullSafeNewEntries = Collections.emptySet(); - } - if (nullSafeMissingEntries == null) { - nullSafeMissingEntries = Collections.emptySet(); - } - return new ReportResult(status, nullSafeMissingEntries, - nullSafeNewEntries); - } - } - - /** - * Results possible from processing a report. - */ - public enum ReportStatus { - ALL_IS_WELL, - MISSING_ENTRIES, - NEW_ENTRIES_FOUND, - MISSING_AND_NEW_ENTRIES_FOUND, - NEW_DATANODE_FOUND, - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java deleted file mode 100644 index c429c5c3e13..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Node States package. - */ -package org.apache.hadoop.hdds.scm.node.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index 4669e741ef0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm; - -/* - * This package contains StorageContainerManager classes. - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java deleted file mode 100644 index 687356648c3..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.utils.Scheduler; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Implements api for running background pipeline creation jobs. - */ -class BackgroundPipelineCreator { - - private static final Logger LOG = - LoggerFactory.getLogger(BackgroundPipelineCreator.class); - - private final Scheduler scheduler; - private final AtomicBoolean isPipelineCreatorRunning; - private final PipelineManager pipelineManager; - private final Configuration conf; - - BackgroundPipelineCreator(PipelineManager pipelineManager, - Scheduler scheduler, Configuration conf) { - this.pipelineManager = pipelineManager; - this.conf = conf; - this.scheduler = scheduler; - isPipelineCreatorRunning = new AtomicBoolean(false); - } - - private boolean shouldSchedulePipelineCreator() { - return isPipelineCreatorRunning.compareAndSet(false, true); - } - - /** - * Schedules a fixed interval job to create pipelines. - */ - void startFixedIntervalPipelineCreator() { - long intervalInMillis = conf - .getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, - ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - // TODO: #CLUTIL We can start the job asap - scheduler.scheduleWithFixedDelay(() -> { - if (!shouldSchedulePipelineCreator()) { - return; - } - createPipelines(); - }, 0, intervalInMillis, TimeUnit.MILLISECONDS); - } - - /** - * Triggers pipeline creation via background thread. - */ - void triggerPipelineCreation() { - // TODO: #CLUTIL introduce a better mechanism to not have more than one - // job of a particular type running, probably via ratis. - if (!shouldSchedulePipelineCreator()) { - return; - } - scheduler.schedule(this::createPipelines, 0, TimeUnit.MILLISECONDS); - } - - private void createPipelines() { - // TODO: #CLUTIL Different replication factor may need to be supported - HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf( - conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE, - OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT)); - - for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor - .values()) { - while (true) { - try { - if (scheduler.isClosed()) { - break; - } - pipelineManager.createPipeline(type, factor); - } catch (IOException ioe) { - break; - } catch (Throwable t) { - LOG.error("Error while creating pipelines {}", t); - break; - } - } - } - isPipelineCreatorRunning.set(false); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java deleted file mode 100644 index a6a5a69a165..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/InsufficientDatanodesException.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
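BackgroundPipelineCreator, deleted above, schedules pipeline creation at a fixed delay and also allows ad-hoc triggers, using an AtomicBoolean compareAndSet so the two paths never run the creation loop concurrently. A pure-JDK sketch of that pattern, with ScheduledExecutorService standing in for the removed Scheduler utility and an empty createPipelines() placeholder:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class BackgroundJobSketch {

  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private final AtomicBoolean running = new AtomicBoolean(false);

  /** Schedules the job at a fixed delay; overlapping runs are skipped. */
  public void start(long intervalMillis) {
    scheduler.scheduleWithFixedDelay(() -> {
      // Only one instance of the job may run at a time.
      if (!running.compareAndSet(false, true)) {
        return;
      }
      try {
        createPipelines();
      } finally {
        running.set(false);
      }
    }, 0, intervalMillis, TimeUnit.MILLISECONDS);
  }

  /** Ad-hoc trigger, e.g. after new datanodes register. */
  public void trigger() {
    if (running.compareAndSet(false, true)) {
      scheduler.execute(() -> {
        try {
          createPipelines();
        } finally {
          running.set(false);
        }
      });
    }
  }

  private void createPipelines() {
    // Placeholder for the real creation loop over replication factors.
  }

  public void stop() {
    scheduler.shutdown();
  }
}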
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -/** - * Exception thrown when there are not enough Datanodes to create a pipeline. - */ -public class InsufficientDatanodesException extends IOException { - - - public InsufficientDatanodesException() { - super(); - } - - public InsufficientDatanodesException(String message) { - super(message); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java deleted file mode 100644 index 8d497fa1b03..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; - -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handles pipeline actions from datanode. - */ -public class PipelineActionHandler - implements EventHandler { - - public static final Logger LOG = - LoggerFactory.getLogger(PipelineActionHandler.class); - - private final PipelineManager pipelineManager; - private final Configuration ozoneConf; - - public PipelineActionHandler(PipelineManager pipelineManager, - OzoneConfiguration conf) { - this.pipelineManager = pipelineManager; - this.ozoneConf = conf; - } - - @Override - public void onMessage(PipelineActionsFromDatanode report, - EventPublisher publisher) { - for (PipelineAction action : report.getReport().getPipelineActionsList()) { - if (action.getAction() == PipelineAction.Action.CLOSE) { - PipelineID pipelineID = null; - try { - pipelineID = PipelineID. - getFromProtobuf(action.getClosePipeline().getPipelineID()); - Pipeline pipeline = pipelineManager.getPipeline(pipelineID); - LOG.error("Received pipeline action {} for {} from datanode {}. " + - "Reason : {}", action.getAction(), pipeline, - report.getDatanodeDetails(), - action.getClosePipeline().getDetailedReason()); - pipelineManager.finalizeAndDestroyPipeline(pipeline, true); - } catch (IOException ioe) { - LOG.error("Could not execute pipeline action={} pipeline={} {}", - action, pipelineID, ioe); - } - } else { - LOG.error("unknown pipeline action:{}" + action.getAction()); - } - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java deleted file mode 100644 index 77e037a0711..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
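PipelineActionHandler, removed just above, is a typical SCM event handler: for each CLOSE action reported by a datanode it looks up the affected pipeline and finalizes it, logging failures and unknown action types instead of propagating them. Because the EventHandler interface is itself part of this deletion, the sketch below uses a local placeholder Handler interface and a hypothetical PipelineCloser collaborator.

import java.io.IOException;

/** Sketch only: the shape of a datanode pipeline-action handler. */
public class PipelineActionSketch {

  /** Local placeholder for the deleted hdds EventHandler interface. */
  public interface Handler<T> {
    void onMessage(T payload);
  }

  /** Hypothetical collaborator that tears down one pipeline. */
  public interface PipelineCloser {
    void close(String pipelineId, String reason) throws IOException;
  }

  /** A CLOSE action reported by a datanode for one pipeline. */
  public static final class CloseAction {
    public final String pipelineId;
    public final String reason;

    public CloseAction(String pipelineId, String reason) {
      this.pipelineId = pipelineId;
      this.reason = reason;
    }
  }

  public static Handler<CloseAction> closeHandler(PipelineCloser closer) {
    return action -> {
      try {
        // Tear down the pipeline named in the action; log and move on if
        // the pipeline is already gone or the teardown fails.
        closer.close(action.pipelineId, action.reason);
      } catch (IOException e) {
        System.err.println("Could not close pipeline " + action.pipelineId
            + ": " + e.getMessage());
      }
    };
  }
}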
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.ratis.grpc.GrpcTlsConfig; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * Creates pipeline based on replication type. - */ -public final class PipelineFactory { - - private Map providers; - - PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager, - Configuration conf, GrpcTlsConfig tlsConfig) { - providers = new HashMap<>(); - providers.put(ReplicationType.STAND_ALONE, - new SimplePipelineProvider(nodeManager)); - providers.put(ReplicationType.RATIS, - new RatisPipelineProvider(nodeManager, stateManager, conf, tlsConfig)); - } - - @VisibleForTesting - void setProvider(ReplicationType replicationType, - PipelineProvider provider) { - providers.put(replicationType, provider); - } - - public Pipeline create(ReplicationType type, ReplicationFactor factor) - throws IOException { - return providers.get(type).create(factor); - } - - public Pipeline create(ReplicationType type, ReplicationFactor factor, - List nodes) { - return providers.get(type).create(factor, nodes); - } - - public void shutdown() { - providers.values().forEach(provider -> provider.shutdown()); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java deleted file mode 100644 index 9ba5f3189f7..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.ratis.grpc.GrpcTlsConfig; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.NavigableSet; - -/** - * Interface which exposes the api for pipeline management. 
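To make the contract of the interface below concrete, a minimal caller-side sketch; the pipelineManager instance, the sample container ID, and the ContainerID factory call are illustrative assumptions rather than code from the patch.

Pipeline pipeline = pipelineManager.createPipeline(
    ReplicationType.RATIS, ReplicationFactor.THREE);
pipelineManager.addContainerToPipeline(
    pipeline.getId(), ContainerID.valueof(1L));
// Tear the pipeline down once it is no longer usable; passing true delays
// the destroy call on the datanodes until the configured timeout expires.
pipelineManager.finalizeAndDestroyPipeline(pipeline, true);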
- */ -public interface PipelineManager extends Closeable, PipelineManagerMXBean { - - Pipeline createPipeline(ReplicationType type, ReplicationFactor factor) - throws IOException; - - Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, - List nodes); - - Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException; - - List getPipelines(); - - List getPipelines(ReplicationType type); - - List getPipelines(ReplicationType type, - ReplicationFactor factor); - - List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state); - - List getPipelines(ReplicationType type, ReplicationFactor factor, - Pipeline.PipelineState state, Collection excludeDns, - Collection excludePipelines); - - void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID) - throws IOException; - - void removeContainerFromPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException; - - NavigableSet getContainersInPipeline(PipelineID pipelineID) - throws IOException; - - int getNumberOfContainers(PipelineID pipelineID) throws IOException; - - void openPipeline(PipelineID pipelineId) throws IOException; - - void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) - throws IOException; - - void startPipelineCreator(); - - void triggerPipelineCreation(); - - void incNumBlocksAllocatedMetric(PipelineID id); - - /** - * Activates a dormant pipeline. - * - * @param pipelineID ID of the pipeline to activate. - * @throws IOException in case of any Exception - */ - void activatePipeline(PipelineID pipelineID) throws IOException; - - /** - * Deactivates an active pipeline. - * - * @param pipelineID ID of the pipeline to deactivate. - * @throws IOException in case of any Exception - */ - void deactivatePipeline(PipelineID pipelineID) throws IOException; - - GrpcTlsConfig getGrpcTlsConfig(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java deleted file mode 100644 index 77a7a810054..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.classification.InterfaceAudience; - -import java.util.Map; - -/** - * This is the JMX management interface for information related to - * PipelineManager. - */ -@InterfaceAudience.Private -public interface PipelineManagerMXBean { - - /** - * Returns the number of pipelines in different state. 
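As a hypothetical illustration, the bean below can be read over JMX once the implementation registers itself; the object name follows Hadoop's MBeans.register convention used later in this patch, and the attribute name is an assumption derived from the getter.

MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName name = new ObjectName(
    "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo");
// getPipelineInfo() is assumed to surface as the "PipelineInfo" attribute.
Object pipelineInfo = mbs.getAttribute(name, "PipelineInfo");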
- * @return state to number of pipeline map - */ - Map getPipelineInfo(); - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java deleted file mode 100644 index a0ce2162672..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; - -import java.io.IOException; -import java.util.List; - -/** - * Interface for creating pipelines. - */ -public interface PipelineProvider { - - Pipeline create(ReplicationFactor factor) throws IOException; - - Pipeline create(ReplicationFactor factor, List nodes); - - void shutdown(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java deleted file mode 100644 index 793f4e2a5e2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Objects; - -/** - * Handles Pipeline Reports from datanode. - */ -public class PipelineReportHandler implements - EventHandler { - - private static final Logger LOGGER = LoggerFactory - .getLogger(PipelineReportHandler.class); - private final PipelineManager pipelineManager; - private final Configuration conf; - private final SCMSafeModeManager scmSafeModeManager; - private final boolean pipelineAvailabilityCheck; - - public PipelineReportHandler(SCMSafeModeManager scmSafeModeManager, - PipelineManager pipelineManager, - Configuration conf) { - Preconditions.checkNotNull(pipelineManager); - Objects.requireNonNull(scmSafeModeManager); - this.scmSafeModeManager = scmSafeModeManager; - this.pipelineManager = pipelineManager; - this.conf = conf; - this.pipelineAvailabilityCheck = conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT); - - } - - @Override - public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, - EventPublisher publisher) { - Preconditions.checkNotNull(pipelineReportFromDatanode); - DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails(); - PipelineReportsProto pipelineReport = - pipelineReportFromDatanode.getReport(); - Preconditions.checkNotNull(dn, "Pipeline Report is " - + "missing DatanodeDetails."); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Processing pipeline report for dn: {}", dn); - } - for (PipelineReport report : pipelineReport.getPipelineReportList()) { - try { - processPipelineReport(report, dn); - } catch (IOException e) { - LOGGER.error("Could not process pipeline report={} from dn={} {}", - report, dn, e); - } - } - if (pipelineAvailabilityCheck && scmSafeModeManager.getInSafeMode()) { - publisher.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - pipelineReportFromDatanode); - } - - } - - private void processPipelineReport(PipelineReport report, DatanodeDetails dn) - throws IOException { - PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID()); - Pipeline pipeline; - try { - pipeline = pipelineManager.getPipeline(pipelineID); - } catch (PipelineNotFoundException e) { - RatisPipelineUtils.destroyPipeline(dn, pipelineID, conf, - pipelineManager.getGrpcTlsConfig()); - return; - } - - if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { - LOGGER.info("Pipeline {} reported by {}", pipeline.getId(), dn); - pipeline.reportDatanode(dn); - if (pipeline.isHealthy()) { - // if all the dns have reported, pipeline can be moved to OPEN state - pipelineManager.openPipeline(pipelineID); - } - } else { - // 
In OPEN state case just report the datanode - pipeline.reportDatanode(dn); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java deleted file mode 100644 index 76150579f84..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.NavigableSet; - -/** - * Manages the state of pipelines in SCM. All write operations like pipeline - * creation, removal and updates should come via SCMPipelineManager. - * PipelineStateMap class holds the data structures related to pipeline and its - * state. All the read and write operations in PipelineStateMap are protected - * by a read write lock. 
- */ -class PipelineStateManager { - - private static final Logger LOG = - LoggerFactory.getLogger(PipelineStateManager.class); - - private final PipelineStateMap pipelineStateMap; - - PipelineStateManager(Configuration conf) { - this.pipelineStateMap = new PipelineStateMap(); - } - - void addPipeline(Pipeline pipeline) throws IOException { - pipelineStateMap.addPipeline(pipeline); - if (pipeline.getPipelineState() == PipelineState.OPEN) { - LOG.info("Created pipeline " + pipeline); - } - } - - void addContainerToPipeline(PipelineID pipelineId, ContainerID containerID) - throws IOException { - pipelineStateMap.addContainerToPipeline(pipelineId, containerID); - } - - Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException { - return pipelineStateMap.getPipeline(pipelineID); - } - - public List getPipelines() { - return pipelineStateMap.getPipelines(); - } - - List getPipelines(ReplicationType type) { - return pipelineStateMap.getPipelines(type); - } - - List getPipelines(ReplicationType type, ReplicationFactor factor) { - return pipelineStateMap.getPipelines(type, factor); - } - - List getPipelines(ReplicationType type, ReplicationFactor factor, - PipelineState state) { - return pipelineStateMap.getPipelines(type, factor, state); - } - - List getPipelines(ReplicationType type, ReplicationFactor factor, - PipelineState state, Collection excludeDns, - Collection excludePipelines) { - return pipelineStateMap - .getPipelines(type, factor, state, excludeDns, excludePipelines); - } - - List getPipelines(ReplicationType type, PipelineState... states) { - return pipelineStateMap.getPipelines(type, states); - } - - NavigableSet getContainers(PipelineID pipelineID) - throws IOException { - return pipelineStateMap.getContainers(pipelineID); - } - - int getNumberOfContainers(PipelineID pipelineID) throws IOException { - return pipelineStateMap.getNumberOfContainers(pipelineID); - } - - Pipeline removePipeline(PipelineID pipelineID) throws IOException { - Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID); - LOG.info("Pipeline {} removed from db", pipeline); - return pipeline; - } - - void removeContainerFromPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException { - pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID); - } - - Pipeline finalizePipeline(PipelineID pipelineId) - throws PipelineNotFoundException { - Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId); - if (!pipeline.isClosed()) { - pipeline = pipelineStateMap - .updatePipelineState(pipelineId, PipelineState.CLOSED); - LOG.info("Pipeline {} moved to CLOSED state", pipeline); - } - return pipeline; - } - - Pipeline openPipeline(PipelineID pipelineId) throws IOException { - Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId); - if (pipeline.isClosed()) { - throw new IOException("Closed pipeline can not be opened"); - } - if (pipeline.getPipelineState() == PipelineState.ALLOCATED) { - pipeline = pipelineStateMap - .updatePipelineState(pipelineId, PipelineState.OPEN); - LOG.info("Pipeline {} moved to OPEN state", pipeline.toString()); - } - return pipeline; - } - - /** - * Activates a dormant pipeline. - * - * @param pipelineID ID of the pipeline to activate. - * @throws IOException in case of any Exception - */ - public void activatePipeline(PipelineID pipelineID) - throws IOException { - pipelineStateMap - .updatePipelineState(pipelineID, PipelineState.OPEN); - } - - /** - * Deactivates an active pipeline. 
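Read together, the methods of the class below implement a small state machine; the following sketch condenses the legal transitions, with stateManager, pipeline, and id standing in for an instance and an existing PipelineID.

stateManager.addPipeline(pipeline);    // stored in whatever state it was built
stateManager.openPipeline(id);         // ALLOCATED -> OPEN
stateManager.deactivatePipeline(id);   // OPEN -> DORMANT
stateManager.activatePipeline(id);     // DORMANT -> OPEN
stateManager.finalizePipeline(id);     // any non-closed state -> CLOSED
stateManager.removePipeline(id);       // only permitted once CLOSED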
- * - * @param pipelineID ID of the pipeline to deactivate. - * @throws IOException in case of any Exception - */ - public void deactivatePipeline(PipelineID pipelineID) - throws IOException { - pipelineStateMap - .updatePipelineState(pipelineID, PipelineState.DORMANT); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java deleted file mode 100644 index 443378cd183..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java +++ /dev/null @@ -1,420 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.base.Preconditions; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -/** - * Holds the data structures which maintain the information about pipeline and - * its state. - * Invariant: If a pipeline exists in PipelineStateMap, both pipelineMap and - * pipeline2container would have a non-null mapping for it. - */ -class PipelineStateMap { - - private static final Logger LOG = LoggerFactory.getLogger( - PipelineStateMap.class); - - private final Map pipelineMap; - private final Map> pipeline2container; - private final Map> query2OpenPipelines; - - PipelineStateMap() { - - // TODO: Use TreeMap for range operations? - pipelineMap = new HashMap<>(); - pipeline2container = new HashMap<>(); - query2OpenPipelines = new HashMap<>(); - initializeQueryMap(); - - } - - private void initializeQueryMap() { - for (ReplicationType type : ReplicationType.values()) { - for (ReplicationFactor factor : ReplicationFactor.values()) { - query2OpenPipelines - .put(new PipelineQuery(type, factor), new CopyOnWriteArrayList<>()); - } - } - } - - /** - * Adds provided pipeline in the data structures. 
- * - * @param pipeline - Pipeline to add - * @throws IOException if pipeline with provided pipelineID already exists - */ - void addPipeline(Pipeline pipeline) throws IOException { - Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); - Preconditions.checkArgument( - pipeline.getNodes().size() == pipeline.getFactor().getNumber(), - String.format("Nodes size=%d, replication factor=%d do not match ", - pipeline.getNodes().size(), pipeline.getFactor().getNumber())); - - if (pipelineMap.putIfAbsent(pipeline.getId(), pipeline) != null) { - LOG.warn("Duplicate pipeline ID detected. {}", pipeline.getId()); - throw new IOException(String - .format("Duplicate pipeline ID %s detected.", pipeline.getId())); - } - pipeline2container.put(pipeline.getId(), new TreeSet<>()); - if (pipeline.getPipelineState() == PipelineState.OPEN) { - query2OpenPipelines.get(new PipelineQuery(pipeline)).add(pipeline); - } - } - - /** - * Add container to an existing pipeline. - * - * @param pipelineID - PipelineID of the pipeline to which container is added - * @param containerID - ContainerID of the container to add - * @throws IOException if pipeline is not in open state or does not exist - */ - void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID) - throws IOException { - Preconditions.checkNotNull(pipelineID, - "Pipeline Id cannot be null"); - Preconditions.checkNotNull(containerID, - "Container Id cannot be null"); - - Pipeline pipeline = getPipeline(pipelineID); - if (pipeline.isClosed()) { - throw new IOException(String - .format("Cannot add container to pipeline=%s in closed state", - pipelineID)); - } - pipeline2container.get(pipelineID).add(containerID); - } - - /** - * Get pipeline corresponding to specified pipelineID. - * - * @param pipelineID - PipelineID of the pipeline to be retrieved - * @return Pipeline - * @throws IOException if pipeline is not found - */ - Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException { - Preconditions.checkNotNull(pipelineID, - "Pipeline Id cannot be null"); - - Pipeline pipeline = pipelineMap.get(pipelineID); - if (pipeline == null) { - throw new PipelineNotFoundException( - String.format("%s not found", pipelineID)); - } - return pipeline; - } - - /** - * Get list of pipelines in SCM. - * @return List of pipelines - */ - public List getPipelines() { - return new ArrayList<>(pipelineMap.values()); - } - - /** - * Get pipeline corresponding to specified replication type. - * - * @param type - ReplicationType - * @return List of pipelines which have the specified replication type - */ - List getPipelines(ReplicationType type) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - - return pipelineMap.values().stream() - .filter(p -> p.getType().equals(type)) - .collect(Collectors.toList()); - } - - /** - * Get pipeline corresponding to specified replication type and factor. - * - * @param type - ReplicationType - * @param factor - ReplicationFactor - * @return List of pipelines with specified replication type and factor - */ - List getPipelines(ReplicationType type, ReplicationFactor factor) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); - - return pipelineMap.values().stream() - .filter(pipeline -> pipeline.getType() == type - && pipeline.getFactor() == factor) - .collect(Collectors.toList()); - } - - /** - * Get list of pipeline corresponding to specified replication type and - * pipeline states. 
- * - * @param type - ReplicationType - * @param states - Array of required PipelineState - * @return List of pipelines with specified replication type and states - */ - List getPipelines(ReplicationType type, PipelineState... states) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(states, "Pipeline state cannot be null"); - - Set pipelineStates = new HashSet<>(); - pipelineStates.addAll(Arrays.asList(states)); - return pipelineMap.values().stream().filter( - pipeline -> pipeline.getType() == type && pipelineStates - .contains(pipeline.getPipelineState())) - .collect(Collectors.toList()); - } - - /** - * Get list of pipeline corresponding to specified replication type, - * replication factor and pipeline state. - * - * @param type - ReplicationType - * @param state - Required PipelineState - * @return List of pipelines with specified replication type, - * replication factor and pipeline state - */ - List getPipelines(ReplicationType type, ReplicationFactor factor, - PipelineState state) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); - Preconditions.checkNotNull(state, "Pipeline state cannot be null"); - - if (state == PipelineState.OPEN) { - return Collections.unmodifiableList( - query2OpenPipelines.get(new PipelineQuery(type, factor))); - } - return pipelineMap.values().stream().filter( - pipeline -> pipeline.getType() == type - && pipeline.getPipelineState() == state - && pipeline.getFactor() == factor) - .collect(Collectors.toList()); - } - - /** - * Get list of pipeline corresponding to specified replication type, - * replication factor and pipeline state. - * - * @param type - ReplicationType - * @param state - Required PipelineState - * @param excludeDns list of dns to exclude - * @param excludePipelines pipelines to exclude - * @return List of pipelines with specified replication type, - * replication factor and pipeline state - */ - List getPipelines(ReplicationType type, ReplicationFactor factor, - PipelineState state, Collection excludeDns, - Collection excludePipelines) { - Preconditions.checkNotNull(type, "Replication type cannot be null"); - Preconditions.checkNotNull(factor, "Replication factor cannot be null"); - Preconditions.checkNotNull(state, "Pipeline state cannot be null"); - Preconditions - .checkNotNull(excludeDns, "Datanode exclude list cannot be null"); - Preconditions - .checkNotNull(excludeDns, "Pipeline exclude list cannot be null"); - return getPipelines(type, factor, state).stream().filter( - pipeline -> !discardPipeline(pipeline, excludePipelines) - && !discardDatanode(pipeline, excludeDns)) - .collect(Collectors.toList()); - } - - private boolean discardPipeline(Pipeline pipeline, - Collection excludePipelines) { - if (excludePipelines.isEmpty()) { - return false; - } - Predicate predicate = p -> p.equals(pipeline.getId()); - return excludePipelines.parallelStream().anyMatch(predicate); - } - - private boolean discardDatanode(Pipeline pipeline, - Collection excludeDns) { - if (excludeDns.isEmpty()) { - return false; - } - boolean discard = false; - for (DatanodeDetails dn : pipeline.getNodes()) { - Predicate predicate = p -> p.equals(dn); - discard = excludeDns.parallelStream().anyMatch(predicate); - if (discard) { - break; - } - } - return discard; - } - /** - * Get set of containerIDs corresponding to a pipeline. 
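A hypothetical caller of the exclude-list query above, for example block allocation asking for an OPEN RATIS/THREE pipeline that avoids one datanode and one pipeline; the stateMap, badDatanode, and excludedPipelineId names are made up.

List<Pipeline> candidates = stateMap.getPipelines(
    ReplicationType.RATIS, ReplicationFactor.THREE, PipelineState.OPEN,
    Collections.singleton(badDatanode),         // excludeDns
    Collections.singleton(excludedPipelineId)); // excludePipelines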
- * - * @param pipelineID - PipelineID - * @return Set of containerIDs belonging to the pipeline - * @throws IOException if pipeline is not found - */ - NavigableSet getContainers(PipelineID pipelineID) - throws PipelineNotFoundException { - Preconditions.checkNotNull(pipelineID, - "Pipeline Id cannot be null"); - - NavigableSet containerIDs = pipeline2container.get(pipelineID); - if (containerIDs == null) { - throw new PipelineNotFoundException( - String.format("%s not found", pipelineID)); - } - return new TreeSet<>(containerIDs); - } - - /** - * Get number of containers corresponding to a pipeline. - * - * @param pipelineID - PipelineID - * @return Number of containers belonging to the pipeline - * @throws IOException if pipeline is not found - */ - int getNumberOfContainers(PipelineID pipelineID) - throws PipelineNotFoundException { - Preconditions.checkNotNull(pipelineID, - "Pipeline Id cannot be null"); - - Set containerIDs = pipeline2container.get(pipelineID); - if (containerIDs == null) { - throw new PipelineNotFoundException( - String.format("%s not found", pipelineID)); - } - return containerIDs.size(); - } - - /** - * Remove pipeline from the data structures. - * - * @param pipelineID - PipelineID of the pipeline to be removed - * @throws IOException if the pipeline is not empty or does not exist - */ - Pipeline removePipeline(PipelineID pipelineID) throws IOException { - Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null"); - - Pipeline pipeline = getPipeline(pipelineID); - if (!pipeline.isClosed()) { - throw new IOException( - String.format("Pipeline with %s is not yet closed", pipelineID)); - } - - pipelineMap.remove(pipelineID); - pipeline2container.remove(pipelineID); - return pipeline; - } - - /** - * Remove container from a pipeline. - * - * @param pipelineID - PipelineID of the pipeline from which container needs - * to be removed - * @param containerID - ContainerID of the container to remove - * @throws IOException if pipeline does not exist - */ - void removeContainerFromPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException { - Preconditions.checkNotNull(pipelineID, - "Pipeline Id cannot be null"); - Preconditions.checkNotNull(containerID, - "container Id cannot be null"); - - Set containerIDs = pipeline2container.get(pipelineID); - if (containerIDs == null) { - throw new PipelineNotFoundException( - String.format("%s not found", pipelineID)); - } - containerIDs.remove(containerID); - } - - /** - * Updates the state of pipeline. 
- * - * @param pipelineID - PipelineID of the pipeline whose state needs - * to be updated - * @param state - new state of the pipeline - * @return Pipeline with the updated state - * @throws IOException if pipeline does not exist - */ - Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state) - throws PipelineNotFoundException { - Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null"); - Preconditions.checkNotNull(state, "Pipeline LifeCycleState cannot be null"); - - final Pipeline pipeline = getPipeline(pipelineID); - Pipeline updatedPipeline = pipelineMap.compute(pipelineID, - (id, p) -> Pipeline.newBuilder(pipeline).setState(state).build()); - PipelineQuery query = new PipelineQuery(pipeline); - if (updatedPipeline.getPipelineState() == PipelineState.OPEN) { - // for transition to OPEN state add pipeline to query2OpenPipelines - query2OpenPipelines.get(query).add(updatedPipeline); - } else { - // for transition from OPEN to CLOSED state remove pipeline from - // query2OpenPipelines - query2OpenPipelines.get(query).remove(pipeline); - } - return updatedPipeline; - } - - private static class PipelineQuery { - private ReplicationType type; - private ReplicationFactor factor; - - PipelineQuery(ReplicationType type, ReplicationFactor factor) { - this.type = Preconditions.checkNotNull(type); - this.factor = Preconditions.checkNotNull(factor); - } - - PipelineQuery(Pipeline pipeline) { - type = pipeline.getType(); - factor = pipeline.getFactor(); - } - - @Override - @SuppressFBWarnings("NP_EQUALS_SHOULD_HANDLE_NULL_ARGUMENT") - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (!this.getClass().equals(other.getClass())) { - return false; - } - PipelineQuery otherQuery = (PipelineQuery) other; - return type == otherQuery.type && factor == otherQuery.factor; - } - - @Override - public int hashCode() { - return new HashCodeBuilder() - .append(type) - .append(factor) - .toHashCode(); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java deleted file mode 100644 index 0324a58f13a..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; -import org.apache.hadoop.io.MultipleIOException; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.util.TimeDuration; -import org.apache.ratis.util.function.CheckedBiConsumer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ForkJoinPool; -import java.util.concurrent.ForkJoinWorkerThread; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -/** - * Implements Api for creating ratis pipelines. - */ -public class RatisPipelineProvider implements PipelineProvider { - - private static final Logger LOG = - LoggerFactory.getLogger(RatisPipelineProvider.class); - - private final NodeManager nodeManager; - private final PipelineStateManager stateManager; - private final Configuration conf; - - // Set parallelism at 3, as now in Ratis we create 1 and 3 node pipelines. - private final int parallelismForPool = 3; - - private final ForkJoinPool.ForkJoinWorkerThreadFactory factory = - (pool -> { - final ForkJoinWorkerThread worker = ForkJoinPool. - defaultForkJoinWorkerThreadFactory.newThread(pool); - worker.setName("RATISCREATEPIPELINE" + worker.getPoolIndex()); - return worker; - }); - - private final ForkJoinPool forkJoinPool = new ForkJoinPool( - parallelismForPool, factory, null, false); - private final GrpcTlsConfig tlsConfig; - - RatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, Configuration conf, - GrpcTlsConfig tlsConfig) { - this.nodeManager = nodeManager; - this.stateManager = stateManager; - this.conf = conf; - this.tlsConfig = tlsConfig; - } - - - /** - * Create pluggable container placement policy implementation instance. - * - * @param nodeManager - SCM node manager. - * @param conf - configuration. - * @return SCM container placement policy implementation instance. - */ - @SuppressWarnings("unchecked") - // TODO: should we rename ContainerPlacementPolicy to PipelinePlacementPolicy? 
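The reflective factory that follows reads its implementation class from configuration; a hedged sketch of the operator-side selection, where the config key comes from ScmConfigKeys but the capacity-based implementation class is an assumption.

OzoneConfiguration conf = new OzoneConfiguration();
conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
    SCMContainerPlacementCapacity.class,   // assumed alternative implementation
    ContainerPlacementPolicy.class);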
- private static ContainerPlacementPolicy createContainerPlacementPolicy( - final NodeManager nodeManager, final Configuration conf) { - Class implClass = - (Class) conf.getClass( - ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementRandom.class); - - try { - Constructor ctor = - implClass.getDeclaredConstructor(NodeManager.class, - Configuration.class); - return ctor.newInstance(nodeManager, conf); - } catch (RuntimeException e) { - throw e; - } catch (InvocationTargetException e) { - throw new RuntimeException(implClass.getName() - + " could not be constructed.", e.getCause()); - } catch (Exception e) { -// LOG.error("Unhandled exception occurred, Placement policy will not " + -// "be functional."); - throw new IllegalArgumentException("Unable to load " + - "ContainerPlacementPolicy", e); - } - } - - @Override - public Pipeline create(ReplicationFactor factor) throws IOException { - // Get set of datanodes already used for ratis pipeline - Set dnsUsed = new HashSet<>(); - stateManager.getPipelines(ReplicationType.RATIS, factor).stream().filter( - p -> p.getPipelineState().equals(PipelineState.OPEN) || - p.getPipelineState().equals(PipelineState.DORMANT) || - p.getPipelineState().equals(PipelineState.ALLOCATED)) - .forEach(p -> dnsUsed.addAll(p.getNodes())); - - // Get list of healthy nodes - List dns = - nodeManager.getNodes(NodeState.HEALTHY) - .parallelStream() - .filter(dn -> !dnsUsed.contains(dn)) - .limit(factor.getNumber()) - .collect(Collectors.toList()); - if (dns.size() < factor.getNumber()) { - String e = String - .format("Cannot create pipeline of factor %d using %d nodes.", - factor.getNumber(), dns.size()); - throw new InsufficientDatanodesException(e); - } - - Pipeline pipeline = Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setState(PipelineState.OPEN) - .setType(ReplicationType.RATIS) - .setFactor(factor) - .setNodes(dns) - .build(); - initializePipeline(pipeline); - return pipeline; - } - - @Override - public Pipeline create(ReplicationFactor factor, - List nodes) { - return Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setState(PipelineState.OPEN) - .setType(ReplicationType.RATIS) - .setFactor(factor) - .setNodes(nodes) - .build(); - } - - - @Override - public void shutdown() { - forkJoinPool.shutdownNow(); - try { - forkJoinPool.awaitTermination(60, TimeUnit.SECONDS); - } catch (Exception e) { - LOG.error("Unexpected exception occurred during shutdown of " + - "RatisPipelineProvider", e); - } - } - - protected void initializePipeline(Pipeline pipeline) throws IOException { - final RaftGroup group = RatisHelper.newRaftGroup(pipeline); - if (LOG.isDebugEnabled()) { - LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group); - } - callRatisRpc(pipeline.getNodes(), - (raftClient, peer) -> { - RaftClientReply reply = raftClient.groupAdd(group, peer.getId()); - if (reply == null || !reply.isSuccess()) { - String msg = "Pipeline initialization failed for pipeline:" - + pipeline.getId() + " node:" + peer.getId(); - LOG.error(msg); - throw new IOException(msg); - } - }); - } - - private void callRatisRpc(List datanodes, - CheckedBiConsumer< RaftClient, RaftPeer, IOException> rpc) - throws IOException { - if (datanodes.isEmpty()) { - return; - } - - final String rpcType = conf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(conf); - final List< IOException > exceptions = - 
Collections.synchronizedList(new ArrayList<>()); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(conf); - final TimeDuration requestTimeout = - RatisHelper.getClientRequestTimeout(conf); - try { - forkJoinPool.submit(() -> { - datanodes.parallelStream().forEach(d -> { - final RaftPeer p = RatisHelper.toRaftPeer(d); - try (RaftClient client = RatisHelper - .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, - retryPolicy, maxOutstandingRequests, tlsConfig, - requestTimeout)) { - rpc.accept(client, p); - } catch (IOException ioe) { - String errMsg = - "Failed invoke Ratis rpc " + rpc + " for " + d.getUuid(); - LOG.error(errMsg, ioe); - exceptions.add(new IOException(errMsg, ioe)); - } - }); - }).get(); - } catch (ExecutionException | RejectedExecutionException ex) { - LOG.error(ex.getClass().getName() + " exception occurred during " + - "createPipeline", ex); - throw new IOException(ex.getClass().getName() + " exception occurred " + - "during createPipeline", ex); - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - throw new IOException("Interrupt exception occurred during " + - "createPipeline", ex); - } - if (!exceptions.isEmpty()) { - throw MultipleIOException.createIOException(exceptions); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java deleted file mode 100644 index 20fa092b2d0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Utility class for Ratis pipelines. Contains methods to create and destroy - * ratis pipelines. 
- */ -public final class RatisPipelineUtils { - - private static final Logger LOG = - LoggerFactory.getLogger(RatisPipelineUtils.class); - - private RatisPipelineUtils() { - } - /** - * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all - * the datanodes. - * - * @param pipeline - Pipeline to be destroyed - * @param ozoneConf - Ozone configuration - * @param grpcTlsConfig - * @throws IOException - */ - static void destroyPipeline(Pipeline pipeline, Configuration ozoneConf, - GrpcTlsConfig grpcTlsConfig) { - final RaftGroup group = RatisHelper.newRaftGroup(pipeline); - if (LOG.isDebugEnabled()) { - LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group); - } - for (DatanodeDetails dn : pipeline.getNodes()) { - try { - destroyPipeline(dn, pipeline.getId(), ozoneConf, grpcTlsConfig); - } catch (IOException e) { - LOG.warn("Pipeline destroy failed for pipeline={} dn={}", - pipeline.getId(), dn); - } - } - } - - /** - * Sends ratis command to destroy pipeline on the given datanode. - * - * @param dn - Datanode on which pipeline needs to be destroyed - * @param pipelineID - ID of pipeline to be destroyed - * @param ozoneConf - Ozone configuration - * @param grpcTlsConfig - grpc tls configuration - * @throws IOException - */ - static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, - Configuration ozoneConf, GrpcTlsConfig grpcTlsConfig) throws IOException { - final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); - final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); - final RaftPeer p = RatisHelper.toRaftPeer(dn); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(ozoneConf); - final TimeDuration requestTimeout = - RatisHelper.getClientRequestTimeout(ozoneConf); - try(RaftClient client = RatisHelper - .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, - retryPolicy, maxOutstandingRequests, grpcTlsConfig, - requestTimeout)) { - client.groupRemove(RaftGroupId.valueOf(pipelineID.getId()), - true, p.getId()); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java deleted file mode 100644 index 0964f6d4db2..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ /dev/null @@ -1,469 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.apache.hadoop.hdds.utils.Scheduler; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import static org.apache.hadoop.hdds.scm - .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm - .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB; - -/** - * Implements api needed for management of pipelines. All the write operations - * for pipelines must come via PipelineManager. It synchronises all write - * and read operations via a ReadWriteLock. 
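The implementation below pairs every on-disk write with the matching in-memory update under one write lock so the two views cannot diverge; a condensed sketch of that idiom, with key and value standing in for the serialized pipeline ID and pipeline.

lock.writeLock().lock();
try {
  pipelineStore.put(key, value);        // persist the pipeline first
  stateManager.addPipeline(pipeline);   // then mutate the in-memory state map
  nodeManager.addPipeline(pipeline);    // keep node bookkeeping in step
} finally {
  lock.writeLock().unlock();
}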
- */ -public class SCMPipelineManager implements PipelineManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMPipelineManager.class); - - private final ReadWriteLock lock; - private final PipelineFactory pipelineFactory; - private final PipelineStateManager stateManager; - private final BackgroundPipelineCreator backgroundPipelineCreator; - private Scheduler scheduler; - private MetadataStore pipelineStore; - - private final EventPublisher eventPublisher; - private final NodeManager nodeManager; - private final SCMPipelineMetrics metrics; - private final Configuration conf; - // Pipeline Manager MXBean - private ObjectName pmInfoBean; - private GrpcTlsConfig grpcTlsConfig; - - public SCMPipelineManager(Configuration conf, NodeManager nodeManager, - EventPublisher eventPublisher, GrpcTlsConfig grpcTlsConfig) - throws IOException { - this.lock = new ReentrantReadWriteLock(); - this.conf = conf; - this.stateManager = new PipelineStateManager(conf); - this.pipelineFactory = new PipelineFactory(nodeManager, stateManager, - conf, grpcTlsConfig); - // TODO: See if thread priority needs to be set for these threads - scheduler = new Scheduler("RatisPipelineUtilsThread", false, 1); - this.backgroundPipelineCreator = - new BackgroundPipelineCreator(this, scheduler, conf); - int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - final File metaDir = ServerUtils.getScmDbDir(conf); - final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB); - this.pipelineStore = - MetadataStoreBuilder.newBuilder() - .setCreateIfMissing(true) - .setConf(conf) - .setDbFile(pipelineDBPath) - .setCacheSize(cacheSize * OzoneConsts.MB) - .build(); - this.eventPublisher = eventPublisher; - this.nodeManager = nodeManager; - this.metrics = SCMPipelineMetrics.create(); - this.pmInfoBean = MBeans.register("SCMPipelineManager", - "SCMPipelineManagerInfo", this); - initializePipelineState(); - this.grpcTlsConfig = grpcTlsConfig; - } - - public PipelineStateManager getStateManager() { - return stateManager; - } - - @VisibleForTesting - public void setPipelineProvider(ReplicationType replicationType, - PipelineProvider provider) { - pipelineFactory.setProvider(replicationType, provider); - } - - private void initializePipelineState() throws IOException { - if (pipelineStore.isEmpty()) { - LOG.info("No pipeline exists in current db"); - return; - } - List> pipelines = - pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, - (MetadataKeyFilters.MetadataKeyFilter[])null); - - for (Map.Entry entry : pipelines) { - HddsProtos.Pipeline.Builder pipelineBuilder = HddsProtos.Pipeline - .newBuilder(HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue())); - Pipeline pipeline = Pipeline.getFromProtobuf(pipelineBuilder.setState( - HddsProtos.PipelineState.PIPELINE_ALLOCATED).build()); - Preconditions.checkNotNull(pipeline); - stateManager.addPipeline(pipeline); - nodeManager.addPipeline(pipeline); - } - } - - @Override - public synchronized Pipeline createPipeline( - ReplicationType type, ReplicationFactor factor) throws IOException { - lock.writeLock().lock(); - try { - Pipeline pipeline = pipelineFactory.create(type, factor); - pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(), - pipeline.getProtobufMessage().toByteArray()); - stateManager.addPipeline(pipeline); - nodeManager.addPipeline(pipeline); - metrics.incNumPipelineCreated(); - metrics.createPerPipelineMetrics(pipeline); - return pipeline; - } catch (InsufficientDatanodesException idEx) { - throw 
idEx; - } catch (IOException ex) { - metrics.incNumPipelineCreationFailed(); - throw ex; - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, - List nodes) { - // This will mostly be used to create dummy pipeline for SimplePipelines. - // We don't update the metrics for SimplePipelines. - lock.writeLock().lock(); - try { - return pipelineFactory.create(type, factor, nodes); - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public Pipeline getPipeline(PipelineID pipelineID) - throws PipelineNotFoundException { - lock.readLock().lock(); - try { - return stateManager.getPipeline(pipelineID); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines() { - lock.readLock().lock(); - try { - return stateManager.getPipelines(); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type) { - lock.readLock().lock(); - try { - return stateManager.getPipelines(type); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor) { - lock.readLock().lock(); - try { - return stateManager.getPipelines(type, factor); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state) { - lock.readLock().lock(); - try { - return stateManager.getPipelines(type, factor, state); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public List getPipelines(ReplicationType type, - ReplicationFactor factor, Pipeline.PipelineState state, - Collection excludeDns, - Collection excludePipelines) { - lock.readLock().lock(); - try { - return stateManager - .getPipelines(type, factor, state, excludeDns, excludePipelines); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public void addContainerToPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException { - lock.writeLock().lock(); - try { - stateManager.addContainerToPipeline(pipelineID, containerID); - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public void removeContainerFromPipeline(PipelineID pipelineID, - ContainerID containerID) throws IOException { - lock.writeLock().lock(); - try { - stateManager.removeContainerFromPipeline(pipelineID, containerID); - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public NavigableSet getContainersInPipeline( - PipelineID pipelineID) throws IOException { - lock.readLock().lock(); - try { - return stateManager.getContainers(pipelineID); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public int getNumberOfContainers(PipelineID pipelineID) throws IOException { - return stateManager.getNumberOfContainers(pipelineID); - } - - @Override - public void openPipeline(PipelineID pipelineId) throws IOException { - lock.writeLock().lock(); - try { - Pipeline pipeline = stateManager.openPipeline(pipelineId); - metrics.createPerPipelineMetrics(pipeline); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Finalizes pipeline in the SCM. Removes pipeline and makes rpc call to - * destroy pipeline on the datanodes immediately or after timeout based on the - * value of onTimeout parameter. 
- * - * @param pipeline - Pipeline to be destroyed - * @param onTimeout - if true pipeline is removed and destroyed on - * datanodes after timeout - * @throws IOException - */ - @Override - public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) - throws IOException { - LOG.info("destroying pipeline:{}", pipeline); - finalizePipeline(pipeline.getId()); - if (onTimeout) { - long pipelineDestroyTimeoutInMillis = - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, - ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - scheduler.schedule(() -> destroyPipeline(pipeline), - pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG, - String.format("Destroy pipeline failed for pipeline:%s", pipeline)); - } else { - destroyPipeline(pipeline); - } - } - - @Override - public Map getPipelineInfo() { - final Map pipelineInfo = new HashMap<>(); - for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) { - pipelineInfo.put(state.toString(), 0); - } - stateManager.getPipelines().forEach(pipeline -> - pipelineInfo.computeIfPresent( - pipeline.getPipelineState().toString(), (k, v) -> v + 1)); - return pipelineInfo; - } - - /** - * Schedules a fixed interval job to create pipelines. - */ - @Override - public void startPipelineCreator() { - backgroundPipelineCreator.startFixedIntervalPipelineCreator(); - } - - /** - * Triggers pipeline creation after the specified time. - */ - @Override - public void triggerPipelineCreation() { - backgroundPipelineCreator.triggerPipelineCreation(); - } - - /** - * Activates a dormant pipeline. - * - * @param pipelineID ID of the pipeline to activate. - * @throws IOException in case of any Exception - */ - @Override - public void activatePipeline(PipelineID pipelineID) - throws IOException { - stateManager.activatePipeline(pipelineID); - } - - /** - * Deactivates an active pipeline. - * - * @param pipelineID ID of the pipeline to deactivate. - * @throws IOException in case of any Exception - */ - @Override - public void deactivatePipeline(PipelineID pipelineID) - throws IOException { - stateManager.deactivatePipeline(pipelineID); - } - - /** - * Moves the pipeline to CLOSED state and sends close container command for - * all the containers in the pipeline. - * - * @param pipelineId - ID of the pipeline to be moved to CLOSED state. - * @throws IOException - */ - private void finalizePipeline(PipelineID pipelineId) throws IOException { - lock.writeLock().lock(); - try { - stateManager.finalizePipeline(pipelineId); - Set containerIDs = stateManager.getContainers(pipelineId); - for (ContainerID containerID : containerIDs) { - eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); - } - metrics.removePipelineMetrics(pipelineId); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all - * the datanodes for ratis pipelines. - * - * @param pipeline - Pipeline to be destroyed - * @throws IOException - */ - private void destroyPipeline(Pipeline pipeline) throws IOException { - RatisPipelineUtils.destroyPipeline(pipeline, conf, grpcTlsConfig); - // remove the pipeline from the pipeline manager - removePipeline(pipeline.getId()); - triggerPipelineCreation(); - } - - /** - * Removes the pipeline from the db and pipeline state map. 
- * - * @param pipelineId - ID of the pipeline to be removed - * @throws IOException - */ - private void removePipeline(PipelineID pipelineId) throws IOException { - lock.writeLock().lock(); - try { - pipelineStore.delete(pipelineId.getProtobuf().toByteArray()); - Pipeline pipeline = stateManager.removePipeline(pipelineId); - nodeManager.removePipeline(pipeline); - metrics.incNumPipelineDestroyed(); - } catch (IOException ex) { - metrics.incNumPipelineDestroyFailed(); - throw ex; - } finally { - lock.writeLock().unlock(); - } - } - - @Override - public void incNumBlocksAllocatedMetric(PipelineID id) { - metrics.incNumBlocksAllocated(id); - } - - @Override - public GrpcTlsConfig getGrpcTlsConfig() { - return grpcTlsConfig; - } - - @Override - public void close() throws IOException { - if (scheduler != null) { - scheduler.close(); - scheduler = null; - } - - if (pipelineStore != null) { - pipelineStore.close(); - pipelineStore = null; - } - if(pmInfoBean != null) { - MBeans.unregister(this.pmInfoBean); - pmInfoBean = null; - } - if(metrics != null) { - metrics.unRegister(); - } - // shutdown pipeline provider. - pipelineFactory.shutdown(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java deleted file mode 100644 index d0f7f6ef3be..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineMetrics.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; -import org.apache.hadoop.metrics2.lib.MetricsRegistry; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; - -/** - * This class maintains Pipeline related metrics. 
- */ -@InterfaceAudience.Private -@Metrics(about = "SCM PipelineManager Metrics", context = "ozone") -public final class SCMPipelineMetrics implements MetricsSource { - - private static final String SOURCE_NAME = - SCMPipelineMetrics.class.getSimpleName(); - - private MetricsRegistry registry; - - private @Metric MutableCounterLong numPipelineCreated; - private @Metric MutableCounterLong numPipelineCreationFailed; - private @Metric MutableCounterLong numPipelineDestroyed; - private @Metric MutableCounterLong numPipelineDestroyFailed; - private @Metric MutableCounterLong numPipelineReportProcessed; - private @Metric MutableCounterLong numPipelineReportProcessingFailed; - private Map numBlocksAllocated; - - /** Private constructor. */ - private SCMPipelineMetrics() { - this.registry = new MetricsRegistry(SOURCE_NAME); - numBlocksAllocated = new ConcurrentHashMap<>(); - } - - /** - * Create and returns SCMPipelineMetrics instance. - * - * @return SCMPipelineMetrics - */ - public static SCMPipelineMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, "SCM PipelineManager Metrics", - new SCMPipelineMetrics()); - } - - /** - * Unregister the metrics instance. - */ - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } - - @Override - @SuppressWarnings("SuspiciousMethodCalls") - public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder recordBuilder = collector.addRecord(SOURCE_NAME); - numPipelineCreated.snapshot(recordBuilder, true); - numPipelineCreationFailed.snapshot(recordBuilder, true); - numPipelineDestroyed.snapshot(recordBuilder, true); - numPipelineDestroyFailed.snapshot(recordBuilder, true); - numPipelineReportProcessed.snapshot(recordBuilder, true); - numPipelineReportProcessingFailed.snapshot(recordBuilder, true); - numBlocksAllocated - .forEach((pid, metric) -> metric.snapshot(recordBuilder, true)); - } - - void createPerPipelineMetrics(Pipeline pipeline) { - numBlocksAllocated.put(pipeline.getId(), new MutableCounterLong(Interns - .info(getBlockAllocationMetricName(pipeline), - "Number of blocks allocated in pipeline " + pipeline.getId()), 0L)); - } - - public static String getBlockAllocationMetricName(Pipeline pipeline) { - return "NumBlocksAllocated-" + pipeline.getType() + "-" + pipeline - .getFactor() + "-" + pipeline.getId().getId(); - } - - void removePipelineMetrics(PipelineID pipelineID) { - numBlocksAllocated.remove(pipelineID); - } - - /** - * Increments number of blocks allocated for the pipeline. - */ - void incNumBlocksAllocated(PipelineID pipelineID) { - Optional.of(numBlocksAllocated.get(pipelineID)).ifPresent( - MutableCounterLong::incr); - } - - /** - * Increments number of successful pipeline creation count. - */ - void incNumPipelineCreated() { - numPipelineCreated.incr(); - } - - /** - * Increments number of failed pipeline creation count. - */ - void incNumPipelineCreationFailed() { - numPipelineCreationFailed.incr(); - } - - /** - * Increments number of successful pipeline destroy count. - */ - void incNumPipelineDestroyed() { - numPipelineDestroyed.incr(); - } - - /** - * Increments number of failed pipeline destroy count. - */ - void incNumPipelineDestroyFailed() { - numPipelineDestroyFailed.incr(); - } - - /** - * Increments number of pipeline report processed count. 
- */ - void incNumPipelineReportProcessed() { - numPipelineReportProcessed.incr(); - } - - /** - * Increments number of pipeline report processing failed count. - */ - void incNumPipelineReportProcessingFailed() { - numPipelineReportProcessingFailed.incr(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java deleted file mode 100644 index ab98dfa3ed7..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -/** - * Implements Api for creating stand alone pipelines. - */ -public class SimplePipelineProvider implements PipelineProvider { - - private final NodeManager nodeManager; - - public SimplePipelineProvider(NodeManager nodeManager) { - this.nodeManager = nodeManager; - } - - @Override - public Pipeline create(ReplicationFactor factor) throws IOException { - List dns = - nodeManager.getNodes(NodeState.HEALTHY); - if (dns.size() < factor.getNumber()) { - String e = String - .format("Cannot create pipeline of factor %d using %d nodes.", - factor.getNumber(), dns.size()); - throw new IOException(e); - } - - Collections.shuffle(dns); - return Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setState(PipelineState.OPEN) - .setType(ReplicationType.STAND_ALONE) - .setFactor(factor) - .setNodes(dns.subList(0, factor.getNumber())) - .build(); - } - - @Override - public Pipeline create(ReplicationFactor factor, - List nodes) { - return Pipeline.newBuilder() - .setId(PipelineID.randomId()) - .setState(PipelineState.OPEN) - .setType(ReplicationType.STAND_ALONE) - .setFactor(factor) - .setNodes(nodes) - .build(); - } - - @Override - public void shutdown() { - // Do nothing. 
- } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java deleted file mode 100644 index 51adc888661..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; -/** - Ozone supports the notion of different kind of pipelines. - That means that we can have a replication pipeline build on - Ratis, Simple or some other protocol. All Pipeline managers - the entities in charge of pipelines reside in the package. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java deleted file mode 100644 index 2d14fa6b060..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SCMSecurityProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,186 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.protocol; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertificateRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetDataNodeCertRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto.ResponseCode; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetOMCertRequestProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityRequest; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMSecurityResponse; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.Status; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; - -/** - * This class is the server-side translator that forwards requests received on - * {@link SCMSecurityProtocolPB} to the {@link - * SCMSecurityProtocol} server implementation. 
- */ -public class SCMSecurityProtocolServerSideTranslatorPB - implements SCMSecurityProtocolPB { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMSecurityProtocolServerSideTranslatorPB.class); - - private final SCMSecurityProtocol impl; - - private OzoneProtocolMessageDispatcher - dispatcher; - - public SCMSecurityProtocolServerSideTranslatorPB(SCMSecurityProtocol impl, - ProtocolMessageMetrics messageMetrics) { - this.impl = impl; - this.dispatcher = - new OzoneProtocolMessageDispatcher<>("ScmSecurityProtocol", - messageMetrics, LOG); - } - - @Override - public SCMSecurityResponse submitRequest(RpcController controller, - SCMSecurityRequest request) throws ServiceException { - return dispatcher.processRequest(request, this::processRequest, - request.getCmdType(), request.getTraceID()); - } - - public SCMSecurityResponse processRequest(SCMSecurityRequest request) - throws ServiceException { - try { - switch (request.getCmdType()) { - case GetCertificate: - return SCMSecurityResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetCertResponseProto( - getCertificate(request.getGetCertificateRequest())) - .build(); - case GetCACertificate: - return SCMSecurityResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetCertResponseProto( - getCACertificate(request.getGetCACertificateRequest())) - .build(); - case GetOMCertificate: - return SCMSecurityResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetCertResponseProto( - getOMCertificate(request.getGetOMCertRequest())) - .build(); - case GetDataNodeCertificate: - return SCMSecurityResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetCertResponseProto( - getDataNodeCertificate(request.getGetDataNodeCertRequest())) - .build(); - default: - throw new IllegalArgumentException( - "Unknown request type: " + request.getCmdType()); - } - } catch (IOException e) { - throw new ServiceException(e); - } - } - - /** - * Get SCM signed certificate for DataNode. - * - * @param request - * @return SCMGetDataNodeCertResponseProto. - */ - - public SCMGetCertResponseProto getDataNodeCertificate( - SCMGetDataNodeCertRequestProto request) - throws IOException { - - String certificate = impl - .getDataNodeCertificate(request.getDatanodeDetails(), - request.getCSR()); - SCMGetCertResponseProto.Builder builder = - SCMGetCertResponseProto - .newBuilder() - .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate) - .setX509CACertificate(impl.getCACertificate()); - - return builder.build(); - - } - - /** - * Get SCM signed certificate for OzoneManager. - * - * @param request - * @return SCMGetCertResponseProto. 
- */ - public SCMGetCertResponseProto getOMCertificate( - SCMGetOMCertRequestProto request) throws IOException { - String certificate = impl - .getOMCertificate(request.getOmDetails(), - request.getCSR()); - SCMGetCertResponseProto.Builder builder = - SCMGetCertResponseProto - .newBuilder() - .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate) - .setX509CACertificate(impl.getCACertificate()); - return builder.build(); - - } - - public SCMGetCertResponseProto getCertificate( - SCMGetCertificateRequestProto request) throws IOException { - - String certificate = impl.getCertificate(request.getCertSerialId()); - SCMGetCertResponseProto.Builder builder = - SCMGetCertResponseProto - .newBuilder() - .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate); - return builder.build(); - - } - - public SCMGetCertResponseProto getCACertificate( - SCMSecurityProtocolProtos.SCMGetCACertificateRequestProto request) - throws IOException { - - String certificate = impl.getCACertificate(); - SCMGetCertResponseProto.Builder builder = - SCMGetCertResponseProto - .newBuilder() - .setResponseCode(ResponseCode.success) - .setX509Certificate(certificate); - return builder.build(); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java deleted file mode 100644 index b6ce067c00c..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.protocol; - -import java.io.IOException; -import java.util.List; -import java.util.stream.Collectors; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesResponseProto; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Status; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is the server-side translator that forwards requests received on - * {@link StorageContainerLocationProtocolPB} to the - * {@link StorageContainerLocationProtocol} server implementation. - */ -@InterfaceAudience.Private -public final class ScmBlockLocationProtocolServerSideTranslatorPB - implements ScmBlockLocationProtocolPB { - - private final ScmBlockLocationProtocol impl; - - private static final Logger LOG = LoggerFactory - .getLogger(ScmBlockLocationProtocolServerSideTranslatorPB.class); - - private final OzoneProtocolMessageDispatcher - dispatcher; - - /** - * Creates a new ScmBlockLocationProtocolServerSideTranslatorPB. 
- * - * @param impl {@link ScmBlockLocationProtocol} server implementation - */ - public ScmBlockLocationProtocolServerSideTranslatorPB( - ScmBlockLocationProtocol impl, - ProtocolMessageMetrics metrics) throws IOException { - this.impl = impl; - dispatcher = new OzoneProtocolMessageDispatcher<>( - "BlockLocationProtocol", metrics, LOG); - - } - - private SCMBlockLocationResponse.Builder createSCMBlockResponse( - ScmBlockLocationProtocolProtos.Type cmdType, - String traceID) { - return SCMBlockLocationResponse.newBuilder() - .setCmdType(cmdType) - .setTraceID(traceID); - } - - @Override - public SCMBlockLocationResponse send(RpcController controller, - SCMBlockLocationRequest request) throws ServiceException { - return dispatcher.processRequest( - request, - this::processMessage, - request.getCmdType(), - request.getTraceID()); - } - - private SCMBlockLocationResponse processMessage( - SCMBlockLocationRequest request) throws ServiceException { - SCMBlockLocationResponse.Builder response = createSCMBlockResponse( - request.getCmdType(), - request.getTraceID()); - response.setSuccess(true); - response.setStatus(Status.OK); - - try { - switch (request.getCmdType()) { - case AllocateScmBlock: - response.setAllocateScmBlockResponse( - allocateScmBlock(request.getAllocateScmBlockRequest())); - break; - case DeleteScmKeyBlocks: - response.setDeleteScmKeyBlocksResponse( - deleteScmKeyBlocks(request.getDeleteScmKeyBlocksRequest())); - break; - case GetScmInfo: - response.setGetScmInfoResponse( - getScmInfo(request.getGetScmInfoRequest())); - break; - case SortDatanodes: - response.setSortDatanodesResponse( - sortDatanodes(request.getSortDatanodesRequest())); - break; - default: - // Should never happen - throw new IOException("Unknown Operation " + request.getCmdType() + - " in ScmBlockLocationProtocol"); - } - } catch (IOException e) { - response.setSuccess(false); - response.setStatus(exceptionToResponseStatus(e)); - if (e.getMessage() != null) { - response.setMessage(e.getMessage()); - } - } - - return response.build(); - } - - private Status exceptionToResponseStatus(IOException ex) { - if (ex instanceof SCMException) { - return Status.values()[((SCMException) ex).getResult().ordinal()]; - } else { - return Status.INTERNAL_ERROR; - } - } - - public AllocateScmBlockResponseProto allocateScmBlock( - AllocateScmBlockRequestProto request) - throws IOException { - List allocatedBlocks = - impl.allocateBlock(request.getSize(), - request.getNumBlocks(), request.getType(), - request.getFactor(), request.getOwner(), - ExcludeList.getFromProtoBuf(request.getExcludeList())); - - AllocateScmBlockResponseProto.Builder builder = - AllocateScmBlockResponseProto.newBuilder(); - - if (allocatedBlocks.size() < request.getNumBlocks()) { - throw new SCMException("Allocated " + allocatedBlocks.size() + - " blocks. 
Requested " + request.getNumBlocks() + " blocks", - SCMException.ResultCodes.FAILED_TO_ALLOCATE_ENOUGH_BLOCKS); - } - for (AllocatedBlock block : allocatedBlocks) { - builder.addBlocks(AllocateBlockResponse.newBuilder() - .setContainerBlockID(block.getBlockID().getProtobuf()) - .setPipeline(block.getPipeline().getProtobufMessage())); - } - - return builder.build(); - } - - public DeleteScmKeyBlocksResponseProto deleteScmKeyBlocks( - DeleteScmKeyBlocksRequestProto req) - throws IOException { - DeleteScmKeyBlocksResponseProto.Builder resp = - DeleteScmKeyBlocksResponseProto.newBuilder(); - - List infoList = req.getKeyBlocksList().stream() - .map(BlockGroup::getFromProto).collect(Collectors.toList()); - final List results = - impl.deleteKeyBlocks(infoList); - for (DeleteBlockGroupResult result : results) { - DeleteKeyBlocksResultProto.Builder deleteResult = - DeleteKeyBlocksResultProto - .newBuilder() - .setObjectKey(result.getObjectKey()) - .addAllBlockResults(result.getBlockResultProtoList()); - resp.addResults(deleteResult.build()); - } - return resp.build(); - } - - public HddsProtos.GetScmInfoResponseProto getScmInfo( - HddsProtos.GetScmInfoRequestProto req) - throws IOException { - ScmInfo scmInfo = impl.getScmInfo(); - return HddsProtos.GetScmInfoResponseProto.newBuilder() - .setClusterId(scmInfo.getClusterId()) - .setScmId(scmInfo.getScmId()) - .build(); - } - - public SortDatanodesResponseProto sortDatanodes( - SortDatanodesRequestProto request) throws ServiceException { - SortDatanodesResponseProto.Builder resp = - SortDatanodesResponseProto.newBuilder(); - try { - List nodeList = request.getNodeNetworkNameList(); - final List results = - impl.sortDatanodes(nodeList, request.getClient()); - if (results != null && results.size() > 0) { - results.stream().forEach(dn -> resp.addNode(dn.getProtoBufMessage())); - } - return resp.build(); - } catch (IOException ex) { - throw new ServiceException(ex); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java deleted file mode 100644 index 0d2f4700003..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,393 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocol; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ActivatePipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto; -import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse.Status; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerResponseProto; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is the server-side translator that forwards requests received on - * {@link StorageContainerLocationProtocolPB} to the - * {@link StorageContainerLocationProtocol} server implementation. - */ -@InterfaceAudience.Private -public final class StorageContainerLocationProtocolServerSideTranslatorPB - implements StorageContainerLocationProtocolPB { - - private static final Logger LOG = - LoggerFactory.getLogger( - StorageContainerLocationProtocolServerSideTranslatorPB.class); - - private final StorageContainerLocationProtocol impl; - - private OzoneProtocolMessageDispatcher - dispatcher; - - /** - * Creates a new StorageContainerLocationProtocolServerSideTranslatorPB. 
- * - * @param impl {@link StorageContainerLocationProtocol} server - * implementation - * @param protocolMetrics - */ - public StorageContainerLocationProtocolServerSideTranslatorPB( - StorageContainerLocationProtocol impl, - ProtocolMessageMetrics protocolMetrics) throws IOException { - this.impl = impl; - this.dispatcher = - new OzoneProtocolMessageDispatcher<>("ScmContainerLocation", - protocolMetrics, LOG); - } - - @Override - public ScmContainerLocationResponse submitRequest(RpcController controller, - ScmContainerLocationRequest request) throws ServiceException { - return dispatcher - .processRequest(request, this::processRequest, request.getCmdType(), - request.getTraceID()); - } - - public ScmContainerLocationResponse processRequest( - ScmContainerLocationRequest request) throws ServiceException { - try { - switch (request.getCmdType()) { - case AllocateContainer: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setContainerResponse( - allocateContainer(request.getContainerRequest())) - .build(); - case GetContainer: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetContainerResponse( - getContainer(request.getGetContainerRequest())) - .build(); - case GetContainerWithPipeline: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetContainerWithPipelineResponse(getContainerWithPipeline( - request.getGetContainerWithPipelineRequest())) - .build(); - case ListContainer: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setScmListContainerResponse(listContainer( - request.getScmListContainerRequest())) - .build(); - case QueryNode: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setNodeQueryResponse(queryNode(request.getNodeQueryRequest())) - .build(); - case NotifyObjectStageChange: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setObjectStageChangeResponse(notifyObjectStageChange( - request.getObjectStageChangeRequest())) - .build(); - case ListPipelines: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setListPipelineResponse(listPipelines( - request.getListPipelineRequest())) - .build(); - case ActivatePipeline: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setActivatePipelineResponse(activatePipeline( - request.getActivatePipelineRequest())) - .build(); - case GetScmInfo: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setGetScmInfoResponse(getScmInfo( - request.getGetScmInfoRequest())) - .build(); - case InSafeMode: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setInSafeModeResponse(inSafeMode( - request.getInSafeModeRequest())) - .build(); - case ForceExitSafeMode: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setForceExitSafeModeResponse(forceExitSafeMode( - request.getForceExitSafeModeRequest())) - .build(); - case StartReplicationManager: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - 
.setStartReplicationManagerResponse(startReplicationManager( - request.getStartReplicationManagerRequest())) - .build(); - case StopReplicationManager: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setStopReplicationManagerResponse(stopReplicationManager( - request.getStopReplicationManagerRequest())) - .build(); - case GetReplicationManagerStatus: - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()) - .setStatus(Status.OK) - .setReplicationManagerStatusResponse(getReplicationManagerStatus( - request.getSeplicationManagerStatusRequest())) - .build(); - default: - throw new IllegalArgumentException( - "Unknown command type: " + request.getCmdType()); - } - - } catch (IOException e) { - throw new ServiceException(e); - } - } - - public ContainerResponseProto allocateContainer(ContainerRequestProto request) - throws IOException { - ContainerWithPipeline containerWithPipeline = impl - .allocateContainer(request.getReplicationType(), - request.getReplicationFactor(), request.getOwner()); - return ContainerResponseProto.newBuilder() - .setContainerWithPipeline(containerWithPipeline.getProtobuf()) - .setErrorCode(ContainerResponseProto.Error.success) - .build(); - - } - - public GetContainerResponseProto getContainer( - GetContainerRequestProto request) throws IOException { - ContainerInfo container = impl.getContainer(request.getContainerID()); - return GetContainerResponseProto.newBuilder() - .setContainerInfo(container.getProtobuf()) - .build(); - } - - public GetContainerWithPipelineResponseProto getContainerWithPipeline( - GetContainerWithPipelineRequestProto request) - throws IOException { - ContainerWithPipeline container = impl - .getContainerWithPipeline(request.getContainerID()); - return GetContainerWithPipelineResponseProto.newBuilder() - .setContainerWithPipeline(container.getProtobuf()) - .build(); - } - - public SCMListContainerResponseProto listContainer( - SCMListContainerRequestProto request) throws IOException { - - long startContainerID = 0; - int count = -1; - - // Arguments check. - if (request.hasStartContainerID()) { - // End container name is given. 
- startContainerID = request.getStartContainerID(); - } - count = request.getCount(); - List containerList = - impl.listContainer(startContainerID, count); - SCMListContainerResponseProto.Builder builder = - SCMListContainerResponseProto.newBuilder(); - for (ContainerInfo container : containerList) { - builder.addContainers(container.getProtobuf()); - } - return builder.build(); - } - - public SCMDeleteContainerResponseProto deleteContainer( - SCMDeleteContainerRequestProto request) - throws IOException { - impl.deleteContainer(request.getContainerID()); - return SCMDeleteContainerResponseProto.newBuilder().build(); - - } - - public NodeQueryResponseProto queryNode( - StorageContainerLocationProtocolProtos.NodeQueryRequestProto request) - throws IOException { - - HddsProtos.NodeState nodeState = request.getState(); - List datanodes = impl.queryNode(nodeState, - request.getScope(), request.getPoolName()); - return NodeQueryResponseProto.newBuilder() - .addAllDatanodes(datanodes) - .build(); - - } - - public ObjectStageChangeResponseProto notifyObjectStageChange( - ObjectStageChangeRequestProto request) - throws IOException { - impl.notifyObjectStageChange(request.getType(), request.getId(), - request.getOp(), request.getStage()); - return ObjectStageChangeResponseProto.newBuilder().build(); - } - - public ListPipelineResponseProto listPipelines( - ListPipelineRequestProto request) - throws IOException { - ListPipelineResponseProto.Builder builder = ListPipelineResponseProto - .newBuilder(); - List pipelines = impl.listPipelines(); - for (Pipeline pipeline : pipelines) { - HddsProtos.Pipeline protobufMessage = pipeline.getProtobufMessage(); - builder.addPipelines(protobufMessage); - } - return builder.build(); - } - - public ActivatePipelineResponseProto activatePipeline( - ActivatePipelineRequestProto request) - throws IOException { - impl.activatePipeline(request.getPipelineID()); - return ActivatePipelineResponseProto.newBuilder().build(); - } - - public DeactivatePipelineResponseProto deactivatePipeline( - DeactivatePipelineRequestProto request) - throws IOException { - impl.deactivatePipeline(request.getPipelineID()); - return DeactivatePipelineResponseProto.newBuilder().build(); - } - - public ClosePipelineResponseProto closePipeline( - RpcController controller, ClosePipelineRequestProto request) - throws IOException { - - impl.closePipeline(request.getPipelineID()); - return ClosePipelineResponseProto.newBuilder().build(); - - } - - public HddsProtos.GetScmInfoResponseProto getScmInfo( - HddsProtos.GetScmInfoRequestProto req) - throws IOException { - ScmInfo scmInfo = impl.getScmInfo(); - return HddsProtos.GetScmInfoResponseProto.newBuilder() - .setClusterId(scmInfo.getClusterId()) - .setScmId(scmInfo.getScmId()) - .build(); - - } - - public InSafeModeResponseProto inSafeMode( - InSafeModeRequestProto request) throws IOException { - - return InSafeModeResponseProto.newBuilder() - .setInSafeMode(impl.inSafeMode()).build(); - - } - - public ForceExitSafeModeResponseProto forceExitSafeMode( - ForceExitSafeModeRequestProto request) - throws IOException { - return ForceExitSafeModeResponseProto.newBuilder() - .setExitedSafeMode(impl.forceExitSafeMode()).build(); - - } - - public StartReplicationManagerResponseProto startReplicationManager( - StartReplicationManagerRequestProto request) - throws IOException { - impl.startReplicationManager(); - return StartReplicationManagerResponseProto.newBuilder().build(); - } - - public StopReplicationManagerResponseProto stopReplicationManager( - 
StopReplicationManagerRequestProto request) - throws IOException { - impl.stopReplicationManager(); - return StopReplicationManagerResponseProto.newBuilder().build(); - - } - - public ReplicationManagerStatusResponseProto getReplicationManagerStatus( - ReplicationManagerStatusRequestProto request) - throws IOException { - return ReplicationManagerStatusResponseProto.newBuilder() - .setIsRunning(impl.getReplicationManagerStatus()).build(); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java deleted file mode 100644 index 411f22e6188..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.protocol; -/** - * RPC/protobuf specific translator classes for SCM protocol. - */ \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java deleted file mode 100644 index 49440175932..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.ratis; - -/** - * This package contains classes related to Apache Ratis for SCM. - */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java deleted file mode 100644 index 8eadeb35543..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.safemode; - -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.TypedEvent; - -/** - * Class defining Safe mode exit criteria for Containers. - */ -public class ContainerSafeModeRule extends - SafeModeExitRule{ - - // Required cutoff % for containers with at least 1 reported replica. - private double safeModeCutoff; - // Containers read from scm db (excluding containers in ALLOCATED state). - private Map containerMap; - private double maxContainer; - - private AtomicLong containerWithMinReplicas = new AtomicLong(0); - - public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - Configuration conf, - List containers, SCMSafeModeManager manager) { - super(manager, ruleName, eventQueue); - safeModeCutoff = conf.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); - - Preconditions.checkArgument( - (safeModeCutoff >= 0.0 && safeModeCutoff <= 1.0), - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT + - " value should be >= 0.0 and <= 1.0"); - - containerMap = new ConcurrentHashMap<>(); - containers.forEach(container -> { - // There can be containers in OPEN/CLOSING state which were never - // created by the client. We are not considering these containers for - // now. These containers can be handled by tracking pipelines. - - Optional.ofNullable(container.getState()) - .filter(state -> state != HddsProtos.LifeCycleState.OPEN) - .filter(state -> state != HddsProtos.LifeCycleState.CLOSING) - .ifPresent(s -> containerMap.put(container.getContainerID(), - container)); - }); - maxContainer = containerMap.size(); - long cutOff = (long) Math.ceil(maxContainer * safeModeCutoff); - getSafeModeMetrics().setNumContainerWithOneReplicaReportedThreshold(cutOff); - } - - - @Override - protected TypedEvent getEventType() { - return SCMEvents.NODE_REGISTRATION_CONT_REPORT; - } - - - @Override - protected boolean validate() { - return getCurrentContainerThreshold() >= safeModeCutoff; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - if (maxContainer == 0) { - return 1; - } - return (containerWithMinReplicas.doubleValue() / maxContainer); - } - - @Override - protected void process(NodeRegistrationContainerReport reportsProto) { - - reportsProto.getReport().getReportsList().forEach(c -> { - if (containerMap.containsKey(c.getContainerID())) { - if(containerMap.remove(c.getContainerID()) != null) { - containerWithMinReplicas.getAndAdd(1); - getSafeModeMetrics() - .incCurrentContainersWithOneReplicaReportedCount(); - } - } - }); - - if (scmInSafeMode()) { - SCMSafeModeManager.getLogger().info( - "SCM in safe mode. 
{} % containers have at least one" - + " reported replica.", - (containerWithMinReplicas.doubleValue() / maxContainer) * 100); - } - } - - @Override - protected void cleanup() { - containerMap.clear(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java deleted file mode 100644 index 1029d711f8b..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; - -import java.util.HashSet; -import java.util.UUID; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport; - -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.TypedEvent; - -/** - * Class defining Safe mode exit criteria according to number of DataNodes - * registered with SCM. - */ -public class DataNodeSafeModeRule extends - SafeModeExitRule{ - - // Min DataNodes required to exit safe mode. - private int requiredDns; - private int registeredDns = 0; - // Set to track registered DataNodes. - private HashSet registeredDnSet; - - public DataNodeSafeModeRule(String ruleName, EventQueue eventQueue, - Configuration conf, - SCMSafeModeManager manager) { - super(manager, ruleName, eventQueue); - requiredDns = conf.getInt( - HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, - HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT); - registeredDnSet = new HashSet<>(requiredDns * 2); - } - - @Override - protected TypedEvent getEventType() { - return SCMEvents.NODE_REGISTRATION_CONT_REPORT; - } - - @Override - protected boolean validate() { - return registeredDns >= requiredDns; - } - - @Override - protected void process(NodeRegistrationContainerReport reportsProto) { - - registeredDnSet.add(reportsProto.getDatanodeDetails().getUuid()); - registeredDns = registeredDnSet.size(); - - if (scmInSafeMode()) { - SCMSafeModeManager.getLogger().info( - "SCM in safe mode. 
{} DataNodes registered, {} required.", - registeredDns, requiredDns); - } - - } - - @Override - protected void cleanup() { - registeredDnSet.clear(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java deleted file mode 100644 index 7a00d760fa4..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java +++ /dev/null @@ -1,170 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; - - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.TypedEvent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashSet; -import java.util.Set; - -/** - * Class defining Safe mode exit criteria for Pipelines. - * - * This rule defines percentage of healthy pipelines need to be reported. - * Once safe mode exit happens, this rules take care of writes can go - * through in a cluster. 
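The exit threshold this rule enforces is simple enough to show standalone. The sketch below mirrors the Math.ceil(healthyPipelinesPercent * pipelineCount) rounding and the exit check performed in the class that follows; the pipeline count and the 0.10 percentage are assumed example values, and the snippet is illustrative only, not part of the deleted sources:

public final class HealthyPipelineThresholdDemo {
  public static void main(String[] args) {
    // Assumed values: 10 RATIS/THREE pipelines and a 0.10 threshold percentage.
    double healthyPipelinesPercent = 0.10;
    int pipelineCount = 10;

    // Same rounding the rule applies when it is constructed.
    int healthyPipelineThresholdCount =
        (int) Math.ceil(healthyPipelinesPercent * pipelineCount);

    // validate() passes once the reported healthy pipeline count reaches it.
    int currentHealthyPipelineCount = 1;
    boolean exitAllowed =
        currentHealthyPipelineCount >= healthyPipelineThresholdCount;

    System.out.println("threshold=" + healthyPipelineThresholdCount
        + ", exitAllowed=" + exitAllowed);
  }
}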
- */ -public class HealthyPipelineSafeModeRule - extends SafeModeExitRule{ - - public static final Logger LOG = - LoggerFactory.getLogger(HealthyPipelineSafeModeRule.class); - private final PipelineManager pipelineManager; - private final int healthyPipelineThresholdCount; - private int currentHealthyPipelineCount = 0; - private final Set processedDatanodeDetails = - new HashSet<>(); - - HealthyPipelineSafeModeRule(String ruleName, EventQueue eventQueue, - PipelineManager pipelineManager, - SCMSafeModeManager manager, Configuration configuration) { - super(manager, ruleName, eventQueue); - this.pipelineManager = pipelineManager; - double healthyPipelinesPercent = - configuration.getDouble(HddsConfigKeys. - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT, - HddsConfigKeys. - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT_DEFAULT); - - Preconditions.checkArgument( - (healthyPipelinesPercent >= 0.0 && healthyPipelinesPercent <= 1.0), - HddsConfigKeys. - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT - + " value should be >= 0.0 and <= 1.0"); - - // As we want to wait for 3 node pipelines - int pipelineCount = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE).size(); - - // This value will be zero when pipeline count is 0. - // On a fresh installed cluster, there will be zero pipelines in the SCM - // pipeline DB. - healthyPipelineThresholdCount = - (int) Math.ceil(healthyPipelinesPercent * pipelineCount); - - LOG.info(" Total pipeline count is {}, healthy pipeline " + - "threshold count is {}", pipelineCount, healthyPipelineThresholdCount); - - getSafeModeMetrics().setNumHealthyPipelinesThreshold( - healthyPipelineThresholdCount); - } - - @Override - protected TypedEvent getEventType() { - return SCMEvents.PROCESSED_PIPELINE_REPORT; - } - - @Override - protected boolean validate() { - if (currentHealthyPipelineCount >= healthyPipelineThresholdCount) { - return true; - } - return false; - } - - @Override - protected void process(PipelineReportFromDatanode - pipelineReportFromDatanode) { - - // When SCM is in safe mode for long time, already registered - // datanode can send pipeline report again, then pipeline handler fires - // processed report event, we should not consider this pipeline report - // from datanode again during threshold calculation. - Preconditions.checkNotNull(pipelineReportFromDatanode); - DatanodeDetails dnDetails = pipelineReportFromDatanode.getDatanodeDetails(); - if (!processedDatanodeDetails.contains( - pipelineReportFromDatanode.getDatanodeDetails())) { - - Pipeline pipeline; - PipelineReportsProto pipelineReport = - pipelineReportFromDatanode.getReport(); - - for (PipelineReport report : pipelineReport.getPipelineReportList()) { - PipelineID pipelineID = PipelineID - .getFromProtobuf(report.getPipelineID()); - try { - pipeline = pipelineManager.getPipeline(pipelineID); - } catch (PipelineNotFoundException e) { - continue; - } - - if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE && - pipeline.getPipelineState() == Pipeline.PipelineState.OPEN) { - // If the pipeline is open state mean, all 3 datanodes are reported - // for this pipeline. - currentHealthyPipelineCount++; - getSafeModeMetrics().incCurrentHealthyPipelinesCount(); - } - } - if (scmInSafeMode()) { - SCMSafeModeManager.getLogger().info( - "SCM in safe mode. 
Healthy pipelines reported count is {}, " + - "required healthy pipeline reported count is {}", - currentHealthyPipelineCount, healthyPipelineThresholdCount); - } - - processedDatanodeDetails.add(dnDetails); - } - - } - - @Override - protected void cleanup() { - processedDatanodeDetails.clear(); - } - - @VisibleForTesting - public int getCurrentHealthyPipelineCount() { - return currentHealthyPipelineCount; - } - - @VisibleForTesting - public int getHealthyPipelineThresholdCount() { - return healthyPipelineThresholdCount; - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java deleted file mode 100644 index 841d8ff6654..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher. - PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.TypedEvent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.HashSet; -import java.util.Set; - -/** - * This rule covers whether we have at least one datanode is reported for each - * pipeline. This rule is for all open containers, we have at least one - * replica available for read when we exit safe mode. - */ -public class OneReplicaPipelineSafeModeRule extends - SafeModeExitRule { - - private static final Logger LOG = - LoggerFactory.getLogger(OneReplicaPipelineSafeModeRule.class); - - private int thresholdCount; - private Set reportedPipelineIDSet = new HashSet<>(); - private final PipelineManager pipelineManager; - private int currentReportedPipelineCount = 0; - - - public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue, - PipelineManager pipelineManager, - SCMSafeModeManager safeModeManager, Configuration configuration) { - super(safeModeManager, ruleName, eventQueue); - this.pipelineManager = pipelineManager; - - double percent = - configuration.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT, - HddsConfigKeys. - HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT_DEFAULT); - - Preconditions.checkArgument((percent >= 0.0 && percent <= 1.0), - HddsConfigKeys. 
- HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT + - " value should be >= 0.0 and <= 1.0"); - - int totalPipelineCount = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE).size(); - - thresholdCount = (int) Math.ceil(percent * totalPipelineCount); - - LOG.info(" Total pipeline count is {}, pipeline's with atleast one " + - "datanode reported threshold count is {}", totalPipelineCount, - thresholdCount); - - getSafeModeMetrics().setNumPipelinesWithAtleastOneReplicaReportedThreshold( - thresholdCount); - - } - - @Override - protected TypedEvent getEventType() { - return SCMEvents.PROCESSED_PIPELINE_REPORT; - } - - @Override - protected boolean validate() { - if (currentReportedPipelineCount >= thresholdCount) { - return true; - } - return false; - } - - @Override - protected void process(PipelineReportFromDatanode - pipelineReportFromDatanode) { - Pipeline pipeline; - Preconditions.checkNotNull(pipelineReportFromDatanode); - PipelineReportsProto pipelineReport = - pipelineReportFromDatanode.getReport(); - - for (PipelineReport report : pipelineReport.getPipelineReportList()) { - PipelineID pipelineID = PipelineID - .getFromProtobuf(report.getPipelineID()); - try { - pipeline = pipelineManager.getPipeline(pipelineID); - } catch (PipelineNotFoundException e) { - continue; - } - - if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE && - !reportedPipelineIDSet.contains(pipelineID)) { - reportedPipelineIDSet.add(pipelineID); - getSafeModeMetrics() - .incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount(); - } - } - - currentReportedPipelineCount = reportedPipelineIDSet.size(); - - if (scmInSafeMode()) { - SCMSafeModeManager.getLogger().info( - "SCM in safe mode. Pipelines with atleast one datanode reported " + - "count is {}, required atleast one datanode reported per " + - "pipeline count is {}", - currentReportedPipelineCount, thresholdCount); - } - - } - - @Override - protected void cleanup() { - reportedPipelineIDSet.clear(); - } - - @VisibleForTesting - public int getThresholdCount() { - return thresholdCount; - } - - @VisibleForTesting - public int getCurrentReportedPipelineCount() { - return currentReportedPipelineCount; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java deleted file mode 100644 index 12c6c317542..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/Precheck.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -/** - * Precheck for SCM operations. - * */ -public interface Precheck { - boolean check(T t) throws SCMException; - String type(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java deleted file mode 100644 index a22d1623fdc..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; - -import com.google.common.annotations.VisibleForTesting; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * StorageContainerManager enters safe mode on startup to allow system to - * reach a stable state before becoming fully functional. SCM will wait - * for certain resources to be reported before coming out of safe mode. - * - * SafeModeExitRule defines format to define new rules which must be satisfied - * to exit Safe mode. - * - * Current SafeMode rules: - * 1. ContainerSafeModeRule: - * On every new datanode registration, it fires - * {@link SCMEvents#NODE_REGISTRATION_CONT_REPORT}. This rule handles this - * event. This rule process this report, increment the - * containerWithMinReplicas count when this reported replica is in the - * containerMap. Then validates if cutoff threshold for containers is meet. - * - * 2. DatanodeSafeModeRule: - * On every new datanode registration, it fires - * {@link SCMEvents#NODE_REGISTRATION_CONT_REPORT}. This rule handles this - * event. This rule process this report, and check if this is new node, add - * to its reported node list. Then validate it cutoff threshold for minimum - * number of datanode registered is met or not. - * - * 3. HealthyPipelineSafeModeRule: - * Once the pipelineReportHandler processes the - * {@link SCMEvents#PIPELINE_REPORT}, it fires - * {@link SCMEvents#PROCESSED_PIPELINE_REPORT}. This rule handles this - * event. 
This rule processes this report, and check if pipeline is healthy - * and increments current healthy pipeline count. Then validate it cutoff - * threshold for healthy pipeline is met or not. - * - * 4. OneReplicaPipelineSafeModeRule: - * Once the pipelineReportHandler processes the - * {@link SCMEvents#PIPELINE_REPORT}, it fires - * {@link SCMEvents#PROCESSED_PIPELINE_REPORT}. This rule handles this - * event. This rule processes this report, and add the reported pipeline to - * reported pipeline set. Then validate it cutoff threshold for one replica - * per pipeline is met or not. - * - */ -public class SCMSafeModeManager { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMSafeModeManager.class); - private final boolean isSafeModeEnabled; - private AtomicBoolean inSafeMode = new AtomicBoolean(true); - - private Map exitRules = new HashMap(1); - private Configuration config; - private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; - private static final String DN_EXIT_RULE = "DataNodeSafeModeRule"; - private static final String HEALTHY_PIPELINE_EXIT_RULE = - "HealthyPipelineSafeModeRule"; - private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE = - "AtleastOneDatanodeReportedRule"; - - private Set validatedRules = new HashSet<>(); - - private final EventQueue eventPublisher; - private final PipelineManager pipelineManager; - - private final SafeModeMetrics safeModeMetrics; - - public SCMSafeModeManager(Configuration conf, - List allContainers, PipelineManager pipelineManager, - EventQueue eventQueue) { - this.config = conf; - this.pipelineManager = pipelineManager; - this.eventPublisher = eventQueue; - this.isSafeModeEnabled = conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT); - - - if (isSafeModeEnabled) { - this.safeModeMetrics = SafeModeMetrics.create(); - ContainerSafeModeRule containerSafeModeRule = - new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, - allContainers, this); - DataNodeSafeModeRule dataNodeSafeModeRule = - new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, this); - exitRules.put(CONT_EXIT_RULE, containerSafeModeRule); - exitRules.put(DN_EXIT_RULE, dataNodeSafeModeRule); - if (conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) - && pipelineManager != null) { - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, - eventQueue, pipelineManager, - this, config); - OneReplicaPipelineSafeModeRule oneReplicaPipelineSafeModeRule = - new OneReplicaPipelineSafeModeRule( - ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, - pipelineManager, this, conf); - exitRules.put(HEALTHY_PIPELINE_EXIT_RULE, healthyPipelineSafeModeRule); - exitRules.put(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, - oneReplicaPipelineSafeModeRule); - } - emitSafeModeStatus(); - } else { - this.safeModeMetrics = null; - exitSafeMode(eventQueue); - } - } - - public void stop() { - if (isSafeModeEnabled) { - this.safeModeMetrics.unRegister(); - } - } - - public SafeModeMetrics getSafeModeMetrics() { - return safeModeMetrics; - } - - /** - * Emit Safe mode status. 
- */ - @VisibleForTesting - public void emitSafeModeStatus() { - eventPublisher.fireEvent(SCMEvents.SAFE_MODE_STATUS, - new SafeModeStatus(getInSafeMode())); - } - - - public synchronized void validateSafeModeExitRules(String ruleName, - EventPublisher eventQueue) { - - if (exitRules.get(ruleName) != null) { - validatedRules.add(ruleName); - } else { - // This should never happen - LOG.error("No Such Exit rule {}", ruleName); - } - - - if (validatedRules.size() == exitRules.size()) { - // All rules are satisfied, we can exit safe mode. - LOG.info("ScmSafeModeManager, all rules are successfully validated"); - exitSafeMode(eventQueue); - } - - } - - /** - * Exit safe mode. It does following actions: - * 1. Set safe mode status to false. - * 2. Emits START_REPLICATION for ReplicationManager. - * 3. Cleanup resources. - * 4. Emit safe mode status. - * @param eventQueue - */ - @VisibleForTesting - public void exitSafeMode(EventPublisher eventQueue) { - LOG.info("SCM exiting safe mode."); - setInSafeMode(false); - - // TODO: Remove handler registration as there is no need to listen to - // register events anymore. - - emitSafeModeStatus(); - // TODO: #CLUTIL if we reenter safe mode the fixed interval pipeline - // creation job needs to stop - pipelineManager.startPipelineCreator(); - } - - public boolean getInSafeMode() { - if (!isSafeModeEnabled) { - return false; - } - return inSafeMode.get(); - } - - /** - * Set safe mode status. - */ - public void setInSafeMode(boolean inSafeMode) { - this.inSafeMode.set(inSafeMode); - } - - public static Logger getLogger() { - return LOG; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - return ((ContainerSafeModeRule) exitRules.get(CONT_EXIT_RULE)) - .getCurrentContainerThreshold(); - } - - @VisibleForTesting - public HealthyPipelineSafeModeRule getHealthyPipelineSafeModeRule() { - return (HealthyPipelineSafeModeRule) - exitRules.get(HEALTHY_PIPELINE_EXIT_RULE); - } - - @VisibleForTesting - public OneReplicaPipelineSafeModeRule getOneReplicaPipelineSafeModeRule() { - return (OneReplicaPipelineSafeModeRule) - exitRules.get(ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE); - } - - - /** - * Class used during SafeMode status event. - */ - public static class SafeModeStatus { - - private boolean safeModeStatus; - public SafeModeStatus(boolean safeModeState) { - this.safeModeStatus = safeModeState; - } - - public boolean getSafeModeStatus() { - return safeModeStatus; - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java deleted file mode 100644 index 05e84dbbb3d..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.server.events.TypedEvent; - -/** - * Abstract class for SafeModeExitRules. When a new rule is added, the new - * rule should extend this abstract class. - * - * Each rule Should do: - * 1. Should add a handler for the event it is looking for during the - * initialization of the rule. - * 2. Add the rule in ScmSafeModeManager to list of the rules. - * - * - * @param - */ -public abstract class SafeModeExitRule implements EventHandler { - - private final SCMSafeModeManager safeModeManager; - private final String ruleName; - - public SafeModeExitRule(SCMSafeModeManager safeModeManager, - String ruleName, EventQueue eventQueue) { - this.safeModeManager = safeModeManager; - this.ruleName = ruleName; - eventQueue.addHandler(getEventType(), this); - } - - /** - * Return's the name of this SafeModeExit Rule. - * @return ruleName - */ - public String getRuleName() { - return ruleName; - } - - /** - * Return's the event type this safeMode exit rule handles. - * @return TypedEvent - */ - protected abstract TypedEvent getEventType(); - - /** - * Validate's this rule. If this rule condition is met, returns true, else - * returns false. - * @return boolean - */ - protected abstract boolean validate(); - - /** - * Actual processing logic for this rule. - * @param report - */ - protected abstract void process(T report); - - /** - * Cleanup action's need to be done, once this rule is satisfied. - */ - protected abstract void cleanup(); - - @Override - public final void onMessage(T report, EventPublisher publisher) { - - // TODO: when we have remove handlers, we can remove getInSafemode check - - if (scmInSafeMode()) { - if (validate()) { - safeModeManager.validateSafeModeExitRules(ruleName, publisher); - cleanup(); - return; - } - - process(report); - - if (validate()) { - safeModeManager.validateSafeModeExitRules(ruleName, publisher); - cleanup(); - } - } - } - - /** - * Return true if SCM is in safe mode, else false. - * @return boolean - */ - protected boolean scmInSafeMode() { - return safeModeManager.getInSafeMode(); - } - - protected SafeModeMetrics getSafeModeMetrics() { - return safeModeManager.getSafeModeMetrics(); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java deleted file mode 100644 index b9e53330691..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Class to handle the activities needed to be performed after exiting safe - * mode. - */ -public class SafeModeHandler implements EventHandler { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeHandler.class); - - private final SCMClientProtocolServer scmClientProtocolServer; - private final BlockManager scmBlockManager; - private final long waitTime; - private final AtomicBoolean isInSafeMode = new AtomicBoolean(true); - private final ReplicationManager replicationManager; - - private final PipelineManager scmPipelineManager; - - /** - * SafeModeHandler, to handle the logic once we exit safe mode. - * @param configuration - * @param clientProtocolServer - * @param blockManager - * @param replicationManager - */ - public SafeModeHandler(Configuration configuration, - SCMClientProtocolServer clientProtocolServer, - BlockManager blockManager, - ReplicationManager replicationManager, PipelineManager pipelineManager) { - Objects.requireNonNull(configuration, "Configuration cannot be null"); - Objects.requireNonNull(clientProtocolServer, "SCMClientProtocolServer " + - "object cannot be null"); - Objects.requireNonNull(blockManager, "BlockManager object cannot be null"); - Objects.requireNonNull(replicationManager, "ReplicationManager " + - "object cannot be null"); - Objects.requireNonNull(pipelineManager, "PipelineManager object cannot " + - "be" + "null"); - this.waitTime = configuration.getTimeDuration( - HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, - HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT, - TimeUnit.MILLISECONDS); - this.scmClientProtocolServer = clientProtocolServer; - this.scmBlockManager = blockManager; - this.replicationManager = replicationManager; - this.scmPipelineManager = pipelineManager; - - final boolean safeModeEnabled = configuration.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT); - isInSafeMode.set(safeModeEnabled); - - } - - - - /** - * Set SafeMode status based on - * {@link org.apache.hadoop.hdds.scm.events.SCMEvents#SAFE_MODE_STATUS}. - * - * Inform BlockManager, ScmClientProtocolServer and replicationAcitivity - * status about safeMode status. 
- * - * @param safeModeStatus - * @param publisher - */ - @Override - public void onMessage(SafeModeStatus safeModeStatus, - EventPublisher publisher) { - - isInSafeMode.set(safeModeStatus.getSafeModeStatus()); - scmClientProtocolServer.setSafeModeStatus(isInSafeMode.get()); - scmBlockManager.setSafeModeStatus(isInSafeMode.get()); - - if (!isInSafeMode.get()) { - final Thread safeModeExitThread = new Thread(() -> { - try { - Thread.sleep(waitTime); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - replicationManager.start(); - cleanupPipelines(); - }); - - safeModeExitThread.setDaemon(true); - safeModeExitThread.start(); - } - - } - - private void cleanupPipelines() { - List pipelineList = scmPipelineManager.getPipelines(); - pipelineList.forEach((pipeline) -> { - try { - if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { - scmPipelineManager.finalizeAndDestroyPipeline(pipeline, false); - } - } catch (IOException ex) { - LOG.error("Finalize and destroy pipeline failed for pipeline " - + pipeline.toString(), ex); - } - }); - } - - public boolean getSafeModeStatus() { - return isInSafeMode.get(); - } - - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java deleted file mode 100644 index 80b8257c40b..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * This class is used for maintaining SafeMode metric information, which can - * be used for monitoring during SCM startup when SCM is still in SafeMode. - */ -public class SafeModeMetrics { - private static final String SOURCE_NAME = - SafeModeMetrics.class.getSimpleName(); - - - // These all values will be set to some values when safemode is enabled. - private @Metric MutableCounterLong - numContainerWithOneReplicaReportedThreshold; - private @Metric MutableCounterLong - currentContainersWithOneReplicaReportedCount; - - // When hdds.scm.safemode.pipeline-availability.check is set then only - // below metrics will have some values, otherwise they will be zero. 
- private @Metric MutableCounterLong numHealthyPipelinesThreshold; - private @Metric MutableCounterLong currentHealthyPipelinesCount; - private @Metric MutableCounterLong - numPipelinesWithAtleastOneReplicaReportedThreshold; - private @Metric MutableCounterLong - currentPipelinesWithAtleastOneReplicaReportedCount; - - public static SafeModeMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, - "SCM Safemode Metrics", - new SafeModeMetrics()); - } - - public void setNumHealthyPipelinesThreshold(long val) { - this.numHealthyPipelinesThreshold.incr(val); - } - - public void incCurrentHealthyPipelinesCount() { - this.currentHealthyPipelinesCount.incr(); - } - - public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) { - this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val); - } - - public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { - this.currentPipelinesWithAtleastOneReplicaReportedCount.incr(); - } - - public void setNumContainerWithOneReplicaReportedThreshold(long val) { - this.numContainerWithOneReplicaReportedThreshold.incr(val); - } - - public void incCurrentContainersWithOneReplicaReportedCount() { - this.currentContainersWithOneReplicaReportedCount.incr(); - } - - public MutableCounterLong getNumHealthyPipelinesThreshold() { - return numHealthyPipelinesThreshold; - } - - public MutableCounterLong getCurrentHealthyPipelinesCount() { - return currentHealthyPipelinesCount; - } - - public MutableCounterLong - getNumPipelinesWithAtleastOneReplicaReportedThreshold() { - return numPipelinesWithAtleastOneReplicaReportedThreshold; - } - - public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { - return currentPipelinesWithAtleastOneReplicaReportedCount; - } - - public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { - return numContainerWithOneReplicaReportedThreshold; - } - - public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { - return currentContainersWithOneReplicaReportedCount; - } - - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java deleted file mode 100644 index b63d04e9bcd..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.safemode; - -import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; - -/** - * Safe mode pre-check for SCM operations. - * */ -public class SafeModePrecheck implements Precheck { - - private AtomicBoolean inSafeMode; - public static final String PRECHECK_TYPE = "SafeModePrecheck"; - - public SafeModePrecheck(Configuration conf) { - boolean safeModeEnabled = conf.getBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, - HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT); - if (safeModeEnabled) { - inSafeMode = new AtomicBoolean(true); - } else { - inSafeMode = new AtomicBoolean(false); - } - } - - @Override - public boolean check(ScmOps op) throws SCMException { - if (inSafeMode.get() && SafeModeRestrictedOps - .isRestrictedInSafeMode(op)) { - throw new SCMException("SafeModePrecheck failed for " + op, - ResultCodes.SAFE_MODE_EXCEPTION); - } - return inSafeMode.get(); - } - - @Override - public String type() { - return PRECHECK_TYPE; - } - - public boolean isInSafeMode() { - return inSafeMode.get(); - } - - public void setInSafeMode(boolean inSafeMode) { - this.inSafeMode.set(inSafeMode); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java deleted file mode 100644 index 5f516e42c72..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRestrictedOps.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; - -import java.util.EnumSet; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; - -/** - * Operations restricted in SCM safe mode. 
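A minimal caller-side sketch of the precheck path that SafeModePrecheck (above) and SafeModeRestrictedOps (below) implement; the demo class and the default OzoneConfiguration are assumptions for illustration, and the snippet compiles only against the tree as it stood before this deletion:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;

public final class SafeModePrecheckDemo {
  public static void main(String[] args) {
    // With safe mode enabled (the default), the precheck starts out in safe mode.
    SafeModePrecheck precheck = new SafeModePrecheck(new OzoneConfiguration());
    try {
      // allocateBlock is in the restricted set, so this throws while in safe mode.
      precheck.check(ScmOps.allocateBlock);
      System.out.println("allocateBlock allowed");
    } catch (SCMException e) {
      System.out.println("allocateBlock rejected: " + e.getMessage());
    }
  }
}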
- */ -public final class SafeModeRestrictedOps { - private static EnumSet restrictedOps = EnumSet.noneOf(ScmOps.class); - - private SafeModeRestrictedOps() { - } - - static { - restrictedOps.add(ScmOps.allocateBlock); - restrictedOps.add(ScmOps.allocateContainer); - } - - public static boolean isRestrictedInSafeMode(ScmOps opName) { - return restrictedOps.contains(opName); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java deleted file mode 100644 index b5fd8262f81..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java deleted file mode 100644 index 9c69758d5a0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ /dev/null @@ -1,365 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.Node; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.audit.SCMAction; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; - -import com.google.common.collect.Maps; -import com.google.protobuf.BlockingService; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * SCM block protocol is the protocol used by Namenode and OzoneManager to get - * blocks from the SCM. 
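For context, a hypothetical client-side sketch of the allocateBlock call this server exposes through ScmBlockLocationProtocol; the RPC proxy, block size, and owner string are assumed example values, and the snippet is illustrative rather than part of the patch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

public final class AllocateBlockDemo {
  // scmBlockClient is assumed to be an RPC proxy obtained elsewhere.
  static List<AllocatedBlock> allocateOne(ScmBlockLocationProtocol scmBlockClient)
      throws IOException {
    return scmBlockClient.allocateBlock(
        256L * 1024 * 1024,        // requested block size in bytes (example)
        1,                         // number of blocks to allocate
        ReplicationType.RATIS,
        ReplicationFactor.THREE,
        "demo-owner",              // example owner identifier
        new ExcludeList());        // no datanodes or containers excluded
  }
}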
- */ -public class SCMBlockProtocolServer implements - ScmBlockLocationProtocol, Auditor { - private static final Logger LOG = - LoggerFactory.getLogger(SCMBlockProtocolServer.class); - - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.SCMLOGGER); - - private final StorageContainerManager scm; - private final OzoneConfiguration conf; - private final RPC.Server blockRpcServer; - private final InetSocketAddress blockRpcAddress; - private final ProtocolMessageMetrics - protocolMessageMetrics; - - /** - * The RPC server that listens to requests from block service clients. - */ - public SCMBlockProtocolServer(OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { - this.scm = scm; - this.conf = conf; - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - - RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class, - ProtobufRpcEngine.class); - - protocolMessageMetrics = - ProtocolMessageMetrics.create("ScmBlockLocationProtocol", - "SCM Block location protocol counters", - ScmBlockLocationProtocolProtos.Type.values()); - - // SCM Block Service RPC. - BlockingService blockProtoPbService = - ScmBlockLocationProtocolProtos.ScmBlockLocationProtocolService - .newReflectiveBlockingService( - new ScmBlockLocationProtocolServerSideTranslatorPB(this, - protocolMessageMetrics)); - - final InetSocketAddress scmBlockAddress = HddsServerUtil - .getScmBlockClientBindAddress(conf); - blockRpcServer = - startRpcServer( - conf, - scmBlockAddress, - ScmBlockLocationProtocolPB.class, - blockProtoPbService, - handlerCount); - blockRpcAddress = - updateRPCListenAddress( - conf, OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmBlockAddress, - blockRpcServer); - if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - false)) { - blockRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance()); - } - } - - public RPC.Server getBlockRpcServer() { - return blockRpcServer; - } - - public InetSocketAddress getBlockRpcAddress() { - return blockRpcAddress; - } - - public void start() { - protocolMessageMetrics.register(); - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for Block Protocol", getBlockRpcAddress())); - getBlockRpcServer().start(); - } - - public void stop() { - try { - protocolMessageMetrics.unregister(); - LOG.info("Stopping the RPC server for Block Protocol"); - getBlockRpcServer().stop(); - } catch (Exception ex) { - LOG.error("Block Protocol RPC stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - } - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for Block Protocol"); - getBlockRpcServer().join(); - } - - @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { - Map auditMap = Maps.newHashMap(); - auditMap.put("size", String.valueOf(size)); - auditMap.put("type", type.name()); - auditMap.put("factor", factor.name()); - auditMap.put("owner", owner); - List blocks = new ArrayList<>(num); - boolean auditSuccess = true; - try { - for (int i = 0; i < num; i++) { - AllocatedBlock block = scm.getScmBlockManager() - .allocateBlock(size, type, factor, owner, excludeList); - if (block != null) { - blocks.add(block); - } - } - return blocks; - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logWriteFailure( - 
buildAuditMessageForFailure(SCMAction.ALLOCATE_BLOCK, auditMap, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.ALLOCATE_BLOCK, auditMap) - ); - } - } - } - - /** - * Delete blocks for a set of object keys. - * - * @param keyBlocksInfoList list of block keys with object keys to delete. - * @return deletion results. - */ - @Override - public List deleteKeyBlocks( - List keyBlocksInfoList) throws IOException { - LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList - .size()); - List results = new ArrayList<>(); - Map auditMap = Maps.newHashMap(); - for (BlockGroup keyBlocks : keyBlocksInfoList) { - ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result resultCode; - try { - // We delete blocks in an atomic operation to prevent getting - // into state like only a partial of blocks are deleted, - // which will leave key in an inconsistent state. - auditMap.put("keyBlockToDelete", keyBlocks.toString()); - scm.getScmBlockManager().deleteBlocks(keyBlocks.getBlockIDList()); - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.success; - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.DELETE_KEY_BLOCK, auditMap) - ); - } catch (SCMException scmEx) { - LOG.warn("Fail to delete block: {}", keyBlocks.getGroupID(), scmEx); - AUDIT.logWriteFailure( - buildAuditMessageForFailure(SCMAction.DELETE_KEY_BLOCK, auditMap, - scmEx) - ); - switch (scmEx.getResult()) { - case SAFE_MODE_EXCEPTION: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.safeMode; - break; - case FAILED_TO_FIND_BLOCK: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.errorNotFound; - break; - default: - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.unknownFailure; - } - } catch (IOException ex) { - LOG.warn("Fail to delete blocks for object key: {}", keyBlocks - .getGroupID(), ex); - AUDIT.logWriteFailure( - buildAuditMessageForFailure(SCMAction.DELETE_KEY_BLOCK, auditMap, - ex) - ); - resultCode = ScmBlockLocationProtocolProtos.DeleteScmBlockResult - .Result.unknownFailure; - } - List blockResultList = new ArrayList<>(); - for (BlockID blockKey : keyBlocks.getBlockIDList()) { - blockResultList.add(new DeleteBlockResult(blockKey, resultCode)); - } - results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(), - blockResultList)); - } - return results; - } - - @Override - public ScmInfo getScmInfo() throws IOException { - boolean auditSuccess = true; - try{ - ScmInfo.Builder builder = - new ScmInfo.Builder() - .setClusterId(scm.getScmStorageConfig().getClusterID()) - .setScmId(scm.getScmStorageConfig().getScmId()); - return builder.build(); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.GET_SCM_INFO, null, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null) - ); - } - } - } - - @Override - public List sortDatanodes(List nodes, - String clientMachine) throws IOException { - boolean auditSuccess = true; - try{ - NodeManager nodeManager = scm.getScmNodeManager(); - Node client = null; - List possibleClients = - nodeManager.getNodesByAddress(clientMachine); - if (possibleClients.size()>0){ - client = possibleClients.get(0); - } - List nodeList = new ArrayList(); - nodes.stream().forEach(uuid -> { - DatanodeDetails node = nodeManager.getNodeByUuid(uuid); - if (node != null) { 
- nodeList.add(node); - } - }); - List sortedNodeList = scm.getClusterMap() - .sortByDistanceCost(client, nodeList, nodes.size()); - List ret = new ArrayList<>(); - sortedNodeList.stream().forEach(node -> ret.add((DatanodeDetails)node)); - return ret; - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.SORT_DATANODE, null, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.SORT_DATANODE, null) - ); - } - } - } - - @Override - public AuditMessage buildAuditMessageForSuccess( - AuditAction op, Map auditMap) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - @Override - public void close() throws IOException { - stop(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java deleted file mode 100644 index b23d938e1de..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMCertStore.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.server; - -import java.io.IOException; -import java.math.BigInteger; -import java.security.cert.X509Certificate; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A Certificate Store class that persists certificates issued by SCM CA. 
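A short, hypothetical sketch of the certificate lifecycle this store persists, written against the CertificateStore interface it implements; the store and certificate instances are assumed to be supplied by the caller, and the nested CertType enum is used the way the class below references it. Illustrative only, not part of the deleted sources:

import java.io.IOException;
import java.math.BigInteger;
import java.security.cert.X509Certificate;
import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;

public final class CertStoreDemo {
  static void lifecycle(CertificateStore store, X509Certificate cert)
      throws IOException {
    BigInteger serial = cert.getSerialNumber();

    // A freshly issued certificate is persisted in the valid-certs table.
    store.storeValidCertificate(serial, cert);

    // Revoking moves it to the revoked-certs table in one batch operation.
    store.revokeCertificate(serial);

    // Lookups specify which table to consult.
    X509Certificate revoked = store.getCertificateByID(
        serial, CertificateStore.CertType.REVOKED_CERTS);
    System.out.println("revoked entry found: " + (revoked != null));
  }
}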
- */ -public class SCMCertStore implements CertificateStore { - private static final Logger LOG = - LoggerFactory.getLogger(SCMCertStore.class); - private final SCMMetadataStore scmMetadataStore; - private final Lock lock; - - public SCMCertStore(SCMMetadataStore dbStore) { - this.scmMetadataStore = dbStore; - lock = new ReentrantLock(); - - } - - @Override - public void storeValidCertificate(BigInteger serialID, - X509Certificate certificate) - throws IOException { - lock.lock(); - try { - // This makes sure that no certificate IDs are reusable. - if ((getCertificateByID(serialID, CertType.VALID_CERTS) == null) && - (getCertificateByID(serialID, CertType.REVOKED_CERTS) == null)) { - scmMetadataStore.getValidCertsTable().put(serialID, certificate); - } else { - throw new SCMSecurityException("Conflicting certificate ID"); - } - } finally { - lock.unlock(); - } - } - - @Override - public void revokeCertificate(BigInteger serialID) throws IOException { - lock.lock(); - try { - X509Certificate cert = getCertificateByID(serialID, CertType.VALID_CERTS); - if (cert == null) { - LOG.error("trying to revoke a certificate that is not valid. Serial: " + - "{}", serialID.toString()); - throw new SCMSecurityException("Trying to revoke an invalid " + - "certificate."); - } - // TODO : Check if we are trying to revoke an expired certificate. - - if (getCertificateByID(serialID, CertType.REVOKED_CERTS) != null) { - LOG.error("Trying to revoke a certificate that is already revoked."); - throw new SCMSecurityException("Trying to revoke an already revoked " + - "certificate."); - } - - // let is do this in a transaction. - try (BatchOperation batch = - scmMetadataStore.getStore().initBatchOperation();) { - scmMetadataStore.getRevokedCertsTable() - .putWithBatch(batch, serialID, cert); - scmMetadataStore.getValidCertsTable().deleteWithBatch(batch, serialID); - scmMetadataStore.getStore().commitBatchOperation(batch); - } - } finally { - lock.unlock(); - } - } - - @Override - public void removeExpiredCertificate(BigInteger serialID) - throws IOException { - // TODO: Later this allows removal of expired certificates from the system. - } - - @Override - public X509Certificate getCertificateByID(BigInteger serialID, - CertType certType) - throws IOException { - if (certType == CertType.VALID_CERTS) { - return scmMetadataStore.getValidCertsTable().get(serialID); - } else { - return scmMetadataStore.getRevokedCertsTable().get(serialID); - } - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java deleted file mode 100644 index 9c27f6a64d6..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ /dev/null @@ -1,610 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.protobuf.BlockingService; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.audit.SCMAction; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.protocol.proto - .StorageContainerLocationProtocolProtos - .StorageContainerLocationProtocolService.newReflectiveBlockingService; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CLIENT_ADDRESS_KEY; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HANDLER_COUNT_KEY; -import static 
org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager - .startRpcServer; - -/** - * The RPC server that listens to requests from clients. - */ -public class SCMClientProtocolServer implements - StorageContainerLocationProtocol, Auditor { - private static final Logger LOG = - LoggerFactory.getLogger(SCMClientProtocolServer.class); - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.SCMLOGGER); - private final RPC.Server clientRpcServer; - private final InetSocketAddress clientRpcAddress; - private final StorageContainerManager scm; - private final OzoneConfiguration conf; - private SafeModePrecheck safeModePrecheck; - private final ProtocolMessageMetrics protocolMetrics; - - public SCMClientProtocolServer(OzoneConfiguration conf, - StorageContainerManager scm) throws IOException { - this.scm = scm; - this.conf = conf; - safeModePrecheck = new SafeModePrecheck(conf); - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - - protocolMetrics = ProtocolMessageMetrics - .create("ScmContainerLocationProtocol", - "SCM ContainerLocation protocol metrics", - StorageContainerLocationProtocolProtos.Type.values()); - - // SCM Container Service RPC - BlockingService storageProtoPbService = - newReflectiveBlockingService( - new StorageContainerLocationProtocolServerSideTranslatorPB(this, - protocolMetrics)); - - final InetSocketAddress scmAddress = HddsServerUtil - .getScmClientBindAddress(conf); - clientRpcServer = - startRpcServer( - conf, - scmAddress, - StorageContainerLocationProtocolPB.class, - storageProtoPbService, - handlerCount); - clientRpcAddress = - updateRPCListenAddress(conf, OZONE_SCM_CLIENT_ADDRESS_KEY, - scmAddress, clientRpcServer); - if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - false)) { - clientRpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance()); - } - } - - public RPC.Server getClientRpcServer() { - return clientRpcServer; - } - - public InetSocketAddress getClientRpcAddress() { - return clientRpcAddress; - } - - public void start() { - protocolMetrics.register(); - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for Client ", getClientRpcAddress())); - getClientRpcServer().start(); - } - - public void stop() { - protocolMetrics.unregister(); - try { - LOG.info("Stopping the RPC server for Client Protocol"); - getClientRpcServer().stop(); - } catch (Exception ex) { - LOG.error("Client Protocol RPC stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - } - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for Client Protocol"); - getClientRpcServer().join(); - } - - @VisibleForTesting - public String getRpcRemoteUsername() { - UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); - return user == null ? 
null : user.getUserName(); - } - - @Override - public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType - replicationType, HddsProtos.ReplicationFactor factor, - String owner) throws IOException { - ScmUtils.preCheck(ScmOps.allocateContainer, safeModePrecheck); - getScm().checkAdminAccess(getRpcRemoteUsername()); - - final ContainerInfo container = scm.getContainerManager() - .allocateContainer(replicationType, factor, owner); - final Pipeline pipeline = scm.getPipelineManager() - .getPipeline(container.getPipelineID()); - return new ContainerWithPipeline(container, pipeline); - } - - @Override - public ContainerInfo getContainer(long containerID) throws IOException { - String remoteUser = getRpcRemoteUsername(); - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("containerID", String.valueOf(containerID)); - getScm().checkAdminAccess(remoteUser); - try { - return scm.getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - } catch (IOException ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.GET_CONTAINER, auditMap, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.GET_CONTAINER, auditMap) - ); - } - } - - } - - @Override - public ContainerWithPipeline getContainerWithPipeline(long containerID) - throws IOException { - final ContainerID cid = ContainerID.valueof(containerID); - try { - final ContainerInfo container = scm.getContainerManager() - .getContainer(cid); - - if (safeModePrecheck.isInSafeMode()) { - if (container.isOpen()) { - if (!hasRequiredReplicas(container)) { - throw new SCMException("Open container " + containerID + " doesn't" - + " have enough replicas to service this operation in " - + "Safe mode.", ResultCodes.SAFE_MODE_EXCEPTION); - } - } - } - getScm().checkAdminAccess(null); - - Pipeline pipeline; - try { - pipeline = container.isOpen() ? scm.getPipelineManager() - .getPipeline(container.getPipelineID()) : null; - } catch (PipelineNotFoundException ex) { - // The pipeline is destroyed. - pipeline = null; - } - - if (pipeline == null) { - pipeline = scm.getPipelineManager().createPipeline( - HddsProtos.ReplicationType.STAND_ALONE, - container.getReplicationFactor(), - scm.getContainerManager() - .getContainerReplicas(cid).stream() - .map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toList())); - } - - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - SCMAction.GET_CONTAINER_WITH_PIPELINE, - Collections.singletonMap("containerID", cid.toString()))); - - return new ContainerWithPipeline(container, pipeline); - } catch (IOException ex) { - AUDIT.logReadFailure(buildAuditMessageForFailure( - SCMAction.GET_CONTAINER_WITH_PIPELINE, - Collections.singletonMap("containerID", cid.toString()), ex)); - throw ex; - } - } - - /** - * Check if container reported replicas are equal or greater than required - * replication factor. - */ - private boolean hasRequiredReplicas(ContainerInfo contInfo) { - try{ - return getScm().getContainerManager() - .getContainerReplicas(contInfo.containerID()) - .size() >= contInfo.getReplicationFactor().getNumber(); - } catch (ContainerNotFoundException ex) { - // getContainerReplicas throws exception if no replica's exist for given - // container. 
- return false; - } - } - - @Override - public List listContainer(long startContainerID, - int count) throws IOException { - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("startContainerID", String.valueOf(startContainerID)); - auditMap.put("count", String.valueOf(count)); - try { - // To allow startcontainerId to take the value "0", - // "null" is assigned, so that its handled in the - // scm.getContainerManager().listContainer method - final ContainerID containerId = startContainerID != 0 ? ContainerID - .valueof(startContainerID) : null; - return scm.getContainerManager(). - listContainer(containerId, count); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); - } - } - - } - - @Override - public void deleteContainer(long containerID) throws IOException { - String remoteUser = getRpcRemoteUsername(); - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("containerID", String.valueOf(containerID)); - auditMap.put("remoteUser", remoteUser); - try { - getScm().checkAdminAccess(remoteUser); - scm.getContainerManager().deleteContainer( - ContainerID.valueof(containerID)); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logWriteFailure( - buildAuditMessageForFailure(SCMAction.DELETE_CONTAINER, auditMap, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.DELETE_CONTAINER, auditMap) - ); - } - } - } - - @Override - public List queryNode(HddsProtos.NodeState state, - HddsProtos.QueryScope queryScope, String poolName) throws - IOException { - - if (queryScope == HddsProtos.QueryScope.POOL) { - throw new IllegalArgumentException("Not Supported yet"); - } - - List result = new ArrayList<>(); - queryNode(state).forEach(node -> result.add(HddsProtos.Node.newBuilder() - .setNodeID(node.getProtoBufMessage()) - .addNodeStates(state) - .build())); - - return result; - - } - - @Override - public void notifyObjectStageChange(StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Type type, long id, - StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op - op, StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Stage stage) throws IOException { - - LOG.info("Object type {} id {} op {} new stage {}", type, id, op, - stage); - if (type == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Type.container) { - if (op == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Op.close) { - if (stage == StorageContainerLocationProtocolProtos - .ObjectStageChangeRequestProto.Stage.begin) { - scm.getContainerManager() - .updateContainerState(ContainerID.valueof(id), - HddsProtos.LifeCycleEvent.FINALIZE); - } else { - scm.getContainerManager() - .updateContainerState(ContainerID.valueof(id), - HddsProtos.LifeCycleEvent.CLOSE); - } - } - } // else if (type == ObjectStageChangeRequestProto.Type.pipeline) { - // TODO: pipeline state update will be addressed in future patch. - // } - - } - - @Override - public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) - throws IOException { - // TODO: will be addressed in future patch. 
- // This is needed only for debugging purposes to make sure cluster is - // working correctly. - return null; - } - - @Override - public List listPipelines() { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.LIST_PIPELINE, null)); - return scm.getPipelineManager().getPipelines(); - } - - @Override - public void activatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - SCMAction.ACTIVATE_PIPELINE, null)); - scm.getPipelineManager().activatePipeline( - PipelineID.getFromProtobuf(pipelineID)); - } - - @Override - public void deactivatePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - SCMAction.DEACTIVATE_PIPELINE, null)); - scm.getPipelineManager().deactivatePipeline( - PipelineID.getFromProtobuf(pipelineID)); - } - - @Override - public void closePipeline(HddsProtos.PipelineID pipelineID) - throws IOException { - Map auditMap = Maps.newHashMap(); - auditMap.put("pipelineID", pipelineID.getId()); - PipelineManager pipelineManager = scm.getPipelineManager(); - Pipeline pipeline = - pipelineManager.getPipeline(PipelineID.getFromProtobuf(pipelineID)); - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.CLOSE_PIPELINE, null) - ); - } - - @Override - public ScmInfo getScmInfo() throws IOException { - boolean auditSuccess = true; - try{ - ScmInfo.Builder builder = - new ScmInfo.Builder() - .setClusterId(scm.getScmStorageConfig().getClusterID()) - .setScmId(scm.getScmStorageConfig().getScmId()); - return builder.build(); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.GET_SCM_INFO, null, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null) - ); - } - } - } - - /** - * Check if SCM is in safe mode. - * - * @return Returns true if SCM is in safe mode else returns false. - * @throws IOException - */ - @Override - public boolean inSafeMode() throws IOException { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.IN_SAFE_MODE, null) - ); - return scm.isInSafeMode(); - } - - /** - * Force SCM out of Safe mode. - * - * @return returns true if operation is successful. - * @throws IOException - */ - @Override - public boolean forceExitSafeMode() throws IOException { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.FORCE_EXIT_SAFE_MODE, null) - ); - return scm.exitSafeMode(); - } - - @Override - public void startReplicationManager() { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.START_REPLICATION_MANAGER, null)); - scm.getReplicationManager().start(); - } - - @Override - public void stopReplicationManager() { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.STOP_REPLICATION_MANAGER, null)); - scm.getReplicationManager().stop(); - } - - @Override - public boolean getReplicationManagerStatus() { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.GET_REPLICATION_MANAGER_STATUS, null)); - return scm.getReplicationManager().isRunning(); - } - - /** - * Queries a list of Node that match a set of statuses. - * - *

For example, if the nodeStatuses is HEALTHY and RAFT_MEMBER, then - * this call will return all - * healthy nodes which members in Raft pipeline. - * - *

Right now we don't support operations, so we assume it is an AND - * operation between the - * operators. - * - * @param state - NodeStates. - * @return List of Datanodes. - */ - public List queryNode(HddsProtos.NodeState state) { - Preconditions.checkNotNull(state, "Node Query set cannot be null"); - return new ArrayList<>(queryNodeState(state)); - } - - @VisibleForTesting - public StorageContainerManager getScm() { - return scm; - } - - /** - * Set safe mode status based on . - */ - public boolean getSafeModeStatus() { - return safeModePrecheck.isInSafeMode(); - } - - - /** - * Query the System for Nodes. - * - * @param nodeState - NodeState that we are interested in matching. - * @return Set of Datanodes that match the NodeState. - */ - private Set queryNodeState(HddsProtos.NodeState nodeState) { - Set returnSet = new TreeSet<>(); - List tmp = scm.getScmNodeManager().getNodes(nodeState); - if ((tmp != null) && (tmp.size() > 0)) { - returnSet.addAll(tmp); - } - return returnSet; - } - - @Override - public AuditMessage buildAuditMessageForSuccess( - AuditAction op, Map auditMap) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - @Override - public void close() throws IOException { - stop(); - } - - /** - * Set SafeMode status. - * - * @param safeModeStatus - */ - public void setSafeModeStatus(boolean safeModeStatus) { - safeModePrecheck.setInSafeMode(safeModeStatus); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java deleted file mode 100644 index 9bbabd11ee0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.hdds.scm.server; - - -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.security.x509.certificate.authority - .CertificateServer; - -/** - * This class acts as an SCM builder Class. This class is important for us - * from a resilience perspective of SCM. This class will allow us swap out - * different managers and replace with out on manager in the testing phase. - *

- * At some point in the future, we will make all these managers dynamically - * loadable, so other developers can extend SCM by replacing various managers. - *

- * TODO: Add different config keys, so that we can load different managers at - * run time. This will make it easy to extend SCM without having to replace - * whole SCM each time. - *

- * Different Managers supported by this builder are: - * NodeManager scmNodeManager; - * PipelineManager pipelineManager; - * ContainerManager containerManager; - * BlockManager scmBlockManager; - * ReplicationManager replicationManager; - * SCMSafeModeManager scmSafeModeManager; - * CertificateServer certificateServer; - * SCMMetadata scmMetadataStore. - * - * If any of these are *not* specified then the default version of these - * managers are used by SCM. - * - */ -public final class SCMConfigurator { - private NodeManager scmNodeManager; - private PipelineManager pipelineManager; - private ContainerManager containerManager; - private BlockManager scmBlockManager; - private ReplicationManager replicationManager; - private SCMSafeModeManager scmSafeModeManager; - private CertificateServer certificateServer; - private SCMMetadataStore metadataStore; - private NetworkTopology networkTopology; - - /** - * Allows user to specify a version of Node manager to use with this SCM. - * @param scmNodeManager - Node Manager. - */ - public void setScmNodeManager(NodeManager scmNodeManager) { - this.scmNodeManager = scmNodeManager; - } - - /** - * Allows user to specify a custom version of PipelineManager to use with - * this SCM. - * @param pipelineManager - Pipeline Manager. - */ - public void setPipelineManager(PipelineManager pipelineManager) { - this.pipelineManager = pipelineManager; - } - - /** - * Allows user to specify a custom version of containerManager to use with - * this SCM. - * @param containerManager - Container Manager. - */ - public void setContainerManager(ContainerManager containerManager) { - this.containerManager = containerManager; - } - - /** - * Allows user to specify a custom version of Block Manager to use with - * this SCM. - * @param scmBlockManager - Block Manager - */ - public void setScmBlockManager(BlockManager scmBlockManager) { - this.scmBlockManager = scmBlockManager; - } - - /** - * Allows user to specify a custom version of Replication Manager to use - * with this SCM. - * @param replicationManager - replication Manager. - */ - public void setReplicationManager(ReplicationManager replicationManager) { - this.replicationManager = replicationManager; - } - - /** - * Allows user to specify a custom version of Safe Mode Manager to use - * with this SCM. - * @param scmSafeModeManager - SafeMode Manager. - */ - public void setScmSafeModeManager(SCMSafeModeManager scmSafeModeManager) { - this.scmSafeModeManager = scmSafeModeManager; - } - - /** - * Allows user to specify a custom version of Certificate Server to use - * with this SCM. - * @param certificateAuthority - Certificate server. - */ - public void setCertificateServer(CertificateServer certificateAuthority) { - this.certificateServer = certificateAuthority; - } - - /** - * Allows user to specify a custom version of Metadata Store to be used - * with this SCM. - * @param scmMetadataStore - scm metadata store. - */ - public void setMetadataStore(SCMMetadataStore scmMetadataStore) { - this.metadataStore = scmMetadataStore; - } - - /** - * Allows user to specify a custom version of Network Topology Cluster - * to be used with this SCM. - * @param networkTopology - network topology cluster. - */ - public void setNetworkTopology(NetworkTopology networkTopology) { - this.networkTopology = networkTopology; - } - - /** - * Gets SCM Node Manager. - * @return Node Manager. - */ - public NodeManager getScmNodeManager() { - return scmNodeManager; - } - - /** - * Get Pipeline Manager. - * @return pipeline manager. 
- */ - public PipelineManager getPipelineManager() { - return pipelineManager; - } - - /** - * Get Container Manager. - * @return container Manger. - */ - public ContainerManager getContainerManager() { - return containerManager; - } - - /** - * Get SCM Block Manager. - * @return Block Manager. - */ - public BlockManager getScmBlockManager() { - return scmBlockManager; - } - - /** - * Get Replica Manager. - * @return Replica Manager. - */ - public ReplicationManager getReplicationManager() { - return replicationManager; - } - - /** - * Gets Safe Mode Manager. - * @return Safe Mode manager. - */ - public SCMSafeModeManager getScmSafeModeManager() { - return scmSafeModeManager; - } - - /** - * Get Certificate Manager. - * @return Certificate Manager. - */ - public CertificateServer getCertificateServer() { - return certificateServer; - } - - /** - * Get Metadata Store. - * @return SCMMetadataStore. - */ - public SCMMetadataStore getMetadataStore() { - return metadataStore; - } - - /** - * Get network topology cluster tree. - * @return NetworkTopology. - */ - public NetworkTopology getNetworkTopology() { - return networkTopology; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java deleted file mode 100644 index 5e8e13706ad..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; - -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsSource; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.Interns; - -/** - * Metrics source to report number of containers in different states. - */ -@InterfaceAudience.Private -@Metrics(about = "SCM Container Manager Metrics", context = "ozone") -public class SCMContainerMetrics implements MetricsSource { - - private final SCMMXBean scmmxBean; - private static final String SOURCE = - SCMContainerMetrics.class.getSimpleName(); - - public SCMContainerMetrics(SCMMXBean scmmxBean) { - this.scmmxBean = scmmxBean; - } - - public static SCMContainerMetrics create(SCMMXBean scmmxBean) { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE, "Storage " + - "Container Manager Metrics", new SCMContainerMetrics(scmmxBean)); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE); - } - - @Override - @SuppressWarnings("SuspiciousMethodCalls") - public void getMetrics(MetricsCollector collector, boolean all) { - Map stateCount = scmmxBean.getContainerStateCount(); - - collector.addRecord(SOURCE) - .addGauge(Interns.info("OpenContainers", - "Number of open containers"), - stateCount.get(OPEN.toString())) - .addGauge(Interns.info("ClosingContainers", - "Number of containers in closing state"), - stateCount.get(CLOSING.toString())) - .addGauge(Interns.info("QuasiClosedContainers", - "Number of containers in quasi closed state"), - stateCount.get(QUASI_CLOSED.toString())) - .addGauge(Interns.info("ClosedContainers", - "Number of containers in closed state"), - stateCount.get(CLOSED.toString())) - .addGauge(Interns.info("DeletingContainers", - "Number of containers in deleting state"), - stateCount.get(DELETING.toString())) - .addGauge(Interns.info("DeletedContainers", - "Number of containers in deleted state"), - stateCount.get(DELETED.toString())); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index 9f6077b4f70..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,289 +0,0 @@ -/** - * Licensed 
to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import com.google.protobuf.GeneratedMessage; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_ACTIONS; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents - .INCREMENTAL_CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT; - -/** - * This class is responsible for dispatching heartbeat from datanode to - * appropriate EventHandler at SCM. - */ -public final class SCMDatanodeHeartbeatDispatcher { - - private static final Logger LOG = - LoggerFactory.getLogger(SCMDatanodeHeartbeatDispatcher.class); - - private final NodeManager nodeManager; - private final EventPublisher eventPublisher; - - - public SCMDatanodeHeartbeatDispatcher(NodeManager nodeManager, - EventPublisher eventPublisher) { - Preconditions.checkNotNull(nodeManager); - Preconditions.checkNotNull(eventPublisher); - this.nodeManager = nodeManager; - this.eventPublisher = eventPublisher; - } - - - /** - * Dispatches heartbeat to registered event handlers. - * - * @param heartbeat heartbeat to be dispatched. - * - * @return list of SCMCommand - */ - public List dispatch(SCMHeartbeatRequestProto heartbeat) { - DatanodeDetails datanodeDetails = - DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails()); - List commands; - - // If node is not registered, ask the node to re-register. Do not process - // Heartbeat for unregistered nodes. - if (!nodeManager.isNodeRegistered(datanodeDetails)) { - LOG.info("SCM received heartbeat from an unregistered datanode {}. 
" + - "Asking datanode to re-register.", datanodeDetails); - UUID dnID = datanodeDetails.getUuid(); - nodeManager.addDatanodeCommand(dnID, new ReregisterCommand()); - - commands = nodeManager.getCommandQueue(dnID); - - } else { - - // should we dispatch heartbeat through eventPublisher? - commands = nodeManager.processHeartbeat(datanodeDetails); - if (heartbeat.hasNodeReport()) { - LOG.debug("Dispatching Node Report."); - eventPublisher.fireEvent( - NODE_REPORT, - new NodeReportFromDatanode( - datanodeDetails, - heartbeat.getNodeReport())); - } - - if (heartbeat.hasContainerReport()) { - LOG.debug("Dispatching Container Report."); - eventPublisher.fireEvent( - CONTAINER_REPORT, - new ContainerReportFromDatanode( - datanodeDetails, - heartbeat.getContainerReport())); - - } - - final List icrs = - heartbeat.getIncrementalContainerReportList(); - - if (icrs.size() > 0) { - LOG.debug("Dispatching ICRs."); - for (IncrementalContainerReportProto icr : icrs) { - eventPublisher.fireEvent(INCREMENTAL_CONTAINER_REPORT, - new IncrementalContainerReportFromDatanode( - datanodeDetails, icr)); - } - } - - if (heartbeat.hasContainerActions()) { - LOG.debug("Dispatching Container Actions."); - eventPublisher.fireEvent( - CONTAINER_ACTIONS, - new ContainerActionsFromDatanode( - datanodeDetails, - heartbeat.getContainerActions())); - } - - if (heartbeat.hasPipelineReports()) { - LOG.debug("Dispatching Pipeline Report."); - eventPublisher.fireEvent( - PIPELINE_REPORT, - new PipelineReportFromDatanode( - datanodeDetails, - heartbeat.getPipelineReports())); - - } - - if (heartbeat.hasPipelineActions()) { - LOG.debug("Dispatching Pipeline Actions."); - eventPublisher.fireEvent( - PIPELINE_ACTIONS, - new PipelineActionsFromDatanode( - datanodeDetails, - heartbeat.getPipelineActions())); - } - - if (heartbeat.getCommandStatusReportsCount() != 0) { - for (CommandStatusReportsProto commandStatusReport : heartbeat - .getCommandStatusReportsList()) { - eventPublisher.fireEvent( - CMD_STATUS_REPORT, - new CommandStatusReportFromDatanode( - datanodeDetails, - commandStatusReport)); - } - } - } - - return commands; - } - - /** - * Wrapper class for events with the datanode origin. - */ - public static class ReportFromDatanode { - - private final DatanodeDetails datanodeDetails; - - private final T report; - - public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { - this.datanodeDetails = datanodeDetails; - this.report = report; - } - - public DatanodeDetails getDatanodeDetails() { - return datanodeDetails; - } - - public T getReport() { - return report; - } - } - - /** - * Node report event payload with origin. - */ - public static class NodeReportFromDatanode - extends ReportFromDatanode { - - public NodeReportFromDatanode(DatanodeDetails datanodeDetails, - NodeReportProto report) { - super(datanodeDetails, report); - } - } - - /** - * Container report event payload with origin. - */ - public static class ContainerReportFromDatanode - extends ReportFromDatanode { - - public ContainerReportFromDatanode(DatanodeDetails datanodeDetails, - ContainerReportsProto report) { - super(datanodeDetails, report); - } - } - - /** - * Incremental Container report event payload with origin. - */ - public static class IncrementalContainerReportFromDatanode - extends ReportFromDatanode { - - public IncrementalContainerReportFromDatanode( - DatanodeDetails datanodeDetails, - IncrementalContainerReportProto report) { - super(datanodeDetails, report); - } - } - - /** - * Container action event payload with origin. 
- */ - public static class ContainerActionsFromDatanode - extends ReportFromDatanode { - - public ContainerActionsFromDatanode(DatanodeDetails datanodeDetails, - ContainerActionsProto actions) { - super(datanodeDetails, actions); - } - } - - /** - * Pipeline report event payload with origin. - */ - public static class PipelineReportFromDatanode - extends ReportFromDatanode { - - public PipelineReportFromDatanode(DatanodeDetails datanodeDetails, - PipelineReportsProto report) { - super(datanodeDetails, report); - } - } - - /** - * Pipeline action event payload with origin. - */ - public static class PipelineActionsFromDatanode - extends ReportFromDatanode { - - public PipelineActionsFromDatanode(DatanodeDetails datanodeDetails, - PipelineActionsProto actions) { - super(datanodeDetails, actions); - } - } - - /** - * Container report event payload with origin. - */ - public static class CommandStatusReportFromDatanode - extends ReportFromDatanode { - - public CommandStatusReportFromDatanode(DatanodeDetails datanodeDetails, - CommandStatusReportsProto report) { - super(datanodeDetails, report); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java deleted file mode 100644 index 530c0a6d238..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ /dev/null @@ -1,404 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReregisterCommandProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.audit.SCMAction; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import 
org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Maps; -import com.google.protobuf.BlockingService; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.closeContainerCommand; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteBlocksCommand; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.deleteContainerCommand; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.replicateContainerCommand; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT; -import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Protocol Handler for Datanode Protocol. - */ -public class SCMDatanodeProtocolServer implements - StorageContainerDatanodeProtocol, Auditor { - - private static final Logger LOG = LoggerFactory.getLogger( - SCMDatanodeProtocolServer.class); - - private static final AuditLogger AUDIT = - new AuditLogger(AuditLoggerType.SCMLOGGER); - - /** - * The RPC server that listens to requests from DataNodes. 
- */ - private final RPC.Server datanodeRpcServer; - - private final StorageContainerManager scm; - private final InetSocketAddress datanodeRpcAddress; - private final SCMDatanodeHeartbeatDispatcher heartbeatDispatcher; - private final EventPublisher eventPublisher; - private final ProtocolMessageMetrics protocolMessageMetrics; - - public SCMDatanodeProtocolServer(final OzoneConfiguration conf, - StorageContainerManager scm, EventPublisher eventPublisher) - throws IOException { - - Preconditions.checkNotNull(scm, "SCM cannot be null"); - Preconditions.checkNotNull(eventPublisher, "EventPublisher cannot be null"); - - this.scm = scm; - this.eventPublisher = eventPublisher; - final int handlerCount = - conf.getInt(OZONE_SCM_HANDLER_COUNT_KEY, - OZONE_SCM_HANDLER_COUNT_DEFAULT); - - heartbeatDispatcher = new SCMDatanodeHeartbeatDispatcher( - scm.getScmNodeManager(), eventPublisher); - - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, - ProtobufRpcEngine.class); - - protocolMessageMetrics = ProtocolMessageMetrics - .create("SCMDatanodeProtocol", "SCM Datanode protocol", - StorageContainerDatanodeProtocolProtos.Type.values()); - - BlockingService dnProtoPbService = - StorageContainerDatanodeProtocolProtos - .StorageContainerDatanodeProtocolService - .newReflectiveBlockingService( - new StorageContainerDatanodeProtocolServerSideTranslatorPB( - this, protocolMessageMetrics)); - - InetSocketAddress datanodeRpcAddr = - HddsServerUtil.getScmDataNodeBindAddress(conf); - - datanodeRpcServer = - startRpcServer( - conf, - datanodeRpcAddr, - StorageContainerDatanodeProtocolPB.class, - dnProtoPbService, - handlerCount); - - datanodeRpcAddress = - updateRPCListenAddress( - conf, OZONE_SCM_DATANODE_ADDRESS_KEY, datanodeRpcAddr, - datanodeRpcServer); - - if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - false)) { - datanodeRpcServer.refreshServiceAcl(conf, - SCMPolicyProvider.getInstance()); - } - } - - public void start() { - LOG.info( - StorageContainerManager.buildRpcServerStartMessage( - "RPC server for DataNodes", datanodeRpcAddress)); - protocolMessageMetrics.register(); - datanodeRpcServer.start(); - } - - public InetSocketAddress getDatanodeRpcAddress() { - return datanodeRpcAddress; - } - - @Override - public SCMVersionResponseProto getVersion(SCMVersionRequestProto - versionRequest) - throws IOException { - boolean auditSuccess = true; - try { - return scm.getScmNodeManager().getVersion(versionRequest) - .getProtobufMessage(); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logReadFailure( - buildAuditMessageForFailure(SCMAction.GET_VERSION, null, ex)); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logReadSuccess( - buildAuditMessageForSuccess(SCMAction.GET_VERSION, null)); - } - } - } - - @Override - public SCMRegisteredResponseProto register( - HddsProtos.DatanodeDetailsProto datanodeDetailsProto, - NodeReportProto nodeReport, - ContainerReportsProto containerReportsProto, - PipelineReportsProto pipelineReportsProto) - throws IOException { - DatanodeDetails datanodeDetails = DatanodeDetails - .getFromProtoBuf(datanodeDetailsProto); - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("datanodeDetails", datanodeDetails.toString()); - - // TODO : Return the list of Nodes that forms the SCM HA. 
- RegisteredCommand registeredCommand = scm.getScmNodeManager() - .register(datanodeDetails, nodeReport, pipelineReportsProto); - if (registeredCommand.getError() - == SCMRegisteredResponseProto.ErrorCode.success) { - eventPublisher.fireEvent(CONTAINER_REPORT, - new SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode( - datanodeDetails, containerReportsProto)); - eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - new NodeRegistrationContainerReport(datanodeDetails, - containerReportsProto)); - eventPublisher.fireEvent(PIPELINE_REPORT, - new PipelineReportFromDatanode(datanodeDetails, - pipelineReportsProto)); - } - try { - return getRegisteredResponse(registeredCommand); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logWriteFailure( - buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex)); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap)); - } - } - } - - @VisibleForTesting - public static SCMRegisteredResponseProto getRegisteredResponse( - RegisteredCommand cmd) { - return cmd.getProtoBufMessage(); - } - - @Override - public SCMHeartbeatResponseProto sendHeartbeat( - SCMHeartbeatRequestProto heartbeat) throws IOException { - List cmdResponses = new ArrayList<>(); - for (SCMCommand cmd : heartbeatDispatcher.dispatch(heartbeat)) { - cmdResponses.add(getCommandResponse(cmd)); - } - boolean auditSuccess = true; - Map auditMap = Maps.newHashMap(); - auditMap.put("datanodeUUID", heartbeat.getDatanodeDetails().getUuid()); - auditMap.put("command", flatten(cmdResponses.toString())); - try { - return SCMHeartbeatResponseProto.newBuilder() - .setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()) - .addAllCommands(cmdResponses).build(); - } catch (Exception ex) { - auditSuccess = false; - AUDIT.logWriteFailure( - buildAuditMessageForFailure(SCMAction.SEND_HEARTBEAT, auditMap, ex) - ); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap) - ); - } - } - } - - /** - * Returns a SCMCommandRepose from the SCM Command. - * - * @param cmd - Cmd - * @return SCMCommandResponseProto - * @throws IOException - */ - @VisibleForTesting - public SCMCommandProto getCommandResponse(SCMCommand cmd) - throws IOException { - SCMCommandProto.Builder builder = - SCMCommandProto.newBuilder(); - switch (cmd.getType()) { - case reregisterCommand: - return builder - .setCommandType(reregisterCommand) - .setReregisterCommandProto(ReregisterCommandProto - .getDefaultInstance()) - .build(); - case deleteBlocksCommand: - // Once SCM sends out the deletion message, increment the count. - // this is done here instead of when SCM receives the ACK, because - // DN might not be able to response the ACK for sometime. In case - // it times out, SCM needs to re-send the message some more times. 
- List txs = - ((DeleteBlocksCommand) cmd) - .blocksTobeDeleted() - .stream() - .map(tx -> tx.getTxID()) - .collect(Collectors.toList()); - scm.getScmBlockManager().getDeletedBlockLog().incrementCount(txs); - return builder - .setCommandType(deleteBlocksCommand) - .setDeleteBlocksCommandProto(((DeleteBlocksCommand) cmd).getProto()) - .build(); - case closeContainerCommand: - return builder - .setCommandType(closeContainerCommand) - .setCloseContainerCommandProto( - ((CloseContainerCommand) cmd).getProto()) - .build(); - case deleteContainerCommand: - return builder.setCommandType(deleteContainerCommand) - .setDeleteContainerCommandProto( - ((DeleteContainerCommand) cmd).getProto()) - .build(); - case replicateContainerCommand: - return builder - .setCommandType(replicateContainerCommand) - .setReplicateContainerCommandProto( - ((ReplicateContainerCommand)cmd).getProto()) - .build(); - default: - throw new IllegalArgumentException("Scm command " + - cmd.getType().toString() + " is not implemented"); - } - } - - - public void join() throws InterruptedException { - LOG.trace("Join RPC server for DataNodes"); - datanodeRpcServer.join(); - } - - public void stop() { - try { - LOG.info("Stopping the RPC server for DataNodes"); - datanodeRpcServer.stop(); - } catch (Exception ex) { - LOG.error(" datanodeRpcServer stop failed.", ex); - } - IOUtils.cleanupWithLogger(LOG, scm.getScmNodeManager()); - protocolMessageMetrics.unregister(); - } - - @Override - public AuditMessage buildAuditMessageForSuccess( - AuditAction op, Map auditMap) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - private static String flatten(String input) { - return input - .replaceAll(System.lineSeparator(), " ") - .trim() - .replaceAll(" +", " "); - } - - /** - * Wrapper class for events with the datanode origin. - */ - public static class NodeRegistrationContainerReport extends - ReportFromDatanode { - - public NodeRegistrationContainerReport(DatanodeDetails datanodeDetails, - ContainerReportsProto report) { - super(datanodeDetails, report); - } - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java deleted file mode 100644 index 13b55517225..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; - -import java.util.Map; - -/** - * - * This is the JMX management interface for scm information. - */ -@InterfaceAudience.Private -public interface SCMMXBean extends ServiceRuntimeInfo { - - /** - * Get the SCM RPC server port that used to listen to datanode requests. - * @return SCM datanode RPC server port - */ - String getDatanodeRpcPort(); - - /** - * Get the SCM RPC server port that used to listen to client requests. - * @return SCM client RPC server port - */ - String getClientRpcPort(); - - /** - * Get container report info that includes container IO stats of nodes. - * @return The datanodeUUid to report json string mapping - */ - Map getContainerReport(); - - /** - * Returns safe mode status. - * @return boolean - */ - boolean isInSafeMode(); - - /** - * Returns live safe mode container threshold. - * @return String - */ - double getSafeModeCurrentContainerThreshold(); - - /** - * Returns the container count in all states. - */ - Map getContainerStateCount(); -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java deleted file mode 100644 index b21a7222ac7..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.scm.server; - - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.security.authorize.PolicyProvider; -import org.apache.hadoop.security.authorize.Service; - -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.hdds.HddsConfigKeys.*; - -/** - * {@link PolicyProvider} for SCM protocols. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public final class SCMPolicyProvider extends PolicyProvider { - - private static AtomicReference atomicReference = - new AtomicReference<>(); - - private SCMPolicyProvider() { - } - - @Private - @Unstable - public static SCMPolicyProvider getInstance() { - if (atomicReference.get() == null) { - atomicReference.compareAndSet(null, new SCMPolicyProvider()); - } - return atomicReference.get(); - } - - private static final Service[] SCM_SERVICES = - new Service[]{ - new Service( - HDDS_SECURITY_CLIENT_DATANODE_CONTAINER_PROTOCOL_ACL, - StorageContainerDatanodeProtocol.class), - new Service( - HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL, - StorageContainerLocationProtocol.class), - new Service( - HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL, - ScmBlockLocationProtocol.class), - new Service( - HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL, - SCMSecurityProtocol.class), - }; - - @SuppressFBWarnings("EI_EXPOSE_REP") - @Override - public Service[] getServices() { - return SCM_SERVICES; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java deleted file mode 100644 index c4b4efd30e0..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.protobuf.BlockingService; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.util.Objects; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; -import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.security.KerberosInfo; - -import org.bouncycastle.cert.X509CertificateHolder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType.KERBEROS_TRUSTED; - -/** - * The protocol used to perform security related operations with SCM. - */ -@KerberosInfo( - serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) -@InterfaceAudience.Private -public class SCMSecurityProtocolServer implements SCMSecurityProtocol { - - private static final Logger LOGGER = LoggerFactory - .getLogger(SCMClientProtocolServer.class); - private final SecurityConfig config; - private final CertificateServer certificateServer; - private final RPC.Server rpcServer; - private final InetSocketAddress rpcAddress; - private final ProtocolMessageMetrics metrics; - - SCMSecurityProtocolServer(OzoneConfiguration conf, - CertificateServer certificateServer) throws IOException { - this.config = new SecurityConfig(conf); - this.certificateServer = certificateServer; - - final int handlerCount = - conf.getInt(ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_KEY, - ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT); - rpcAddress = HddsServerUtil - .getScmSecurityInetAddress(conf); - // SCM security service RPC service. 
- RPC.setProtocolEngine(conf, SCMSecurityProtocolPB.class, - ProtobufRpcEngine.class); - metrics = new ProtocolMessageMetrics("ScmSecurityProtocol", - "SCM Security protocol metrics", - SCMSecurityProtocolProtos.Type.values()); - BlockingService secureProtoPbService = - SCMSecurityProtocolProtos.SCMSecurityProtocolService - .newReflectiveBlockingService( - new SCMSecurityProtocolServerSideTranslatorPB(this, metrics)); - this.rpcServer = - StorageContainerManager.startRpcServer( - conf, - rpcAddress, - SCMSecurityProtocolPB.class, - secureProtoPbService, - handlerCount); - if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - false)) { - rpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance()); - } - } - - /** - * Get SCM signed certificate for DataNode. - * - * @param dnDetails - DataNode Details. - * @param certSignReq - Certificate signing request. - * @return String - SCM signed pem encoded certificate. - */ - @Override - public String getDataNodeCertificate( - DatanodeDetailsProto dnDetails, - String certSignReq) throws IOException { - LOGGER.info("Processing CSR for dn {}, UUID: {}", dnDetails.getHostName(), - dnDetails.getUuid()); - Objects.requireNonNull(dnDetails); - Future future = - certificateServer.requestCertificate(certSignReq, - KERBEROS_TRUSTED); - - try { - return CertificateCodec.getPEMEncodedString(future.get()); - } catch (InterruptedException | ExecutionException e) { - LOGGER.error("getDataNodeCertificate operation failed. ", e); - throw new IOException("getDataNodeCertificate operation failed. ", e); - } - } - - /** - * Get SCM signed certificate for OM. - * - * @param omDetails - OzoneManager Details. - * @param certSignReq - Certificate signing request. - * @return String - SCM signed pem encoded certificate. - */ - @Override - public String getOMCertificate(OzoneManagerDetailsProto omDetails, - String certSignReq) throws IOException { - LOGGER.info("Processing CSR for om {}, UUID: {}", omDetails.getHostName(), - omDetails.getUuid()); - Objects.requireNonNull(omDetails); - Future future = - certificateServer.requestCertificate(certSignReq, - KERBEROS_TRUSTED); - - try { - return CertificateCodec.getPEMEncodedString(future.get()); - } catch (InterruptedException | ExecutionException e) { - LOGGER.error("getOMCertificate operation failed. ", e); - throw new IOException("getOMCertificate operation failed. ", e); - } - } - - /** - * Get SCM signed certificate with given serial id. - * - * @param certSerialId - Certificate serial id. - * @return string - pem encoded SCM signed certificate. - */ - @Override - public String getCertificate(String certSerialId) throws IOException { - LOGGER.debug("Getting certificate with certificate serial id", - certSerialId); - try { - X509Certificate certificate = - certificateServer.getCertificate(certSerialId); - if (certificate != null) { - return CertificateCodec.getPEMEncodedString(certificate); - } - } catch (CertificateException e) { - LOGGER.error("getCertificate operation failed. ", e); - throw new IOException("getCertificate operation failed. ", e); - } - LOGGER.debug("Certificate with serial id {} not found.", certSerialId); - throw new IOException("Certificate not found"); - } - - /** - * Get SCM signed certificate for OM. - * - * @return string - Root certificate. 
- */ - @Override - public String getCACertificate() throws IOException { - LOGGER.debug("Getting CA certificate."); - try { - return CertificateCodec.getPEMEncodedString( - certificateServer.getCACertificate()); - } catch (CertificateException e) { - LOGGER.error("getRootCertificate operation failed. ", e); - throw new IOException("getRootCertificate operation failed. ", e); - } - } - - public RPC.Server getRpcServer() { - return rpcServer; - } - - public InetSocketAddress getRpcAddress() { - return rpcAddress; - } - - public void start() { - LOGGER.info(StorageContainerManager.buildRpcServerStartMessage("Starting" - + " RPC server for SCMSecurityProtocolServer.", getRpcAddress())); - metrics.register(); - getRpcServer().start(); - } - - public void stop() { - try { - LOGGER.info("Stopping the SCMSecurityProtocolServer."); - metrics.unregister(); - getRpcServer().stop(); - } catch (Exception ex) { - LOGGER.error("SCMSecurityProtocolServer stop failed.", ex); - } - } - - public void join() throws InterruptedException { - LOGGER.trace("Join RPC server for SCMSecurityProtocolServer."); - getRpcServer().join(); - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java deleted file mode 100644 index 7d84fc017af..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStarterInterface.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.IOException; - -/** - * This interface is used by the StorageContainerManager to allow the - * dependencies to be injected to the CLI class. - */ -public interface SCMStarterInterface { - - void start(OzoneConfiguration conf) throws Exception; - boolean init(OzoneConfiguration conf, String clusterId) - throws IOException; - String generateClusterId(); -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java deleted file mode 100644 index 73f9cbe9be7..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.common.Storage; - -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; -import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR; - -/** - * SCMStorageConfig is responsible for management of the - * StorageDirectories used by the SCM. - */ -public class SCMStorageConfig extends Storage { - - /** - * Construct SCMStorageConfig. - * @throws IOException if any directories are inaccessible. - */ - public SCMStorageConfig(OzoneConfiguration conf) throws IOException { - super(NodeType.SCM, ServerUtils.getScmDbDir(conf), STORAGE_DIR); - } - - public void setScmId(String scmId) throws IOException { - if (getState() == StorageState.INITIALIZED) { - throw new IOException("SCM is already initialized."); - } else { - getStorageInfo().setProperty(SCM_ID, scmId); - } - } - - /** - * Retrieves the SCM ID from the version file. 
- * @return SCM_ID - */ - public String getScmId() { - return getStorageInfo().getProperty(SCM_ID); - } - - @Override - protected Properties getNodeProperties() { - String scmId = getScmId(); - if (scmId == null) { - scmId = UUID.randomUUID().toString(); - } - Properties scmProperties = new Properties(); - scmProperties.setProperty(SCM_ID, scmId); - return scmProperties; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java deleted file mode 100644 index af65e1365bf..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ /dev/null @@ -1,1103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.protobuf.BlockingService; -import java.util.Objects; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; -import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; -import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler; -import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementMetrics; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; -import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerReportHandler; -import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler; -import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreRDBImpl; -import org.apache.hadoop.hdds.scm.node.DeadNodeHandler; -import org.apache.hadoop.hdds.scm.node.NewNodeHandler; -import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.NodeReportHandler; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import 
org.apache.hadoop.hdds.scm.node.StaleNodeHandler; -import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; -import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultCAServer; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.ozone.lock.LockManager; -import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.hdds.utils.HddsVersionInfo; -import org.apache.ratis.grpc.GrpcTlsConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; - -/** - * StorageContainerManager is the main entry point for the service that - * provides information about - * which SCM nodes host containers. - * - *
DataNodes report to StorageContainerManager using heartbeat messages. - * SCM allocates containers - * and returns a pipeline. - * - *
A client once it gets a pipeline (a list of datanodes) will connect to - * the datanodes and create a container, which then can be used to store data. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"}) -public final class StorageContainerManager extends ServiceRuntimeInfoImpl - implements SCMMXBean { - - private static final Logger LOG = LoggerFactory - .getLogger(StorageContainerManager.class); - - /** - * SCM metrics. - */ - private static SCMMetrics metrics; - - /* - * RPC Endpoints exposed by SCM. - */ - private final SCMDatanodeProtocolServer datanodeProtocolServer; - private final SCMBlockProtocolServer blockProtocolServer; - private final SCMClientProtocolServer clientProtocolServer; - private SCMSecurityProtocolServer securityProtocolServer; - - /* - * State Managers of SCM. - */ - private NodeManager scmNodeManager; - private PipelineManager pipelineManager; - private ContainerManager containerManager; - private BlockManager scmBlockManager; - private final SCMStorageConfig scmStorageConfig; - - private SCMMetadataStore scmMetadataStore; - - private final EventQueue eventQueue; - /* - * HTTP endpoint for JMX access. - */ - private final StorageContainerManagerHttpServer httpServer; - /** - * SCM super user. - */ - private final String scmUsername; - private final Collection scmAdminUsernames; - /** - * SCM mxbean. - */ - private ObjectName scmInfoBeanName; - /** - * Key = DatanodeUuid, value = ContainerStat. - */ - private Cache containerReportCache; - - private ReplicationManager replicationManager; - - private final LeaseManager commandWatcherLeaseManager; - - private SCMSafeModeManager scmSafeModeManager; - private CertificateServer certificateServer; - private GrpcTlsConfig grpcTlsConfig; - - private JvmPauseMonitor jvmPauseMonitor; - private final OzoneConfiguration configuration; - private final SafeModeHandler safeModeHandler; - private SCMContainerMetrics scmContainerMetrics; - private MetricsSystem ms; - - /** - * Network topology Map. - */ - private NetworkTopology clusterMap; - - /** - * Creates a new StorageContainerManager. Configuration will be - * updated with information on the actual listening addresses used - * for RPC servers. - * - * @param conf configuration - */ - public StorageContainerManager(OzoneConfiguration conf) - throws IOException, AuthenticationException { - // default empty configurator means default managers will be used. - this(conf, new SCMConfigurator()); - } - - - /** - * This constructor offers finer control over how SCM comes up. - * To use this, user needs to create a SCMConfigurator and set various - * managers that user wants SCM to use, if a value is missing then SCM will - * use the default value for that manager. - * - * @param conf - Configuration - * @param configurator - configurator - */ - public StorageContainerManager(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException, AuthenticationException { - super(HddsVersionInfo.HDDS_VERSION_INFO); - - Objects.requireNonNull(configurator, "configurator cannot not be null"); - Objects.requireNonNull(conf, "configuration cannot not be null"); - - configuration = conf; - initMetrics(); - initContainerReportCache(conf); - /** - * It is assumed the scm --init command creates the SCM Storage Config. 
- */ - scmStorageConfig = new SCMStorageConfig(conf); - if (scmStorageConfig.getState() != StorageState.INITIALIZED) { - LOG.error("Please make sure you have run \'ozone scm --init\' " + - "command to generate all the required metadata."); - throw new SCMException("SCM not initialized due to storage config " + - "failure.", ResultCodes.SCM_NOT_INITIALIZED); - } - - /** - * Important : This initialization sequence is assumed by some of our tests. - * The testSecureOzoneCluster assumes that security checks have to be - * passed before any artifacts like SCM DB is created. So please don't - * add any other initialization above the Security checks please. - */ - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - loginAsSCMUser(conf); - } - - // Creates the SCM DBs or opens them if it exists. - // A valid pointer to the store is required by all the other services below. - initalizeMetadataStore(conf, configurator); - - // Authenticate SCM if security is enabled, this initialization can only - // be done after the metadata store is initialized. - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - initializeCAnSecurityProtocol(conf, configurator); - } else { - // if no Security, we do not create a Certificate Server at all. - // This allows user to boot SCM without security temporarily - // and then come back and enable it without any impact. - certificateServer = null; - securityProtocolServer = null; - } - - eventQueue = new EventQueue(); - long watcherTimeout = - conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, - HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher", - watcherTimeout); - initializeSystemManagers(conf, configurator); - - CloseContainerEventHandler closeContainerHandler = - new CloseContainerEventHandler(pipelineManager, containerManager); - NodeReportHandler nodeReportHandler = - new NodeReportHandler(scmNodeManager); - PipelineReportHandler pipelineReportHandler = - new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf); - CommandStatusReportHandler cmdStatusReportHandler = - new CommandStatusReportHandler(); - - NewNodeHandler newNodeHandler = new NewNodeHandler(pipelineManager, conf); - StaleNodeHandler staleNodeHandler = - new StaleNodeHandler(scmNodeManager, pipelineManager, conf); - DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager, - pipelineManager, containerManager); - NonHealthyToHealthyNodeHandler nonHealthyToHealthyNodeHandler = - new NonHealthyToHealthyNodeHandler(pipelineManager, conf); - ContainerActionsHandler actionsHandler = new ContainerActionsHandler(); - PendingDeleteHandler pendingDeleteHandler = - new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService()); - - ContainerReportHandler containerReportHandler = - new ContainerReportHandler(scmNodeManager, containerManager); - - IncrementalContainerReportHandler incrementalContainerReportHandler = - new IncrementalContainerReportHandler( - scmNodeManager, containerManager); - - PipelineActionHandler pipelineActionHandler = - new PipelineActionHandler(pipelineManager, conf); - - - RetriableDatanodeEventWatcher retriableDatanodeEventWatcher = - new RetriableDatanodeEventWatcher<>( - SCMEvents.RETRIABLE_DATANODE_COMMAND, - SCMEvents.DELETE_BLOCK_STATUS, - commandWatcherLeaseManager); - retriableDatanodeEventWatcher.start(eventQueue); - - scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys - .OZONE_ADMINISTRATORS); - scmUsername = 
UserGroupInformation.getCurrentUser().getUserName(); - if (!scmAdminUsernames.contains(scmUsername)) { - scmAdminUsernames.add(scmUsername); - } - - datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this, - eventQueue); - blockProtocolServer = new SCMBlockProtocolServer(conf, this); - clientProtocolServer = new SCMClientProtocolServer(conf, this); - httpServer = new StorageContainerManagerHttpServer(conf); - - safeModeHandler = new SafeModeHandler(configuration, - clientProtocolServer, scmBlockManager, replicationManager, - pipelineManager); - - eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager); - eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager); - eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler); - eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler); - eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT, - incrementalContainerReportHandler); - eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler); - eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler); - eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler); - eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler); - eventQueue.addHandler(SCMEvents.NON_HEALTHY_TO_HEALTHY_NODE, - nonHealthyToHealthyNodeHandler); - eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler); - eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler); - eventQueue - .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler); - eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS, - (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog()); - eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler); - eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler); - eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler); - registerMXBean(); - registerMetricsSource(this); - } - - /** - * This function initializes the following managers. If the configurator - * specifies a value, we will use it, else we will use the default value. - * - * Node Manager - * Pipeline Manager - * Container Manager - * Block Manager - * Replication Manager - * Safe Mode Manager - * - * @param conf - Ozone Configuration. - * @param configurator - A customizer which allows different managers to be - * used if needed. - * @throws IOException - on Failure. 
- */ - private void initializeSystemManagers(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException { - if (configurator.getNetworkTopology() != null) { - clusterMap = configurator.getNetworkTopology(); - } else { - clusterMap = new NetworkTopologyImpl(conf); - } - - if(configurator.getScmNodeManager() != null) { - scmNodeManager = configurator.getScmNodeManager(); - } else { - scmNodeManager = new SCMNodeManager( - conf, scmStorageConfig, eventQueue, clusterMap); - } - - SCMContainerPlacementMetrics placementMetrics = - SCMContainerPlacementMetrics.create(); - ContainerPlacementPolicy containerPlacementPolicy = - ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager, - clusterMap, true, placementMetrics); - - if (configurator.getPipelineManager() != null) { - pipelineManager = configurator.getPipelineManager(); - } else { - pipelineManager = - new SCMPipelineManager(conf, scmNodeManager, eventQueue, - grpcTlsConfig); - } - - if (configurator.getContainerManager() != null) { - containerManager = configurator.getContainerManager(); - } else { - containerManager = new SCMContainerManager( - conf, scmNodeManager, pipelineManager, eventQueue); - } - - if (configurator.getScmBlockManager() != null) { - scmBlockManager = configurator.getScmBlockManager(); - } else { - scmBlockManager = new BlockManagerImpl(conf, this); - } - if (configurator.getReplicationManager() != null) { - replicationManager = configurator.getReplicationManager(); - } else { - replicationManager = new ReplicationManager( - conf.getObject(ReplicationManagerConfiguration.class), - containerManager, - containerPlacementPolicy, - eventQueue, - new LockManager<>(conf)); - } - if(configurator.getScmSafeModeManager() != null) { - scmSafeModeManager = configurator.getScmSafeModeManager(); - } else { - scmSafeModeManager = new SCMSafeModeManager(conf, - containerManager.getContainers(), pipelineManager, eventQueue); - } - } - - /** - * If security is enabled we need to have the Security Protocol and a - * default CA. This function initializes those values based on the - * configurator. - * - * @param conf - Config - * @param configurator - configurator - * @throws IOException - on Failure - * @throws AuthenticationException - on Failure - */ - private void initializeCAnSecurityProtocol(OzoneConfiguration conf, - SCMConfigurator configurator) throws IOException { - if(configurator.getCertificateServer() != null) { - this.certificateServer = configurator.getCertificateServer(); - } else { - // This assumes that SCM init has run, and DB metadata stores are created. - certificateServer = initializeCertificateServer( - getScmStorageConfig().getClusterID(), - getScmStorageConfig().getScmId()); - } - // TODO: Support Intermediary CAs in future. - certificateServer.init(new SecurityConfig(conf), - CertificateServer.CAType.SELF_SIGNED_CA); - securityProtocolServer = new SCMSecurityProtocolServer(conf, - certificateServer); - - grpcTlsConfig = RatisHelper - .createTlsClientConfigForSCM(new SecurityConfig(conf), - certificateServer); - } - - /** - * Init the metadata store based on the configurator. 
- * @param conf - Config - * @param configurator - configurator - * @throws IOException - on Failure - */ - private void initalizeMetadataStore(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException { - if(configurator.getMetadataStore() != null) { - scmMetadataStore = configurator.getMetadataStore(); - } else { - scmMetadataStore = new SCMMetadataStoreRDBImpl(conf); - if (scmMetadataStore == null) { - throw new SCMException("Unable to initialize metadata store", - ResultCodes.SCM_NOT_INITIALIZED); - } - } - } - - /** - * Login as the configured user for SCM. - * - * @param conf - */ - private void loginAsSCMUser(Configuration conf) - throws IOException, AuthenticationException { - if (LOG.isDebugEnabled()) { - LOG.debug("Ozone security is enabled. Attempting login for SCM user. " - + "Principal: {}, keytab: {}", - conf.get(HDDS_SCM_KERBEROS_PRINCIPAL_KEY), - conf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY)); - } - - if (SecurityUtil.getAuthenticationMethod(conf).equals( - AuthenticationMethod.KERBEROS)) { - UserGroupInformation.setConfiguration(conf); - InetSocketAddress socAddr = HddsServerUtil - .getScmBlockClientBindAddress(conf); - SecurityUtil.login(conf, HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, - HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); - } else { - throw new AuthenticationException(SecurityUtil.getAuthenticationMethod( - conf) + " authentication method not support. " - + "SCM user login failed."); - } - LOG.info("SCM login successful."); - } - - - /** - * This function creates/initializes a certificate server as needed. - * This function is idempotent, so calling this again and again after the - * server is initialized is not a problem. - * - * @param clusterID - Cluster ID - * @param scmID - SCM ID - */ - private CertificateServer initializeCertificateServer(String clusterID, - String scmID) throws IOException { - // TODO: Support Certificate Server loading via Class Name loader. - // So it is easy to use different Certificate Servers if needed. - String subject = "scm@" + InetAddress.getLocalHost().getHostName(); - if(this.scmMetadataStore == null) { - LOG.error("Cannot initialize Certificate Server without a valid meta " + - "data layer."); - throw new SCMException("Cannot initialize CA without a valid metadata " + - "store", ResultCodes.SCM_NOT_INITIALIZED); - } - SCMCertStore certStore = new SCMCertStore(this.scmMetadataStore); - return new DefaultCAServer(subject, clusterID, scmID, certStore); - } - - /** - * Builds a message for logging startup information about an RPC server. - * - * @param description RPC server description - * @param addr RPC server listening address - * @return server startup message - */ - public static String buildRpcServerStartMessage(String description, - InetSocketAddress addr) { - return addr != null - ? String.format("%s is listening at %s", description, addr.toString()) - : String.format("%s not started", description); - } - - /** - * Starts an RPC server, if configured. 
- * - * @param conf configuration - * @param addr configured address of RPC server - * @param protocol RPC protocol provided by RPC server - * @param instance RPC protocol implementation instance - * @param handlerCount RPC server handler count - * @return RPC server - * @throws IOException if there is an I/O error while creating RPC server - */ - public static RPC.Server startRpcServer( - OzoneConfiguration conf, - InetSocketAddress addr, - Class protocol, - BlockingService instance, - int handlerCount) - throws IOException { - RPC.Server rpcServer = - new RPC.Builder(conf) - .setProtocol(protocol) - .setInstance(instance) - .setBindAddress(addr.getHostString()) - .setPort(addr.getPort()) - .setNumHandlers(handlerCount) - .setVerbose(false) - .setSecretManager(null) - .build(); - - DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer); - return rpcServer; - } - - /** - * Create an SCM instance based on the supplied configuration. - * - * @param conf HDDS configuration - * @return SCM instance - * @throws IOException, AuthenticationException - */ - public static StorageContainerManager createSCM( - OzoneConfiguration conf) - throws IOException, AuthenticationException { - if (!HddsUtils.isHddsEnabled(conf)) { - System.err.println( - "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" + - " is set to false"); - System.exit(1); - } - return new StorageContainerManager(conf); - } - - /** - * Routine to set up the Version info for StorageContainerManager. - * - * @param conf OzoneConfiguration - * @return true if SCM initialization is successful, false otherwise. - * @throws IOException if init fails due to I/O error - */ - public static boolean scmInit(OzoneConfiguration conf, - String clusterId) throws IOException { - SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf); - StorageState state = scmStorageConfig.getState(); - if (state != StorageState.INITIALIZED) { - try { - if (clusterId != null && !clusterId.isEmpty()) { - scmStorageConfig.setClusterId(clusterId); - } - scmStorageConfig.initialize(); - System.out.println( - "SCM initialization succeeded." - + "Current cluster id for sd=" - + scmStorageConfig.getStorageDir() - + ";cid=" - + scmStorageConfig.getClusterID()); - return true; - } catch (IOException ioe) { - LOG.error("Could not initialize SCM version file", ioe); - return false; - } - } else { - System.out.println( - "SCM already initialized. Reusing existing" - + " cluster id for sd=" - + scmStorageConfig.getStorageDir() - + ";cid=" - + scmStorageConfig.getClusterID()); - return true; - } - } - - /** - * Initialize SCM metrics. - */ - public static void initMetrics() { - metrics = SCMMetrics.create(); - } - - /** - * Return SCM metrics instance. - */ - public static SCMMetrics getMetrics() { - return metrics == null ? SCMMetrics.create() : metrics; - } - - public SCMStorageConfig getScmStorageConfig() { - return scmStorageConfig; - } - - public SCMDatanodeProtocolServer getDatanodeProtocolServer() { - return datanodeProtocolServer; - } - - public SCMBlockProtocolServer getBlockProtocolServer() { - return blockProtocolServer; - } - - public SCMClientProtocolServer getClientProtocolServer() { - return clientProtocolServer; - } - - public SCMSecurityProtocolServer getSecurityProtocolServer() { - return securityProtocolServer; - } - - /** - * Initialize container reports cache that sent from datanodes. 
- * - * @param conf - */ - private void initContainerReportCache(OzoneConfiguration conf) { - containerReportCache = - CacheBuilder.newBuilder() - .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS) - .maximumSize(Integer.MAX_VALUE) - .removalListener( - new RemovalListener() { - @Override - public void onRemoval( - RemovalNotification - removalNotification) { - synchronized (containerReportCache) { - ContainerStat stat = removalNotification.getValue(); - // remove invalid container report - metrics.decrContainerStat(stat); - if (LOG.isDebugEnabled()) { - LOG.debug("Remove expired container stat entry for " + - "datanode: {}.", removalNotification.getKey()); - } - } - } - }) - .build(); - } - - private void registerMXBean() { - final Map jmxProperties = new HashMap<>(); - jmxProperties.put("component", "ServerRuntime"); - this.scmInfoBeanName = HddsUtils.registerWithJmxProperties( - "StorageContainerManager", "StorageContainerManagerInfo", - jmxProperties, this); - } - - private void registerMetricsSource(SCMMXBean scmMBean) { - scmContainerMetrics = SCMContainerMetrics.create(scmMBean); - } - - private void unregisterMXBean() { - if (this.scmInfoBeanName != null) { - MBeans.unregister(this.scmInfoBeanName); - this.scmInfoBeanName = null; - } - } - - @VisibleForTesting - public ContainerInfo getContainerInfo(long containerID) throws - IOException { - return containerManager.getContainer(ContainerID.valueof(containerID)); - } - - /** - * Returns listening address of StorageLocation Protocol RPC server. - * - * @return listen address of StorageLocation RPC server - */ - @VisibleForTesting - public InetSocketAddress getClientRpcAddress() { - return getClientProtocolServer().getClientRpcAddress(); - } - - @Override - public String getClientRpcPort() { - InetSocketAddress addr = getClientRpcAddress(); - return addr == null ? "0" : Integer.toString(addr.getPort()); - } - - /** - * Returns listening address of StorageDatanode Protocol RPC server. - * - * @return Address where datanode are communicating. - */ - public InetSocketAddress getDatanodeRpcAddress() { - return getDatanodeProtocolServer().getDatanodeRpcAddress(); - } - - @Override - public String getDatanodeRpcPort() { - InetSocketAddress addr = getDatanodeRpcAddress(); - return addr == null ? "0" : Integer.toString(addr.getPort()); - } - - /** - * Start service. - */ - public void start() throws IOException { - LOG.info( - buildRpcServerStartMessage( - "StorageContainerLocationProtocol RPC server", - getClientRpcAddress())); - - ms = HddsUtils.initializeMetrics(configuration, "StorageContainerManager"); - - commandWatcherLeaseManager.start(); - getClientProtocolServer().start(); - - LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " + - "server", getBlockProtocolServer().getBlockRpcAddress())); - getBlockProtocolServer().start(); - - LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " + - "server", getDatanodeProtocolServer().getDatanodeRpcAddress())); - getDatanodeProtocolServer().start(); - if (getSecurityProtocolServer() != null) { - getSecurityProtocolServer().start(); - } - - httpServer.start(); - scmBlockManager.start(); - - // Start jvm monitor - jvmPauseMonitor = new JvmPauseMonitor(); - jvmPauseMonitor.init(configuration); - jvmPauseMonitor.start(); - - setStartTime(); - } - - /** - * Stop service. 
- */ - public void stop() { - - try { - LOG.info("Stopping Replication Manager Service."); - replicationManager.stop(); - } catch (Exception ex) { - LOG.error("Replication manager service stop failed.", ex); - } - - try { - LOG.info("Stopping Lease Manager of the command watchers"); - commandWatcherLeaseManager.shutdown(); - } catch (Exception ex) { - LOG.error("Lease Manager of the command watchers stop failed"); - } - - try { - LOG.info("Stopping datanode service RPC server"); - getDatanodeProtocolServer().stop(); - - } catch (Exception ex) { - LOG.error("Storage Container Manager datanode RPC stop failed.", ex); - } - - try { - LOG.info("Stopping block service RPC server"); - getBlockProtocolServer().stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager blockRpcServer stop failed.", ex); - } - - try { - LOG.info("Stopping the StorageContainerLocationProtocol RPC server"); - getClientProtocolServer().stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager clientRpcServer stop failed.", ex); - } - - try { - LOG.info("Stopping Storage Container Manager HTTP server."); - httpServer.stop(); - } catch (Exception ex) { - LOG.error("Storage Container Manager HTTP server stop failed.", ex); - } - - if (getSecurityProtocolServer() != null) { - getSecurityProtocolServer().stop(); - } - - try { - LOG.info("Stopping Block Manager Service."); - scmBlockManager.stop(); - } catch (Exception ex) { - LOG.error("SCM block manager service stop failed.", ex); - } - - if (containerReportCache != null) { - containerReportCache.invalidateAll(); - containerReportCache.cleanUp(); - } - - if (metrics != null) { - metrics.unRegister(); - } - - unregisterMXBean(); - if (scmContainerMetrics != null) { - scmContainerMetrics.unRegister(); - } - - // Event queue must be stopped before the DB store is closed at the end. - try { - LOG.info("Stopping SCM Event Queue."); - eventQueue.close(); - } catch (Exception ex) { - LOG.error("SCM Event Queue stop failed", ex); - } - - if (jvmPauseMonitor != null) { - jvmPauseMonitor.stop(); - } - IOUtils.cleanupWithLogger(LOG, containerManager); - IOUtils.cleanupWithLogger(LOG, pipelineManager); - - try { - scmMetadataStore.stop(); - } catch (Exception ex) { - LOG.error("SCM Metadata store stop failed", ex); - } - - if (ms != null) { - ms.stop(); - } - - scmSafeModeManager.stop(); - } - - /** - * Wait until service has completed shutdown. - */ - public void join() { - try { - getBlockProtocolServer().join(); - getClientProtocolServer().join(); - getDatanodeProtocolServer().join(); - if (getSecurityProtocolServer() != null) { - getSecurityProtocolServer().join(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.info("Interrupted during StorageContainerManager join."); - } - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param nodestate Healthy, Dead etc. - * @return int -- count - */ - public int getNodeCount(NodeState nodestate) { - return scmNodeManager.getNodeCount(nodestate); - } - - /** - * Returns SCM container manager. - */ - @VisibleForTesting - public ContainerManager getContainerManager() { - return containerManager; - } - - /** - * Returns node manager. - * - * @return - Node Manager - */ - @VisibleForTesting - public NodeManager getScmNodeManager() { - return scmNodeManager; - } - - /** - * Returns pipeline manager. 
- * - * @return - Pipeline Manager - */ - @VisibleForTesting - public PipelineManager getPipelineManager() { - return pipelineManager; - } - - @VisibleForTesting - public BlockManager getScmBlockManager() { - return scmBlockManager; - } - - @VisibleForTesting - public SafeModeHandler getSafeModeHandler() { - return safeModeHandler; - } - - @VisibleForTesting - public SCMSafeModeManager getScmSafeModeManager() { - return scmSafeModeManager; - } - - @VisibleForTesting - public ReplicationManager getReplicationManager() { - return replicationManager; - } - - public void checkAdminAccess(String remoteUser) throws IOException { - if (remoteUser != null) { - if (!scmAdminUsernames.contains(remoteUser)) { - throw new IOException( - "Access denied for user " + remoteUser + ". Superuser privilege " + - "is required."); - } - } - } - - /** - * Invalidate container stat entry for given datanode. - * - * @param datanodeUuid - */ - public void removeContainerReport(String datanodeUuid) { - synchronized (containerReportCache) { - containerReportCache.invalidate(datanodeUuid); - } - } - - /** - * Get container stat of specified datanode. - * - * @param datanodeUuid - * @return - */ - public ContainerStat getContainerReport(String datanodeUuid) { - ContainerStat stat = null; - synchronized (containerReportCache) { - stat = containerReportCache.getIfPresent(datanodeUuid); - } - - return stat; - } - - /** - * Returns a view of the container stat entries. Modifications made to the - * map will directly - * affect the cache. - * - * @return - */ - public ConcurrentMap getContainerReportCache() { - return containerReportCache.asMap(); - } - - @Override - public Map getContainerReport() { - Map id2StatMap = new HashMap<>(); - synchronized (containerReportCache) { - ConcurrentMap map = containerReportCache.asMap(); - for (Map.Entry entry : map.entrySet()) { - id2StatMap.put(entry.getKey(), entry.getValue().toJsonString()); - } - } - - return id2StatMap; - } - - /** - * Returns live safe mode container threshold. - * - * @return String - */ - @Override - public double getSafeModeCurrentContainerThreshold() { - return getCurrentContainerThreshold(); - } - - /** - * Returns safe mode status. - * @return boolean - */ - @Override - public boolean isInSafeMode() { - return scmSafeModeManager.getInSafeMode(); - } - - /** - * Returns EventPublisher. - */ - public EventPublisher getEventQueue() { - return eventQueue; - } - - /** - * Force SCM out of safe mode. - */ - public boolean exitSafeMode() { - scmSafeModeManager.exitSafeMode(eventQueue); - return true; - } - - @VisibleForTesting - public double getCurrentContainerThreshold() { - return scmSafeModeManager.getCurrentContainerThreshold(); - } - - @Override - public Map getContainerStateCount() { - Map nodeStateCount = new HashMap<>(); - for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { - nodeStateCount.put(state.toString(), - containerManager.getContainerCountByState(state)); - } - return nodeStateCount; - } - - /** - * Returns the SCM metadata Store. - * @return SCMMetadataStore - */ - public SCMMetadataStore getScmMetadataStore() { - return scmMetadataStore; - } - - /** - * Returns the SCM network topology cluster. 
- * @return NetworkTopology - */ - public NetworkTopology getClusterMap() { - return this.clusterMap; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java deleted file mode 100644 index dce2a45e87c..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -import java.io.IOException; - -/** - * HttpServer2 wrapper for the Ozone Storage Container Manager. - */ -public class StorageContainerManagerHttpServer extends BaseHttpServer { - - public StorageContainerManagerHttpServer(Configuration conf) - throws IOException { - super(conf, "scm"); - } - - @Override protected String getHttpAddressKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY; - } - - @Override protected String getHttpBindHostKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY; - } - - @Override protected String getHttpsAddressKey() { - return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY; - } - - @Override protected String getHttpsBindHostKey() { - return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY; - } - - @Override protected String getBindHostDefault() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT; - } - - @Override protected int getHttpBindPortDefault() { - return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT; - } - - @Override protected int getHttpsBindPortDefault() { - return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT; - } - - @Override protected String getKeytabFile() { - return ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; - } - - @Override protected String getSpnegoPrincipal() { - return ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; - } - - @Override protected String getEnabledKey() { - return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY; - } - -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java deleted file mode 100644 index 62910f2314a..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ /dev/null @@ -1,153 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *
http://www.apache.org/licenses/LICENSE-2.0 - * - *
Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.common.StorageInfo; -import org.apache.hadoop.util.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.io.IOException; - -/** - * This class provides a command line interface to start the SCM - * using Picocli. - */ - -@Command(name = "ozone scm", - hidden = true, description = "Start or initialize the scm server.", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class StorageContainerManagerStarter extends GenericCli { - - private OzoneConfiguration conf; - private SCMStarterInterface receiver; - private static final Logger LOG = - LoggerFactory.getLogger(StorageContainerManagerStarter.class); - - public static void main(String[] args) throws Exception { - TracingUtil.initTracing("StorageContainerManager"); - new StorageContainerManagerStarter( - new StorageContainerManagerStarter.SCMStarterHelper()).run(args); - } - - public StorageContainerManagerStarter(SCMStarterInterface receiverObj) { - super(); - receiver = receiverObj; - } - - @Override - public Void call() throws Exception { - commonInit(); - startScm(); - return null; - } - - /** - * This function implements a sub-command to generate a new - * cluster ID from the command line. - */ - @CommandLine.Command(name = "--genclusterid", - customSynopsis = "ozone scm [global options] --genclusterid [options]", - hidden = false, - description = "Generate a new Cluster ID", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) - public void generateClusterId() { - commonInit(); - System.out.println("Generating new cluster id:"); - System.out.println(receiver.generateClusterId()); - } - - /** - * This function implements a sub-command to allow the SCM to be - * initialized from the command line. - * - * @param clusterId - Cluster ID to use when initializing. If null, - * a random ID will be generated and used. - */ - @CommandLine.Command(name = "--init", - customSynopsis = "ozone scm [global options] --init [options]", - hidden = false, - description = "Initialize the SCM if not already initialized", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) - public void initScm(@CommandLine.Option(names = { "--clusterid" }, - description = "Optional: The cluster id to use when formatting SCM", - paramLabel = "id") String clusterId) - throws Exception { - commonInit(); - boolean result = receiver.init(conf, clusterId); - if (!result) { - throw new IOException("scm init failed"); - } - } - - /** - * This function is used by the command line to start the SCM. - */ - private void startScm() throws Exception { - receiver.start(conf); - } - - /** - * This function should be called by each command to ensure the configuration - * is set and print the startup banner message. 
- */ - private void commonInit() { - conf = createOzoneConfiguration(); - - String[] originalArgs = getCmd().getParseResult().originalArgs() - .toArray(new String[0]); - StringUtils.startupShutdownMessage(StorageContainerManager.class, - originalArgs, LOG); - } - - /** - * This static class wraps the external dependencies needed for this command - * to execute its tasks. This allows the dependency to be injected for unit - * testing. - */ - static class SCMStarterHelper implements SCMStarterInterface { - - public void start(OzoneConfiguration conf) throws Exception { - StorageContainerManager stm = StorageContainerManager.createSCM(conf); - stm.start(); - stm.join(); - } - - public boolean init(OzoneConfiguration conf, String clusterId) - throws IOException{ - return StorageContainerManager.scmInit(conf, clusterId); - } - - public String generateClusterId() { - return StorageInfo.newClusterID(); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java deleted file mode 100644 index fe07272bb6c..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license - * agreements. See the NOTICE file distributed with this work for additional - * information regarding - * copyright ownership. The ASF licenses this file to you under the Apache - * License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a - * copy of the License at - * - *

http://www.apache.org/licenses/LICENSE-2.0 - * - *

Unless required by applicable law or agreed to in writing, software - * distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR - * CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java deleted file mode 100644 index 2a50bca9dc8..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; - -import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.CommandStatusEvent; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventWatcher; -import org.apache.hadoop.ozone.lease.LeaseManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * EventWatcher for start events and completion events with payload of type - * RetriablePayload and RetriableCompletionPayload respectively. - */ -public class RetriableDatanodeEventWatcher - extends EventWatcher { - - public static final Logger LOG = - LoggerFactory.getLogger(RetriableDatanodeEventWatcher.class); - - public RetriableDatanodeEventWatcher(Event startEvent, - Event completionEvent, LeaseManager leaseManager) { - super(startEvent, completionEvent, leaseManager); - } - - @Override - protected void onTimeout(EventPublisher publisher, - CommandForDatanode payload) { - LOG.info("RetriableDatanodeCommand type={} with id={} timed out. Retrying.", - payload.getCommand().getType(), payload.getId()); - //put back to the original queue - publisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, payload); - } - - @Override - protected void onFinished(EventPublisher publisher, - CommandForDatanode payload) { - - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java deleted file mode 100644 index b1d28386a4f..00000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocol.commands; diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html deleted file mode 100644 index 1c5a3345751..00000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html +++ /dev/null @@ -1,76 +0,0 @@ - - - - - - - - - - - HDFS Storage Container Manager - - - - - - - - - - - -

diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html deleted file mode 100644 index 2666f81b5ff..00000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html +++ /dev/null @@ -1,20 +0,0 @@
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html deleted file mode 100644 index 38ce638f4d1..00000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html +++ /dev/null @@ -1,44 +0,0 @@

Node counts
{{typestat.key}} {{typestat.value}}
Status
Client Rpc port {{$ctrl.overview.jmx.ClientRpcPort}}
Datanode Rpc port {{$ctrl.overview.jmx.DatanodeRpcPort}}
Node Manager: Safe mode status {{$ctrl.scmmetrics.InSafeMode}}
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js deleted file mode 100644 index 2942a561dea..00000000000 --- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -(function () { - "use strict"; - angular.module('scm', ['ozone', 'nvd3']); - - angular.module('scm').component('scmOverview', { - templateUrl: 'scm-overview.html', - require: { - overview: "^overview" - }, - controller: function ($http) { - var ctrl = this; - $http.get("jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo") - .then(function (result) { - ctrl.nodemanagermetrics = result.data.beans[0]; - }); - $http.get("jmx?qry=Hadoop:service=StorageContainerManager,name=StorageContainerManagerInfo,component=ServerRuntime") - .then(function (result) { - ctrl.scmmetrics = result.data.beans[0]; - }); - - var statusSortOrder = { - "HEALTHY": "a", - "STALE": "b", - "DEAD": "c", - "UNKNOWN": "z", - "DECOMMISSIONING": "x", - "DECOMMISSIONED": "y" - }; - ctrl.nodeOrder = function (v1, v2) { - //status with non defined sort order will be "undefined" - return ("" + statusSortOrder[v1.value]).localeCompare("" + statusSortOrder[v2.value]) - } - - } - }); - -})(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java deleted file mode 100644 index 6e01e5354b6..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java +++ /dev/null @@ -1,308 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; - -import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test the HDDS server side utilities. - */ -public class HddsServerUtilTest { - - @Rule - public Timeout timeout = new Timeout(300000); - - @Rule - public ExpectedException thrown= ExpectedException.none(); - - /** - * Verify DataNode endpoint lookup failure if neither the client nor - * datanode endpoint are configured. - */ - @Test - public void testMissingScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); - thrown.expect(IllegalArgumentException.class); - HddsServerUtil.getScmAddressForDataNodes(conf); - } - - /** - * Verify that the datanode endpoint is parsed correctly. - * This tests the logic used by the DataNodes to determine which address - * to connect to. - */ - @Test - public void testGetScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Next try a client address with just a host name and port. - // Verify the port is ignored and the default DataNode port is used. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and - // OZONE_SCM_DATANODE_ADDRESS_KEY. - // Verify that the latter overrides and the port number is still the - // default. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8"); - addr = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and - // OZONE_SCM_DATANODE_ADDRESS_KEY. - // Verify that the latter overrides and the port number from the latter is - // used. 
- conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200"); - addr = HddsServerUtil.getScmAddressForDataNodes(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(200)); - } - - - /** - * Verify that the client endpoint bind address is computed correctly. - * This tests the logic used by the SCM to determine its own bind address. - */ - @Test - public void testScmClientBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY - // is set differently. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY - // is set differently. The port number from OZONE_SCM_CLIENT_ADDRESS_KEY - // should be respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(100)); - - // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected. - // Port number should be default if none is specified via - // OZONE_SCM_DATANODE_ADDRESS_KEY. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected. - // Port number from OZONE_SCM_CLIENT_ADDRESS_KEY should be - // respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmClientBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(100)); - } - - /** - * Verify that the DataNode endpoint bind address is computed correctly. - * This tests the logic used by the SCM to determine its own bind address. - */ - @Test - public void testScmDataNodeBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY - // is set differently. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY - // is set differently. The port number from OZONE_SCM_DATANODE_ADDRESS_KEY - // should be respected. 
- conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(200)); - - // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected. - // Port number should be default if none is specified via - // OZONE_SCM_DATANODE_ADDRESS_KEY. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is( - ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); - - // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected. - // Port number from OZONE_SCM_DATANODE_ADDRESS_KEY should be - // respected. - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = HddsServerUtil.getScmDataNodeBindAddress(conf); - assertThat(addr.getHostString(), is("5.6.7.8")); - assertThat(addr.getPort(), is(200)); - } - - - - @Test - public void testGetSCMAddresses() { - final Configuration conf = new OzoneConfiguration(); - Collection addresses = null; - InetSocketAddress addr = null; - Iterator it = null; - - // Verify valid IP address setup - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "1.2.3.4"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("1.2.3.4")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT)); - - // Verify valid hostname setup - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("scm1")); - assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT)); - - // Verify valid hostname and port - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(1)); - addr = addresses.iterator().next(); - assertThat(addr.getHostName(), is("scm1")); - assertThat(addr.getPort(), is(1234)); - - final HashMap hostsAndPorts = - new HashMap(); - hostsAndPorts.put("scm1", 1234); - hostsAndPorts.put("scm2", 2345); - hostsAndPorts.put("scm3", 3456); - - // Verify multiple hosts and port - conf.setStrings( - ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456"); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(3)); - it = addresses.iterator(); - HashMap expected1 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { - InetSocketAddress current = it.next(); - assertTrue(expected1.remove(current.getHostName(), - current.getPort())); - } - assertTrue(expected1.isEmpty()); - - // Verify names with spaces - conf.setStrings( - ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 "); - addresses = getSCMAddresses(conf); - assertThat(addresses.size(), is(3)); - it = addresses.iterator(); - HashMap expected2 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { - InetSocketAddress current = it.next(); - assertTrue(expected2.remove(current.getHostName(), - current.getPort())); - } - 
assertTrue(expected2.isEmpty()); - - // Verify empty value - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, ""); - try { - addresses = getSCMAddresses(conf); - fail("Empty value should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify invalid hostname - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "s..x..:1234"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid hostname should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify invalid port - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid port should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - - // Verify a mixed case (valid and invalid value both appears) - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234, scm:xyz"); - try { - addresses = getSCMAddresses(conf); - fail("An invalid value should cause an IllegalArgumentException"); - } catch (Exception e) { - assertTrue(e instanceof IllegalArgumentException); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java deleted file mode 100644 index 38f78ad8153..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.security.authentication.client.AuthenticationException; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; - -/** - * Stateless helper functions for Hdds tests. - */ -public final class HddsTestUtils { - - private HddsTestUtils() { - } - - /** - * Create Command Status report object. - * - * @param numOfContainers number of containers to be included in report. 
- * @return CommandStatusReportsProto - */ - public static NodeRegistrationContainerReport - createNodeRegistrationContainerReport(int numOfContainers) { - return new NodeRegistrationContainerReport( - TestUtils.randomDatanodeDetails(), - TestUtils.getRandomContainerReports(numOfContainers)); - } - - /** - * Create NodeRegistrationContainerReport object. - * - * @param dnContainers List of containers to be included in report - * @return NodeRegistrationContainerReport - */ - public static NodeRegistrationContainerReport - createNodeRegistrationContainerReport(List dnContainers) { - List - containers = new ArrayList<>(); - dnContainers.forEach(c -> { - containers.add(TestUtils.getRandomContainerInfo(c.getContainerID())); - }); - return new NodeRegistrationContainerReport( - TestUtils.randomDatanodeDetails(), - TestUtils.getContainerReports(containers)); - } - - public static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setBoolean(OZONE_ENABLED, true); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(conf); - } - - /** - * Creates list of ContainerInfo. - * - * @param numContainers number of ContainerInfo to be included in list. - * @return {@literal List} - */ - public static List getContainerInfo(int numContainers) { - List containerInfoList = new ArrayList<>(); - for (int i = 0; i < numContainers; i++) { - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - containerInfoList.add(builder - .setContainerID(RandomUtils.nextLong()) - .build()); - } - return containerInfoList; - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java deleted file mode 100644 index abb96684076..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsWhiteboxTestUtils.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import java.lang.reflect.Field; - - -/** - * This class includes some functions copied from Mockito's - * Whitebox class for portability reasons. - * - * Whitebox methods are accessed differently in different - * versions of Hadoop. Specifically the availability of the class - * changed from Apache Hadoop 3.1.0 to Hadoop 3.2.0. - * - * Duplicating the test code is ugly but it allows building - * HDDS portably. - */ -public final class HddsWhiteboxTestUtils { - - /** - * Private constructor to disallow construction. - */ - private HddsWhiteboxTestUtils() { - } - - /** - * Get the field of the target object. - * @param target target object - * @param field field name - * @return the field of the object - */ - public static Object getInternalState(Object target, String field) { - Class c = target.getClass(); - try { - Field f = getFieldFromHierarchy(c, field); - f.setAccessible(true); - return f.get(target); - } catch (Exception e) { - throw new RuntimeException( - "Unable to set internal state on a private field.", e); - } - } - - /** - * Set the field of the target object. - * @param target target object - * @param field field name - * @param value value to set - */ - public static void setInternalState( - Object target, String field, Object value) { - Class c = target.getClass(); - try { - Field f = getFieldFromHierarchy(c, field); - f.setAccessible(true); - f.set(target, value); - } catch (Exception e) { - throw new RuntimeException( - "Unable to set internal state on a private field.", e); - } - } - - private static Field getFieldFromHierarchy(Class clazz, String field) { - Field f = getField(clazz, field); - while (f == null && clazz != Object.class) { - clazz = clazz.getSuperclass(); - f = getField(clazz, field); - } - if (f == null) { - throw new RuntimeException( - "You want me to set value to this field: '" + field + - "' on this class: '" + clazz.getSimpleName() + - "' but this field is not declared within hierarchy " + - "of this class!"); - } - return f; - } - - private static Field getField(Class clazz, String field) { - try { - return clazz.getDeclaredField(field); - } catch (NoSuchFieldException e) { - return null; - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java deleted file mode 100644 index 54cc398e2f7..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ /dev/null @@ -1,229 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.test.PathUtils; - -import org.apache.commons.io.FileUtils; -import static org.junit.Assert.assertTrue; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.net.InetSocketAddress; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertEquals; - -/** - * Unit tests for {@link HddsServerUtil}. - */ -public class TestHddsServerUtils { - public static final Logger LOG = LoggerFactory.getLogger( - TestHddsServerUtils.class); - - @Rule - public Timeout timeout = new Timeout(300_000); - - @Rule - public ExpectedException thrown= ExpectedException.none(); - - /** - * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port. - */ - @Test - @SuppressWarnings("StringSplitter") - public void testGetDatanodeAddressWithPort() { - final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), Integer.parseInt(scmHost.split(":")[1])); - } - - /** - * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY without port. - */ - @Test - public void testGetDatanodeAddressWithoutPort() { - final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); - } - - /** - * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to - * OZONE_SCM_CLIENT_ADDRESS_KEY. - */ - @Test - public void testDatanodeAddressFallbackToClientNoPort() { - final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); - } - - /** - * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to - * OZONE_SCM_CLIENT_ADDRESS_KEY. Port number defined by - * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored. 
- */ - @Test - @SuppressWarnings("StringSplitter") - public void testDatanodeAddressFallbackToClientWithPort() { - final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT); - } - - /** - * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY - * are undefined, test fallback to OZONE_SCM_NAMES. - */ - @Test - public void testDatanodeAddressFallbackToScmNamesNoPort() { - final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); - } - - /** - * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY - * are undefined, test fallback to OZONE_SCM_NAMES. Port number - * defined by OZONE_SCM_NAMES should be ignored. - */ - @Test - @SuppressWarnings("StringSplitter") - public void testDatanodeAddressFallbackToScmNamesWithPort() { - final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = - HddsServerUtil.getScmAddressForDataNodes(conf); - assertEquals(address.getHostName(), scmHost.split(":")[0]); - assertEquals(OZONE_SCM_DATANODE_PORT_DEFAULT, address.getPort()); - } - - /** - * getScmAddressForDataNodes should fail when OZONE_SCM_NAMES has - * multiple addresses. - */ - @Test - public void testClientFailsWithMultipleScmNames() { - final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - thrown.expect(IllegalArgumentException.class); - HddsServerUtil.getScmAddressForDataNodes(conf); - } - - /** - * Test {@link ServerUtils#getScmDbDir}. - */ - @Test - public void testGetScmDbDir() { - final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); - final File dbDir = new File(testDir, "scmDbDir"); - final File metaDir = new File(testDir, "metaDir"); // should be ignored. - final Configuration conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - - try { - assertEquals(dbDir, ServerUtils.getScmDbDir(conf)); - assertTrue(dbDir.exists()); // should have been created. - } finally { - FileUtils.deleteQuietly(dbDir); - } - } - - /** - * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS - * when OZONE_SCM_DB_DIRS is undefined. - */ - @Test - public void testGetScmDbDirWithFallback() { - final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); - final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - try { - assertEquals(metaDir, ServerUtils.getScmDbDir(conf)); - assertTrue(metaDir.exists()); // should have been created. 
- } finally { - FileUtils.deleteQuietly(metaDir); - } - } - - @Test - public void testNoScmDbDirConfigured() { - thrown.expect(IllegalArgumentException.class); - ServerUtils.getScmDbDir(new OzoneConfiguration()); - } - - @Test - public void testGetStaleNodeInterval() { - final Configuration conf = new OzoneConfiguration(); - - // Reset OZONE_SCM_STALENODE_INTERVAL to 300s that - // larger than max limit value. - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 300, TimeUnit.SECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100); - // the max limit value will be returned - assertEquals(100000, HddsServerUtil.getStaleNodeInterval(conf)); - - // Reset OZONE_SCM_STALENODE_INTERVAL to 10ms that - // smaller than min limit value. - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, - TimeUnit.MILLISECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100); - // the min limit value will be returned - assertEquals(90000, HddsServerUtil.getStaleNodeInterval(conf)); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java deleted file mode 100644 index 0b3edcc5ec1..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.http.HttpConfig.Policy; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URL; -import java.net.URLConnection; -import java.util.Arrays; -import java.util.Collection; - -/** - * Test http server os SCM with various HTTP option. - */ -@RunWith(value = Parameterized.class) -public class TestStorageContainerManagerHttpServer { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName()); - private static String keystoresDir; - private static String sslConfDir; - private static Configuration conf; - private static URLConnectionFactory connectionFactory; - - @Parameters public static Collection policy() { - Object[][] params = new Object[][] { - {HttpConfig.Policy.HTTP_ONLY}, - {HttpConfig.Policy.HTTPS_ONLY}, - {HttpConfig.Policy.HTTP_AND_HTTPS} }; - return Arrays.asList(params); - } - - private final HttpConfig.Policy policy; - - public TestStorageContainerManagerHttpServer(Policy policy) { - super(); - this.policy = policy; - } - - @BeforeClass public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - conf = new Configuration(); - keystoresDir = new File(BASEDIR).getAbsolutePath(); - sslConfDir = KeyStoreTestUtil.getClasspathDir( - TestStorageContainerManagerHttpServer.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); - connectionFactory = - URLConnectionFactory.newDefaultURLConnectionFactory(conf); - conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getClientSSLConfigFileName()); - conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getServerSSLConfigFileName()); - } - - @AfterClass public static void tearDown() throws Exception { - FileUtil.fullyDelete(new File(BASEDIR)); - KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); - } - - @Test public void testHttpPolicy() throws Exception { - conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name()); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "localhost:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0"); - - StorageContainerManagerHttpServer server = null; - try { - server = new StorageContainerManagerHttpServer(conf); - server.start(); - - Assert.assertTrue(implies(policy.isHttpEnabled(), - canAccess("http", server.getHttpAddress()))); - 
Assert.assertTrue(implies(policy.isHttpEnabled() && - !policy.isHttpsEnabled(), - !canAccess("https", server.getHttpsAddress()))); - - Assert.assertTrue(implies(policy.isHttpsEnabled(), - canAccess("https", server.getHttpsAddress()))); - Assert.assertTrue(implies(policy.isHttpsEnabled() && - !policy.isHttpEnabled(), - !canAccess("http", server.getHttpAddress()))); - - } finally { - if (server != null) { - server.stop(); - } - } - } - - private static boolean canAccess(String scheme, InetSocketAddress addr) { - if (addr == null) { - return false; - } - try { - URL url = - new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx"); - URLConnection conn = connectionFactory.openConnection(url); - conn.connect(); - conn.getContent(); - } catch (IOException e) { - return false; - } - return true; - } - - private static boolean implies(boolean a, boolean b) { - return !a || b; - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java deleted file mode 100644 index 37321d76996..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ /dev/null @@ -1,597 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManager; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.security.authentication.client - .AuthenticationException; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ThreadLocalRandom; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; - -/** - * Stateless helper functions to handler scm/datanode connection. - */ -public final class TestUtils { - - private static ThreadLocalRandom random = ThreadLocalRandom.current(); - - private TestUtils() { - } - - /** - * Creates DatanodeDetails with random UUID. 
- * - * @return DatanodeDetails - */ - public static DatanodeDetails randomDatanodeDetails() { - return createDatanodeDetails(UUID.randomUUID()); - } - - /** - * Creates DatanodeDetails with random UUID, specific hostname and network - * location. - * - * @return DatanodeDetails - */ - public static DatanodeDetails createDatanodeDetails(String hostname, - String loc) { - String ipAddress = random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256); - return createDatanodeDetails(UUID.randomUUID().toString(), hostname, - ipAddress, loc); - } - - /** - * Creates DatanodeDetails using the given UUID. - * - * @param uuid Datanode's UUID - * - * @return DatanodeDetails - */ - public static DatanodeDetails createDatanodeDetails(UUID uuid) { - String ipAddress = random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256) - + "." + random.nextInt(256); - return createDatanodeDetails(uuid.toString(), "localhost" + "-" + ipAddress, - ipAddress, null); - } - - /** - * Generates DatanodeDetails from RegisteredCommand. - * - * @param registeredCommand registration response from SCM - * - * @return DatanodeDetails - */ - public static DatanodeDetails getDatanodeDetails( - RegisteredCommand registeredCommand) { - return createDatanodeDetails( - registeredCommand.getDatanode().getUuidString(), - registeredCommand.getDatanode().getHostName(), - registeredCommand.getDatanode().getIpAddress(), - null); - } - - /** - * Creates DatanodeDetails with the given information. - * - * @param uuid Datanode's UUID - * @param hostname hostname of Datanode - * @param ipAddress ip address of Datanode - * - * @return DatanodeDetails - */ - public static DatanodeDetails createDatanodeDetails(String uuid, - String hostname, String ipAddress, String networkLocation) { - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName(hostname) - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort) - .setNetworkLocation(networkLocation); - return builder.build(); - } - - /** - * Creates a random DatanodeDetails and register it with the given - * NodeManager. - * - * @param nodeManager NodeManager - * - * @return DatanodeDetails - */ - public static DatanodeDetails createRandomDatanodeAndRegister( - SCMNodeManager nodeManager) { - return getDatanodeDetails( - nodeManager.register(randomDatanodeDetails(), null, - getRandomPipelineReports())); - } - - /** - * Get specified number of DatanodeDetails and register them with node - * manager. - * - * @param nodeManager node manager to register the datanode ids. - * @param count number of DatanodeDetails needed. - * - * @return list of DatanodeDetails - */ - public static List getListOfRegisteredDatanodeDetails( - SCMNodeManager nodeManager, int count) { - ArrayList datanodes = new ArrayList<>(); - for (int i = 0; i < count; i++) { - datanodes.add(createRandomDatanodeAndRegister(nodeManager)); - } - return datanodes; - } - - /** - * Generates a random NodeReport. 
- * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport() { - return getRandomNodeReport(1); - } - - /** - * Generates random NodeReport with the given number of storage report in it. - * - * @param numberOfStorageReport number of storage report this node report - * should have - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) { - UUID nodeId = UUID.randomUUID(); - return getRandomNodeReport(nodeId, File.separator + nodeId, - numberOfStorageReport); - } - - /** - * Generates random NodeReport for the given nodeId with the given - * base path and number of storage report in it. - * - * @param nodeId datanode id - * @param basePath base path of storage directory - * @param numberOfStorageReport number of storage report - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(UUID nodeId, - String basePath, int numberOfStorageReport) { - List storageReports = new ArrayList<>(); - for (int i = 0; i < numberOfStorageReport; i++) { - storageReports.add(getRandomStorageReport(nodeId, - basePath + File.separator + i)); - } - return createNodeReport(storageReports); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports one or more storage report - * - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - StorageReportProto... reports) { - return createNodeReport(Arrays.asList(reports)); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports storage reports to be included in the node report. - * - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - List reports) { - NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); - nodeReport.addAllStorageReport(reports); - return nodeReport.build(); - } - - /** - * Generates random storage report. - * - * @param nodeId datanode id for which the storage report belongs to - * @param path path of the storage - * - * @return StorageReportProto - */ - public static StorageReportProto getRandomStorageReport(UUID nodeId, - String path) { - return createStorageReport(nodeId, path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { - Preconditions.checkNotNull(nodeId); - Preconditions.checkNotNull(path); - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - srb.setStorageUuid(nodeId.toString()) - .setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ? StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - - /** - * Generates random container reports. - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports() { - return getRandomContainerReports(1); - } - - /** - * Generates random container report with the given number of containers. 
- * - * @param numberOfContainers number of containers to be in container report - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports( - int numberOfContainers) { - List containerInfos = new ArrayList<>(); - for (int i = 0; i < numberOfContainers; i++) { - containerInfos.add(getRandomContainerInfo(i)); - } - return getContainerReports(containerInfos); - } - - - public static PipelineReportsProto getRandomPipelineReports() { - return PipelineReportsProto.newBuilder().build(); - } - - public static PipelineReportFromDatanode getPipelineReportFromDatanode( - DatanodeDetails dn, PipelineID... pipelineIDs) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - reportBuilder.addPipelineReport( - PipelineReport.newBuilder().setPipelineID(pipelineID.getProtobuf())); - } - return new PipelineReportFromDatanode(dn, reportBuilder.build()); - } - - public static PipelineActionsFromDatanode getPipelineActionFromDatanode( - DatanodeDetails dn, PipelineID... pipelineIDs) { - PipelineActionsProto.Builder actionsProtoBuilder = - PipelineActionsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - ClosePipelineInfo closePipelineInfo = - ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf()) - .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED) - .setDetailedReason("").build(); - actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder() - .setClosePipeline(closePipelineInfo) - .setAction(PipelineAction.Action.CLOSE) - .build()); - } - return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build()); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos one or more ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - ContainerReplicaProto... containerInfos) { - return getContainerReports(Arrays.asList(containerInfos)); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos list of ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - List containerInfos) { - ContainerReportsProto.Builder - reportsBuilder = ContainerReportsProto.newBuilder(); - for (ContainerReplicaProto containerInfo : containerInfos) { - reportsBuilder.addReports(containerInfo); - } - return reportsBuilder.build(); - } - - /** - * Generates random ContainerInfo. - * - * @param containerId container id of the ContainerInfo - * - * @return ContainerInfo - */ - public static ContainerReplicaProto getRandomContainerInfo( - long containerId) { - return createContainerInfo(containerId, - OzoneConsts.GB * 5, - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(2), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5)); - } - - /** - * Creates ContainerInfo with the given details. 
- * - * @param containerId id of the container - * @param size size of container - * @param keyCount number of keys - * @param bytesUsed bytes used by the container - * @param readCount number of reads - * @param readBytes bytes read - * @param writeCount number of writes - * @param writeBytes bytes written - * - * @return ContainerInfo - */ - @SuppressWarnings("parameternumber") - public static ContainerReplicaProto createContainerInfo( - long containerId, long size, long keyCount, long bytesUsed, - long readCount, long readBytes, long writeCount, long writeBytes) { - return ContainerReplicaProto.newBuilder() - .setContainerID(containerId) - .setState(ContainerReplicaProto.State.OPEN) - .setSize(size) - .setKeyCount(keyCount) - .setUsed(bytesUsed) - .setReadCount(readCount) - .setReadBytes(readBytes) - .setWriteCount(writeCount) - .setWriteBytes(writeBytes) - .build(); - } - - /** - * Create Command Status report object. - * @return CommandStatusReportsProto - */ - public static CommandStatusReportsProto createCommandStatusReport( - List reports) { - CommandStatusReportsProto.Builder report = CommandStatusReportsProto - .newBuilder(); - report.addAllCmdStatus(reports); - return report.build(); - } - - public static org.apache.hadoop.hdds.scm.container.ContainerInfo - allocateContainer(ContainerManager containerManager) - throws IOException { - return containerManager - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, "root"); - - } - - public static void closeContainer(ContainerManager containerManager, - ContainerID id) throws IOException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.CLOSE); - - } - - /** - * Move the container to Quaise close state. - * @param containerManager - * @param id - * @throws IOException - */ - public static void quasiCloseContainer(ContainerManager containerManager, - ContainerID id) throws IOException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.QUASI_CLOSE); - - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration. The ports used by this StorageContainerManager are - * randomly selected from free ports available. - * - * @param conf OzoneConfiguration - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - return getScm(conf, new SCMConfigurator()); - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration and the configurator. The ports used by this - * StorageContainerManager are randomly selected from free ports available. 
- * - * @param conf OzoneConfiguration - * @param configurator SCMConfigurator - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException, AuthenticationException { - conf.setBoolean(OZONE_ENABLED, true); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return new StorageContainerManager(conf, configurator); - } - - public static ContainerInfo getContainer( - final HddsProtos.LifeCycleState state) { - return new ContainerInfo.Builder() - .setContainerID(RandomUtils.nextLong()) - .setReplicationType(HddsProtos.ReplicationType.RATIS) - .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) - .setState(state) - .setSequenceId(10000L) - .setOwner("TEST") - .build(); - } - - public static Set getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final DatanodeDetails... datanodeDetails) { - return getReplicas(containerId, state, 10000L, datanodeDetails); - } - - public static Set getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final DatanodeDetails... datanodeDetails) { - Set replicas = new HashSet<>(); - for (DatanodeDetails datanode : datanodeDetails) { - replicas.add(getReplicas(containerId, state, - sequenceId, datanode.getUuid(), datanode)); - } - return replicas; - } - - public static ContainerReplica getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final UUID originNodeId, - final DatanodeDetails datanodeDetails) { - return ContainerReplica.newBuilder() - .setContainerID(containerId) - .setContainerState(state) - .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(originNodeId) - .setSequenceId(sequenceId) - .build(); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java deleted file mode 100644 index e5c4766697d..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ /dev/null @@ -1,327 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.block; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; -import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; - -import static org.apache.hadoop.ozone.OzoneConsts.GB; -import static org.apache.hadoop.ozone.OzoneConsts.MB; - - -/** - * Tests for SCM Block Manager. - */ -public class TestBlockManager { - private StorageContainerManager scm; - private SCMContainerManager mapping; - private MockNodeManager nodeManager; - private PipelineManager pipelineManager; - private BlockManagerImpl blockManager; - private File testDir; - private final static long DEFAULT_BLOCK_SIZE = 128 * MB; - private static HddsProtos.ReplicationFactor factor; - private static HddsProtos.ReplicationType type; - private static String containerOwner = "OZONE"; - private static EventQueue eventQueue; - private int numContainerPerOwnerInPipeline; - private OzoneConfiguration conf; - private SafeModeStatus safeModeStatus = new SafeModeStatus(false); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Rule - public TemporaryFolder folder= new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - conf = SCMTestUtils.getConf(); - numContainerPerOwnerInPipeline = conf.getInt( - ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); - - - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().toString()); - - // Override the default Node Manager in SCM with this Mock Node Manager. 
- nodeManager = new MockNodeManager(true, 10); - SCMConfigurator configurator = new SCMConfigurator(); - configurator.setScmNodeManager(nodeManager); - scm = TestUtils.getScm(conf, configurator); - - // Initialize these fields so that the tests can pass. - mapping = (SCMContainerManager) scm.getContainerManager(); - pipelineManager = scm.getPipelineManager(); - blockManager = (BlockManagerImpl) scm.getScmBlockManager(); - - eventQueue = new EventQueue(); - eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, - scm.getSafeModeHandler()); - eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, - scm.getSafeModeHandler()); - CloseContainerEventHandler closeContainerHandler = - new CloseContainerEventHandler(pipelineManager, mapping); - eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler); - if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){ - factor = HddsProtos.ReplicationFactor.THREE; - type = HddsProtos.ReplicationType.RATIS; - } else { - factor = HddsProtos.ReplicationFactor.ONE; - type = HddsProtos.ReplicationType.STAND_ALONE; - } - } - - @After - public void cleanup() throws IOException { - scm.stop(); - } - - @Test - public void testAllocateBlock() throws Exception { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInSafeMode(); - }, 10, 1000 * 5); - AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner, new ExcludeList()); - Assert.assertNotNull(block); - } - - @Test - public void testAllocateBlockInParallel() throws Exception { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInSafeMode(); - }, 10, 1000 * 5); - int threadCount = 20; - List executors = new ArrayList<>(threadCount); - for (int i = 0; i < threadCount; i++) { - executors.add(Executors.newSingleThreadExecutor()); - } - List> futureList = - new ArrayList<>(threadCount); - for (int i = 0; i < threadCount; i++) { - final CompletableFuture future = - new CompletableFuture<>(); - CompletableFuture.supplyAsync(() -> { - try { - future.complete(blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList())); - } catch (IOException e) { - future.completeExceptionally(e); - } - return future; - }, executors.get(i)); - futureList.add(future); - } - try { - CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) - .get(); - } catch (Exception e) { - Assert.fail("testAllocateBlockInParallel failed"); - } - } - - @Test - public void testAllocateOversizedBlock() throws Exception { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInSafeMode(); - }, 10, 1000 * 5); - long size = 6 * GB; - thrown.expectMessage("Unsupported block size"); - AllocatedBlock block = blockManager.allocateBlock(size, - type, factor, containerOwner, new ExcludeList()); - } - - - @Test - public void testAllocateBlockFailureInSafeMode() throws Exception { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, - new SafeModeStatus(true)); - GenericTestUtils.waitFor(() -> { - return blockManager.isScmInSafeMode(); - }, 10, 1000 * 5); - // Test1: In safe mode expect an SCMException. 
- thrown.expectMessage("SafeModePrecheck failed for " - + "allocateBlock"); - blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner, new ExcludeList()); - } - - @Test - public void testAllocateBlockSucInSafeMode() throws Exception { - // Test2: Exit safe mode and then try allocateBock again. - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils.waitFor(() -> { - return !blockManager.isScmInSafeMode(); - }, 10, 1000 * 5); - Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, - type, factor, containerOwner, new ExcludeList())); - } - - @Test(timeout = 10000) - public void testMultipleBlockAllocation() - throws IOException, TimeoutException, InterruptedException { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils - .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5); - - pipelineManager.createPipeline(type, factor); - pipelineManager.createPipeline(type, factor); - - AllocatedBlock allocatedBlock = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList()); - // block should be allocated in different pipelines - GenericTestUtils.waitFor(() -> { - try { - AllocatedBlock block = blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList()); - return !block.getPipeline().getId() - .equals(allocatedBlock.getPipeline().getId()); - } catch (IOException e) { - } - return false; - }, 100, 1000); - } - - private boolean verifyNumberOfContainersInPipelines( - int numContainersPerPipeline) { - try { - for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) { - if (pipelineManager.getNumberOfContainers(pipeline.getId()) - != numContainersPerPipeline) { - return false; - } - } - } catch (IOException e) { - return false; - } - return true; - } - - @Test(timeout = 10000) - public void testMultipleBlockAllocationWithClosedContainer() - throws IOException, TimeoutException, InterruptedException { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils - .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5); - - // create pipelines - for (int i = 0; - i < nodeManager.getNodes(HddsProtos.NodeState.HEALTHY).size(); i++) { - pipelineManager.createPipeline(type, factor); - } - - // wait till each pipeline has the configured number of containers. - // After this each pipeline has numContainerPerOwnerInPipeline containers - // for each owner - GenericTestUtils.waitFor(() -> { - try { - blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList()); - } catch (IOException e) { - } - return verifyNumberOfContainersInPipelines( - numContainerPerOwnerInPipeline); - }, 10, 1000); - - // close all the containers in all the pipelines - for (Pipeline pipeline : pipelineManager.getPipelines(type, factor)) { - for (ContainerID cid : pipelineManager - .getContainersInPipeline(pipeline.getId())) { - eventQueue.fireEvent(SCMEvents.CLOSE_CONTAINER, cid); - } - } - // wait till no containers are left in the pipelines - GenericTestUtils - .waitFor(() -> verifyNumberOfContainersInPipelines(0), 10, 5000); - - // allocate block so that each pipeline has the configured number of - // containers. 
- GenericTestUtils.waitFor(() -> { - try { - blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList()); - } catch (IOException e) { - } - return verifyNumberOfContainersInPipelines( - numContainerPerOwnerInPipeline); - }, 10, 1000); - } - - @Test(timeout = 10000) - public void testBlockAllocationWithNoAvailablePipelines() - throws IOException, TimeoutException, InterruptedException { - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - GenericTestUtils - .waitFor(() -> !blockManager.isScmInSafeMode(), 10, 1000 * 5); - - for (Pipeline pipeline : pipelineManager.getPipelines()) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - } - Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size()); - Assert.assertNotNull(blockManager - .allocateBlock(DEFAULT_BLOCK_SIZE, type, factor, containerOwner, - new ExcludeList())); - Assert.assertEquals(1, pipelineManager.getPipelines(type, factor).size()); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java deleted file mode 100644 index 5982b4fb4c8..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ /dev/null @@ -1,437 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.block; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto - .DeleteBlockTransactionResult; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.when; - -/** - * Tests for DeletedBlockLog. 
- */ -public class TestDeletedBlockLog { - - private static DeletedBlockLogImpl deletedBlockLog; - private OzoneConfiguration conf; - private File testDir; - private ContainerManager containerManager; - private StorageContainerManager scm; - private List dnList; - - @Before - public void setup() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestDeletedBlockLog.class.getSimpleName()); - conf = new OzoneConfiguration(); - conf.set(OZONE_ENABLED, "true"); - conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - scm = TestUtils.getScm(conf); - containerManager = Mockito.mock(SCMContainerManager.class); - deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager, - scm.getScmMetadataStore()); - dnList = new ArrayList<>(3); - setupContainerManager(); - } - - private void setupContainerManager() throws IOException { - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - dnList.add( - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - - final ContainerInfo container = - new ContainerInfo.Builder().setContainerID(1) - .setReplicationFactor(ReplicationFactor.THREE) - .setState(HddsProtos.LifeCycleState.CLOSED) - .build(); - final Set replicaSet = dnList.stream() - .map(datanodeDetails -> ContainerReplica.newBuilder() - .setContainerID(container.containerID()) - .setContainerState(ContainerReplicaProto.State.OPEN) - .setDatanodeDetails(datanodeDetails) - .build()) - .collect(Collectors.toSet()); - - when(containerManager.getContainerReplicas(anyObject())) - .thenReturn(replicaSet); - when(containerManager.getContainer(anyObject())) - .thenReturn(container); - } - - @After - public void tearDown() throws Exception { - deletedBlockLog.close(); - scm.stop(); - scm.join(); - FileUtils.deleteDirectory(testDir); - } - - private Map> generateData(int dataSize) { - Map> blockMap = new HashMap<>(); - Random random = new Random(1); - int continerIDBase = random.nextInt(100); - int localIDBase = random.nextInt(1000); - for (int i = 0; i < dataSize; i++) { - long containerID = continerIDBase + i; - List blocks = new ArrayList<>(); - int blockSize = random.nextInt(30) + 1; - for (int j = 0; j < blockSize; j++) { - long localID = localIDBase + j; - blocks.add(localID); - } - blockMap.put(containerID, blocks); - } - return blockMap; - } - - private void commitTransactions( - List transactionResults, - DatanodeDetails... dns) { - for (DatanodeDetails dnDetails : dns) { - deletedBlockLog - .commitTransactions(transactionResults, dnDetails.getUuid()); - } - } - - private void commitTransactions( - List transactionResults) { - commitTransactions(transactionResults, - dnList.toArray(new DatanodeDetails[3])); - } - - private void commitTransactions( - Collection deletedBlocksTransactions, - DatanodeDetails... 
dns) { - commitTransactions(deletedBlocksTransactions.stream() - .map(this::createDeleteBlockTransactionResult) - .collect(Collectors.toList()), dns); - } - - private void commitTransactions( - Collection deletedBlocksTransactions) { - commitTransactions(deletedBlocksTransactions.stream() - .map(this::createDeleteBlockTransactionResult) - .collect(Collectors.toList())); - } - - private DeleteBlockTransactionResult createDeleteBlockTransactionResult( - DeletedBlocksTransaction transaction) { - return DeleteBlockTransactionResult.newBuilder() - .setContainerID(transaction.getContainerID()).setSuccess(true) - .setTxID(transaction.getTxID()).build(); - } - - private List getTransactions( - int maximumAllowedTXNum) throws IOException { - DatanodeDeletedBlockTransactions transactions = - new DatanodeDeletedBlockTransactions(containerManager, - maximumAllowedTXNum, 3); - deletedBlockLog.getTransactions(transactions); - return transactions.getDatanodeTransactions(dnList.get(0).getUuid()); - } - - @Test - public void testIncrementCount() throws Exception { - int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - - // Create 30 TXs in the log. - for (Map.Entry> entry : generateData(30).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - - // This will return all TXs, total num 30. - List blocks = - getTransactions(40); - List txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID) - .collect(Collectors.toList()); - - for (int i = 0; i < maxRetry; i++) { - deletedBlockLog.incrementCount(txIDs); - } - - // Increment another time so it exceed the maxRetry. - // On this call, count will be set to -1 which means TX eventually fails. - deletedBlockLog.incrementCount(txIDs); - blocks = getTransactions(40); - for (DeletedBlocksTransaction block : blocks) { - Assert.assertEquals(-1, block.getCount()); - } - - // If all TXs are failed, getTransactions call will always return nothing. - blocks = getTransactions(40); - Assert.assertEquals(blocks.size(), 0); - } - - @Test - public void testCommitTransactions() throws Exception { - for (Map.Entry> entry : generateData(50).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - List blocks = - getTransactions(20); - // Add an invalid txn. - blocks.add( - DeletedBlocksTransaction.newBuilder().setContainerID(1).setTxID(70) - .setCount(0).addLocalID(0).build()); - commitTransactions(blocks); - blocks.remove(blocks.size() - 1); - - blocks = getTransactions(50); - Assert.assertEquals(30, blocks.size()); - commitTransactions(blocks, dnList.get(1), dnList.get(2), - DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString()) - .build()); - - blocks = getTransactions(50); - Assert.assertEquals(30, blocks.size()); - commitTransactions(blocks, dnList.get(0)); - - blocks = getTransactions(50); - Assert.assertEquals(0, blocks.size()); - } - - @Test - public void testRandomOperateTransactions() throws Exception { - Random random = new Random(); - int added = 0, committed = 0; - List blocks = new ArrayList<>(); - List txIDs = new ArrayList<>(); - byte[] latestTxid = DFSUtil.string2Bytes("#LATEST_TXID#"); - MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid = - (preKey, currentKey, nextKey) -> - !Arrays.equals(latestTxid, currentKey); - // Randomly add/get/commit/increase transactions. 
- for (int i = 0; i < 100; i++) { - int state = random.nextInt(4); - if (state == 0) { - for (Map.Entry> entry : - generateData(10).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - added += 10; - } else if (state == 1) { - blocks = getTransactions(20); - txIDs = new ArrayList<>(); - for (DeletedBlocksTransaction block : blocks) { - txIDs.add(block.getTxID()); - } - deletedBlockLog.incrementCount(txIDs); - } else if (state == 2) { - commitTransactions(blocks); - committed += blocks.size(); - blocks = new ArrayList<>(); - } else { - // verify the number of added and committed. - try (TableIterator> iter = - scm.getScmMetadataStore().getDeletedBlocksTXTable().iterator()) { - AtomicInteger count = new AtomicInteger(); - iter.forEachRemaining((keyValue) -> count.incrementAndGet()); - Assert.assertEquals(added, count.get() + committed); - } - } - } - blocks = getTransactions(1000); - commitTransactions(blocks); - } - - @Test - public void testPersistence() throws Exception { - for (Map.Entry> entry : generateData(50).entrySet()){ - deletedBlockLog.addTransaction(entry.getKey(), entry.getValue()); - } - // close db and reopen it again to make sure - // transactions are stored persistently. - deletedBlockLog.close(); - deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager, - scm.getScmMetadataStore()); - List blocks = - getTransactions(10); - commitTransactions(blocks); - blocks = getTransactions(100); - Assert.assertEquals(40, blocks.size()); - commitTransactions(blocks); - } - - @Test - public void testDeletedBlockTransactions() throws IOException { - int txNum = 10; - int maximumAllowedTXNum = 5; - List blocks = null; - List containerIDs = new LinkedList<>(); - DatanodeDetails dnId1 = dnList.get(0), dnId2 = dnList.get(1); - - int count = 0; - long containerID = 0L; - - // Creates {TXNum} TX in the log. - for (Map.Entry> entry : generateData(txNum) - .entrySet()) { - count++; - containerID = entry.getKey(); - containerIDs.add(containerID); - deletedBlockLog.addTransaction(containerID, entry.getValue()); - - // make TX[1-6] for datanode1; TX[7-10] for datanode2 - if (count <= (maximumAllowedTXNum + 1)) { - mockContainerInfo(containerID, dnId1); - } else { - mockContainerInfo(containerID, dnId2); - } - } - - DatanodeDeletedBlockTransactions transactions = - new DatanodeDeletedBlockTransactions(containerManager, - maximumAllowedTXNum, 2); - deletedBlockLog.getTransactions(transactions); - - for (UUID id : transactions.getDatanodeIDs()) { - List txs = transactions - .getDatanodeTransactions(id); - // delete TX ID - commitTransactions(txs); - } - - blocks = getTransactions(txNum); - // There should be one block remained since dnID1 reaches - // the maximum value (5). - Assert.assertEquals(1, blocks.size()); - - Assert.assertFalse(transactions.isFull()); - // The number of TX in dnID1 won't more than maximum value. - Assert.assertEquals(maximumAllowedTXNum, - transactions.getDatanodeTransactions(dnId1.getUuid()).size()); - - int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size(); - // add duplicated container in dnID2, this should be failed. - DeletedBlocksTransaction.Builder builder = - DeletedBlocksTransaction.newBuilder(); - builder.setTxID(11); - builder.setContainerID(containerID); - builder.setCount(0); - transactions.addTransaction(builder.build(), - null); - - // The number of TX in dnID2 should not be changed. 
- Assert.assertEquals(size, - transactions.getDatanodeTransactions(dnId2.getUuid()).size()); - - // Add new TX in dnID2, then dnID2 will reach maximum value. - containerID = RandomUtils.nextLong(); - builder = DeletedBlocksTransaction.newBuilder(); - builder.setTxID(12); - builder.setContainerID(containerID); - builder.setCount(0); - mockContainerInfo(containerID, dnId2); - transactions.addTransaction(builder.build(), - null); - // Since all node are full, then transactions is full. - Assert.assertTrue(transactions.isFull()); - } - - private void mockContainerInfo(long containerID, DatanodeDetails dd) - throws IOException { - List dns = Collections.singletonList(dd); - Pipeline pipeline = Pipeline.newBuilder() - .setType(ReplicationType.STAND_ALONE) - .setFactor(ReplicationFactor.ONE) - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setNodes(dns) - .build(); - - ContainerInfo.Builder builder = new ContainerInfo.Builder(); - builder.setPipelineID(pipeline.getId()) - .setReplicationType(pipeline.getType()) - .setReplicationFactor(pipeline.getFactor()); - - ContainerInfo containerInfo = builder.build(); - Mockito.doReturn(containerInfo).when(containerManager) - .getContainer(ContainerID.valueof(containerID)); - - final Set replicaSet = dns.stream() - .map(datanodeDetails -> ContainerReplica.newBuilder() - .setContainerID(containerInfo.containerID()) - .setContainerState(ContainerReplicaProto.State.OPEN) - .setDatanodeDetails(datanodeDetails) - .build()) - .collect(Collectors.toSet()); - when(containerManager.getContainerReplicas( - ContainerID.valueof(containerID))) - .thenReturn(replicaSet); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java deleted file mode 100644 index a67df6982aa..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Make checkstyle happy. - * */ -package org.apache.hadoop.hdds.scm.block; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java deleted file mode 100644 index 8877b2b5c4e..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.command; - -import org.apache.hadoop.hdds.HddsIdFactory; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .CommandStatusReportFromDatanode; - -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - -/** - * Unit test for command status report handler. 
- */ -public class TestCommandStatusReportHandler implements EventPublisher { - - private static final Logger LOG = LoggerFactory - .getLogger(TestCommandStatusReportHandler.class); - private CommandStatusReportHandler cmdStatusReportHandler; - - @Before - public void setup() { - cmdStatusReportHandler = new CommandStatusReportHandler(); - } - - @Test - public void testCommandStatusReport() { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(LOG); - - CommandStatusReportFromDatanode report = this.getStatusReport(Collections - .emptyList()); - cmdStatusReportHandler.onMessage(report, this); - assertFalse(logCapturer.getOutput().contains("Delete_Block_Status")); - assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status")); - - report = this.getStatusReport(this.getCommandStatusList()); - cmdStatusReportHandler.onMessage(report, this); - assertTrue(logCapturer.getOutput().contains("firing event of type " + - "Delete_Block_Status")); - assertTrue(logCapturer.getOutput().contains("type: " + - "deleteBlocksCommand")); - - } - - private CommandStatusReportFromDatanode getStatusReport( - List reports) { - CommandStatusReportsProto report = TestUtils.createCommandStatusReport( - reports); - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode( - dn, report); - } - - @Override - public > void - fireEvent(EVENT_TYPE event, PAYLOAD payload) { - LOG.info("firing event of type {}, payload {}", event.getName(), payload - .toString()); - } - - private List getCommandStatusList() { - List reports = new ArrayList<>(3); - - // Add status message for replication, close container and delete block - // command. - CommandStatus.Builder builder = CommandStatus.newBuilder(); - - builder.setCmdId(HddsIdFactory.getLongId()) - .setStatus(CommandStatus.Status.EXECUTED) - .setType(Type.deleteBlocksCommand); - reports.add(builder.build()); - - builder.setMsg("Not enough space") - .setCmdId(HddsIdFactory.getLongId()) - .setStatus(CommandStatus.Status.FAILED) - .setType(Type.replicateContainerCommand); - reports.add(builder.build()); - return reports; - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java deleted file mode 100644 index f529c20e74e..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. 
- */ -package org.apache.hadoop.hdds.scm.command; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java deleted file mode 100644 index 6f5d4356fb3..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ /dev/null @@ -1,584 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.net.NetConstants; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.Node; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap; -import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.assertj.core.util.Preconditions; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; - -/** - * Test Helper for testing container Mapping. 
- */ -public class MockNodeManager implements NodeManager { - private final static NodeData[] NODES = { - new NodeData(10L * OzoneConsts.TB, OzoneConsts.GB), - new NodeData(64L * OzoneConsts.TB, 100 * OzoneConsts.GB), - new NodeData(128L * OzoneConsts.TB, 256 * OzoneConsts.GB), - new NodeData(40L * OzoneConsts.TB, OzoneConsts.TB), - new NodeData(256L * OzoneConsts.TB, 200 * OzoneConsts.TB), - new NodeData(20L * OzoneConsts.TB, 10 * OzoneConsts.GB), - new NodeData(32L * OzoneConsts.TB, 16 * OzoneConsts.TB), - new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB), - new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB, NodeData.STALE), - new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.STALE), - new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.DEAD) - }; - private final List healthyNodes; - private final List staleNodes; - private final List deadNodes; - private final Map nodeMetricMap; - private final SCMNodeStat aggregateStat; - private boolean safemode; - private final Map> commandMap; - private final Node2PipelineMap node2PipelineMap; - private final Node2ContainerMap node2ContainerMap; - private NetworkTopology clusterMap; - private ConcurrentHashMap> dnsToUuidMap; - - public MockNodeManager(boolean initializeFakeNodes, int nodeCount) { - this.healthyNodes = new LinkedList<>(); - this.staleNodes = new LinkedList<>(); - this.deadNodes = new LinkedList<>(); - this.nodeMetricMap = new HashMap<>(); - this.node2PipelineMap = new Node2PipelineMap(); - this.node2ContainerMap = new Node2ContainerMap(); - this.dnsToUuidMap = new ConcurrentHashMap(); - aggregateStat = new SCMNodeStat(); - if (initializeFakeNodes) { - for (int x = 0; x < nodeCount; x++) { - DatanodeDetails dd = TestUtils.randomDatanodeDetails(); - register(dd, null, null); - populateNodeMetric(dd, x); - } - } - safemode = false; - this.commandMap = new HashMap<>(); - } - - /** - * Invoked from ctor to create some node Metrics. - * - * @param datanodeDetails - Datanode details - */ - private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) { - SCMNodeStat newStat = new SCMNodeStat(); - long remaining = - NODES[x % NODES.length].capacity - NODES[x % NODES.length].used; - newStat.set( - (NODES[x % NODES.length].capacity), - (NODES[x % NODES.length].used), remaining); - this.nodeMetricMap.put(datanodeDetails, newStat); - aggregateStat.add(newStat); - - if (NODES[x % NODES.length].getCurrentState() == NodeData.HEALTHY) { - healthyNodes.add(datanodeDetails); - } - - if (NODES[x % NODES.length].getCurrentState() == NodeData.STALE) { - staleNodes.add(datanodeDetails); - } - - if (NODES[x % NODES.length].getCurrentState() == NodeData.DEAD) { - deadNodes.add(datanodeDetails); - } - - } - - /** - * Sets the safe mode value. - * @param safemode boolean - */ - public void setSafemode(boolean safemode) { - this.safemode = safemode; - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param nodestate - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - @Override - public List getNodes(HddsProtos.NodeState nodestate) { - if (nodestate == HEALTHY) { - return healthyNodes; - } - - if (nodestate == STALE) { - return staleNodes; - } - - if (nodestate == DEAD) { - return deadNodes; - } - - return null; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. 
- * - * @param nodestate - State of the node - * @return int -- count - */ - @Override - public int getNodeCount(HddsProtos.NodeState nodestate) { - List nodes = getNodes(nodestate); - if (nodes != null) { - return nodes.size(); - } - return 0; - } - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - @Override - public List getAllNodes() { - return new ArrayList<>(nodeMetricMap.keySet()); - } - - /** - * Returns the aggregated node stats. - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return aggregateStat; - } - - /** - * Return a map of nodes to their stats. - * @return a list of individual node stats (live/stale but not dead). - */ - @Override - public Map getNodeStats() { - return nodeMetricMap; - } - - /** - * Return the node stat of the specified datanode. - * @param datanodeDetails - datanode details. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) { - SCMNodeStat stat = nodeMetricMap.get(datanodeDetails); - if (stat == null) { - return null; - } - return new SCMNodeMetric(stat); - } - - /** - * Returns the node state of a specific node. - * - * @param dd - DatanodeDetails - * @return Healthy/Stale/Dead. - */ - @Override - public HddsProtos.NodeState getNodeState(DatanodeDetails dd) { - return null; - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelines(DatanodeDetails dnId) { - return node2PipelineMap.getPipelines(dnId.getUuid()); - } - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - node2PipelineMap.addPipeline(pipeline); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - node2PipelineMap.removePipeline(pipeline); - } - - @Override - public void addContainer(DatanodeDetails dd, - ContainerID containerId) - throws NodeNotFoundException { - try { - Set set = node2ContainerMap.getContainers(dd.getUuid()); - set.add(containerId); - node2ContainerMap.setContainersForDatanode(dd.getUuid(), set); - } catch (SCMException e) { - e.printStackTrace(); - } - } - - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - if(commandMap.containsKey(dnId)) { - List commandList = commandMap.get(dnId); - Preconditions.checkNotNull(commandList); - commandList.add(command); - } else { - List commandList = new LinkedList<>(); - commandList.add(command); - commandMap.put(dnId, commandList); - } - } - - /** - * Empty implementation for processNodeReport. - * - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(DatanodeDetails dnUuid, - NodeReportProto nodeReport) { - // do nothing - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws SCMException - if datanode is not known. For new datanode use - * addDatanodeInContainerMap call. 
- */ - @Override - public void setContainers(DatanodeDetails uuid, Set containerIds) - throws NodeNotFoundException { - try { - node2ContainerMap.setContainersForDatanode(uuid.getUuid(), containerIds); - } catch (SCMException e) { - throw new NodeNotFoundException(e.getMessage()); - } - } - - /** - * Return set of containerIDs available on a datanode. - * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(DatanodeDetails uuid) { - return node2ContainerMap.getContainers(uuid.getUuid()); - } - - // Returns the number of commands that is queued to this node manager. - public int getCommandCount(DatanodeDetails dd) { - List list = commandMap.get(dd.getUuid()); - return (list == null) ? 0 : list.size(); - } - - public void clearCommandQueue(UUID dnId) { - if(commandMap.containsKey(dnId)) { - commandMap.put(dnId, new LinkedList<>()); - } - } - - /** - * Closes this stream and releases any system resources associated with it. If - * the stream is already closed then invoking this method has no effect. - *
- *
As noted in {@link AutoCloseable#close()}, cases where the close may - * fail require careful attention. It is strongly advised to relinquish the - * underlying resources and to internally mark the {@code Closeable} - * as closed, prior to throwing the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return null; - } - - /** - * Register the node if the node finds that it is not registered with any - * SCM. - * - * @param datanodeDetails DatanodeDetails - * @param nodeReport NodeReportProto - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register(DatanodeDetails datanodeDetails, - NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) { - try { - node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(), - Collections.emptySet()); - addEntryTodnsToUuidMap(datanodeDetails.getIpAddress(), - datanodeDetails.getUuidString()); - if (clusterMap != null) { - datanodeDetails.setNetworkName(datanodeDetails.getUuidString()); - clusterMap.add(datanodeDetails); - } - } catch (SCMException e) { - e.printStackTrace(); - } - return null; - } - - /** - * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs - * running on that host. As each address can have many DNs running on it, - * this is a one to many mapping. - * @param dnsName String representing the hostname or IP of the node - * @param uuid String representing the UUID of the registered node. - */ - private synchronized void addEntryTodnsToUuidMap( - String dnsName, String uuid) { - Set dnList = dnsToUuidMap.get(dnsName); - if (dnList == null) { - dnList = ConcurrentHashMap.newKeySet(); - dnsToUuidMap.put(dnsName, dnList); - } - dnList.add(uuid); - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param datanodeDetails - Datanode ID. - * @return SCMheartbeat response list - */ - @Override - public List processHeartbeat(DatanodeDetails datanodeDetails) { - return null; - } - - @Override - public Boolean isNodeRegistered( - DatanodeDetails datanodeDetails) { - return null; - } - - @Override - public Map getNodeCount() { - Map nodeCountMap = new HashMap(); - for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) { - nodeCountMap.put(state.toString(), getNodeCount(state)); - } - return nodeCountMap; - } - - @Override - public Map getNodeInfo() { - Map nodeInfo = new HashMap<>(); - nodeInfo.put("Capacity", aggregateStat.getCapacity().get()); - nodeInfo.put("Used", aggregateStat.getScmUsed().get()); - nodeInfo.put("Remaining", aggregateStat.getRemaining().get()); - return nodeInfo; - } - - /** - * Makes it easy to add a container. - * - * @param datanodeDetails datanode details - * @param size number of bytes. - */ - public void addContainer(DatanodeDetails datanodeDetails, long size) { - SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails); - if (stat != null) { - aggregateStat.subtract(stat); - stat.getCapacity().add(size); - aggregateStat.add(stat); - nodeMetricMap.put(datanodeDetails, stat); - } - } - - /** - * Makes it easy to simulate a delete of a container. - * - * @param datanodeDetails datanode Details - * @param size number of bytes. 
- */ - public void delContainer(DatanodeDetails datanodeDetails, long size) { - SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails); - if (stat != null) { - aggregateStat.subtract(stat); - stat.getCapacity().subtract(size); - aggregateStat.add(stat); - nodeMetricMap.put(datanodeDetails, stat); - } - } - - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher publisher) { - addDatanodeCommand(commandForDatanode.getDatanodeId(), - commandForDatanode.getCommand()); - } - - @Override - public List getCommandQueue(UUID dnID) { - return null; - } - - @Override - public DatanodeDetails getNodeByUuid(String uuid) { - Node node = clusterMap.getNode(NetConstants.DEFAULT_RACK + "/" + uuid); - return node == null ? null : (DatanodeDetails)node; - } - - @Override - public List getNodesByAddress(String address) { - List results = new LinkedList<>(); - Set uuids = dnsToUuidMap.get(address); - if (uuids == null) { - return results; - } - for(String uuid : uuids) { - DatanodeDetails dn = getNodeByUuid(uuid); - if (dn != null) { - results.add(dn); - } - } - return results; - } - - public void setNetworkTopology(NetworkTopology topology) { - this.clusterMap = topology; - } - - /** - * A class to declare some values for the nodes so that our tests - * won't fail. - */ - private static class NodeData { - public static final long HEALTHY = 1; - public static final long STALE = 2; - public static final long DEAD = 3; - - private long capacity; - private long used; - - private long currentState; - - /** - * By default nodes are healthy. - * @param capacity - * @param used - */ - NodeData(long capacity, long used) { - this(capacity, used, HEALTHY); - } - - /** - * Constructs a nodeDefinition. - * - * @param capacity capacity. - * @param used used. - * @param currentState - Healthy, Stale and DEAD nodes. - */ - NodeData(long capacity, long used, long currentState) { - this.capacity = capacity; - this.used = used; - this.currentState = currentState; - } - - public long getCapacity() { - return capacity; - } - - public void setCapacity(long capacity) { - this.capacity = capacity; - } - - public long getUsed() { - return used; - } - - public void setUsed(long used) { - this.used = used; - } - - public long getCurrentState() { - return currentState; - } - - public void setCurrentState(long currentState) { - this.currentState = currentState; - } - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java deleted file mode 100644 index a8364a43556..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; - -/** - * Tests the closeContainerEventHandler class. - */ -public class TestCloseContainerEventHandler { - - private static Configuration configuration; - private static MockNodeManager nodeManager; - private static SCMPipelineManager pipelineManager; - private static SCMContainerManager containerManager; - private static long size; - private static File testDir; - private static EventQueue eventQueue; - - @BeforeClass - public static void setUp() throws Exception { - configuration = SCMTestUtils.getConf(); - size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE, - OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); - testDir = GenericTestUtils - .getTestDir(TestCloseContainerEventHandler.class.getSimpleName()); - configuration - .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - nodeManager = new MockNodeManager(true, 10); - pipelineManager = - new SCMPipelineManager(configuration, nodeManager, eventQueue, null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), configuration); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - containerManager = new - SCMContainerManager(configuration, nodeManager, - pipelineManager, new EventQueue()); - eventQueue = new EventQueue(); - eventQueue.addHandler(CLOSE_CONTAINER, - new CloseContainerEventHandler(pipelineManager, containerManager)); - eventQueue.addHandler(DATANODE_COMMAND, nodeManager); - } - - @AfterClass - public static void tearDown() throws Exception { - if (containerManager != null) { - containerManager.close(); - } - FileUtil.fullyDelete(testDir); - } - - @Test - public void testIfCloseContainerEventHadnlerInvoked() { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(Math.abs(RandomUtils.nextInt()))); - 
eventQueue.processAll(1000); - Assert.assertTrue(logCapturer.getOutput() - .contains("Close container Event triggered for container")); - } - - @Test - public void testCloseContainerEventWithInvalidContainer() { - long id = Math.abs(RandomUtils.nextInt()); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(id)); - eventQueue.processAll(1000); - Assert.assertTrue(logCapturer.getOutput() - .contains("Failed to close the container")); - } - - @Test - public void testCloseContainerEventWithValidContainers() throws IOException { - - ContainerInfo container = containerManager - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, "ozone"); - ContainerID id = container.containerID(); - DatanodeDetails datanode = pipelineManager - .getPipeline(container.getPipelineID()).getFirstNode(); - int closeCount = nodeManager.getCommandCount(datanode); - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - Assert.assertEquals(closeCount + 1, - nodeManager.getCommandCount(datanode)); - Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, - containerManager.getContainer(id).getState()); - } - - @Test - public void testCloseContainerEventWithRatis() throws IOException { - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(CloseContainerEventHandler.LOG); - ContainerInfo container = containerManager - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, "ozone"); - ContainerID id = container.containerID(); - int[] closeCount = new int[3]; - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - int i = 0; - for (DatanodeDetails details : pipelineManager - .getPipeline(container.getPipelineID()).getNodes()) { - closeCount[i] = nodeManager.getCommandCount(details); - i++; - } - i = 0; - for (DatanodeDetails details : pipelineManager - .getPipeline(container.getPipelineID()).getNodes()) { - Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details)); - i++; - } - eventQueue.fireEvent(CLOSE_CONTAINER, id); - eventQueue.processAll(1000); - i = 0; - // Make sure close is queued for each datanode on the pipeline - for (DatanodeDetails details : pipelineManager - .getPipeline(container.getPipelineID()).getNodes()) { - Assert.assertEquals(closeCount[i] + 1, - nodeManager.getCommandCount(details)); - Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, - containerManager.getContainer(id).getState()); - i++; - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java deleted file mode 100644 index 09daa596aa7..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -/** - * Tests ContainerActionsHandler. - */ -public class TestContainerActionsHandler { - - @Test - public void testCloseContainerAction() { - EventQueue queue = new EventQueue(); - ContainerActionsHandler actionsHandler = new ContainerActionsHandler(); - CloseContainerEventHandler closeContainerEventHandler = Mockito.mock( - CloseContainerEventHandler.class); - queue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerEventHandler); - queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler); - - ContainerAction action = ContainerAction.newBuilder() - .setContainerID(1L) - .setAction(ContainerAction.Action.CLOSE) - .setReason(ContainerAction.Reason.CONTAINER_FULL) - .build(); - - ContainerActionsProto cap = ContainerActionsProto.newBuilder() - .addContainerActions(action) - .build(); - - ContainerActionsFromDatanode containerActions = - new ContainerActionsFromDatanode( - TestUtils.randomDatanodeDetails(), cap); - - queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions); - queue.processAll(1000L); - verify(closeContainerEventHandler, times(1)) - .onMessage(ContainerID.valueof(1L), queue); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java deleted file mode 100644 index 41585bc8f7d..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ /dev/null @@ -1,510 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; - -/** - * Test the behaviour of the ContainerReportHandler. 
- */ -public class TestContainerReportHandler { - - private NodeManager nodeManager; - private ContainerManager containerManager; - private ContainerStateManager containerStateManager; - private EventPublisher publisher; - - @Before - public void setup() throws IOException { - final Configuration conf = new OzoneConfiguration(); - this.nodeManager = new MockNodeManager(true, 10); - this.containerManager = Mockito.mock(ContainerManager.class); - this.containerStateManager = new ContainerStateManager(conf); - this.publisher = Mockito.mock(EventPublisher.class); - - - Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainer((ContainerID)invocation.getArguments()[0])); - - Mockito.when(containerManager.getContainerReplicas( - Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainerReplicas((ContainerID)invocation.getArguments()[0])); - - Mockito.doAnswer(invocation -> { - containerStateManager - .updateContainerState((ContainerID)invocation.getArguments()[0], - (HddsProtos.LifeCycleEvent)invocation.getArguments()[1]); - return null; - }).when(containerManager).updateContainerState( - Mockito.any(ContainerID.class), - Mockito.any(HddsProtos.LifeCycleEvent.class)); - - Mockito.doAnswer(invocation -> { - containerStateManager.updateContainerReplica( - (ContainerID) invocation.getArguments()[0], - (ContainerReplica) invocation.getArguments()[1]); - return null; - }).when(containerManager).updateContainerReplica( - Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class)); - - Mockito.doAnswer(invocation -> { - containerStateManager.removeContainerReplica( - (ContainerID) invocation.getArguments()[0], - (ContainerReplica) invocation.getArguments()[1]); - return null; - }).when(containerManager).removeContainerReplica( - Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class)); - - } - - @After - public void tearDown() throws IOException { - containerStateManager.close(); - } - - @Test - public void testUnderReplicatedContainer() - throws NodeNotFoundException, ContainerNotFoundException, SCMException { - - final ContainerReportHandler reportHandler = new ContainerReportHandler( - nodeManager, containerManager); - final Iterator nodeIterator = nodeManager.getNodes( - NodeState.HEALTHY).iterator(); - final DatanodeDetails datanodeOne = nodeIterator.next(); - final DatanodeDetails datanodeTwo = nodeIterator.next(); - final DatanodeDetails datanodeThree = nodeIterator.next(); - - final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED); - final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED); - final Set containerIDSet = Stream.of( - containerOne.containerID(), containerTwo.containerID()) - .collect(Collectors.toSet()); - - nodeManager.setContainers(datanodeOne, containerIDSet); - nodeManager.setContainers(datanodeTwo, containerIDSet); - nodeManager.setContainers(datanodeThree, containerIDSet); - - containerStateManager.loadContainer(containerOne); - containerStateManager.loadContainer(containerTwo); - - getReplicas(containerOne.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree) - .forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerOne.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - getReplicas(containerTwo.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree) - .forEach(r 
-> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - // SCM expects both containerOne and containerTwo to be in all the three - // datanodes datanodeOne, datanodeTwo and datanodeThree - - // Now datanodeOne is sending container report in which containerOne is - // missing. - - // containerOne becomes under replicated. - final ContainerReportsProto containerReport = getContainerReportsProto( - containerTwo.containerID(), ContainerReplicaProto.State.CLOSED, - datanodeOne.getUuidString()); - final ContainerReportFromDatanode containerReportFromDatanode = - new ContainerReportFromDatanode(datanodeOne, containerReport); - reportHandler.onMessage(containerReportFromDatanode, publisher); - Assert.assertEquals(2, containerManager.getContainerReplicas( - containerOne.containerID()).size()); - - } - - @Test - public void testOverReplicatedContainer() throws NodeNotFoundException, - SCMException, ContainerNotFoundException { - - final ContainerReportHandler reportHandler = new ContainerReportHandler( - nodeManager, containerManager); - - final Iterator nodeIterator = nodeManager.getNodes( - NodeState.HEALTHY).iterator(); - final DatanodeDetails datanodeOne = nodeIterator.next(); - final DatanodeDetails datanodeTwo = nodeIterator.next(); - final DatanodeDetails datanodeThree = nodeIterator.next(); - final DatanodeDetails datanodeFour = nodeIterator.next(); - - final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED); - final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED); - - final Set containerIDSet = Stream.of( - containerOne.containerID(), containerTwo.containerID()) - .collect(Collectors.toSet()); - - nodeManager.setContainers(datanodeOne, containerIDSet); - nodeManager.setContainers(datanodeTwo, containerIDSet); - nodeManager.setContainers(datanodeThree, containerIDSet); - - containerStateManager.loadContainer(containerOne); - containerStateManager.loadContainer(containerTwo); - - getReplicas(containerOne.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree) - .forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerOne.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - getReplicas(containerTwo.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree) - .forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - // SCM expects both containerOne and containerTwo to be in all the three - // datanodes datanodeOne, datanodeTwo and datanodeThree - - // Now datanodeFour is sending container report which has containerOne. - - // containerOne becomes over replicated. 
- - final ContainerReportsProto containerReport = getContainerReportsProto( - containerOne.containerID(), ContainerReplicaProto.State.CLOSED, - datanodeFour.getUuidString()); - final ContainerReportFromDatanode containerReportFromDatanode = - new ContainerReportFromDatanode(datanodeFour, containerReport); - reportHandler.onMessage(containerReportFromDatanode, publisher); - - Assert.assertEquals(4, containerManager.getContainerReplicas( - containerOne.containerID()).size()); - } - - - @Test - public void testClosingToClosed() throws NodeNotFoundException, IOException { - /* - * The container is in CLOSING state and all the replicas are in - * OPEN/CLOSING state. - * - * The datanode reports that one of the replica is now CLOSED. - * - * In this case SCM should mark the container as CLOSED. - */ - - final ContainerReportHandler reportHandler = new ContainerReportHandler( - nodeManager, containerManager); - - final Iterator nodeIterator = nodeManager.getNodes( - NodeState.HEALTHY).iterator(); - final DatanodeDetails datanodeOne = nodeIterator.next(); - final DatanodeDetails datanodeTwo = nodeIterator.next(); - final DatanodeDetails datanodeThree = nodeIterator.next(); - - final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING); - final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED); - - final Set containerIDSet = Stream.of( - containerOne.containerID(), containerTwo.containerID()) - .collect(Collectors.toSet()); - - final Set containerOneReplicas = getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeOne); - - containerOneReplicas.addAll(getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.OPEN, - datanodeTwo, datanodeThree)); - - final Set containerTwoReplicas = getReplicas( - containerTwo.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree); - - nodeManager.setContainers(datanodeOne, containerIDSet); - nodeManager.setContainers(datanodeTwo, containerIDSet); - nodeManager.setContainers(datanodeThree, containerIDSet); - - containerStateManager.loadContainer(containerOne); - containerStateManager.loadContainer(containerTwo); - - containerOneReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - containerTwoReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - final ContainerReportsProto containerReport = getContainerReportsProto( - containerOne.containerID(), ContainerReplicaProto.State.CLOSED, - datanodeOne.getUuidString()); - final ContainerReportFromDatanode containerReportFromDatanode = - new ContainerReportFromDatanode(datanodeOne, containerReport); - reportHandler.onMessage(containerReportFromDatanode, publisher); - - Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState()); - } - - @Test - public void testClosingToQuasiClosed() - throws NodeNotFoundException, IOException { - /* - * The container is in CLOSING state and all the replicas are in - * OPEN/CLOSING state. - * - * The datanode reports that the replica is now QUASI_CLOSED. - * - * In this case SCM should move the container to QUASI_CLOSED. 
- */ - - final ContainerReportHandler reportHandler = new ContainerReportHandler( - nodeManager, containerManager); - - final Iterator nodeIterator = nodeManager.getNodes( - NodeState.HEALTHY).iterator(); - final DatanodeDetails datanodeOne = nodeIterator.next(); - final DatanodeDetails datanodeTwo = nodeIterator.next(); - final DatanodeDetails datanodeThree = nodeIterator.next(); - - final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING); - final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED); - - final Set containerIDSet = Stream.of( - containerOne.containerID(), containerTwo.containerID()) - .collect(Collectors.toSet()); - - final Set containerOneReplicas = getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeOne, datanodeTwo); - containerOneReplicas.addAll(getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.OPEN, - datanodeThree)); - final Set containerTwoReplicas = getReplicas( - containerTwo.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree); - - nodeManager.setContainers(datanodeOne, containerIDSet); - nodeManager.setContainers(datanodeTwo, containerIDSet); - nodeManager.setContainers(datanodeThree, containerIDSet); - - containerStateManager.loadContainer(containerOne); - containerStateManager.loadContainer(containerTwo); - - containerOneReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - containerTwoReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - final ContainerReportsProto containerReport = getContainerReportsProto( - containerOne.containerID(), ContainerReplicaProto.State.QUASI_CLOSED, - datanodeOne.getUuidString()); - final ContainerReportFromDatanode containerReportFromDatanode = - new ContainerReportFromDatanode(datanodeOne, containerReport); - reportHandler.onMessage(containerReportFromDatanode, publisher); - - Assert.assertEquals(LifeCycleState.QUASI_CLOSED, containerOne.getState()); - } - - @Test - public void testQuasiClosedToClosed() - throws NodeNotFoundException, IOException { - /* - * The container is in QUASI_CLOSED state. - * - One of the replica is in QUASI_CLOSED state - * - The other two replica are in OPEN/CLOSING state - * - * The datanode reports the second replica is now CLOSED. - * - * In this case SCM should CLOSE the container. 
- */ - - final ContainerReportHandler reportHandler = new ContainerReportHandler( - nodeManager, containerManager); - final Iterator nodeIterator = nodeManager.getNodes( - NodeState.HEALTHY).iterator(); - - final DatanodeDetails datanodeOne = nodeIterator.next(); - final DatanodeDetails datanodeTwo = nodeIterator.next(); - final DatanodeDetails datanodeThree = nodeIterator.next(); - - final ContainerInfo containerOne = - getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerInfo containerTwo = - getContainer(LifeCycleState.CLOSED); - - final Set containerIDSet = Stream.of( - containerOne.containerID(), containerTwo.containerID()) - .collect(Collectors.toSet()); - final Set containerOneReplicas = getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.QUASI_CLOSED, - 10000L, - datanodeOne); - containerOneReplicas.addAll(getReplicas( - containerOne.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeTwo, datanodeThree)); - final Set containerTwoReplicas = getReplicas( - containerTwo.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne, datanodeTwo, datanodeThree); - - nodeManager.setContainers(datanodeOne, containerIDSet); - nodeManager.setContainers(datanodeTwo, containerIDSet); - nodeManager.setContainers(datanodeThree, containerIDSet); - - containerStateManager.loadContainer(containerOne); - containerStateManager.loadContainer(containerTwo); - - containerOneReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - containerTwoReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - containerTwo.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - final ContainerReportsProto containerReport = getContainerReportsProto( - containerOne.containerID(), ContainerReplicaProto.State.CLOSED, - datanodeOne.getUuidString()); - - final ContainerReportFromDatanode containerReportFromDatanode = - new ContainerReportFromDatanode(datanodeOne, containerReport); - reportHandler.onMessage(containerReportFromDatanode, publisher); - - Assert.assertEquals(LifeCycleState.CLOSED, containerOne.getState()); - } - - private static ContainerReportsProto getContainerReportsProto( - final ContainerID containerId, final ContainerReplicaProto.State state, - final String originNodeId) { - final ContainerReportsProto.Builder crBuilder = - ContainerReportsProto.newBuilder(); - final ContainerReplicaProto replicaProto = - ContainerReplicaProto.newBuilder() - .setContainerID(containerId.getId()) - .setState(state) - .setOriginNodeId(originNodeId) - .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") - .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) - .setReadCount(100000000L) - .setWriteCount(100000000L) - .setReadBytes(2000000000L) - .setWriteBytes(2000000000L) - .setBlockCommitSequenceId(10000L) - .setDeleteTransactionId(0) - .build(); - return crBuilder.addReports(replicaProto).build(); - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java deleted file mode 100644 index 5c4617c9e42..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache 
Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Set; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.TestUtils; - -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.mockito.Mockito.when; - -/** - * Testing ContainerStatemanager. - */ -public class TestContainerStateManager { - - private ContainerStateManager containerStateManager; - - @Before - public void init() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - containerStateManager = new ContainerStateManager(conf); - - } - - @Test - public void checkReplicationStateOK() throws IOException { - //GIVEN - ContainerInfo c1 = allocateContainer(); - - DatanodeDetails d1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d2 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d3 = TestUtils.randomDatanodeDetails(); - - addReplica(c1, d1); - addReplica(c1, d2); - addReplica(c1, d3); - - //WHEN - Set replicas = containerStateManager - .getContainerReplicas(c1.containerID()); - - //THEN - Assert.assertEquals(3, replicas.size()); - } - - @Test - public void checkReplicationStateMissingReplica() throws IOException { - //GIVEN - - ContainerInfo c1 = allocateContainer(); - - DatanodeDetails d1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails d2 = TestUtils.randomDatanodeDetails(); - - addReplica(c1, d1); - addReplica(c1, d2); - - //WHEN - Set replicas = containerStateManager - .getContainerReplicas(c1.containerID()); - - Assert.assertEquals(2, replicas.size()); - Assert.assertEquals(3, c1.getReplicationFactor().getNumber()); - } - - private void addReplica(ContainerInfo cont, DatanodeDetails node) - throws ContainerNotFoundException { - ContainerReplica replica = ContainerReplica.newBuilder() - .setContainerID(cont.containerID()) - .setContainerState(ContainerReplicaProto.State.CLOSED) - .setDatanodeDetails(node) - .build(); - containerStateManager - .updateContainerReplica(cont.containerID(), replica); - } - - private ContainerInfo allocateContainer() throws IOException { - - PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class); - - Pipeline pipeline = - Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.THREE) - .setNodes(new ArrayList<>()).build(); - - when(pipelineManager.createPipeline(HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline); - - return containerStateManager.allocateContainer(pipelineManager, - 
HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.THREE, "root"); - - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java deleted file mode 100644 index 9468954eae5..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ /dev/null @@ -1,223 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .IncrementalContainerReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.Set; - -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; - -/** - * Test cases to verify the functionality of IncrementalContainerReportHandler. 
- */ -public class TestIncrementalContainerReportHandler { - - private NodeManager nodeManager; - private ContainerManager containerManager; - private ContainerStateManager containerStateManager; - private EventPublisher publisher; - - @Before - public void setup() throws IOException { - final Configuration conf = new OzoneConfiguration(); - this.containerManager = Mockito.mock(ContainerManager.class); - this.nodeManager = Mockito.mock(NodeManager.class); - this.containerStateManager = new ContainerStateManager(conf); - this.publisher = Mockito.mock(EventPublisher.class); - - - Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainer((ContainerID)invocation.getArguments()[0])); - - Mockito.when(containerManager.getContainerReplicas( - Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainerReplicas((ContainerID)invocation.getArguments()[0])); - - Mockito.doAnswer(invocation -> { - containerStateManager - .updateContainerState((ContainerID)invocation.getArguments()[0], - (HddsProtos.LifeCycleEvent)invocation.getArguments()[1]); - return null; - }).when(containerManager).updateContainerState( - Mockito.any(ContainerID.class), - Mockito.any(HddsProtos.LifeCycleEvent.class)); - - } - - @After - public void tearDown() throws IOException { - containerStateManager.close(); - } - - - @Test - public void testClosingToClosed() throws IOException { - final IncrementalContainerReportHandler reportHandler = - new IncrementalContainerReportHandler(nodeManager, containerManager); - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails(); - final Set containerReplicas = getReplicas( - container.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeOne, datanodeTwo, datanodeThree); - - containerStateManager.loadContainer(container); - containerReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - container.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - final IncrementalContainerReportProto containerReport = - getIncrementalContainerReportProto(container.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeOne.getUuidString()); - final IncrementalContainerReportFromDatanode icrFromDatanode = - new IncrementalContainerReportFromDatanode( - datanodeOne, containerReport); - reportHandler.onMessage(icrFromDatanode, publisher); - Assert.assertEquals(LifeCycleState.CLOSED, container.getState()); - } - - @Test - public void testClosingToQuasiClosed() throws IOException { - final IncrementalContainerReportHandler reportHandler = - new IncrementalContainerReportHandler(nodeManager, containerManager); - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails(); - final Set containerReplicas = getReplicas( - container.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeOne, datanodeTwo, datanodeThree); - - containerStateManager.loadContainer(container); - containerReplicas.forEach(r -> { - try { - 
containerStateManager.updateContainerReplica( - container.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - - final IncrementalContainerReportProto containerReport = - getIncrementalContainerReportProto(container.containerID(), - ContainerReplicaProto.State.QUASI_CLOSED, - datanodeOne.getUuidString()); - final IncrementalContainerReportFromDatanode icrFromDatanode = - new IncrementalContainerReportFromDatanode( - datanodeOne, containerReport); - reportHandler.onMessage(icrFromDatanode, publisher); - Assert.assertEquals(LifeCycleState.QUASI_CLOSED, container.getState()); - } - - @Test - public void testQuasiClosedToClosed() throws IOException { - final IncrementalContainerReportHandler reportHandler = - new IncrementalContainerReportHandler(nodeManager, containerManager); - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final DatanodeDetails datanodeOne = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeTwo = TestUtils.randomDatanodeDetails(); - final DatanodeDetails datanodeThree = TestUtils.randomDatanodeDetails(); - final Set containerReplicas = getReplicas( - container.containerID(), - ContainerReplicaProto.State.CLOSING, - datanodeOne, datanodeTwo); - containerReplicas.addAll(getReplicas( - container.containerID(), - ContainerReplicaProto.State.QUASI_CLOSED, - datanodeThree)); - - containerStateManager.loadContainer(container); - containerReplicas.forEach(r -> { - try { - containerStateManager.updateContainerReplica( - container.containerID(), r); - } catch (ContainerNotFoundException ignored) { - - } - }); - - final IncrementalContainerReportProto containerReport = - getIncrementalContainerReportProto(container.containerID(), - ContainerReplicaProto.State.CLOSED, - datanodeThree.getUuidString()); - final IncrementalContainerReportFromDatanode icr = - new IncrementalContainerReportFromDatanode( - datanodeOne, containerReport); - reportHandler.onMessage(icr, publisher); - Assert.assertEquals(LifeCycleState.CLOSED, container.getState()); - } - - private static IncrementalContainerReportProto - getIncrementalContainerReportProto( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final String originNodeId) { - final IncrementalContainerReportProto.Builder crBuilder = - IncrementalContainerReportProto.newBuilder(); - final ContainerReplicaProto replicaProto = - ContainerReplicaProto.newBuilder() - .setContainerID(containerId.getId()) - .setState(state) - .setOriginNodeId(originNodeId) - .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2") - .setSize(5368709120L) - .setUsed(2000000000L) - .setKeyCount(100000000L) - .setReadCount(100000000L) - .setWriteCount(100000000L) - .setReadBytes(2000000000L) - .setWriteBytes(2000000000L) - .setBlockCommitSequenceId(10000L) - .setDeleteTransactionId(0) - .build(); - return crBuilder.addReport(replicaProto).build(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java deleted file mode 100644 index 1631447af1f..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ /dev/null @@ -1,662 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.lock.LockManager; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.apache.hadoop.hdds.scm.TestUtils.createDatanodeDetails; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; -import static org.apache.hadoop.hdds.scm.TestUtils.randomDatanodeDetails; - -/** - * Test cases to verify the functionality of ReplicationManager. 
- */ -public class TestReplicationManager { - - private ReplicationManager replicationManager; - private ContainerStateManager containerStateManager; - private ContainerPlacementPolicy containerPlacementPolicy; - private EventQueue eventQueue; - private DatanodeCommandHandler datanodeCommandHandler; - - @Before - public void setup() throws IOException, InterruptedException { - final Configuration conf = new OzoneConfiguration(); - final ContainerManager containerManager = - Mockito.mock(ContainerManager.class); - eventQueue = new EventQueue(); - containerStateManager = new ContainerStateManager(conf); - - datanodeCommandHandler = new DatanodeCommandHandler(); - eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler); - - Mockito.when(containerManager.getContainerIDs()) - .thenAnswer(invocation -> containerStateManager.getAllContainerIDs()); - - Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainer((ContainerID)invocation.getArguments()[0])); - - Mockito.when(containerManager.getContainerReplicas( - Mockito.any(ContainerID.class))) - .thenAnswer(invocation -> containerStateManager - .getContainerReplicas((ContainerID)invocation.getArguments()[0])); - - containerPlacementPolicy = Mockito.mock(ContainerPlacementPolicy.class); - - Mockito.when(containerPlacementPolicy.chooseDatanodes( - Mockito.anyListOf(DatanodeDetails.class), - Mockito.anyListOf(DatanodeDetails.class), - Mockito.anyInt(), Mockito.anyLong())) - .thenAnswer(invocation -> { - int count = (int) invocation.getArguments()[2]; - return IntStream.range(0, count) - .mapToObj(i -> randomDatanodeDetails()) - .collect(Collectors.toList()); - }); - - replicationManager = new ReplicationManager( - new ReplicationManagerConfiguration(), - containerManager, - containerPlacementPolicy, - eventQueue, - new LockManager<>(conf)); - replicationManager.start(); - Thread.sleep(100L); - } - - - /** - * Checks if restarting of replication manager works. - */ - @Test - public void testReplicationManagerRestart() throws InterruptedException { - Assert.assertTrue(replicationManager.isRunning()); - replicationManager.stop(); - // Stop is a non-blocking call, it might take sometime for the - // ReplicationManager to shutdown - Thread.sleep(500); - Assert.assertFalse(replicationManager.isRunning()); - replicationManager.start(); - Assert.assertTrue(replicationManager.isRunning()); - } - - /** - * Open containers are not handled by ReplicationManager. - * This test-case makes sure that ReplicationManages doesn't take - * any action on OPEN containers. - */ - @Test - public void testOpenContainer() throws SCMException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.OPEN); - containerStateManager.loadContainer(container); - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); - - } - - /** - * If the container is in CLOSING state we resend close container command - * to all the datanodes. 
- */ - @Test - public void testClosingContainer() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSING); - final ContainerID id = container.containerID(); - - containerStateManager.loadContainer(container); - - // Two replicas in CLOSING state - final Set replicas = getReplicas(id, State.CLOSING, - randomDatanodeDetails(), - randomDatanodeDetails()); - - // One replica in OPEN state - final DatanodeDetails datanode = randomDatanodeDetails(); - replicas.addAll(getReplicas(id, State.OPEN, datanode)); - - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentCloseCommandCount + 3, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); - - // Update the OPEN to CLOSING - for (ContainerReplica replica : getReplicas(id, State.CLOSING, datanode)) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentCloseCommandCount + 6, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); - } - - - /** - * The container is QUASI_CLOSED but two of the replica is still in - * open state. ReplicationManager should resend close command to those - * datanodes. - */ - @Test - public void testQuasiClosedContainerWithTwoOpenReplica() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.OPEN, 1000L, originNodeId, randomDatanodeDetails()); - final DatanodeDetails datanodeDetails = randomDatanodeDetails(); - final ContainerReplica replicaThree = getReplicas( - id, State.OPEN, 1000L, datanodeDetails.getUuid(), datanodeDetails); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - // Two of the replicas are in OPEN state - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentCloseCommandCount + 2, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); - Assert.assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.closeContainerCommand, - replicaTwo.getDatanodeDetails())); - Assert.assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.closeContainerCommand, - replicaThree.getDatanodeDetails())); - } - - /** - * When the container is in QUASI_CLOSED state and all the replicas are - * also in QUASI_CLOSED state and doesn't have a 
quorum to force close - * the container, ReplicationManager will not do anything. - */ - @Test - public void testHealthyQuasiClosedContainer() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - - // All the QUASI_CLOSED replicas have same originNodeId, so the - // container will not be closed. ReplicationManager should take no action. - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); - } - - /** - * When a container is QUASI_CLOSED and we don't have quorum to force close - * the container, the container should have all the replicas in QUASI_CLOSED - * state, else ReplicationManager will take action. - * - * In this test case we make one of the replica unhealthy, replication manager - * will send delete container command to the datanode which has the unhealthy - * replica. - */ - @Test - public void testQuasiClosedContainerWithUnhealthyReplica() - throws SCMException, ContainerNotFoundException, InterruptedException, - ContainerReplicaNotFoundException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - - // All the QUASI_CLOSED replicas have same originNodeId, so the - // container will not be closed. ReplicationManager should take no action. 
- replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); - - // Make the first replica unhealthy - final ContainerReplica unhealthyReplica = getReplicas( - id, State.UNHEALTHY, 1000L, originNodeId, - replicaOne.getDatanodeDetails()); - containerStateManager.updateContainerReplica(id, unhealthyReplica); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - Assert.assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - replicaOne.getDatanodeDetails())); - - // Now we will delete the unhealthy replica from in-memory. - containerStateManager.removeContainerReplica(id, replicaOne); - - // The container is under replicated as unhealthy replica is removed - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - - // We should get replicate command - Assert.assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - } - - /** - * When a QUASI_CLOSED container is over replicated, ReplicationManager - * deletes the excess replicas. - */ - @Test - public void testOverReplicatedQuasiClosedContainer() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - } - - /** - * When a QUASI_CLOSED container is over replicated, ReplicationManager - * deletes the excess replicas. While choosing the replica for deletion - * ReplicationManager should prioritize unhealthy replica over QUASI_CLOSED - * replica. 
- */ - @Test - public void testOverReplicatedQuasiClosedContainerWithUnhealthyReplica() - throws SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaThree = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaFour = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - containerStateManager.updateContainerReplica(id, replicaThree); - containerStateManager.updateContainerReplica(id, replicaFour); - - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - Assert.assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - replicaOne.getDatanodeDetails())); - } - - /** - * ReplicationManager should replicate a QUASI_CLOSED replica if it is - * under replicated. - */ - @Test - public void testUnderReplicatedQuasiClosedContainer() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - } - - /** - * When a QUASI_CLOSED container is under replicated, ReplicationManager - * should re-replicate it. If there is an unhealthy replica, it has to - * be deleted. - * - * In this test case, the container is QUASI_CLOSED and is under replicated - * and also has an unhealthy replica. - * - * In the first iteration of ReplicationManager, it should re-replicate - * the container so that it has enough replicas. - * - * In the second iteration, ReplicationManager should delete the unhealthy - * replica.
- * - * In the third iteration, ReplicationManager will re-replicate as the - * container has again become under replicated after the unhealthy - * replica has been deleted. - * - */ - @Test - public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica() - throws SCMException, ContainerNotFoundException, InterruptedException, - ContainerReplicaNotFoundException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final UUID originNodeId = UUID.randomUUID(); - final ContainerReplica replicaOne = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, randomDatanodeDetails()); - final ContainerReplica replicaTwo = getReplicas( - id, State.UNHEALTHY, 1000L, originNodeId, randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - - final int currentReplicateCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.replicateContainerCommand); - final int currentDeleteCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentReplicateCommandCount + 1, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - - Optional replicateCommand = datanodeCommandHandler - .getReceivedCommands().stream() - .filter(c -> c.getCommand().getType() - .equals(SCMCommandProto.Type.replicateContainerCommand)) - .findFirst(); - - Assert.assertTrue(replicateCommand.isPresent()); - - DatanodeDetails newNode = createDatanodeDetails( - replicateCommand.get().getDatanodeId()); - ContainerReplica newReplica = getReplicas( - id, State.QUASI_CLOSED, 1000L, originNodeId, newNode); - containerStateManager.updateContainerReplica(id, newReplica); - - /* - * We have reported the replica to SCM; in the next ReplicationManager - * iteration it should delete the unhealthy replica. - */ - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentDeleteCommandCount + 1, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.deleteContainerCommand)); - // ReplicaTwo should be deleted, as it is the unhealthy one - Assert.assertTrue(datanodeCommandHandler.received( - SCMCommandProto.Type.deleteContainerCommand, - replicaTwo.getDatanodeDetails())); - - containerStateManager.removeContainerReplica(id, replicaTwo); - - /* - * We have now removed the unhealthy replica; the next iteration of - * ReplicationManager should re-replicate the container as it - * is under replicated now. - */ - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(currentReplicateCommandCount + 2, - datanodeCommandHandler.getInvocationCount( - SCMCommandProto.Type.replicateContainerCommand)); - } - - - /** - * When a container is QUASI_CLOSED and it has >50% of its replicas - * in QUASI_CLOSED state with unique origin node ids, - * ReplicationManager should force close the replica(s) with the - * highest BCSID.
- */ - @Test - public void testQuasiClosedToClosed() throws - SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, State.QUASI_CLOSED, - randomDatanodeDetails(), - randomDatanodeDetails(), - randomDatanodeDetails()); - containerStateManager.loadContainer(container); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - final int currentCloseCommandCount = datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand); - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - - // All the replicas have same BCSID, so all of them will be closed. - Assert.assertEquals(currentCloseCommandCount + 3, datanodeCommandHandler - .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); - - } - - - /** - * ReplicationManager should not take any action if the container is - * CLOSED and healthy. - */ - @Test - public void testHealthyClosedContainer() - throws SCMException, ContainerNotFoundException, InterruptedException { - final ContainerInfo container = getContainer(LifeCycleState.CLOSED); - final ContainerID id = container.containerID(); - final Set replicas = getReplicas(id, State.CLOSED, - randomDatanodeDetails(), - randomDatanodeDetails(), - randomDatanodeDetails()); - - containerStateManager.loadContainer(container); - for (ContainerReplica replica : replicas) { - containerStateManager.updateContainerReplica(id, replica); - } - - replicationManager.processContainersNow(); - // Wait for EventQueue to call the event handler - Thread.sleep(100L); - Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); - } - - @Test - public void testGeneratedConfig() { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - - ReplicationManagerConfiguration rmc = - ozoneConfiguration.getObject(ReplicationManagerConfiguration.class); - - //default is not included in ozone-site.xml but generated from annotation - //to the ozone-site-generated.xml which should be loaded by the - // OzoneConfiguration. - Assert.assertEquals(600000, rmc.getEventTimeout()); - - } - - @After - public void teardown() throws IOException { - containerStateManager.close(); - replicationManager.stop(); - } - - private class DatanodeCommandHandler implements - EventHandler { - - private AtomicInteger invocation = new AtomicInteger(0); - private Map commandInvocation = - new HashMap<>(); - private List commands = new ArrayList<>(); - - @Override - public void onMessage(final CommandForDatanode command, - final EventPublisher publisher) { - final SCMCommandProto.Type type = command.getCommand().getType(); - commandInvocation.computeIfAbsent(type, k -> new AtomicInteger(0)); - commandInvocation.get(type).incrementAndGet(); - invocation.incrementAndGet(); - commands.add(command); - } - - private int getInvocation() { - return invocation.get(); - } - - private int getInvocationCount(SCMCommandProto.Type type) { - return commandInvocation.containsKey(type) ? - commandInvocation.get(type).get() : 0; - } - - private List getReceivedCommands() { - return commands; - } - - /** - * Returns true if the command handler has received the given - * command type for the provided datanode. 
- * - * @param type Command Type - * @param datanode DatanodeDetails - * @return True if command was received, false otherwise - */ - private boolean received(final SCMCommandProto.Type type, - final DatanodeDetails datanode) { - return commands.stream().anyMatch(dc -> - dc.getCommand().getType().equals(type) && - dc.getDatanodeId().equals(datanode.getUuid())); - } - } - -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java deleted file mode 100644 index 75a1ad360fd..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ /dev/null @@ -1,318 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; -import java.io.IOException; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.Iterator; -import java.util.Optional; -import java.util.List; -import java.util.ArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -/** - * Tests for Container ContainerManager. 
- */ -public class TestSCMContainerManager { - private static SCMContainerManager containerManager; - private static MockNodeManager nodeManager; - private static PipelineManager pipelineManager; - private static File testDir; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - private static Random random; - - private static final long TIMEOUT = 10000; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = SCMTestUtils.getConf(); - - testDir = GenericTestUtils - .getTestDir(TestSCMContainerManager.class.getSimpleName()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, - TIMEOUT, - TimeUnit.MILLISECONDS); - boolean folderExisted = testDir.exists() || testDir.mkdirs(); - if (!folderExisted) { - throw new IOException("Unable to create test directory path"); - } - nodeManager = new MockNodeManager(true, 10); - pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - containerManager = new SCMContainerManager(conf, nodeManager, - pipelineManager, new EventQueue()); - xceiverClientManager = new XceiverClientManager(conf); - random = new Random(); - } - - @AfterClass - public static void cleanup() throws IOException { - if(containerManager != null) { - containerManager.close(); - } - if (pipelineManager != null) { - pipelineManager.close(); - } - FileUtil.fullyDelete(testDir); - } - - @Before - public void clearSafeMode() { - nodeManager.setSafemode(false); - } - - @Test - public void testallocateContainer() throws Exception { - ContainerInfo containerInfo = containerManager.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Assert.assertNotNull(containerInfo); - } - - @Test - public void testallocateContainerDistributesAllocation() throws Exception { - /* This is a lame test, we should really be testing something like - z-score or make sure that we don't have 3sigma kind of events. Too lazy - to write all that code. This test very lamely tests if we have more than - 5 separate nodes from the list of 10 datanodes that got allocated a - container. 
- */ - Set pipelineList = new TreeSet<>(); - for (int x = 0; x < 30; x++) { - ContainerInfo containerInfo = containerManager.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - - Assert.assertNotNull(containerInfo); - Assert.assertNotNull(containerInfo.getPipelineID()); - pipelineList.add(pipelineManager.getPipeline( - containerInfo.getPipelineID()).getFirstNode() - .getUuid()); - } - Assert.assertTrue(pipelineList.size() > 5); - } - - @Test - public void testAllocateContainerInParallel() throws Exception { - int threadCount = 20; - List executors = new ArrayList<>(threadCount); - for (int i = 0; i < threadCount; i++) { - executors.add(Executors.newSingleThreadExecutor()); - } - List> futureList = - new ArrayList<>(threadCount); - for (int i = 0; i < threadCount; i++) { - final CompletableFuture future = new CompletableFuture<>(); - CompletableFuture.supplyAsync(() -> { - try { - ContainerInfo containerInfo = containerManager - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - - Assert.assertNotNull(containerInfo); - Assert.assertNotNull(containerInfo.getPipelineID()); - future.complete(containerInfo); - return containerInfo; - } catch (IOException e) { - future.completeExceptionally(e); - } - return future; - }, executors.get(i)); - futureList.add(future); - } - try { - CompletableFuture - .allOf(futureList.toArray(new CompletableFuture[futureList.size()])) - .get(); - } catch (Exception e) { - Assert.fail("testAllocateBlockInParallel failed"); - } - } - - @Test - public void testGetContainer() throws IOException { - ContainerInfo containerInfo = containerManager.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Assert.assertNotNull(containerInfo); - Pipeline pipeline = pipelineManager - .getPipeline(containerInfo.getPipelineID()); - Assert.assertNotNull(pipeline); - Assert.assertEquals(containerInfo, - containerManager.getContainer(containerInfo.containerID())); - } - - @Test - public void testGetContainerWithPipeline() throws Exception { - ContainerInfo contInfo = containerManager - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - // Add dummy replicas for container. - Iterator nodes = pipelineManager - .getPipeline(contInfo.getPipelineID()).getNodes().iterator(); - DatanodeDetails dn1 = nodes.next(); - containerManager.updateContainerState(contInfo.containerID(), - LifeCycleEvent.FINALIZE); - containerManager - .updateContainerState(contInfo.containerID(), LifeCycleEvent.CLOSE); - ContainerInfo finalContInfo = contInfo; - Assert.assertEquals(0, - containerManager.getContainerReplicas( - finalContInfo.containerID()).size()); - - containerManager.updateContainerReplica(contInfo.containerID(), - ContainerReplica.newBuilder().setContainerID(contInfo.containerID()) - .setContainerState(ContainerReplicaProto.State.CLOSED) - .setDatanodeDetails(dn1).build()); - - Assert.assertEquals(1, - containerManager.getContainerReplicas( - finalContInfo.containerID()).size()); - - contInfo = containerManager.getContainer(contInfo.containerID()); - Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED); - // After closing the container, we should get the replica and construct - // standalone pipeline. No more ratis pipeline. 
- - Set replicaNodes = containerManager - .getContainerReplicas(contInfo.containerID()) - .stream().map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toSet()); - Assert.assertTrue(replicaNodes.contains(dn1)); - } - - @Test - public void testGetContainerReplicaWithParallelUpdate() throws Exception { - testGetContainerWithPipeline(); - final Optional id = containerManager.getContainerIDs() - .stream().findFirst(); - Assert.assertTrue(id.isPresent()); - final ContainerID cId = id.get(); - final Optional replica = containerManager - .getContainerReplicas(cId).stream().findFirst(); - Assert.assertTrue(replica.isPresent()); - final ContainerReplica cReplica = replica.get(); - final AtomicBoolean runUpdaterThread = - new AtomicBoolean(true); - - Thread updaterThread = new Thread(() -> { - while (runUpdaterThread.get()) { - try { - containerManager.removeContainerReplica(cId, cReplica); - containerManager.updateContainerReplica(cId, cReplica); - } catch (ContainerException e) { - Assert.fail("Container Exception: " + e.getMessage()); - } - } - }); - - updaterThread.setDaemon(true); - updaterThread.start(); - - IntStream.range(0, 100).forEach(i -> { - try { - Assert.assertNotNull(containerManager - .getContainerReplicas(cId) - .stream().map(ContainerReplica::getDatanodeDetails) - .collect(Collectors.toSet())); - } catch (ContainerNotFoundException e) { - Assert.fail("Missing Container " + id); - } - }); - runUpdaterThread.set(false); - } - - @Test - public void testgetNoneExistentContainer() { - try { - containerManager.getContainer(ContainerID.valueof( - random.nextInt() & Integer.MAX_VALUE)); - Assert.fail(); - } catch (ContainerNotFoundException ex) { - // Success! - } - } - - @Test - public void testCloseContainer() throws IOException { - ContainerID id = createContainer().containerID(); - containerManager.updateContainerState(id, - HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState(id, - HddsProtos.LifeCycleEvent.CLOSE); - ContainerInfo closedContainer = containerManager.getContainer(id); - Assert.assertEquals(LifeCycleState.CLOSED, closedContainer.getState()); - } - - /** - * Creates a container with the given name in SCMContainerManager. - * @throws IOException - */ - private ContainerInfo createContainer() - throws IOException { - nodeManager.setSafemode(false); - return containerManager - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java deleted file mode 100644 index 2f35719816f..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle happy. - */ -package org.apache.hadoop.hdds.scm.container.closer; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java deleted file mode 100644 index f93aea66e7b..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.container; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java deleted file mode 100644 index 18c4a64a040..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.net.NodeSchema; -import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.when; - -/** - * Test for scm container placement factory. - */ -public class TestContainerPlacementFactory { - // network topology cluster - private NetworkTopology cluster; - // datanodes array list - private List datanodes = new ArrayList<>(); - // node storage capacity - private final long storageCapacity = 100L; - // configuration - private Configuration conf; - // node manager - private NodeManager nodeManager; - - @Before - public void setup() { - //initialize network topology instance - conf = new OzoneConfiguration(); - } - - @Test - public void testRackAwarePolicy() throws IOException { - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementRackAware.class.getName()); - - NodeSchema[] schemas = new NodeSchema[] - {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; - NodeSchemaManager.getInstance().init(schemas, true); - cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - - // build datanodes, and network topology - String rack = "/rack"; - String hostname = "node"; - for (int i = 0; i < 15; i++) { - // Totally 3 racks, each has 5 datanodes - DatanodeDetails node = TestUtils.createDatanodeDetails( - hostname + i, rack + (i / 5)); - datanodes.add(node); - cluster.add(node); - } - - // create mock node manager - nodeManager = Mockito.mock(NodeManager.class); - when(nodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - when(nodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(storageCapacity, 0L, 100L)); - when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(storageCapacity, 90L, 10L)); - when(nodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(storageCapacity, 80L, 20L)); - when(nodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(storageCapacity, 70L, 30L)); - - ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory 
- .getPolicy(conf, nodeManager, cluster, true, - SCMContainerPlacementMetrics.create()); - - int nodeNum = 3; - List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(2))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), - datanodeDetails.get(2))); - } - - @Test - public void testDefaultPolicy() throws IOException { - ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory - .getPolicy(conf, null, null, true, null); - Assert.assertSame(SCMContainerPlacementRandom.class, policy.getClass()); - } - - /** - * A dummy container placement implementation for testing. - */ - public static class DummyImpl implements ContainerPlacementPolicy { - @Override - public List chooseDatanodes( - List excludedNodes, List favoredNodes, - int nodesRequired, long sizeRequired) { - return null; - } - } - - @Test(expected = SCMException.class) - public void testConstuctorNotFound() throws SCMException { - // set a placement class which doesn't have the right constructor implemented - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - DummyImpl.class.getName()); - ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true, null); - } - - @Test(expected = RuntimeException.class) - public void testClassNotImplemented() throws SCMException { - // set a placement class that does not exist - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - "org.apache.hadoop.hdds.scm.container.placement.algorithm.HelloWorld"); - ContainerPlacementPolicyFactory.getPolicy(conf, null, null, true, null); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java deleted file mode 100644 index 00ec3988e8a..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import org.junit.Assert; -import org.junit.Test; -import static org.mockito.Matchers.anyObject; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -/** - * Test for the scm container placement. - */ -public class TestSCMContainerPlacementCapacity { - @Test - public void chooseDatanodes() throws SCMException { - //given - Configuration conf = new OzoneConfiguration(); - - List datanodes = new ArrayList<>(); - for (int i = 0; i < 7; i++) { - datanodes.add(TestUtils.randomDatanodeDetails()); - } - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - when(mockNodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - - when(mockNodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); - when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); - when(mockNodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(100L, 80L, 20L)); - when(mockNodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(100L, 70L, 30L)); - - SCMContainerPlacementCapacity scmContainerPlacementRandom = - new SCMContainerPlacementCapacity(mockNodeManager, conf, null, true, - null); - - List existingNodes = new ArrayList<>(); - existingNodes.add(datanodes.get(0)); - existingNodes.add(datanodes.get(1)); - - Map selectedCount = new HashMap<>(); - for (DatanodeDetails datanode : datanodes) { - selectedCount.put(datanode, 0); - } - - for (int i = 0; i < 1000; i++) { - - //when - List datanodeDetails = scmContainerPlacementRandom - .chooseDatanodes(existingNodes, null, 1, 15); - - //then - Assert.assertEquals(1, datanodeDetails.size()); - DatanodeDetails datanode0Details = datanodeDetails.get(0); - - Assert.assertNotEquals( - "Datanode 0 should not been selected: excluded by parameter", - datanodes.get(0), datanode0Details); - Assert.assertNotEquals( - "Datanode 1 should not been selected: excluded by parameter", - datanodes.get(1), datanode0Details); - Assert.assertNotEquals( - "Datanode 2 should not been selected: not enough space there", - datanodes.get(2), datanode0Details); - - selectedCount - .put(datanode0Details, selectedCount.get(datanode0Details) + 1); - - } - - //datanode 4 has less space. Should be selected less times. 
- Assert.assertTrue(selectedCount.get(datanodes.get(3)) > selectedCount - .get(datanodes.get(6))); - Assert.assertTrue(selectedCount.get(datanodes.get(4)) > selectedCount - .get(datanodes.get(6))); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java deleted file mode 100644 index 2d8b81633e7..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ /dev/null @@ -1,375 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.NetConstants; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.net.NodeSchema; -import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.mockito.Mockito; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeTrue; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.when; - -/** - * Test for the scm container rack aware placement. 
- */ -@RunWith(Parameterized.class) -public class TestSCMContainerPlacementRackAware { - private NetworkTopology cluster; - private Configuration conf; - private NodeManager nodeManager; - private Integer datanodeCount; - private List datanodes = new ArrayList<>(); - // policy with fallback capability - private SCMContainerPlacementRackAware policy; - // policy prohibit fallback - private SCMContainerPlacementRackAware policyNoFallback; - // node storage capacity - private static final long STORAGE_CAPACITY = 100L; - private SCMContainerPlacementMetrics metrics; - private static final int NODE_PER_RACK = 5; - - public TestSCMContainerPlacementRackAware(Integer count) { - this.datanodeCount = count; - } - - @Parameterized.Parameters - public static Collection setupDatanodes() { - return Arrays.asList(new Object[][]{{3}, {4}, {5}, {6}, {7}, {8}, {9}, - {10}, {11}, {12}, {13}, {14}, {15}}); - } - - @Before - public void setup() { - //initialize network topology instance - conf = new OzoneConfiguration(); - NodeSchema[] schemas = new NodeSchema[] - {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; - NodeSchemaManager.getInstance().init(schemas, true); - cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - - // build datanodes, and network topology - String rack = "/rack"; - String hostname = "node"; - for (int i = 0; i < datanodeCount; i++) { - // Totally 3 racks, each has 5 datanodes - DatanodeDetails node = TestUtils.createDatanodeDetails( - hostname + i, rack + (i / NODE_PER_RACK)); - datanodes.add(node); - cluster.add(node); - } - - // create mock node manager - nodeManager = Mockito.mock(NodeManager.class); - when(nodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - when(nodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 0L, 100L)); - if (datanodeCount > 4) { - when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); - when(nodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); - when(nodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L)); - } else if (datanodeCount > 3) { - when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); - when(nodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); - } else if (datanodeCount > 2) { - when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 84L, 16L)); - } - - // create placement policy instances - metrics = SCMContainerPlacementMetrics.create(); - policy = new SCMContainerPlacementRackAware( - nodeManager, conf, cluster, true, metrics); - policyNoFallback = new SCMContainerPlacementRackAware( - nodeManager, conf, cluster, false, metrics); - } - - - @Test - public void chooseNodeWithNoExcludedNodes() throws SCMException { - // test choose new datanodes for new pipeline cases - // 1 replica - int nodeNum = 1; - List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - - // 2 replicas - nodeNum = 2; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1)) || (datanodeCount % NODE_PER_RACK == 1)); - - // 3 replicas - nodeNum = 3; - datanodeDetails = 
policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - // requires at least 2 racks for following statement - assumeTrue(datanodeCount > NODE_PER_RACK && - datanodeCount % NODE_PER_RACK > 1); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(2))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), - datanodeDetails.get(2))); - - // 4 replicas - nodeNum = 4; - datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - // requires at least 2 racks and enough datanodes for following statement - assumeTrue(datanodeCount > NODE_PER_RACK + 1); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(2))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), - datanodeDetails.get(2))); - } - - @Test - public void chooseNodeWithExcludedNodes() throws SCMException { - // test choose new datanodes for under replicated pipeline - // 3 replicas, two existing datanodes on same rack - assumeTrue(datanodeCount > NODE_PER_RACK); - int nodeNum = 1; - List excludedNodes = new ArrayList<>(); - - excludedNodes.add(datanodes.get(0)); - excludedNodes.add(datanodes.get(1)); - List datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - excludedNodes.get(0))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - excludedNodes.get(1))); - - // 3 replicas, one existing datanode - nodeNum = 2; - excludedNodes.clear(); - excludedNodes.add(datanodes.get(0)); - datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent( - datanodeDetails.get(0), excludedNodes.get(0)) || - cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); - - // 3 replicas, two existing datanodes on different rack - nodeNum = 1; - excludedNodes.clear(); - excludedNodes.add(datanodes.get(0)); - excludedNodes.add(datanodes.get(5)); - datanodeDetails = policy.chooseDatanodes( - excludedNodes, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent( - datanodeDetails.get(0), excludedNodes.get(0)) || - cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); - } - - @Test - public void testFallback() throws SCMException { - // 5 replicas. there are only 3 racks. policy with fallback should - // allocate the 5th datanode though it will break the rack rule(first - // 2 replicas on same rack, others on different racks). 
- assumeTrue(datanodeCount > NODE_PER_RACK * 2 && - (datanodeCount % NODE_PER_RACK > 1)); - int nodeNum = 5; - List datanodeDetails = - policy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(2))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), - datanodeDetails.get(2))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(3))); - Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(2), - datanodeDetails.get(3))); - - // get metrics - long totalRequest = metrics.getDatanodeRequestCount(); - long successCount = metrics.getDatanodeChooseSuccessCount(); - long tryCount = metrics.getDatanodeChooseAttemptCount(); - long compromiseCount = metrics.getDatanodeChooseFallbackCount(); - - // verify metrics - Assert.assertTrue(totalRequest == nodeNum); - Assert.assertTrue(successCount == nodeNum); - Assert.assertTrue(tryCount > nodeNum); - Assert.assertTrue(compromiseCount >= 1); - } - - @Test - public void testNoFallback() throws SCMException { - assumeTrue(datanodeCount > (NODE_PER_RACK * 2) && - (datanodeCount <= NODE_PER_RACK * 3)); - // 5 replicas. There are only 3 racks. A policy prohibiting fallback should fail. - int nodeNum = 5; - try { - policyNoFallback.chooseDatanodes(null, null, nodeNum, 15); - fail("Fallback prohibited, this call should fail"); - } catch (Exception e) { - assertTrue(e.getClass().getSimpleName().equals("SCMException")); - } - - // get metrics - long totalRequest = metrics.getDatanodeRequestCount(); - long successCount = metrics.getDatanodeChooseSuccessCount(); - long tryCount = metrics.getDatanodeChooseAttemptCount(); - long compromiseCount = metrics.getDatanodeChooseFallbackCount(); - - Assert.assertTrue(totalRequest == nodeNum); - Assert.assertTrue(successCount >= 3); - Assert.assertTrue(tryCount >= nodeNum); - Assert.assertTrue(compromiseCount == 0); - } - - @Test - public void chooseNodeWithFavoredNodes() throws SCMException { - int nodeNum = 1; - List excludedNodes = new ArrayList<>(); - List favoredNodes = new ArrayList<>(); - - // no excludedNodes, only favoredNodes - favoredNodes.add(datanodes.get(0)); - List datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(datanodeDetails.get(0).getNetworkFullPath() - .equals(favoredNodes.get(0).getNetworkFullPath())); - - // no overlap between excludedNodes and favoredNodes, favoredNodes can be - // chosen. - excludedNodes.clear(); - favoredNodes.clear(); - excludedNodes.add(datanodes.get(0)); - favoredNodes.add(datanodes.get(2)); - datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(datanodeDetails.get(0).getNetworkFullPath() - .equals(favoredNodes.get(0).getNetworkFullPath())); - - // there is overlap between excludedNodes and favoredNodes, favoredNodes - // should not be chosen.
- excludedNodes.clear(); - favoredNodes.clear(); - excludedNodes.add(datanodes.get(0)); - favoredNodes.add(datanodes.get(0)); - datanodeDetails = policy.chooseDatanodes( - excludedNodes, favoredNodes, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertFalse(datanodeDetails.get(0).getNetworkFullPath() - .equals(favoredNodes.get(0).getNetworkFullPath())); - } - - @Test - public void testNoInfiniteLoop() throws SCMException { - int nodeNum = 1; - - try { - // request storage space larger than node capability - policy.chooseDatanodes(null, null, nodeNum, STORAGE_CAPACITY + 15); - fail("Storage requested exceeds capacity, this call should fail"); - } catch (Exception e) { - assertTrue(e.getClass().getSimpleName().equals("SCMException")); - } - - // get metrics - long totalRequest = metrics.getDatanodeRequestCount(); - long successCount = metrics.getDatanodeChooseSuccessCount(); - long tryCount = metrics.getDatanodeChooseAttemptCount(); - long compromiseCount = metrics.getDatanodeChooseFallbackCount(); - - Assert.assertTrue(totalRequest == nodeNum); - Assert.assertTrue(successCount == 0); - Assert.assertTrue(tryCount > nodeNum); - Assert.assertTrue(compromiseCount == 0); - } - - @Test - public void testDatanodeWithDefaultNetworkLocation() throws SCMException { - String hostname = "node"; - List dataList = new ArrayList<>(); - NetworkTopology clusterMap = - new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - for (int i = 0; i < 15; i++) { - // Totally 3 racks, each has 5 datanodes - DatanodeDetails node = TestUtils.createDatanodeDetails( - hostname + i, null); - dataList.add(node); - clusterMap.add(node); - } - Assert.assertEquals(dataList.size(), StringUtils.countMatches( - clusterMap.toString(), NetConstants.DEFAULT_RACK)); - - // choose nodes to host 3 replica - int nodeNum = 3; - SCMContainerPlacementRackAware newPolicy = - new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true, - metrics); - List datanodeDetails = - newPolicy.chooseDatanodes(null, null, nodeNum, 15); - Assert.assertEquals(nodeNum, datanodeDetails.size()); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(2))); - Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(1), - datanodeDetails.get(2))); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java deleted file mode 100644 index 43e3a8d1346..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container.placement.algorithms; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import org.junit.Assert; -import org.junit.Test; -import static org.mockito.Matchers.anyObject; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -/** - * Test for the random container placement. - */ -public class TestSCMContainerPlacementRandom { - - @Test - public void chooseDatanodes() throws SCMException { - //given - Configuration conf = new OzoneConfiguration(); - - List datanodes = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - datanodes.add(TestUtils.randomDatanodeDetails()); - } - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - when(mockNodeManager.getNodes(NodeState.HEALTHY)) - .thenReturn(new ArrayList<>(datanodes)); - - when(mockNodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(100L, 0L, 100L)); - when(mockNodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(100L, 90L, 10L)); - - SCMContainerPlacementRandom scmContainerPlacementRandom = - new SCMContainerPlacementRandom(mockNodeManager, conf, null, true, - null); - - List existingNodes = new ArrayList<>(); - existingNodes.add(datanodes.get(0)); - existingNodes.add(datanodes.get(1)); - - for (int i = 0; i < 100; i++) { - //when - List datanodeDetails = scmContainerPlacementRandom - .chooseDatanodes(existingNodes, null, 1, 15); - - //then - Assert.assertEquals(1, datanodeDetails.size()); - DatanodeDetails datanode0Details = datanodeDetails.get(0); - - Assert.assertNotEquals( - "Datanode 0 should not been selected: excluded by parameter", - datanodes.get(0), datanode0Details); - Assert.assertNotEquals( - "Datanode 1 should not been selected: excluded by parameter", - datanodes.get(1), datanode0Details); - Assert.assertNotEquals( - "Datanode 2 should not been selected: not enough space there", - datanodes.get(2), datanode0Details); - - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java deleted file mode 100644 index 1423c999381..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * SCM Testing and Mocking Utils. - */ -package org.apache.hadoop.hdds.scm.container.replication; -// Test classes for Replication functionality. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java deleted file mode 100644 index 63cc9bfd789..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.container.states; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.Arrays; -import java.util.List; - -/** - * Test ContainerAttribute management. - */ -public class TestContainerAttribute { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Test - public void testInsert() throws SCMException { - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); - containerAttribute.insert(1, id); - Assert.assertEquals(1, - containerAttribute.getCollection(1).size()); - Assert.assertTrue(containerAttribute.getCollection(1).contains(id)); - - // Insert again and verify that it overwrites an existing value. 
- ContainerID newId = - new ContainerID(42); - containerAttribute.insert(1, newId); - Assert.assertEquals(1, - containerAttribute.getCollection(1).size()); - Assert.assertTrue(containerAttribute.getCollection(1).contains(newId)); - } - - @Test - public void testHasKey() throws SCMException { - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - - for (int x = 1; x < 42; x++) { - containerAttribute.insert(1, new ContainerID(x)); - } - Assert.assertTrue(containerAttribute.hasKey(1)); - for (int x = 1; x < 42; x++) { - Assert.assertTrue(containerAttribute.hasContainerID(1, x)); - } - - Assert.assertFalse(containerAttribute.hasContainerID(1, - new ContainerID(42))); - } - - @Test - public void testClearSet() throws SCMException { - List keyslist = Arrays.asList("Key1", "Key2", "Key3"); - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - for (String k : keyslist) { - for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); - } - } - for (String k : keyslist) { - Assert.assertEquals(100, - containerAttribute.getCollection(k).size()); - } - containerAttribute.clearSet("Key1"); - Assert.assertEquals(0, - containerAttribute.getCollection("Key1").size()); - } - - @Test - public void testRemove() throws SCMException { - - List keyslist = Arrays.asList("Key1", "Key2", "Key3"); - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - - for (String k : keyslist) { - for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); - } - } - for (int x = 1; x < 101; x += 2) { - containerAttribute.remove("Key1", new ContainerID(x)); - } - - for (int x = 1; x < 101; x += 2) { - Assert.assertFalse(containerAttribute.hasContainerID("Key1", - new ContainerID(x))); - } - - Assert.assertEquals(100, - containerAttribute.getCollection("Key2").size()); - - Assert.assertEquals(100, - containerAttribute.getCollection("Key3").size()); - - Assert.assertEquals(50, - containerAttribute.getCollection("Key1").size()); - } - - @Test - public void tesUpdate() throws SCMException { - String key1 = "Key1"; - String key2 = "Key2"; - String key3 = "Key3"; - - ContainerAttribute containerAttribute = new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); - - containerAttribute.insert(key1, id); - Assert.assertTrue(containerAttribute.hasContainerID(key1, id)); - Assert.assertFalse(containerAttribute.hasContainerID(key2, id)); - - // This should move the id from key1 bucket to key2 bucket. - containerAttribute.update(key1, key2, id); - Assert.assertFalse(containerAttribute.hasContainerID(key1, id)); - Assert.assertTrue(containerAttribute.hasContainerID(key2, id)); - - // This should fail since we cannot find this id in the key3 bucket. - thrown.expect(SCMException.class); - containerAttribute.update(key3, key1, id); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java deleted file mode 100644 index 795dfc1e1b9..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.container.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java deleted file mode 100644 index 26ffd8d1d34..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.test.PathUtils; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.junit.Assert.assertEquals; - -/** - * Test for different container placement policy. - */ -public class TestContainerPlacement { - @Rule - public ExpectedException thrown = ExpectedException.none(); - - /** - * Returns a new copy of Configuration. - * - * @return Config - */ - OzoneConfiguration getConf() { - return new OzoneConfiguration(); - } - - /** - * Creates a NodeManager. - * - * @param config - Config for the node manager. - * @return SCNNodeManager - * @throws IOException - */ - - SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException { - EventQueue eventQueue = new EventQueue(); - eventQueue.addHandler(SCMEvents.NEW_NODE, - Mockito.mock(NewNodeHandler.class)); - eventQueue.addHandler(SCMEvents.STALE_NODE, - Mockito.mock(StaleNodeHandler.class)); - eventQueue.addHandler(SCMEvents.DEAD_NODE, - Mockito.mock(DeadNodeHandler.class)); - - SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); - Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); - - SCMNodeManager nodeManager = new SCMNodeManager(config, - storageConfig, eventQueue, null); - return nodeManager; - } - - SCMContainerManager createContainerManager(Configuration config, - NodeManager scmNodeManager) throws IOException { - EventQueue eventQueue = new EventQueue(); - final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, - OZONE_SCM_DB_CACHE_SIZE_DEFAULT); - PipelineManager pipelineManager = - new SCMPipelineManager(config, scmNodeManager, eventQueue, null); - return new SCMContainerManager(config, scmNodeManager, pipelineManager, - eventQueue); - - } - - /** - * Test capacity based container placement policy with node reports. 
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - public void testContainerPlacementCapacity() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int nodeCount = 4; - final long capacity = 10L * OzoneConsts.GB; - final long used = 2L * OzoneConsts.GB; - final long remaining = capacity - used; - - final File testDir = PathUtils.getTestDir( - TestContainerPlacement.class); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - - SCMNodeManager nodeManager = createNodeManager(conf); - SCMContainerManager containerManager = - createContainerManager(conf, nodeManager); - List datanodes = - TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount); - XceiverClientManager xceiverClientManager = null; - try { - for (DatanodeDetails datanodeDetails : datanodes) { - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(capacity * nodeCount, - (long) nodeManager.getStats().getCapacity().get()); - assertEquals(used * nodeCount, - (long) nodeManager.getStats().getScmUsed().get()); - assertEquals(remaining * nodeCount, - (long) nodeManager.getStats().getRemaining().get()); - - xceiverClientManager= new XceiverClientManager(new OzoneConfiguration()); - - ContainerInfo container = containerManager - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), "OZONE"); - assertEquals(xceiverClientManager.getFactor().getNumber(), - containerManager.getContainerReplicas( - container.containerID()).size()); - } finally { - IOUtils.closeQuietly(containerManager); - IOUtils.closeQuietly(nodeManager); - if (xceiverClientManager != null) { - xceiverClientManager.close(); - } - FileUtil.fullyDelete(testDir); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java deleted file mode 100644 index 7657b54373f..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import java.io.File; -import java.io.IOException; -import java.util.Arrays; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; - -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.security.authentication.client - .AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test DeadNodeHandler. 
- */ -public class TestDeadNodeHandler { - - private StorageContainerManager scm; - private SCMNodeManager nodeManager; - private ContainerManager containerManager; - private NodeReportHandler nodeReportHandler; - private DeadNodeHandler deadNodeHandler; - private EventPublisher publisher; - private EventQueue eventQueue; - private String storageDir; - - @Before - public void setup() throws IOException, AuthenticationException { - OzoneConfiguration conf = new OzoneConfiguration(); - storageDir = GenericTestUtils.getTempPath( - TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - eventQueue = new EventQueue(); - scm = HddsTestUtils.getScm(conf); - nodeManager = (SCMNodeManager) scm.getScmNodeManager(); - SCMPipelineManager manager = - (SCMPipelineManager)scm.getPipelineManager(); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, manager.getStateManager(), - conf); - manager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - containerManager = scm.getContainerManager(); - deadNodeHandler = new DeadNodeHandler(nodeManager, - Mockito.mock(PipelineManager.class), containerManager); - eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler); - publisher = Mockito.mock(EventPublisher.class); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @After - public void teardown() { - scm.stop(); - scm.join(); - FileUtil.fullyDelete(new File(storageDir)); - } - - @Test - public void testOnMessage() throws IOException, NodeNotFoundException { - //GIVEN - DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails(); - DatanodeDetails datanode3 = TestUtils.randomDatanodeDetails(); - - String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode1.getUuidString()); - - StorageReportProto storageOne = TestUtils.createStorageReport( - datanode1.getUuid(), storagePath, 100, 10, 90, null); - - // Standalone pipeline now excludes the nodes which are already used, - // is the a proper behavior. Adding 9 datanodes for now to make the - // test case happy. 
- - nodeManager.register(datanode1, - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(datanode2, - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(datanode3, - TestUtils.createNodeReport(storageOne), null); - - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(TestUtils.randomDatanodeDetails(), - TestUtils.createNodeReport(storageOne), null); - - ContainerInfo container1 = - TestUtils.allocateContainer(containerManager); - ContainerInfo container2 = - TestUtils.allocateContainer(containerManager); - ContainerInfo container3 = - TestUtils.allocateContainer(containerManager); - ContainerInfo container4 = - TestUtils.allocateContainer(containerManager); - - registerContainers(datanode1, container1, container2, container4); - registerContainers(datanode2, container1, container2); - registerContainers(datanode3, container3); - - registerReplicas(containerManager, container1, datanode1, datanode2); - registerReplicas(containerManager, container2, datanode1, datanode2); - registerReplicas(containerManager, container3, datanode3); - registerReplicas(containerManager, container4, datanode1); - - TestUtils.closeContainer(containerManager, container1.containerID()); - TestUtils.closeContainer(containerManager, container2.containerID()); - TestUtils.quasiCloseContainer(containerManager, container3.containerID()); - - deadNodeHandler.onMessage(datanode1, publisher); - - Set container1Replicas = containerManager - .getContainerReplicas(new ContainerID(container1.getContainerID())); - Assert.assertEquals(1, container1Replicas.size()); - Assert.assertEquals(datanode2, - container1Replicas.iterator().next().getDatanodeDetails()); - - Set container2Replicas = containerManager - .getContainerReplicas(new ContainerID(container2.getContainerID())); - Assert.assertEquals(1, container2Replicas.size()); - Assert.assertEquals(datanode2, - container2Replicas.iterator().next().getDatanodeDetails()); - - Set container3Replicas = containerManager - .getContainerReplicas(new ContainerID(container3.getContainerID())); - Assert.assertEquals(1, container3Replicas.size()); - Assert.assertEquals(datanode3, - container3Replicas.iterator().next().getDatanodeDetails()); - } - - private void registerReplicas(ContainerManager contManager, - ContainerInfo container, DatanodeDetails... datanodes) - throws ContainerNotFoundException { - for (DatanodeDetails datanode : datanodes) { - contManager.updateContainerReplica( - new ContainerID(container.getContainerID()), - ContainerReplica.newBuilder() - .setContainerState(ContainerReplicaProto.State.OPEN) - .setContainerID(container.containerID()) - .setDatanodeDetails(datanode).build()); - } - } - - /** - * Update containers available on the datanode. - * @param datanode - * @param containers - * @throws NodeNotFoundException - */ - private void registerContainers(DatanodeDetails datanode, - ContainerInfo... 
containers) - throws NodeNotFoundException { - nodeManager - .setContainers(datanode, - Arrays.stream(containers) - .map(container -> new ContainerID(container.getContainerID())) - .collect(Collectors.toSet())); - } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java deleted file mode 100644 index 88de27d9965..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import java.io.IOException; -import java.util.UUID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test for the Node Report Handler. - */ -public class TestNodeReportHandler implements EventPublisher { - - private static final Logger LOG = LoggerFactory - .getLogger(TestNodeReportHandler.class); - private NodeReportHandler nodeReportHandler; - private SCMNodeManager nodeManager; - private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + UUID.randomUUID().toString()); - - @Before - public void resetEventCollector() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class); - Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1"); - nodeManager = - new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock( - NetworkTopology.class)); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @Test - public void testNodeReport() throws IOException { - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - StorageReportProto storageOne = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - - SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); - Assert.assertNull(nodeMetric); - - nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null); - nodeMetric = nodeManager.getNodeStat(dn); - - Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10); - - StorageReportProto storageTwo = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - nodeReportHandler.onMessage( - getNodeReport(dn, storageOne, storageTwo), this); - nodeMetric = nodeManager.getNodeStat(dn); - - Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 180); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 20); - - } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... 
reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } - - @Override - public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - LOG.info("Event is published: {}", payload); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java deleted file mode 100644 index db76d667878..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ /dev/null @@ -1,1225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.PathUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic - .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic - .NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test the SCM Node Manager class. 
- */ -public class TestSCMNodeManager { - - private File testDir; - private StorageContainerManager scm; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void init() throws IOException { - } - - @Before - public void setup() { - testDir = PathUtils.getTestDir( - TestSCMNodeManager.class); - } - - @After - public void cleanup() { - if (scm != null) { - scm.stop(); - scm.join(); - } - FileUtil.fullyDelete(testDir); - } - - /** - * Returns a new copy of Configuration. - * - * @return Config - */ - OzoneConfiguration getConf() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - TimeUnit.MILLISECONDS); - return conf; - } - - /** - * Creates a NodeManager. - * - * @param config - Config for the node manager. - * @return SCNNodeManager - * @throws IOException - */ - - SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException, AuthenticationException { - scm = HddsTestUtils.getScm(config); - return (SCMNodeManager) scm.getScmNodeManager(); - } - - /** - * Tests that Node manager handles heartbeats correctly, and comes out of - * safe Mode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHeartbeat() - throws IOException, InterruptedException, AuthenticationException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - int registeredNodes = 5; - // Send some heartbeats from different nodes. - for (int x = 0; x < registeredNodes; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertTrue("Heartbeat thread should have picked up the" + - "scheduled heartbeats.", - nodeManager.getAllNodes().size() == registeredNodes); - } - } - - /** - * asserts that if we send no heartbeats node manager stays in safemode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmNoHeartbeats() - throws IOException, InterruptedException, AuthenticationException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertTrue("No heartbeats, 0 nodes should be registered", - nodeManager.getAllNodes().size() == 0); - } - } - - /** - * Asserts that adding heartbeats after shutdown does not work. This implies - * that heartbeat thread has been shutdown safely by closing the node - * manager. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmShutdown() - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - SCMNodeManager nodeManager = createNodeManager(conf); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.close(); - - // These should never be processed. - nodeManager.processHeartbeat(datanodeDetails); - - // Let us just wait for 2 seconds to prove that HBs are not processed. 
- Thread.sleep(2 * 1000); - - //TODO: add assertion - } - - /** - * Asserts that we detect as many healthy nodes as we have generated heartbeat - * for. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHealthyNodeCount() - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - final int count = 10; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(count, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Asserts that if Stale Interval value is more than 5 times the value of HB - * processing thread it is a sane value. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmSanityOfUserConfig2() - throws IOException, AuthenticationException { - OzoneConfiguration conf = getConf(); - final int interval = 100; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS); - - // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS); - createNodeManager(conf).close(); - } - - /** - * Asserts that a single node moves from Healthy to stale node, then from - * stale node to dead node if it misses enough heartbeats. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmDetectStaleAndDeadNode() - throws IOException, InterruptedException, AuthenticationException { - final int interval = 100; - final int nodeCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List nodeList = createNodeSet(nodeManager, nodeCount); - - - DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister( - nodeManager); - - // Heartbeat once - nodeManager.processHeartbeat(staleNode); - - // Heartbeat all other nodes. - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds .. and heartbeat good nodes again. - Thread.sleep(2 * 1000); - - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds, wait a total of 4 seconds to make sure that the - // node moves into stale state. - Thread.sleep(2 * 1000); - List staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 1 stale node", - 1, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 1 stale node", - 1, staleNodeList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleNodeList.get(0).getUuid()); - Thread.sleep(1000); - - // heartbeat good nodes again. 
- for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // 6 seconds is the dead window for this test , so we wait a total of - // 7 seconds to make sure that the node moves into dead state. - Thread.sleep(2 * 1000); - - // the stale node has been removed - staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 1 stale node", - 0, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 1 stale node", - 0, staleNodeList.size()); - - // Check for the dead node now. - List deadNodeList = nodeManager.getNodes(DEAD); - assertEquals("Expected to find 1 dead node", 1, - nodeManager.getNodeCount(DEAD)); - assertEquals("Expected to find 1 dead node", - 1, deadNodeList.size()); - assertEquals("Dead node is not the expected ID", staleNode - .getUuid(), deadNodeList.get(0).getUuid()); - } - } - - /** - * Simulate a JVM Pause by pausing the health check process - * Ensure that none of the nodes with heartbeats become Dead or Stale. - * @throws IOException - * @throws InterruptedException - * @throws AuthenticationException - */ - @Test - public void testScmHandleJvmPause() - throws IOException, InterruptedException, AuthenticationException { - final int healthCheckInterval = 200; // milliseconds - final int heartbeatInterval = 1; // seconds - final int staleNodeInterval = 3; // seconds - final int deadNodeInterval = 6; // seconds - ScheduledFuture schedFuture; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - healthCheckInterval, MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - heartbeatInterval, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, - staleNodeInterval, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, - deadNodeInterval, SECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails node2 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - - nodeManager.processHeartbeat(node1); - nodeManager.processHeartbeat(node2); - - // Sleep so that heartbeat processing thread gets to run. - Thread.sleep(1000); - - //Assert all nodes are healthy. - assertEquals(2, nodeManager.getAllNodes().size()); - assertEquals(2, nodeManager.getNodeCount(HEALTHY)); - - /** - * Simulate a JVM Pause and subsequent handling in following steps: - * Step 1 : stop heartbeat check process for stale node interval - * Step 2 : resume heartbeat check - * Step 3 : wait for 1 iteration of heartbeat check thread - * Step 4 : retrieve the state of all nodes - assert all are HEALTHY - * Step 5 : heartbeat for node1 - * [TODO : what if there is scheduling delay of test thread in Step 5?] 
- * Step 6 : wait for some time to allow iterations of check process - * Step 7 : retrieve the state of all nodes - assert node2 is STALE - * and node1 is HEALTHY - */ - - // Step 1 : stop health check process (simulate JVM pause) - nodeManager.pauseHealthCheck(); - Thread.sleep(MILLISECONDS.convert(staleNodeInterval, SECONDS)); - - // Step 2 : resume health check - assertTrue("Unexpected, already skipped heartbeat checks", - (nodeManager.getSkippedHealthChecks() == 0)); - schedFuture = nodeManager.unpauseHealthCheck(); - - // Step 3 : wait for 1 iteration of health check - try { - schedFuture.get(); - assertTrue("We did not skip any heartbeat checks", - nodeManager.getSkippedHealthChecks() > 0); - } catch (ExecutionException e) { - assertEquals("Unexpected exception waiting for Scheduled Health Check", - 0, 1); - } - - // Step 4 : all nodes should still be HEALTHY - assertEquals(2, nodeManager.getAllNodes().size()); - assertEquals(2, nodeManager.getNodeCount(HEALTHY)); - - // Step 5 : heartbeat for node1 - nodeManager.processHeartbeat(node1); - - // Step 6 : wait for health check process to run - Thread.sleep(1000); - - // Step 7 : node2 should transition to STALE - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - assertEquals(1, nodeManager.getNodeCount(STALE)); - } - } - - /** - * Check for NPE when datanodeDetails is passed null for sendHeartbeat. - * - * @throws IOException - */ - @Test - public void testScmCheckForErrorOnNullDatanodeDetails() - throws IOException, AuthenticationException { - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.processHeartbeat(null); - } catch (NullPointerException npe) { - GenericTestUtils.assertExceptionContains("Heartbeat is missing " + - "DatanodeDetails.", npe); - } - } - - /** - * Asserts that a dead node, stale node and healthy nodes co-exist. The counts - * , lists and node ID match the expected node state. - *

- * This test is pretty complicated because it explores all states of the Node - * Manager in a single test. Please read through the comments to get an idea of - * the current state of the Node Manager. - *

- * This test is written like a state machine to avoid threads and concurrency - * issues. This test is replicated below with the use of threads. Avoiding - * threads make it easy to debug the state machine. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - /** - * These values are very important. Here is what it means so you don't - * have to look it up while reading this code. - * - * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This the frequency of the - * HB processing thread that is running in the SCM. This thread must run - * for the SCM to process the Heartbeats. - * - * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which - * datanodes will send heartbeats to SCM. Please note: This is the only - * config value for node manager that is specified in seconds. We don't - * want SCM heartbeat resolution to be more than in seconds. - * In this test it is not used, but we are forced to set it because we - * have validation code that checks Stale Node interval and Dead Node - * interval is larger than the value of - * OZONE_SCM_HEARTBEAT_INTERVAL. - * - * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for us to mark a node as stale. In this test - * we set that to 3. That is if a node has not heartbeat SCM for last 3 - * seconds we will mark it as stale. - * - * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for a node to be marked dead. We have an - * additional constraint that this must be at least 2 times bigger than - * Stale node Interval. - * - * With these we are trying to explore the state of this cluster with - * various timeouts. Each section is commented so that you can keep - * track of the state of the cluster nodes. - * - */ - - @Test - public void testScmClusterIsInExpectedState1() - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - /** - * Cluster state: Healthy: All nodes are heartbeat-ing like normal. - */ - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails healthyNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails staleNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails deadNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - // Sleep so that heartbeat processing thread gets to run. - Thread.sleep(500); - - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - - /** - * Cluster state: Quiesced: We are going to sleep for 3 seconds. Which - * means that no node is heartbeating. All nodes should move to Stale. - */ - Thread.sleep(3 * 1000); - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(STALE)); - - - /** - * Cluster State : Move healthy node back to healthy state, move other 2 - * nodes to Stale State. - * - * We heartbeat healthy node after 1 second and let other 2 nodes elapse - * the 3 second windows. 
- */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - - - // 3.5 seconds from last heartbeat for the stale and deadNode. So those - // 2 nodes must move to Stale state and the healthy node must - // remain in the healthy State. - List healthyList = nodeManager.getNodes(HEALTHY); - assertEquals("Expected one healthy node", 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals(2, nodeManager.getNodeCount(STALE)); - - /** - * Cluster State: Allow healthyNode to remain in healthy state and - * staleNode to move to stale state and deadNode to move to dead state. - */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - - // 3.5 seconds have elapsed for stale node, so it moves into Stale. - // 7 seconds have elapsed for dead node, so it moves into dead. - // 2 Seconds have elapsed for healthy node, so it stays in healthy state. - healthyList = nodeManager.getNodes(HEALTHY); - List staleList = nodeManager.getNodes(STALE); - List deadList = nodeManager.getNodes(DEAD); - - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - assertEquals(1, nodeManager.getNodeCount(STALE)); - assertEquals(1, nodeManager.getNodeCount(DEAD)); - - assertEquals("Expected one healthy node", - 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals("Expected one stale node", - 1, staleList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleList.get(0).getUuid()); - - assertEquals("Expected one dead node", - 1, deadList.size()); - assertEquals("Dead node is not the expected ID", deadNode - .getUuid(), deadList.get(0).getUuid()); - /** - * Cluster State : let us heartbeat all the nodes and verify that we get - * back all the nodes in healthy state. - */ - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - Thread.sleep(500); - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Heartbeat a given set of nodes at a specified frequency. - * - * @param manager - Node Manager - * @param list - List of datanodeIDs - * @param sleepDuration - Duration to sleep between heartbeats. - * @throws InterruptedException - */ - private void heartbeatNodeSet(SCMNodeManager manager, - List list, - int sleepDuration) throws InterruptedException { - while (!Thread.currentThread().isInterrupted()) { - for (DatanodeDetails dn : list) { - manager.processHeartbeat(dn); - } - Thread.sleep(sleepDuration); - } - } - - /** - * Create a set of Nodes with a given prefix. - * - * @param count - number of nodes. - * @return List of Nodes. 
- */ - private List createNodeSet(SCMNodeManager nodeManager, int - count) { - List list = new ArrayList<>(); - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - list.add(datanodeDetails); - } - return list; - } - - /** - * Function that tells us if we found the right number of stale nodes. - * - * @param nodeManager - node manager - * @param count - number of stale nodes to look for. - * @return true if we found the expected number. - */ - private boolean findNodes(NodeManager nodeManager, int count, - HddsProtos.NodeState state) { - return count == nodeManager.getNodeCount(state); - } - - /** - * Asserts that we can create a set of nodes that send its heartbeats from - * different threads and NodeManager behaves as expected. - * - * @throws IOException - * @throws InterruptedException - */ - @Test - public void testScmClusterIsInExpectedState2() - throws IOException, InterruptedException, TimeoutException, - AuthenticationException { - final int healthyCount = 5000; - final int staleCount = 100; - final int deadCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List healthyNodeList = createNodeSet(nodeManager, - healthyCount); - List staleNodeList = createNodeSet(nodeManager, - staleCount); - List deadNodeList = createNodeSet(nodeManager, - deadCount); - - Runnable healthyNodeTask = () -> { - try { - // 2 second heartbeat makes these nodes stay healthy. - heartbeatNodeSet(nodeManager, healthyNodeList, 2 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Runnable staleNodeTask = () -> { - try { - // 4 second heartbeat makes these nodes go to stale and back to - // healthy again. - heartbeatNodeSet(nodeManager, staleNodeList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - - // No Thread just one time HBs the node manager, so that these will be - // marked as dead nodes eventually. - for (DatanodeDetails dn : deadNodeList) { - nodeManager.processHeartbeat(dn); - } - - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - - Thread.sleep(10 * 1000); - - // Assert all healthy nodes are healthy now, this has to be a greater - // than check since Stale nodes can be healthy when we check the state. - - assertTrue(nodeManager.getNodeCount(HEALTHY) >= healthyCount); - - assertEquals(deadCount, nodeManager.getNodeCount(DEAD)); - - List deadList = nodeManager.getNodes(DEAD); - - for (DatanodeDetails node : deadList) { - assertTrue(deadNodeList.contains(node)); - } - - - - // Checking stale nodes is tricky since they have to move between - // healthy and stale to avoid becoming dead nodes. So we search for - // that state for a while, if we don't find that state waitfor will - // throw. - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 4 * 1000); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - /** - * Asserts that we can handle 6000+ nodes heartbeating SCM. 
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmCanHandleScale() - throws IOException, InterruptedException, TimeoutException, - AuthenticationException { - final int healthyCount = 3000; - final int staleCount = 3000; - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, - SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, - MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000, - MILLISECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List healthyList = createNodeSet(nodeManager, - healthyCount); - List staleList = createNodeSet(nodeManager, - staleCount); - - Runnable healthyNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, healthyList, 2 * 1000); - } catch (InterruptedException ignored) { - - } - }; - - Runnable staleNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, staleList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - Thread.sleep(3 * 1000); - - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 20 * 1000); - assertEquals("Node count mismatch", - healthyCount + staleCount, nodeManager.getAllNodes().size()); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - /** - * Test multiple nodes sending initial heartbeat with their node report. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. - public void testScmStatsFromNodeReport() - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - final int nodeCount = 10; - final long capacity = 2000; - final long used = 100; - final long remaining = capacity - used; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - for (int x = 0; x < nodeCount; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - UUID dnId = datanodeDetails.getUuid(); - long free = capacity - used; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(capacity * nodeCount, (long) nodeManager.getStats() - .getCapacity().get()); - assertEquals(used * nodeCount, (long) nodeManager.getStats() - .getScmUsed().get()); - assertEquals(remaining * nodeCount, (long) nodeManager.getStats() - .getRemaining().get()); - } - } - - /** - * Test single node stat update based on nodereport from different heartbeat - * status (healthy, stale and dead). - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. 
- public void testScmNodeReportUpdate() - throws IOException, InterruptedException, TimeoutException, - AuthenticationException { - OzoneConfiguration conf = getConf(); - final int heartbeatCount = 5; - final int nodeCount = 1; - final int interval = 100; - - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails datanodeDetails = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - final long capacity = 2000; - final long usedPerHeartbeat = 100; - UUID dnId = datanodeDetails.getUuid(); - for (int x = 0; x < heartbeatCount; x++) { - long scmUsed = x * usedPerHeartbeat; - long remaining = capacity - scmUsed; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, scmUsed, - remaining, null); - - nodeManager.processHeartbeat(datanodeDetails); - Thread.sleep(100); - } - - final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1); - final long expectedRemaining = capacity - expectedScmUsed; - - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - - long foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(capacity, foundCapacity); - - long foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - long foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Test NodeManager#getNodeStats - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, nodeCapacity); - - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Compare the result from - // NodeManager#getNodeStats and NodeManager#getNodeStat - SCMNodeStat stat1 = nodeManager.getNodeStats(). - get(datanodeDetails.getUuid()); - SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get(); - assertEquals(stat1, stat2); - - // Wait up to 4s so that the node becomes stale - // Verify the usage info should be unchanged. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(STALE) == 1, 100, - 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get() - .getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get(). - getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Wait up to 4 more seconds so the node becomes dead - // Verify usage info should be updated. 
- GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(DEAD) == 1, 100, - 4 * 1000); - - assertEquals(0, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(0, foundCapacity); - - foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(0, foundScmUsed); - - foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(0, foundRemaining); - - nodeManager.processHeartbeat(datanodeDetails); - - // Wait up to 5 seconds so that the dead node becomes healthy - // Verify usage info should be updated. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(HEALTHY) == 1, - 100, 5 * 1000); - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - } - } - - @Test - public void testHandlingSCMCommandEvent() - throws IOException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - UUID dnId = datanodeDetails.getUuid(); - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); - - EventQueue eq = new EventQueue(); - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - eq.addHandler(DATANODE_COMMAND, nodemanager); - - nodemanager - .register(datanodeDetails, TestUtils.createNodeReport(report), - TestUtils.getRandomPipelineReports()); - eq.fireEvent(DATANODE_COMMAND, - new CommandForDatanode<>(datanodeDetails.getUuid(), - new CloseContainerCommand(1L, - PipelineID.randomId()))); - - eq.processAll(1000L); - List command = - nodemanager.processHeartbeat(datanodeDetails); - Assert.assertEquals(1, command.size()); - Assert - .assertEquals(command.get(0).getClass(), CloseContainerCommand.class); - } catch (IOException e) { - e.printStackTrace(); - throw e; - } - } - - /** - * Test add node into network topology during node register. Datanode - * uses Ip address to resolve network location. - */ - @Test - public void testScmRegisterNodeWithIpAddress() - throws IOException, InterruptedException, AuthenticationException { - testScmRegisterNodeWithNetworkTopology(false); - } - - /** - * Test add node into network topology during node register. Datanode - * uses hostname to resolve network location. - */ - @Test - public void testScmRegisterNodeWithHostname() - throws IOException, InterruptedException, AuthenticationException { - testScmRegisterNodeWithNetworkTopology(true); - } - - /** - * Test getNodesByAddress when using IPs. - * - */ - @Test - public void testgetNodesByAddressWithIpAddress() - throws IOException, InterruptedException, AuthenticationException { - testGetNodesByAddress(false); - } - - /** - * Test getNodesByAddress when using hostnames. 
- */ - @Test - public void testgetNodesByAddressWithHostname() - throws IOException, InterruptedException, AuthenticationException { - testGetNodesByAddress(true); - } - - /** - * Test add node into a 4-layer network topology during node register. - */ - @Test - public void testScmRegisterNodeWith4LayerNetworkTopology() - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - - // create table mapping file - String[] hostNames = {"host1", "host2", "host3", "host4"}; - String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"}; - String mapFile = this.getClass().getClassLoader() - .getResource("nodegroup-mapping").getPath(); - - // create and register nodes - conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - "org.apache.hadoop.net.TableMapping"); - conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile); - conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, - "network-topology-nodegroup.xml"); - final int nodeCount = hostNames.length; - // use default IP address to resolve node - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; - for (int i = 0; i < nodeCount; i++) { - DatanodeDetails node = TestUtils.createDatanodeDetails( - UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); - nodeManager.register(node, null, null); - nodes[i] = node; - } - - // verify network topology cluster has all the registered nodes - Thread.sleep(4 * 1000); - NetworkTopology clusterMap = scm.getClusterMap(); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(nodeCount, clusterMap.getNumOfLeafNode("")); - assertEquals(4, clusterMap.getMaxLevel()); - List nodeList = nodeManager.getAllNodes(); - nodeList.stream().forEach(node -> - Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng"))); - } - } - - private void testScmRegisterNodeWithNetworkTopology(boolean useHostname) - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - - // create table mapping file - String[] hostNames = {"host1", "host2", "host3", "host4"}; - String[] ipAddress = {"1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"}; - String mapFile = this.getClass().getClassLoader() - .getResource("rack-mapping").getPath(); - - // create and register nodes - conf.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - "org.apache.hadoop.net.TableMapping"); - conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, mapFile); - if (useHostname) { - conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true"); - } - final int nodeCount = hostNames.length; - // use default IP address to resolve node - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; - for (int i = 0; i < nodeCount; i++) { - DatanodeDetails node = TestUtils.createDatanodeDetails( - UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); - nodeManager.register(node, null, null); - nodes[i] = node; - } - - // verify network topology cluster has all the registered nodes - Thread.sleep(4 * 1000); - NetworkTopology clusterMap = scm.getClusterMap(); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(nodeCount, clusterMap.getNumOfLeafNode("")); - assertEquals(3, clusterMap.getMaxLevel()); - List nodeList = 
nodeManager.getAllNodes(); - nodeList.stream().forEach(node -> - Assert.assertTrue(node.getNetworkLocation().equals("/rack1"))); - - // test get node - if (useHostname) { - Arrays.stream(hostNames).forEach(hostname -> - Assert.assertNotEquals(0, nodeManager.getNodesByAddress(hostname) - .size())); - } else { - Arrays.stream(ipAddress).forEach(ip -> - Assert.assertNotEquals(0, nodeManager.getNodesByAddress(ip) - .size())); - } - } - } - - /** - * Test add node into a 4-layer network topology during node register. - */ - private void testGetNodesByAddress(boolean useHostname) - throws IOException, InterruptedException, AuthenticationException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - - // create a set of hosts - note two hosts on "host1" - String[] hostNames = {"host1", "host1", "host2", "host3", "host4"}; - String[] ipAddress = - {"1.2.3.4", "1.2.3.4", "2.3.4.5", "3.4.5.6", "4.5.6.7"}; - - if (useHostname) { - conf.set(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, "true"); - } - final int nodeCount = hostNames.length; - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; - for (int i = 0; i < nodeCount; i++) { - DatanodeDetails node = TestUtils.createDatanodeDetails( - UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); - nodeManager.register(node, null, null); - } - // test get node - Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size()); - if (useHostname) { - Assert.assertEquals(2, - nodeManager.getNodesByAddress("host1").size()); - Assert.assertEquals(1, nodeManager.getNodesByAddress("host2").size()); - Assert.assertEquals(0, nodeManager.getNodesByAddress("unknown").size()); - } else { - Assert.assertEquals(2, - nodeManager.getNodesByAddress("1.2.3.4").size()); - Assert.assertEquals(1, nodeManager.getNodesByAddress("2.3.4.5").size()); - Assert.assertEquals(0, nodeManager.getNodesByAddress("1.9.8.7").size()); - } - } - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java deleted file mode 100644 index e12c6433c97..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ /dev/null @@ -1,262 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto. 
- StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.*; -import org.junit.Rule; -import org.junit.rules.ExpectedException; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.Set; -import java.util.ArrayList; -import java.util.HashSet; -import java.io.IOException; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test Node Storage Map. - */ -public class TestSCMNodeStorageStatMap { - private final static int DATANODE_COUNT = 100; - private final long capacity = 10L * OzoneConsts.GB; - private final long used = 2L * OzoneConsts.GB; - private final long remaining = capacity - used; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private final Map> testData = - new ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - UUID dnId = UUID.randomUUID(); - Set reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + - Integer.toString(dnIndex)); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(dnId.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - testData.put(UUID.randomUUID(), reportSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - Set report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertEquals(map.getStorageVolumes(knownNode), - testData.get(knownNode)); - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, report); - } - - @Test - public void testUpdateUnknownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID unknownNode = UUID.randomUUID(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode - .toString()); - Set reportSet = new HashSet<>(); - StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(unknownNode.toString()) - 
.setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - thrown.expect(SCMException.class); - thrown.expectMessage("No such datanode"); - map.updateDatanodeMap(unknownNode, reportSet); - } - - @Test - public void testProcessNodeReportCheckOneNode() throws IOException { - UUID key = getFirstKey(); - List reportList = new ArrayList<>(); - Set reportSet = testData.get(key); - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - map.insertNewDatanode(key, reportSet); - Assert.assertTrue(map.isKnownDatanode(key)); - UUID storageId = UUID.randomUUID(); - String path = - GenericTestUtils.getRandomizedTempPath().concat("/" + storageId); - StorageLocationReport report = reportSet.iterator().next(); - long reportCapacity = report.getCapacity(); - long reportScmUsed = report.getScmUsed(); - long reportRemaining = report.getRemaining(); - StorageReportProto storageReport = TestUtils.createStorageReport(storageId, - path, reportCapacity, reportScmUsed, reportRemaining, null); - StorageReportResult result = - map.processNodeReport(key, TestUtils.createNodeReport(storageReport)); - Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, - result.getStatus()); - StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = - NodeReportProto.newBuilder(); - StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); - reportList.add(srb); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL, - result.getStatus()); - - reportList.add(TestUtils - .createStorageReport(UUID.randomUUID(), path, reportCapacity, - reportCapacity, 0, null)); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE, - result.getStatus()); - // Mark a disk failed - StorageReportProto srb2 = StorageReportProto.newBuilder() - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity) - .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build(); - reportList.add(srb2); - nrb.addAllStorageReport(reportList); - result = map.processNodeReport(key, nrb.addStorageReport(srb).build()); - Assert.assertEquals(SCMNodeStorageStatMap.ReportStatus - .FAILED_AND_OUT_OF_SPACE_STORAGE, result.getStatus()); - - } - - @Test - public void testProcessMultipleNodeReports() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - int counter = 1; - // Insert all testData into the SCMNodeStorageStatMap Map. 
- for (Map.Entry> keyEntry : testData - .entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity()); - Assert.assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace()); - Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed()); - - // upadate 1/4th of the datanode to be full - for (Map.Entry> keyEntry : testData - .entrySet()) { - Set reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry - .getKey().toString()); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK) - .setId(keyEntry.getKey().toString()).setStorageLocation(path) - .setScmUsed(capacity).setRemaining(0).setCapacity(capacity) - .setFailed(false); - reportSet.add(builder.build()); - - map.updateDatanodeMap(keyEntry.getKey(), reportSet); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - Assert.assertEquals(DATANODE_COUNT / 4, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert.assertEquals( - 0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity), - map.getTotalSpaceUsed(), 0); - counter = 1; - // Remove 1/4 of the DataNodes from the Map - for (Map.Entry> keyEntry : testData - .entrySet()) { - map.removeDatanode(keyEntry.getKey()); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert - .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), - 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert - .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0); - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java deleted file mode 100644 index 9bce94b2b1d..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.security.authentication.client - .AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.UUID; - -/** - * Verifies the statics in NodeManager. 
- */ -public class TestStatisticsUpdate { - - private NodeManager nodeManager; - private NodeReportHandler nodeReportHandler; - - @Before - public void setup() throws IOException, AuthenticationException { - final OzoneConfiguration conf = new OzoneConfiguration(); - final String storageDir = GenericTestUtils.getTempPath( - TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - conf.set(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, "100ms"); - conf.set(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, "50ms"); - conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); - conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "2s"); - final EventQueue eventQueue = new EventQueue(); - final StorageContainerManager scm = HddsTestUtils.getScm(conf); - nodeManager = scm.getScmNodeManager(); - final DeadNodeHandler deadNodeHandler = new DeadNodeHandler( - nodeManager, Mockito.mock(PipelineManager.class), - scm.getContainerManager()); - eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @Test - public void testStatisticsUpdate() throws Exception { - //GIVEN - DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails(); - - String storagePath1 = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode1.getUuidString()); - String storagePath2 = GenericTestUtils.getRandomizedTempPath() - .concat("/" + datanode2.getUuidString()); - - StorageReportProto storageOne = TestUtils.createStorageReport( - datanode1.getUuid(), storagePath1, 100, 10, 90, null); - StorageReportProto storageTwo = TestUtils.createStorageReport( - datanode2.getUuid(), storagePath2, 200, 20, 180, null); - - nodeManager.register(datanode1, - TestUtils.createNodeReport(storageOne), null); - nodeManager.register(datanode2, - TestUtils.createNodeReport(storageTwo), null); - - NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne); - NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo); - - nodeReportHandler.onMessage( - new NodeReportFromDatanode(datanode1, nodeReportProto1), - Mockito.mock(EventPublisher.class)); - nodeReportHandler.onMessage( - new NodeReportFromDatanode(datanode2, nodeReportProto2), - Mockito.mock(EventPublisher.class)); - - SCMNodeStat stat = nodeManager.getStats(); - Assert.assertEquals(300L, stat.getCapacity().get().longValue()); - Assert.assertEquals(270L, stat.getRemaining().get().longValue()); - Assert.assertEquals(30L, stat.getScmUsed().get().longValue()); - - SCMNodeMetric nodeStat = nodeManager.getNodeStat(datanode1); - Assert.assertEquals(100L, nodeStat.get().getCapacity().get().longValue()); - Assert.assertEquals(90L, nodeStat.get().getRemaining().get().longValue()); - Assert.assertEquals(10L, nodeStat.get().getScmUsed().get().longValue()); - - //TODO: Support logic to mark a node as dead in NodeManager. - - nodeManager.processHeartbeat(datanode2); - Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2); - Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2); - Thread.sleep(1000); - nodeManager.processHeartbeat(datanode2); - //THEN statistics in SCM should changed. 
- stat = nodeManager.getStats(); - Assert.assertEquals(200L, stat.getCapacity().get().longValue()); - Assert.assertEquals(180L, - stat.getRemaining().get().longValue()); - Assert.assertEquals(20L, stat.getScmUsed().get().longValue()); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java deleted file mode 100644 index dfd83977712..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.node; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java deleted file mode 100644 index 77ed9075ae1..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test classes for Node2ContainerMap. 
- */ -public class TestNode2ContainerMap { - private final static int DATANODE_COUNT = 300; - private final static int CONTAINER_COUNT = 1000; - private final Map> testData = new - ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - TreeSet currentSet = new TreeSet<>(); - for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) { - long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex; - currentSet.add(new ContainerID(currentCnIndex)); - } - testData.put(UUID.randomUUID(), currentSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - Set containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Set readSet = map.getContainers(knownNode); - - // Assert that all elements are present in the set that we read back from - // node map. - Set newSet = new TreeSet((readSet)); - Assert.assertTrue(newSet.removeAll(containerIDs)); - Assert.assertTrue(newSet.size() == 0); - - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, containerIDs); - - map.removeDatanode(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - - } - - @Test - public void testProcessReportCheckOneNode() throws SCMException { - UUID key = getFirstKey(); - Set values = testData.get(key); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(key, values); - Assert.assertTrue(map.isKnownDatanode(key)); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL, - result.getStatus()); - } - - @Test - public void testUpdateDatanodeMap() throws SCMException { - UUID datanodeId = getFirstKey(); - Set values = testData.get(datanodeId); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(datanodeId, values); - Assert.assertTrue(map.isKnownDatanode(datanodeId)); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - //remove one container - values.remove(values.iterator().next()); - Assert.assertEquals(CONTAINER_COUNT - 1, values.size()); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - map.setContainersForDatanode(datanodeId, values); - - Assert.assertEquals(values.size(), map.getContainers(datanodeId).size()); - Assert.assertEquals(values, map.getContainers(datanodeId)); - } - - @Test - public void testProcessReportInsertAll() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - - for (Map.Entry> keyEntry : testData.entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - // Assert all 
Keys are known datanodes. - for (UUID key : testData.keySet()) { - Assert.assertTrue(map.isKnownDatanode(key)); - } - } - - /* - For ProcessReport we have to test the following scenarios. - - 1. New Datanode - A new datanode appears and we have to add that to the - SCM's Node2Container Map. - - 2. New Container - A Datanode exists, but a new container is added to that - DN. We need to detect that and return a list of added containers. - - 3. Missing Container - A Datanode exists, but one of the expected container - on that datanode is missing. We need to detect that. - - 4. We get a container report that has both the missing and new containers. - We need to return separate lists for these. - */ - - /** - * Assert that we are able to detect the addition of a new datanode. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewDataNode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - // If we attempt to process a node that is not present in the map, - // we get a result back that says, NEW_NODE_FOUND. - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND, - result.getStatus()); - Assert.assertEquals(result.getNewEntries().size(), values.size()); - } - - /** - * This test asserts that processReport is able to detect new containers - * when it is added to a datanode. For that we populate the DN with a list - * of containerIDs and then add few more containers and make sure that we - * are able to detect them. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - final int newCount = 100; - ContainerID last = values.last(); - TreeSet addedContainers = new TreeSet<>(); - for (int x = 1; x <= newCount; x++) { - long cTemp = last.getId() + x; - addedContainers.add(new ContainerID(cTemp)); - } - - // This set is the super set of existing containers and new containers. - TreeSet newContainersSet = new TreeSet<>(values); - newContainersSet.addAll(addedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - //Assert that expected size of missing container is same as addedContainers - Assert.assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND, - result.getStatus()); - - Assert.assertEquals(addedContainers.size(), - result.getNewEntries().size()); - - // Assert that the Container IDs are the same as we added new. - Assert.assertTrue("All objects are not removed.", - result.getNewEntries().removeAll(addedContainers)); - } - - /** - * This test asserts that processReport is able to detect missing containers - * if they are misssing from a list. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectMissingContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet removedContainers = new TreeSet<>(); - - // Pick a random container to remove it is ok to collide no issues. 
- for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - // This set is a new set with some containers removed. - TreeSet newContainersSet = new TreeSet<>(values); - newContainersSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - - //Assert that expected size of missing container is same as addedContainers - Assert.assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - // Assert that the Container IDs are the same as we added new. - Assert.assertTrue("All missing containers not found.", - result.getMissingEntries().removeAll(removedContainers)); - } - - @Test - public void testProcessReportDetectNewAndMissingContainers() throws - SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet values = testData.get(key); - map.insertNewDatanode(key, values); - - Set insertedSet = new TreeSet<>(); - // Insert nodes from 1..30 - for (int x = 1; x <= 30; x++) { - insertedSet.add(new ContainerID(x)); - } - - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet removedContainers = new TreeSet<>(); - - // Pick a random container to remove it is ok to collide no issues. - for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - Set newSet = new TreeSet<>(values); - newSet.addAll(insertedSet); - newSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newSet); - - - Assert.assertEquals( - ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - - // Assert that the Container IDs are the same as we added new. - Assert.assertTrue("All missing containers not found.", - result.getMissingEntries().removeAll(removedContainers)); - - Assert.assertEquals(insertedSet.size(), - result.getNewEntries().size()); - - // Assert that the Container IDs are the same as we added new. - Assert.assertTrue("All inserted containers are not found.", - result.getNewEntries().removeAll(insertedSet)); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java deleted file mode 100644 index 6610fcd7106..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Test Node2Container Map. - */ -package org.apache.hadoop.hdds.scm.node.states; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index da05c59acfc..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; -/** - * SCM tests - */ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java deleted file mode 100644 index 01c53baf2bf..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.node.NodeManager; - -import java.io.IOException; - -/** - * Mock Ratis Pipeline Provider for Mock Nodes. 
- */ -public class MockRatisPipelineProvider extends RatisPipelineProvider { - - public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, - Configuration conf) { - super(nodeManager, stateManager, conf, null); - } - - protected void initializePipeline(Pipeline pipeline) throws IOException { - // do nothing as the datanodes do not exists - } - - @Override - public void shutdown() { - // Do nothing. - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java deleted file mode 100644 index 94c3039d41d..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * This class tests HealthyPipelineSafeMode rule. 
- */ -public class TestHealthyPipelineSafeModeRule { - - @Test - public void testHealthyPipelineSafeModeRuleWithNoPipelines() - throws Exception { - - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID()); - try { - EventQueue eventQueue = new EventQueue(); - List containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(1)); - - OzoneConfiguration config = new OzoneConfiguration(); - MockNodeManager nodeManager = new MockNodeManager(true, 0); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - // enable pipeline check - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - - - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, eventQueue, null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, pipelineManager, eventQueue); - - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - scmSafeModeManager.getHealthyPipelineSafeModeRule(); - - // This should be immediately satisfied, as no pipelines are there yet. - Assert.assertTrue(healthyPipelineSafeModeRule.validate()); - } finally { - FileUtil.fullyDelete(new File(storageDir)); - } - - } - - - @Test - public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { - - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID()); - - try { - EventQueue eventQueue = new EventQueue(); - List containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(1)); - - OzoneConfiguration config = new OzoneConfiguration(); - - // In Mock Node Manager, first 8 nodes are healthy, next 2 nodes are - // stale and last one is dead, and this repeats. So for a 12 node, 9 - // healthy, 2 stale and one dead. 
- MockNodeManager nodeManager = new MockNodeManager(true, 12); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - // enable pipeline check - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - - - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, eventQueue, null); - - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - // Create 3 pipelines - Pipeline pipeline1 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Pipeline pipeline2 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Pipeline pipeline3 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - - - SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, pipelineManager, eventQueue); - - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - scmSafeModeManager.getHealthyPipelineSafeModeRule(); - - - // No datanodes have sent pipelinereport from datanode - Assert.assertFalse(healthyPipelineSafeModeRule.validate()); - - // Fire pipeline report from all datanodes in first pipeline, as here we - // have 3 pipelines, 10% is 0.3, when doing ceil it is 1. So, we should - // validate should return true after fire pipeline event - - - //Here testing with out pipelinereport handler, so not moving created - // pipelines to allocated state, as pipelines changing to healthy is - // handled by pipeline report handler. So, leaving pipeline's in pipeline - // manager in open state for test case simplicity. - - firePipelineEvent(pipeline1, eventQueue); - GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(), - 1000, 5000); - } finally { - FileUtil.fullyDelete(new File(storageDir)); - } - - } - - - @Test - public void testHealthyPipelineSafeModeRuleWithMixedPipelines() - throws Exception { - - String storageDir = GenericTestUtils.getTempPath( - TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID()); - - try { - EventQueue eventQueue = new EventQueue(); - List containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(1)); - - OzoneConfiguration config = new OzoneConfiguration(); - - // In Mock Node Manager, first 8 nodes are healthy, next 2 nodes are - // stale and last one is dead, and this repeats. So for a 12 node, 9 - // healthy, 2 stale and one dead. 
- MockNodeManager nodeManager = new MockNodeManager(true, 12); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - // enable pipeline check - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - - - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, eventQueue, null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - // Create 3 pipelines - Pipeline pipeline1 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); - Pipeline pipeline2 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Pipeline pipeline3 = - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - - - SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, pipelineManager, eventQueue); - - HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = - scmSafeModeManager.getHealthyPipelineSafeModeRule(); - - - // No datanodes have sent pipelinereport from datanode - Assert.assertFalse(healthyPipelineSafeModeRule.validate()); - - - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(LoggerFactory.getLogger( - SCMSafeModeManager.class)); - - // fire event with pipeline report with ratis type and factor 1 - // pipeline, validate() should return false - firePipelineEvent(pipeline1, eventQueue); - - GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( - "reported count is 0"), - 1000, 5000); - Assert.assertFalse(healthyPipelineSafeModeRule.validate()); - - firePipelineEvent(pipeline2, eventQueue); - firePipelineEvent(pipeline3, eventQueue); - - GenericTestUtils.waitFor(() -> healthyPipelineSafeModeRule.validate(), - 1000, 5000); - - } finally { - FileUtil.fullyDelete(new File(storageDir)); - } - - } - - - private void firePipelineEvent(Pipeline pipeline, EventQueue eventQueue) { - PipelineReportsProto.Builder reportBuilder = PipelineReportsProto - .newBuilder(); - - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipeline.getId().getProtobuf())); - - // Here no need to fire event from 3 nodes, as already pipeline is in - // open state, but doing it. - eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode( - pipeline.getNodes().get(0), reportBuilder.build())); - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java deleted file mode 100644 index ca54d052113..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ /dev/null @@ -1,209 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; - -/** - * This class tests OneReplicaPipelineSafeModeRule. - */ -public class TestOneReplicaPipelineSafeModeRule { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private OneReplicaPipelineSafeModeRule rule; - private SCMPipelineManager pipelineManager; - private EventQueue eventQueue; - - - private void setup(int nodes, int pipelineFactorThreeCount, - int pipelineFactorOneCount) throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - ozoneConfiguration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - folder.newFolder().toString()); - - List containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(1)); - MockNodeManager mockNodeManager = new MockNodeManager(true, nodes); - - eventQueue = new EventQueue(); - pipelineManager = - new SCMPipelineManager(ozoneConfiguration, mockNodeManager, - eventQueue, null); - - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(mockNodeManager, - pipelineManager.getStateManager(), ozoneConfiguration); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - createPipelines(pipelineFactorThreeCount, - HddsProtos.ReplicationFactor.THREE); - createPipelines(pipelineFactorOneCount, - HddsProtos.ReplicationFactor.ONE); - - SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(ozoneConfiguration, containers, - pipelineManager, eventQueue); - - rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule(); - } - - @Test - public void testOneReplicaPipelineRule() throws Exception { - - // As with 30 nodes, We can create 7 pipelines with replication factor 3. - // (This is because in node manager for every 10 nodes, 7 nodes are - // healthy, 2 are stale one is dead.) 
- int nodes = 30; - int pipelineFactorThreeCount = 7; - int pipelineCountOne = 0; - setup(nodes, pipelineFactorThreeCount, pipelineCountOne); - - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs( - LoggerFactory.getLogger(SCMSafeModeManager.class)); - - List pipelines = pipelineManager.getPipelines(); - for (int i = 0; i < pipelineFactorThreeCount -1; i++) { - firePipelineEvent(pipelines.get(i)); - } - - // As 90% of 7 with ceil is 7, if we send 6 pipeline reports, rule - // validate should be still false. - - GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( - "reported count is 6"), 1000, 5000); - - Assert.assertFalse(rule.validate()); - - //Fire last pipeline event from datanode. - firePipelineEvent(pipelines.get(pipelineFactorThreeCount - 1)); - - GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); - - } - - - @Test - public void testOneReplicaPipelineRuleMixedPipelines() throws Exception { - - // As with 30 nodes, We can create 7 pipelines with replication factor 3. - // (This is because in node manager for every 10 nodes, 7 nodes are - // healthy, 2 are stale one is dead.) - int nodes = 30; - int pipelineCountThree = 7; - int pipelineCountOne = 21; - - setup(nodes, pipelineCountThree, pipelineCountOne); - - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs( - LoggerFactory.getLogger(SCMSafeModeManager.class)); - - List pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE); - for (int i = 0; i < pipelineCountOne; i++) { - firePipelineEvent(pipelines.get(i)); - } - - GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( - "reported count is 0"), 1000, 5000); - - // fired events for one node ratis pipeline, so we will be still false. - Assert.assertFalse(rule.validate()); - - pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - for (int i = 0; i < pipelineCountThree - 1; i++) { - firePipelineEvent(pipelines.get(i)); - } - - GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( - "reported count is 6"), 1000, 5000); - - //Fire last pipeline event from datanode. 
- firePipelineEvent(pipelines.get(pipelineCountThree - 1)); - - GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); - - } - - - - private void createPipelines(int count, - HddsProtos.ReplicationFactor factor) throws Exception { - for (int i = 0; i < count; i++) { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - factor); - } - } - - private void firePipelineEvent(Pipeline pipeline) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipeline.getId().getProtobuf())); - - if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE) { - eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode( - pipeline.getNodes().get(0), reportBuilder.build())); - eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode( - pipeline.getNodes().get(1), reportBuilder.build())); - eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode( - pipeline.getNodes().get(2), reportBuilder.build())); - } else { - eventQueue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode( - pipeline.getNodes().get(0), reportBuilder.build())); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java deleted file mode 100644 index 247b38afc7f..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ /dev/null @@ -1,521 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hdds.scm.safemode; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.File; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.pipeline.*; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.mockito.Mockito; - -/** Test class for SCMSafeModeManager. - */ -public class TestSCMSafeModeManager { - - private static EventQueue queue; - private SCMSafeModeManager scmSafeModeManager; - private static Configuration config; - private List containers = Collections.emptyList(); - - @Rule - public Timeout timeout = new Timeout(1000 * 300); - - @Rule - public final TemporaryFolder tempDir = new TemporaryFolder(); - - @BeforeClass - public static void setUp() { - queue = new EventQueue(); - config = new OzoneConfiguration(); - } - - @Test - public void testSafeModeState() throws Exception { - // Test 1: test for 0 containers - testSafeMode(0); - - // Test 2: test for 20 containers - testSafeMode(20); - } - - @Test - public void testSafeModeStateWithNullContainers() { - new SCMSafeModeManager(config, Collections.emptyList(), - null, queue); - } - - private void testSafeMode(int numContainers) throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); - - // Currently only considered containers which are not in open state. 
- for (ContainerInfo container : containers) { - container.setState(HddsProtos.LifeCycleState.CLOSED); - } - scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, queue); - - assertTrue(scmSafeModeManager.getInSafeMode()); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - - long cutOff = (long) Math.ceil(numContainers * config.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT)); - - Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() - .getNumContainerWithOneReplicaReportedThreshold().value()); - - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 100, 1000 * 5); - - Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() - .getCurrentContainersWithOneReplicaReportedCount().value()); - - } - - @Test - public void testSafeModeExitRule() throws Exception { - containers = new ArrayList<>(); - int numContainers = 100; - containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); - // Assign open state to containers to be included in the safe mode - // container list - for (ContainerInfo container : containers) { - container.setState(HddsProtos.LifeCycleState.CLOSED); - } - scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, queue); - - long cutOff = (long) Math.ceil(numContainers * config.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT)); - - Assert.assertEquals(cutOff, scmSafeModeManager.getSafeModeMetrics() - .getNumContainerWithOneReplicaReportedThreshold().value()); - - assertTrue(scmSafeModeManager.getInSafeMode()); - - testContainerThreshold(containers.subList(0, 25), 0.25); - Assert.assertEquals(25, scmSafeModeManager.getSafeModeMetrics() - .getCurrentContainersWithOneReplicaReportedCount().value()); - assertTrue(scmSafeModeManager.getInSafeMode()); - testContainerThreshold(containers.subList(25, 50), 0.50); - Assert.assertEquals(50, scmSafeModeManager.getSafeModeMetrics() - .getCurrentContainersWithOneReplicaReportedCount().value()); - assertTrue(scmSafeModeManager.getInSafeMode()); - testContainerThreshold(containers.subList(50, 75), 0.75); - Assert.assertEquals(75, scmSafeModeManager.getSafeModeMetrics() - .getCurrentContainersWithOneReplicaReportedCount().value()); - assertTrue(scmSafeModeManager.getInSafeMode()); - testContainerThreshold(containers.subList(75, 100), 1.0); - Assert.assertEquals(100, scmSafeModeManager.getSafeModeMetrics() - .getCurrentContainersWithOneReplicaReportedCount().value()); - - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 100, 1000 * 5); - } - - - private OzoneConfiguration createConf(double healthyPercent, - double oneReplicaPercent) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempDir.newFolder().toString()); - conf.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - true); - conf.setDouble(HddsConfigKeys. - HDDS_SCM_SAFEMODE_HEALTHY_PIPELINE_THRESHOLD_PCT, healthyPercent); - conf.setDouble(HddsConfigKeys. 
- HDDS_SCM_SAFEMODE_ONE_NODE_REPORTED_PIPELINE_PCT, oneReplicaPercent); - - return conf; - } - - @Test - public void testSafeModeExitRuleWithPipelineAvailabilityCheck() - throws Exception{ - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0.90, 1); - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0.10, 0.9); - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 30, 8, 0, 0.9); - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0); - testSafeModeExitRuleWithPipelineAvailabilityCheck(100, 90, 22, 0, 0.5); - } - - @Test - public void testFailWithIncorrectValueForHealthyPipelinePercent() - throws Exception { - try { - OzoneConfiguration conf = createConf(100, - 0.9); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, queue, null); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, pipelineManager, queue); - fail("testFailWithIncorrectValueForHealthyPipelinePercent"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); - } - } - - @Test - public void testFailWithIncorrectValueForOneReplicaPipelinePercent() - throws Exception { - try { - OzoneConfiguration conf = createConf(0.9, - 200); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, queue, null); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, pipelineManager, queue); - fail("testFailWithIncorrectValueForOneReplicaPipelinePercent"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); - } - } - - @Test - public void testFailWithIncorrectValueForSafeModePercent() throws Exception { - try { - OzoneConfiguration conf = createConf(0.9, 0.1); - conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0); - MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, queue, null); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, pipelineManager, queue); - fail("testFailWithIncorrectValueForSafeModePercent"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("value should be >= 0.0 and <=" + - " 1.0", ex); - } - } - - - public void testSafeModeExitRuleWithPipelineAvailabilityCheck( - int containerCount, int nodeCount, int pipelineCount, - double healthyPipelinePercent, double oneReplicaPercent) - throws Exception { - - OzoneConfiguration conf = createConf(healthyPipelinePercent, - oneReplicaPercent); - - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(containerCount)); - - MockNodeManager mockNodeManager = new MockNodeManager(true, nodeCount); - SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, queue, null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(mockNodeManager, - pipelineManager.getStateManager(), config); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - - for (int i=0; i < pipelineCount; i++) { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - } - - for (ContainerInfo container : containers) { - container.setState(HddsProtos.LifeCycleState.CLOSED); - } - 
- scmSafeModeManager = new SCMSafeModeManager(conf, containers, - pipelineManager, queue); - - assertTrue(scmSafeModeManager.getInSafeMode()); - testContainerThreshold(containers, 1.0); - - List pipelines = pipelineManager.getPipelines(); - - int healthyPipelineThresholdCount = - scmSafeModeManager.getHealthyPipelineSafeModeRule() - .getHealthyPipelineThresholdCount(); - int oneReplicaThresholdCount = - scmSafeModeManager.getOneReplicaPipelineSafeModeRule() - .getThresholdCount(); - - Assert.assertEquals(healthyPipelineThresholdCount, - scmSafeModeManager.getSafeModeMetrics() - .getNumHealthyPipelinesThreshold().value()); - - Assert.assertEquals(oneReplicaThresholdCount, - scmSafeModeManager.getSafeModeMetrics() - .getNumPipelinesWithAtleastOneReplicaReportedThreshold().value()); - - // Because even if no pipelines are there, and threshold we set to zero, - // we shall a get an event when datanode is registered. In that case, - // validate will return true, and add this to validatedRules. - if (Math.max(healthyPipelinePercent, oneReplicaThresholdCount) == 0) { - firePipelineEvent(pipelines.get(0)); - } - - for (int i = 0; i < Math.max(healthyPipelineThresholdCount, - oneReplicaThresholdCount); i++) { - firePipelineEvent(pipelines.get(i)); - - if (i < healthyPipelineThresholdCount) { - checkHealthy(i + 1); - Assert.assertEquals(i + 1, - scmSafeModeManager.getSafeModeMetrics() - .getCurrentHealthyPipelinesCount().value()); - } - - if (i < oneReplicaThresholdCount) { - checkOpen(i + 1); - Assert.assertEquals(i + 1, - scmSafeModeManager.getSafeModeMetrics() - .getCurrentPipelinesWithAtleastOneReplicaCount().value()); - } - } - - Assert.assertEquals(healthyPipelineThresholdCount, - scmSafeModeManager.getSafeModeMetrics() - .getCurrentHealthyPipelinesCount().value()); - - Assert.assertEquals(oneReplicaThresholdCount, - scmSafeModeManager.getSafeModeMetrics() - .getCurrentPipelinesWithAtleastOneReplicaCount().value()); - - - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 100, 1000 * 5); - } - - private void checkHealthy(int expectedCount) throws Exception{ - GenericTestUtils.waitFor(() -> scmSafeModeManager - .getHealthyPipelineSafeModeRule() - .getCurrentHealthyPipelineCount() == expectedCount, - 100, 5000); - } - - private void checkOpen(int expectedCount) throws Exception { - GenericTestUtils.waitFor(() -> scmSafeModeManager - .getOneReplicaPipelineSafeModeRule() - .getCurrentReportedPipelineCount() == expectedCount, - 1000, 5000); - } - - private void firePipelineEvent(Pipeline pipeline) throws Exception { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipeline.getId().getProtobuf())); - queue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new PipelineReportFromDatanode(pipeline.getNodes().get(0), - reportBuilder.build())); - } - - - @Test - public void testDisableSafeMode() { - OzoneConfiguration conf = new OzoneConfiguration(config); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); - PipelineManager pipelineManager = Mockito.mock(PipelineManager.class); - Mockito.doNothing().when(pipelineManager).startPipelineCreator(); - scmSafeModeManager = - new SCMSafeModeManager(conf, containers, pipelineManager, queue); - assertFalse(scmSafeModeManager.getInSafeMode()); - } - - @Test - public void testSafeModeDataNodeExitRule() throws Exception { - containers = new ArrayList<>(); - testSafeModeDataNodes(0); - 
testSafeModeDataNodes(3); - testSafeModeDataNodes(5); - } - - /** - * Check that containers in Allocated state are not considered while - * computing percentage of containers with at least 1 reported replica in - * safe mode exit rule. - */ - @Test - public void testContainerSafeModeRule() throws Exception { - containers = new ArrayList<>(); - // Add 100 containers to the list of containers in SCM - containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); - // Assign CLOSED state to first 25 containers and OPEM state to rest - // of the containers - for (ContainerInfo container : containers.subList(0, 25)) { - container.setState(HddsProtos.LifeCycleState.CLOSED); - } - for (ContainerInfo container : containers.subList(25, 100)) { - container.setState(HddsProtos.LifeCycleState.OPEN); - } - - scmSafeModeManager = new SCMSafeModeManager( - config, containers, null, queue); - - assertTrue(scmSafeModeManager.getInSafeMode()); - - // When 10 CLOSED containers are reported by DNs, the computed container - // threshold should be 10/25 as there are only 25 CLOSED containers. - // Containers in OPEN state should not contribute towards list of - // containers while calculating container threshold in SCMSafeNodeManager - testContainerThreshold(containers.subList(0, 10), 0.4); - assertTrue(scmSafeModeManager.getInSafeMode()); - - // When remaining 15 OPEN containers are reported by DNs, the container - // threshold should be (10+15)/25. - testContainerThreshold(containers.subList(10, 25), 1.0); - - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 100, 1000 * 5); - } - - private void testSafeModeDataNodes(int numOfDns) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(config); - conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, numOfDns); - scmSafeModeManager = new SCMSafeModeManager( - conf, containers, null, queue); - - // Assert SCM is in Safe mode. - assertTrue(scmSafeModeManager.getInSafeMode()); - - // Register all DataNodes except last one and assert SCM is in safe mode. - for (int i = 0; i < numOfDns-1; i++) { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - assertTrue(scmSafeModeManager.getInSafeMode()); - assertTrue(scmSafeModeManager.getCurrentContainerThreshold() == 1); - } - - if(numOfDns == 0){ - GenericTestUtils.waitFor(() -> { - return scmSafeModeManager.getInSafeMode(); - }, 10, 1000 * 10); - return; - } - // Register last DataNode and check that SCM is out of Safe mode. 
- queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 10, 1000 * 10); - } - - private void testContainerThreshold(List dnContainers, - double expectedThreshold) - throws Exception { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(dnContainers)); - GenericTestUtils.waitFor(() -> { - double threshold = scmSafeModeManager.getCurrentContainerThreshold(); - return threshold == expectedThreshold; - }, 100, 2000 * 9); - } - - @Test - public void testSafeModePipelineExitRule() throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); - String storageDir = GenericTestUtils.getTempPath( - TestSCMSafeModeManager.class.getName() + UUID.randomUUID()); - try{ - MockNodeManager nodeManager = new MockNodeManager(true, 3); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); - // enable pipeline check - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, queue, null); - - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - Pipeline pipeline = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - PipelineReportsProto.Builder reportBuilder = PipelineReportsProto - .newBuilder(); - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipeline.getId().getProtobuf())); - - scmSafeModeManager = new SCMSafeModeManager( - config, containers, pipelineManager, queue); - - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - assertTrue(scmSafeModeManager.getInSafeMode()); - - // Trigger the processed pipeline report event - queue.fireEvent(SCMEvents.PROCESSED_PIPELINE_REPORT, - new PipelineReportFromDatanode(pipeline.getNodes().get(0), - reportBuilder.build())); - - GenericTestUtils.waitFor(() -> { - return !scmSafeModeManager.getInSafeMode(); - }, 100, 1000 * 10); - pipelineManager.close(); - } finally { - config.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - false); - FileUtil.fullyDelete(new File(storageDir)); - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java deleted file mode 100644 index 5572e9aa1ef..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSafeModeHandler.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.lock.LockManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.HashSet; - -/** - * Tests SafeModeHandler behavior. - */ -public class TestSafeModeHandler { - - - private OzoneConfiguration configuration; - private SCMClientProtocolServer scmClientProtocolServer; - private ReplicationManager replicationManager; - private BlockManager blockManager; - private SafeModeHandler safeModeHandler; - private EventQueue eventQueue; - private SCMSafeModeManager.SafeModeStatus safeModeStatus; - private PipelineManager scmPipelineManager; - - public void setup(boolean enabled) { - configuration = new OzoneConfiguration(); - configuration.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, - enabled); - configuration.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, - "3s"); - scmClientProtocolServer = - Mockito.mock(SCMClientProtocolServer.class); - eventQueue = new EventQueue(); - final ContainerManager containerManager = - Mockito.mock(ContainerManager.class); - Mockito.when(containerManager.getContainerIDs()) - .thenReturn(new HashSet<>()); - replicationManager = new ReplicationManager( - new ReplicationManagerConfiguration(), - containerManager, Mockito.mock(ContainerPlacementPolicy.class), - eventQueue, new LockManager(configuration)); - scmPipelineManager = Mockito.mock(SCMPipelineManager.class); - blockManager = Mockito.mock(BlockManagerImpl.class); - safeModeHandler = - new SafeModeHandler(configuration, scmClientProtocolServer, - blockManager, replicationManager, scmPipelineManager); - - eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler); - safeModeStatus = new SCMSafeModeManager.SafeModeStatus(false); - - } - - @Test - public void testSafeModeHandlerWithSafeModeEnabled() throws Exception { - setup(true); - - Assert.assertTrue(safeModeHandler.getSafeModeStatus()); - - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - - GenericTestUtils.waitFor(() -> !safeModeHandler.getSafeModeStatus(), - 1000, 5000); - - Assert.assertFalse(scmClientProtocolServer.getSafeModeStatus()); - Assert.assertFalse(((BlockManagerImpl) blockManager).isScmInSafeMode()); - GenericTestUtils.waitFor(() -> - replicationManager.isRunning(), 1000, 5000); 
- } - - - @Test - public void testSafeModeHandlerWithSafeModeDisbaled() throws Exception{ - - setup(false); - - Assert.assertFalse(safeModeHandler.getSafeModeStatus()); - - eventQueue.fireEvent(SCMEvents.SAFE_MODE_STATUS, safeModeStatus); - - Assert.assertFalse(safeModeHandler.getSafeModeStatus()); - Assert.assertFalse(scmClientProtocolServer.getSafeModeStatus()); - Assert.assertFalse(((BlockManagerImpl) blockManager).isScmInSafeMode()); - GenericTestUtils.waitFor(() -> - replicationManager.isRunning(), 1000, 5000); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java deleted file mode 100644 index 098c68bc3a8..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.safemode; -/** - * SCM Safe mode tests. - */ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java deleted file mode 100644 index d2044f59369..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; -import org.apache.hadoop.test.GenericTestUtils; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -/** - * Test class for @{@link SCMBlockProtocolServer}. - */ -public class TestSCMBlockProtocolServer { - private OzoneConfiguration config; - private SCMBlockProtocolServer server; - private StorageContainerManager scm; - private NodeManager nodeManager; - private ScmBlockLocationProtocolServerSideTranslatorPB service; - private final int nodeCount = 10; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - File dir = GenericTestUtils.getRandomizedTestDir(); - config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); - SCMConfigurator configurator = new SCMConfigurator(); - scm = TestUtils.getScm(config, configurator); - scm.start(); - scm.exitSafeMode(); - // add nodes to scm node manager - nodeManager = scm.getScmNodeManager(); - for (int i = 0; i < nodeCount; i++) { - nodeManager.register(TestUtils.randomDatanodeDetails(), null, null); - - } - server = scm.getBlockProtocolServer(); - service = new ScmBlockLocationProtocolServerSideTranslatorPB(server, - Mockito.mock(ProtocolMessageMetrics.class)); - } - - @After - public void tearDown() throws Exception { - if (scm != null) { - scm.stop(); - scm.join(); - } - } - - @Test - public void testSortDatanodes() throws Exception { - List nodes = new ArrayList(); - nodeManager.getAllNodes().stream().forEach( - node -> nodes.add(node.getNetworkName())); - - // sort normal datanodes - String client; - client = nodes.get(0); - List datanodeDetails = - server.sortDatanodes(nodes, client); - System.out.println("client = " + client); - datanodeDetails.stream().forEach( - node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); - - // illegal client 1 - client += "X"; - datanodeDetails = server.sortDatanodes(nodes, client); - System.out.println("client = " + client); - datanodeDetails.stream().forEach( - node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); - // illegal client 2 - client = "/default-rack"; - datanodeDetails = server.sortDatanodes(nodes, client); - System.out.println("client = " + client); - datanodeDetails.stream().forEach( - node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); - - // unknown node to sort - nodes.add(UUID.randomUUID().toString()); - ScmBlockLocationProtocolProtos.SortDatanodesRequestProto request = - ScmBlockLocationProtocolProtos.SortDatanodesRequestProto - .newBuilder() - .addAllNodeNetworkName(nodes) - .setClient(client) - .build(); - ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp = - service.sortDatanodes(request); - Assert.assertTrue(resp.getNodeList().size() == nodeCount); - 
System.out.println("client = " + client); - resp.getNodeList().stream().forEach( - node -> System.out.println(node.getNetworkName())); - - // all unknown nodes - nodes.clear(); - nodes.add(UUID.randomUUID().toString()); - nodes.add(UUID.randomUUID().toString()); - nodes.add(UUID.randomUUID().toString()); - request = ScmBlockLocationProtocolProtos.SortDatanodesRequestProto - .newBuilder() - .addAllNodeNetworkName(nodes) - .setClient(client) - .build(); - resp = service.sortDatanodes(request); - System.out.println("client = " + client); - Assert.assertTrue(resp.getNodeList().size() == 0); - resp.getNodeList().stream().forEach( - node -> System.out.println(node.getNetworkName())); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java deleted file mode 100644 index 23568d85fd5..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; -import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test class for @{@link SCMClientProtocolServer}. 
- * */ -public class TestSCMClientProtocolServer { - private SCMClientProtocolServer scmClientProtocolServer; - private OzoneConfiguration config; - private EventQueue eventQueue; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - config.set(OZONE_SCM_CLIENT_ADDRESS_KEY, - OZONE_SCM_CLIENT_BIND_HOST_DEFAULT + ":0"); - eventQueue = new EventQueue(); - scmClientProtocolServer = new SCMClientProtocolServer(config, null); - BlockManager blockManager = Mockito.mock(BlockManagerImpl.class); - ReplicationManager replicationManager = - Mockito.mock(ReplicationManager.class); - PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class); - SafeModeHandler safeModeHandler = new SafeModeHandler(config, - scmClientProtocolServer, blockManager, replicationManager, - pipelineManager); - eventQueue.addHandler(SCMEvents.SAFE_MODE_STATUS, safeModeHandler); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testAllocateContainerFailureInSafeMode() throws Exception { - LambdaTestUtils.intercept(SCMException.class, - "SafeModePrecheck failed for allocateContainer", () -> { - scmClientProtocolServer.allocateContainer( - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, ""); - }); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java deleted file mode 100644 index 0a2eeefd1fe..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMContainerMetrics.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.metrics2.MetricsCollector; -import org.apache.hadoop.metrics2.MetricsInfo; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.metrics2.lib.Interns; -import org.junit.Test; - -/** - * Test metrics that represent container states. - */ -public class TestSCMContainerMetrics { - @Test - public void testSCMContainerMetrics() { - SCMMXBean scmmxBean = mock(SCMMXBean.class); - - Map stateInfo = new HashMap() {{ - put(HddsProtos.LifeCycleState.OPEN.toString(), 2); - put(HddsProtos.LifeCycleState.CLOSING.toString(), 3); - put(HddsProtos.LifeCycleState.QUASI_CLOSED.toString(), 4); - put(HddsProtos.LifeCycleState.CLOSED.toString(), 5); - put(HddsProtos.LifeCycleState.DELETING.toString(), 6); - put(HddsProtos.LifeCycleState.DELETED.toString(), 7); - }}; - - - when(scmmxBean.getContainerStateCount()).thenReturn(stateInfo); - - MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class); - when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb); - - MetricsCollector metricsCollector = mock(MetricsCollector.class); - when(metricsCollector.addRecord(anyString())).thenReturn(mb); - - SCMContainerMetrics containerMetrics = new SCMContainerMetrics(scmmxBean); - - containerMetrics.getMetrics(metricsCollector, true); - - verify(mb, times(1)).addGauge(Interns.info("OpenContainers", - "Number of open containers"), 2); - verify(mb, times(1)).addGauge(Interns.info("ClosingContainers", - "Number of containers in closing state"), 3); - verify(mb, times(1)).addGauge(Interns.info("QuasiClosedContainers", - "Number of containers in quasi closed state"), 4); - verify(mb, times(1)).addGauge(Interns.info("ClosedContainers", - "Number of containers in closed state"), 5); - verify(mb, times(1)).addGauge(Interns.info("DeletingContainers", - "Number of containers in deleting state"), 6); - verify(mb, times(1)).addGauge(Interns.info("DeletedContainers", - "Number of containers in deleted state"), 7); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java deleted file mode 100644 index eac8c906034..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import java.io.IOException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.UUID; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.scm.server. - SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .ContainerReportFromDatanode; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; - -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT; - -/** - * This class tests the behavior of SCMDatanodeHeartbeatDispatcher. 
- */ -public class TestSCMDatanodeHeartbeatDispatcher { - - - @Test - public void testNodeReportDispatcher() throws IOException { - - AtomicInteger eventReceived = new AtomicInteger(); - - NodeReportProto nodeReport = NodeReportProto.getDefaultInstance(); - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())) - .thenReturn(true); - - SCMDatanodeHeartbeatDispatcher dispatcher = - new SCMDatanodeHeartbeatDispatcher(mockNodeManager, - new EventPublisher() { - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - Assert.assertEquals(event, NODE_REPORT); - eventReceived.incrementAndGet(); - Assert.assertEquals(nodeReport, - ((NodeReportFromDatanode)payload).getReport()); - - } - }); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setNodeReport(nodeReport) - .build(); - dispatcher.dispatch(heartbeat); - Assert.assertEquals(1, eventReceived.get()); - - - } - - @Test - public void testContainerReportDispatcher() throws IOException { - - - AtomicInteger eventReceived = new AtomicInteger(); - - ContainerReportsProto containerReport = - ContainerReportsProto.getDefaultInstance(); - CommandStatusReportsProto commandStatusReport = - CommandStatusReportsProto.getDefaultInstance(); - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())) - .thenReturn(true); - - SCMDatanodeHeartbeatDispatcher dispatcher = - new SCMDatanodeHeartbeatDispatcher( - mockNodeManager, - new EventPublisher() { - @Override - public > void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - Assert.assertTrue( - event.equals(CONTAINER_REPORT) - || event.equals(CMD_STATUS_REPORT)); - - if (payload instanceof ContainerReportFromDatanode) { - Assert.assertEquals(containerReport, - ((ContainerReportFromDatanode) payload).getReport()); - } - if (payload instanceof CommandStatusReportFromDatanode) { - Assert.assertEquals(commandStatusReport, - ((CommandStatusReportFromDatanode) payload).getReport()); - } - eventReceived.incrementAndGet(); - } - }); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .setContainerReport(containerReport) - .addCommandStatusReports(commandStatusReport) - .build(); - dispatcher.dispatch(heartbeat); - Assert.assertEquals(2, eventReceived.get()); - - - } - - /** - * Asserts scm informs datanodes to re-register on a restart. - * - * @throws Exception - */ - @Test - public void testScmHeartbeatAfterRestart() throws Exception { - - NodeManager mockNodeManager = Mockito.mock(NodeManager.class); - SCMDatanodeHeartbeatDispatcher dispatcher = - new SCMDatanodeHeartbeatDispatcher( - mockNodeManager, Mockito.mock(EventPublisher.class)); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - - SCMHeartbeatRequestProto heartbeat = - SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(datanodeDetails.getProtoBufMessage()) - .build(); - - dispatcher.dispatch(heartbeat); - // If SCM receives heartbeat from a node after it restarts and the node - // is not registered, it should send a Re-Register command back to the node. 
- Mockito.verify(mockNodeManager, Mockito.times(1)).addDatanodeCommand( - Mockito.any(UUID.class), Mockito.any(ReregisterCommand.class)); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java deleted file mode 100644 index 8040cb4b6f7..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * Test class for {@link SCMSecurityProtocolServer}. - * */ -public class TestSCMSecurityProtocolServer { - private SCMSecurityProtocolServer securityProtocolServer; - private OzoneConfiguration config; - - @Rule - public Timeout timeout = new Timeout(1000 * 20); - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - config.set(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, - OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT + ":0"); - securityProtocolServer = new SCMSecurityProtocolServer(config, null); - } - - @After - public void tearDown() { - if (securityProtocolServer != null) { - securityProtocolServer.stop(); - securityProtocolServer = null; - } - config = null; - } - - @Test - public void testStart() { - securityProtocolServer.start(); - } - - @Test - public void testStop() { - securityProtocolServer.stop(); - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java deleted file mode 100644 index 60a56e3ffbc..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import static org.junit.Assert.*; - - -/** - * This class is used to test the StorageContainerManagerStarter using a mock - * class to avoid starting any services and hence just test the CLI component. - */ -public class TestStorageContainerManagerStarter { - - private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); - private final PrintStream originalOut = System.out; - private final PrintStream originalErr = System.err; - - private MockSCMStarter mock; - - @Before - public void setUpStreams() { - System.setOut(new PrintStream(outContent)); - System.setErr(new PrintStream(errContent)); - mock = new MockSCMStarter(); - } - - @After - public void restoreStreams() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - @Test - public void testCallsStartWhenServerStarted() throws Exception { - executeCommand(); - assertTrue(mock.startCalled); - } - - @Test - public void testExceptionThrownWhenStartFails() throws Exception { - mock.throwOnStart = true; - try { - executeCommand(); - fail("Exception show have been thrown"); - } catch (Exception e) { - assertTrue(true); - } - } - - @Test - public void testStartNotCalledWithInvalidParam() throws Exception { - executeCommand("--invalid"); - assertFalse(mock.startCalled); - } - - @Test - public void testPassingInitSwitchCallsInit() { - executeCommand("--init"); - assertTrue(mock.initCalled); - } - - @Test - public void testInitSwitchAcceptsClusterIdSSwitch() { - executeCommand("--init", "--clusterid=abcdefg"); - assertEquals("abcdefg", mock.clusterId); - } - - @Test - public void testInitSwitchWithInvalidParamDoesNotRun() { - executeCommand("--init", "--clusterid=abcdefg", "--invalid"); - assertFalse(mock.initCalled); - } - - @Test - public void testUnSuccessfulInitThrowsException() { - mock.throwOnInit = true; - try { - executeCommand("--init"); - fail("Exception show have been thrown"); - } catch (Exception e) { - assertTrue(true); - } - } - - @Test - public void testGenClusterIdRunsGenerate() { - executeCommand("--genclusterid"); - assertTrue(mock.generateCalled); - } - - @Test - public void testGenClusterIdWithInvalidParamDoesNotRun() { - executeCommand("--genclusterid", "--invalid"); - assertFalse(mock.generateCalled); - } - - @Test - public void testUsagePrintedOnInvalidInput() { - executeCommand("--invalid"); - Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); - Matcher m = p.matcher(errContent.toString()); - assertTrue(m.find()); - } - - private void executeCommand(String... 
args) { - new StorageContainerManagerStarter(mock).execute(args); - } - - static class MockSCMStarter implements SCMStarterInterface { - - private boolean initStatus = true; - private boolean throwOnStart = false; - private boolean throwOnInit = false; - private boolean startCalled = false; - private boolean initCalled = false; - private boolean generateCalled = false; - private String clusterId = null; - - public void start(OzoneConfiguration conf) throws Exception { - if (throwOnStart) { - throw new Exception("Simulated error on start"); - } - startCalled = true; - } - - public boolean init(OzoneConfiguration conf, String cid) - throws IOException { - if (throwOnInit) { - throw new IOException("Simulated error on init"); - } - initCalled = true; - clusterId = cid; - return initStatus; - } - - public String generateClusterId() { - generateCalled = true; - return "static-cluster-id"; - } - } -} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java deleted file mode 100644 index 6b493edb683..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ /dev/null @@ -1,542 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common; - -import java.util.List; -import java.util.Map; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatus.Status; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.VersionInfo; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.endpoint - .HeartbeatEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint - .RegisterEndpointTask; -import org.apache.hadoop.ozone.container.common.states.endpoint - .VersionEndpointTask; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.PathUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.mockito.Mockito.mock; - -import java.io.File; -import 
java.net.InetSocketAddress; -import java.util.UUID; - -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.container.common.ContainerTestUtils - .createEndpoint; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.mockito.Mockito.when; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * Tests the endpoints. - */ -public class TestEndPoint { - private static InetSocketAddress serverAddress; - private static RPC.Server scmServer; - private static ScmTestMock scmServerImpl; - private static File testDir; - private static Configuration config; - - @AfterClass - public static void tearDown() throws Exception { - if (scmServer != null) { - scmServer.stop(); - } - FileUtil.fullyDelete(testDir); - } - - @BeforeClass - public static void setUp() throws Exception { - serverAddress = SCMTestUtils.getReuseableAddress(); - scmServerImpl = new ScmTestMock(); - scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(), - scmServerImpl, serverAddress, 10); - testDir = PathUtils.getTestDir(TestEndPoint.class); - config = SCMTestUtils.getConf(); - config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - config - .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s"); - } - - @Test - /** - * This test asserts that we are able to make a version call to SCM server - * and gets back the expected values. - */ - public void testGetVersion() throws Exception { - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - SCMVersionResponseProto responseProto = rpcEndPoint.getEndPoint() - .getVersion(null); - Assert.assertNotNull(responseProto); - Assert.assertEquals(VersionInfo.DESCRIPTION_KEY, - responseProto.getKeys(0).getKey()); - Assert.assertEquals(VersionInfo.getLatestVersion().getDescription(), - responseProto.getKeys(0).getValue()); - } - } - - @Test - /** - * We make getVersion RPC call, but via the VersionEndpointTask which is - * how the state machine would make the call. - */ - public void testGetVersionTask() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, 1000)) { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // if version call worked the endpoint should automatically move to the - // next state. 
- Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - newState); - - // Now rpcEndpoint should remember the version it got from SCM - Assert.assertNotNull(rpcEndPoint.getVersion()); - } - } - - @Test - public void testCheckVersionResponse() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - true); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, 1000)) { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(VersionEndpointTask.LOG); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // if version call worked the endpoint should automatically move to the - // next state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - newState); - - // Now rpcEndpoint should remember the version it got from SCM - Assert.assertNotNull(rpcEndPoint.getVersion()); - - // Now change server scmId, so datanode scmId will be - // different from SCM server response scmId - String newScmId = UUID.randomUUID().toString(); - scmServerImpl.setScmId(newScmId); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - newState = versionTask.call(); - Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN, - newState); - List volumesList = ozoneContainer.getVolumeSet() - .getFailedVolumesList(); - Assert.assertTrue(volumesList.size() == 1); - File expectedScmDir = new File(volumesList.get(0).getHddsRootDir(), - scmServerImpl.getScmId()); - Assert.assertTrue(logCapturer.getOutput().contains("expected scm " + - "directory " + expectedScmDir.getAbsolutePath() + " does not " + - "exist")); - Assert.assertTrue(ozoneContainer.getVolumeSet().getVolumesList().size() - == 0); - Assert.assertTrue(ozoneContainer.getVolumeSet().getFailedVolumesList() - .size() == 1); - - } - } - - - - @Test - /** - * This test makes a call to end point where there is no SCM server. We - * expect that versionTask should be able to handle it. - */ - public void testGetVersionToInvalidEndpoint() throws Exception { - OzoneConfiguration conf = SCMTestUtils.getConf(); - InetSocketAddress nonExistentServerAddress = SCMTestUtils - .getReuseableAddress(); - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - nonExistentServerAddress, 1000)) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - - // This version call did NOT work, so endpoint should remain in the same - // state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - newState); - } - } - - @Test - /** - * This test makes a getVersionRPC call, but the DummyStorageServer is - * going to respond little slowly. 
We will assert that we are still in the - * GETVERSION state after the timeout. - */ - public void testGetVersionAssertRpcTimeOut() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 100; - OzoneConfiguration conf = SCMTestUtils.getConf(); - - try (EndpointStateMachine rpcEndPoint = createEndpoint(conf, - serverAddress, (int) rpcTimeout)) { - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); - VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, - conf, ozoneContainer); - - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - EndpointStateMachine.EndPointStates newState = versionTask.call(); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance)); - Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION, - newState); - } - } - - @Test - public void testRegister() throws Exception { - DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = createEndpoint( - SCMTestUtils.getConf(), serverAddress, 1000)) { - SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() - .register(nodeToRegister.getProtoBufMessage(), TestUtils - .createNodeReport( - getStorageReports(nodeToRegister.getUuid())), - TestUtils.getRandomContainerReports(10), - TestUtils.getRandomPipelineReports()); - Assert.assertNotNull(responseProto); - Assert.assertEquals(nodeToRegister.getUuidString(), - responseProto.getDatanodeUUID()); - Assert.assertNotNull(responseProto.getClusterID()); - Assert.assertEquals(10, scmServerImpl. 
- getContainerCountsForDatanode(nodeToRegister)); - Assert.assertEquals(1, scmServerImpl.getNodeReportsCount(nodeToRegister)); - } - } - - private StorageReportProto getStorageReports(UUID id) { - String storagePath = testDir.getAbsolutePath() + "/" + id; - return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null); - } - - private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, - int rpcTimeout, boolean clearDatanodeDetails) throws Exception { - Configuration conf = SCMTestUtils.getConf(); - EndpointStateMachine rpcEndPoint = - createEndpoint(conf, - scmAddress, rpcTimeout); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER); - OzoneContainer ozoneContainer = mock(OzoneContainer.class); - when(ozoneContainer.getNodeReport()).thenReturn(TestUtils - .createNodeReport(getStorageReports(UUID.randomUUID()))); - ContainerController controller = Mockito.mock(ContainerController.class); - when(controller.getContainerReport()).thenReturn( - TestUtils.getRandomContainerReports(10)); - when(ozoneContainer.getController()).thenReturn(controller); - when(ozoneContainer.getPipelineReport()).thenReturn( - TestUtils.getRandomPipelineReports()); - RegisterEndpointTask endpointTask = - new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer, - mock(StateContext.class)); - if (!clearDatanodeDetails) { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - endpointTask.setDatanodeDetails(datanodeDetails); - } - endpointTask.call(); - return rpcEndPoint; - } - - @Test - public void testRegisterTask() throws Exception { - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(serverAddress, 1000, false)) { - // Successful register should move us to Heartbeat state. - Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterToInvalidEndpoint() throws Exception { - InetSocketAddress address = SCMTestUtils.getReuseableAddress(); - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(address, 1000, false)) { - Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterNoContainerID() throws Exception { - InetSocketAddress address = SCMTestUtils.getReuseableAddress(); - try (EndpointStateMachine rpcEndpoint = - registerTaskHelper(address, 1000, true)) { - // No Container ID, therefore we tell the datanode that we would like to - // shutdown. 
- Assert.assertEquals(EndpointStateMachine.EndPointStates.SHUTDOWN, - rpcEndpoint.getState()); - } - } - - @Test - public void testRegisterRpcTimeout() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 200; - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - registerTaskHelper(serverAddress, 1000, false).close(); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, lessThanOrEqualTo(rpcTimeout + tolerance)); - } - - @Test - public void testHeartbeat() throws Exception { - DatanodeDetails dataNode = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) - .build(); - - SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() - .sendHeartbeat(request); - Assert.assertNotNull(responseProto); - Assert.assertEquals(0, responseProto.getCommandsCount()); - } - } - - @Test - public void testHeartbeatWithCommandStatusReport() throws Exception { - DatanodeDetails dataNode = TestUtils.randomDatanodeDetails(); - try (EndpointStateMachine rpcEndPoint = - createEndpoint(SCMTestUtils.getConf(), - serverAddress, 1000)) { - // Add some scmCommands for heartbeat response - addScmCommands(); - - - SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() - .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( - getStorageReports(UUID.randomUUID()))) - .build(); - - SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint() - .sendHeartbeat(request); - assertNotNull(responseProto); - assertEquals(3, responseProto.getCommandsCount()); - assertEquals(0, scmServerImpl.getCommandStatusReportCount()); - - // Send heartbeat again from heartbeat endpoint task - final StateContext stateContext = heartbeatTaskHelper( - serverAddress, 3000); - Map map = stateContext.getCommandStatusMap(); - assertNotNull(map); - assertEquals("Should have 1 objects", 1, map.size()); - assertTrue(map.containsKey(3L)); - assertEquals(Type.deleteBlocksCommand, map.get(3L).getType()); - assertEquals(Status.PENDING, map.get(3L).getStatus()); - - scmServerImpl.clearScmCommandRequests(); - } - } - - private void addScmCommands() { - SCMCommandProto closeCommand = SCMCommandProto.newBuilder() - .setCloseContainerCommandProto( - CloseContainerCommandProto.newBuilder().setCmdId(1) - .setContainerID(1) - .setPipelineID(PipelineID.randomId().getProtobuf()) - .build()) - .setCommandType(Type.closeContainerCommand) - .build(); - SCMCommandProto replicationCommand = SCMCommandProto.newBuilder() - .setReplicateContainerCommandProto( - ReplicateContainerCommandProto.newBuilder() - .setCmdId(2) - .setContainerID(2) - .build()) - .setCommandType(Type.replicateContainerCommand) - .build(); - SCMCommandProto deleteBlockCommand = SCMCommandProto.newBuilder() - .setDeleteBlocksCommandProto( - DeleteBlocksCommandProto.newBuilder() - .setCmdId(3) - .addDeletedBlocksTransactions( - DeletedBlocksTransaction.newBuilder() - .setContainerID(45) - .setCount(1) - .setTxID(23) - .build()) - .build()) - .setCommandType(Type.deleteBlocksCommand) - .build(); - scmServerImpl.addScmCommandRequest(closeCommand); - scmServerImpl.addScmCommandRequest(deleteBlockCommand); - 
scmServerImpl.addScmCommandRequest(replicationCommand); - } - - private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress, - int rpcTimeout) throws Exception { - Configuration conf = SCMTestUtils.getConf(); - conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - // Mini Ozone cluster will not come up if the port is not true, since - // Ratis will exit if the server port cannot be bound. We can remove this - // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - - - // Create a datanode state machine for stateConext used by endpoint task - try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), conf, null, null); - EndpointStateMachine rpcEndPoint = - createEndpoint(conf, scmAddress, rpcTimeout)) { - HddsProtos.DatanodeDetailsProto datanodeDetailsProto = - TestUtils.randomDatanodeDetails().getProtoBufMessage(); - rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT); - - final StateContext stateContext = - new StateContext(conf, DatanodeStateMachine.DatanodeStates.RUNNING, - stateMachine); - - HeartbeatEndpointTask endpointTask = - new HeartbeatEndpointTask(rpcEndPoint, conf, stateContext); - endpointTask.setDatanodeDetailsProto(datanodeDetailsProto); - endpointTask.call(); - Assert.assertNotNull(endpointTask.getDatanodeDetailsProto()); - - Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - rpcEndPoint.getState()); - return stateContext; - } - } - - @Test - public void testHeartbeatTask() throws Exception { - heartbeatTaskHelper(serverAddress, 1000); - } - - @Test - public void testHeartbeatTaskToInvalidNode() throws Exception { - InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress(); - heartbeatTaskHelper(invalidAddress, 1000); - } - - @Test - public void testHeartbeatTaskRpcTimeOut() throws Exception { - final long rpcTimeout = 1000; - final long tolerance = 200; - scmServerImpl.setRpcResponseDelay(1500); - long start = Time.monotonicNow(); - InetSocketAddress invalidAddress = SCMTestUtils.getReuseableAddress(); - heartbeatTaskHelper(invalidAddress, 1000); - long end = Time.monotonicNow(); - scmServerImpl.setRpcResponseDelay(0); - Assert.assertThat(end - start, - lessThanOrEqualTo(rpcTimeout + tolerance)); - } - - private StateContext getContext(DatanodeDetails datanodeDetails) { - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - return context; - } - -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java deleted file mode 100644 index da2ae843e11..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
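TestEndPoint, removed above, drives the version, register, and heartbeat endpoint tasks against Mockito stubs (mock(OzoneContainer.class), when(...).thenReturn(...)) instead of a live datanode. For readers unfamiliar with that stubbing style, a tiny self-contained illustration; the stubbed type here is a plain java.util.List, chosen only for demonstration:

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.util.List;
    import org.junit.Test;

    public class StubbingExample {

      @Test
      @SuppressWarnings("unchecked")
      public void stubbedCallsReturnCannedValues() {
        // The mock records interactions and returns whatever was stubbed.
        List<String> reports = (List<String>) mock(List.class);
        when(reports.size()).thenReturn(10);

        assertEquals(10, reports.size());
      }
    }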
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.ozone.container.common; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java deleted file mode 100644 index f0b1cbb146b..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.placement; - -import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementRandom; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.Assert; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.junit.Assert.assertEquals; - -/** - * Asserts that allocation strategy works as expected. - */ -public class TestContainerPlacement { - - private DescriptiveStatistics computeStatistics(NodeManager nodeManager) { - DescriptiveStatistics descriptiveStatistics = new DescriptiveStatistics(); - for (DatanodeDetails dd : nodeManager.getNodes(HEALTHY)) { - float weightedValue = - nodeManager.getNodeStat(dd).get().getScmUsed().get() / (float) - nodeManager.getNodeStat(dd).get().getCapacity().get(); - descriptiveStatistics.addValue(weightedValue); - } - return descriptiveStatistics; - } - - /** - * This test simulates lots of Cluster I/O and updates the metadata in SCM. - * We simulate adding and removing containers from the cluster. It asserts - * that our placement algorithm has taken the capacity of nodes into - * consideration by asserting that standard deviation of used space on these - * has improved. - */ - @Test - public void testCapacityPlacementYieldsBetterDataDistribution() throws - SCMException { - final int opsCount = 200 * 1000; - final int nodesRequired = 3; - Random random = new Random(); - - // The nature of init code in MockNodeManager yields similar clusters. - MockNodeManager nodeManagerCapacity = new MockNodeManager(true, 100); - MockNodeManager nodeManagerRandom = new MockNodeManager(true, 100); - DescriptiveStatistics beforeCapacity = - computeStatistics(nodeManagerCapacity); - DescriptiveStatistics beforeRandom = computeStatistics(nodeManagerRandom); - - //Assert that our initial layout of clusters are similar. 
- assertEquals(beforeCapacity.getStandardDeviation(), beforeRandom - .getStandardDeviation(), 0.001); - - SCMContainerPlacementCapacity capacityPlacer = new - SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration(), - null, true, null); - SCMContainerPlacementRandom randomPlacer = new - SCMContainerPlacementRandom(nodeManagerRandom, new Configuration(), - null, true, null); - - for (int x = 0; x < opsCount; x++) { - long containerSize = random.nextInt(100) * OzoneConsts.GB; - List nodesCapacity = - capacityPlacer.chooseDatanodes(new ArrayList<>(), null, nodesRequired, - containerSize); - assertEquals(nodesRequired, nodesCapacity.size()); - - List nodesRandom = - randomPlacer.chooseDatanodes(nodesCapacity, null, nodesRequired, - containerSize); - - // One fifth of all calls are delete - if (x % 5 == 0) { - deleteContainer(nodeManagerCapacity, nodesCapacity, containerSize); - deleteContainer(nodeManagerRandom, nodesRandom, containerSize); - } else { - createContainer(nodeManagerCapacity, nodesCapacity, containerSize); - createContainer(nodeManagerRandom, nodesRandom, containerSize); - } - } - DescriptiveStatistics postCapacity = computeStatistics(nodeManagerCapacity); - DescriptiveStatistics postRandom = computeStatistics(nodeManagerRandom); - - // This is a very bold claim, and needs large number of I/O operations. - // The claim in this assertion is that we improved the data distribution - // of this cluster in relation to the start state of the cluster. - Assert.assertTrue(beforeCapacity.getStandardDeviation() > - postCapacity.getStandardDeviation()); - - // This asserts that Capacity placement yields a better placement - // algorithm than random placement, since both cluster started at an - // identical state. - - Assert.assertTrue(postRandom.getStandardDeviation() > - postCapacity.getStandardDeviation()); - } - - private void deleteContainer(MockNodeManager nodeManager, - List nodes, long containerSize) { - for (DatanodeDetails dd : nodes) { - nodeManager.delContainer(dd, containerSize); - } - } - - private void createContainer(MockNodeManager nodeManager, - List nodes, long containerSize) { - for (DatanodeDetails dd : nodes) { - nodeManager.addContainer(dd, containerSize); - } - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java deleted file mode 100644 index 328ba307b96..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
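TestContainerPlacement, removed above, scores a placement policy by the standard deviation of per-node utilization (used space divided by capacity) before and after a long run of simulated allocations: a smaller deviation after the run means data ended up spread more evenly. A minimal sketch of that measurement using the same commons-math3 DescriptiveStatistics class the removed test imports; the sample figures are invented:

    import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

    public class UtilizationSpread {

      // Standard deviation of used/capacity ratios across datanodes.
      static double spread(long[] used, long[] capacity) {
        DescriptiveStatistics stats = new DescriptiveStatistics();
        for (int i = 0; i < used.length; i++) {
          stats.addValue(used[i] / (double) capacity[i]);
        }
        return stats.getStandardDeviation();
      }

      public static void main(String[] args) {
        long[] capacity = {100, 100, 100};
        // An uneven layout yields a larger spread than an even one.
        System.out.println(spread(new long[] {10, 50, 90}, capacity));
        System.out.println(spread(new long[] {48, 50, 52}, capacity));
      }
    }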

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.placement; - -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.junit.Test; - - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Tests that test Metrics that support placement. - */ -public class TestDatanodeMetrics { - @Test - public void testSCMNodeMetric() { - SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L); - assertEquals((long) stat.getCapacity().get(), 100L); - assertEquals(10L, (long) stat.getScmUsed().get()); - assertEquals(90L, (long) stat.getRemaining().get()); - SCMNodeMetric metric = new SCMNodeMetric(stat); - - SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L); - assertEquals(100L, (long) stat.getCapacity().get()); - assertEquals(10L, (long) stat.getScmUsed().get()); - assertEquals(90L, (long) stat.getRemaining().get()); - - SCMNodeMetric newMetric = new SCMNodeMetric(newStat); - assertTrue(metric.isEqual(newMetric.get())); - - newMetric.add(stat); - assertTrue(newMetric.isGreater(metric.get())); - - SCMNodeMetric zeroMetric = new SCMNodeMetric(new SCMNodeStat()); - // Assert we can handle zero capacity. - assertTrue(metric.isGreater(zeroMetric.get())); - - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java deleted file mode 100644 index ddd751c3795..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.ozone.container.placement; \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java deleted file mode 100644 index 318c54d9585..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.replication; -// Test classes for replication. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java deleted file mode 100644 index 0ecff3f541a..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.scm.node.CommandQueue; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.LinkedList; - -/** - * A Node Manager to test replication. - */ -public class ReplicationNodeManagerMock implements NodeManager { - private final Map nodeStateMap; - private final CommandQueue commandQueue; - - /** - * A list of Datanodes and current states. - * @param nodeState A node state map. - */ - public ReplicationNodeManagerMock(Map nodeState, - CommandQueue commandQueue) { - Preconditions.checkNotNull(nodeState); - this.nodeStateMap = nodeState; - this.commandQueue = commandQueue; - } - - /** - * Get the number of data nodes that in all states. - * - * @return A state to number of nodes that in this state mapping - */ - @Override - public Map getNodeCount() { - return null; - } - - @Override - public Map getNodeInfo() { - return null; - } - - /** - * Gets all Live Datanodes that is currently communicating with SCM. - * - * @param nodestate - State of the node - * @return List of Datanodes that are Heartbeating SCM. - */ - @Override - public List getNodes(NodeState nodestate) { - return null; - } - - /** - * Returns the Number of Datanodes that are communicating with SCM. - * - * @param nodestate - State of the node - * @return int -- count - */ - @Override - public int getNodeCount(NodeState nodestate) { - return 0; - } - - /** - * Get all datanodes known to SCM. - * - * @return List of DatanodeDetails known to SCM. - */ - @Override - public List getAllNodes() { - return null; - } - - /** - * Returns the aggregated node stats. - * - * @return the aggregated node stats. - */ - @Override - public SCMNodeStat getStats() { - return null; - } - - /** - * Return a map of node stats. - * - * @return a map of individual node stats (live/stale but not dead). 
- */ - @Override - public Map getNodeStats() { - return null; - } - - /** - * Return the node stat of the specified datanode. - * - * @param dd - datanode details. - * @return node stat if it is live/stale, null if it is decommissioned or - * doesn't exist. - */ - @Override - public SCMNodeMetric getNodeStat(DatanodeDetails dd) { - return null; - } - - - /** - * Returns the node state of a specific node. - * - * @param dd - DatanodeDetails - * @return Healthy/Stale/Dead. - */ - @Override - public NodeState getNodeState(DatanodeDetails dd) { - return nodeStateMap.get(dd); - } - - /** - * Get set of pipelines a datanode is part of. - * @param dnId - datanodeID - * @return Set of PipelineID - */ - @Override - public Set getPipelines(DatanodeDetails dnId) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Add pipeline information in the NodeManager. - * @param pipeline - Pipeline to be added - */ - @Override - public void addPipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Remove a pipeline information from the NodeManager. - * @param pipeline - Pipeline to be removed - */ - @Override - public void removePipeline(Pipeline pipeline) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - @Override - public void addContainer(DatanodeDetails datanodeDetails, - ContainerID containerId) - throws NodeNotFoundException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Update set of containers available on a datanode. - * @param uuid - DatanodeID - * @param containerIds - Set of containerIDs - * @throws NodeNotFoundException - if datanode is not known. For new datanode - * use addDatanodeInContainerMap call. - */ - @Override - public void setContainers(DatanodeDetails uuid, Set containerIds) - throws NodeNotFoundException { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Return set of containerIDs available on a datanode. - * @param uuid - DatanodeID - * @return - set of containerIDs - */ - @Override - public Set getContainers(DatanodeDetails uuid) { - throw new UnsupportedOperationException("Not yet implemented"); - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *

- *
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Gets the version info from SCM. - * - * @param versionRequest - version Request. - * @return - returns SCM version info and other required information needed by - * datanode. - */ - @Override - public VersionResponse getVersion(SCMVersionRequestProto versionRequest) { - return null; - } - - /** - * Register the node if the node finds that it is not registered with any SCM. - * - * @param dd DatanodeDetailsProto - * @param nodeReport NodeReportProto - * @return SCMHeartbeatResponseProto - */ - @Override - public RegisteredCommand register(DatanodeDetails dd, - NodeReportProto nodeReport, - PipelineReportsProto pipelineReportsProto) { - return null; - } - - /** - * Send heartbeat to indicate the datanode is alive and doing well. - * - * @param dd - Datanode Details. - * @return SCMheartbeat response list - */ - @Override - public List processHeartbeat(DatanodeDetails dd) { - return null; - } - - @Override - public Boolean isNodeRegistered( - DatanodeDetails datanodeDetails) { - return null; - } - - /** - * Clears all nodes from the node Manager. - */ - public void clearMap() { - this.nodeStateMap.clear(); - } - - /** - * Adds a node to the existing Node manager. This is used only for test - * purposes. - * @param id DatanodeDetails - * @param state State you want to put that node to. - */ - public void addNode(DatanodeDetails id, NodeState state) { - nodeStateMap.put(id, state); - } - - @Override - public void addDatanodeCommand(UUID dnId, SCMCommand command) { - this.commandQueue.addCommand(dnId, command); - } - - /** - * Empty implementation for processNodeReport. - * @param dnUuid - * @param nodeReport - */ - @Override - public void processNodeReport(DatanodeDetails dnUuid, - NodeReportProto nodeReport) { - // do nothing. - } - - @Override - public void onMessage(CommandForDatanode commandForDatanode, - EventPublisher publisher) { - // do nothing. - } - - @Override - public List getCommandQueue(UUID dnID) { - return null; - } - - @Override - public DatanodeDetails getNodeByUuid(String address) { - return null; - } - - @Override - public List getNodesByAddress(String address) { - return new LinkedList<>(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java deleted file mode 100644 index 4e8a90bf1d4..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.testutils; -// Helper classes for ozone and container tests. \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping b/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping deleted file mode 100644 index 01f7d5db2d4..00000000000 --- a/hadoop-hdds/server-scm/src/test/resources/nodegroup-mapping +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -host1 /rack1/ng1 -host2 /rack1/ng1 -host3 /rack1/ng2 -host4 /rack1/ng2 -1.2.3.4 /rack1/ng1 -2.3.4.5 /rack1/ng1 -3.4.5.6 /rack1/ng2 -4.5.6.7 /rack1/ng2 \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/resources/rack-mapping b/hadoop-hdds/server-scm/src/test/resources/rack-mapping deleted file mode 100644 index 47eac9731e7..00000000000 --- a/hadoop-hdds/server-scm/src/test/resources/rack-mapping +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -host1 /rack1 -host2 /rack1 -host3 /rack1 -host4 /rack1 -1.2.3.4 /rack1 -2.3.4.5 /rack1 -3.4.5.6 /rack1 -4.5.6.7 /rack1 \ No newline at end of file diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml deleted file mode 100644 index 6f0be62bd60..00000000000 --- a/hadoop-hdds/tools/pom.xml +++ /dev/null @@ -1,56 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-hdds - 0.5.0-SNAPSHOT - - - hadoop-hdds-tools - 0.5.0-SNAPSHOT - Apache Hadoop Distributed Data Store Tools - Apache Hadoop HDDS Tools - jar - - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-client - - - org.apache.hadoop - hadoop-common - - - commons-cli - commons-cli - - - org.xerial - sqlite-jdbc - 3.25.2 - - - - diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java deleted file mode 100644 index f42a8f8411c..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
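The nodegroup-mapping and rack-mapping test resources removed above are plain two-column tables that map a hostname or IP address to a network location (/rack, optionally with a node group) for topology-aware placement tests. A hypothetical, minimal reader for that layout, shown only to make the format concrete; it is not the topology-mapping implementation Hadoop actually uses:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.HashMap;
    import java.util.Map;

    public class TopologyTable {

      // Parses "host /rack[/nodegroup]" lines, skipping comments and blanks.
      static Map<String, String> load(String path) throws IOException {
        Map<String, String> locations = new HashMap<>();
        for (String line : Files.readAllLines(Paths.get(path))) {
          String trimmed = line.trim();
          if (trimmed.isEmpty() || trimmed.startsWith("#")) {
            continue;
          }
          String[] parts = trimmed.split("\\s+");
          locations.put(parts[0], parts[1]);
        }
        return locations;
      }

      public static void main(String[] args) throws IOException {
        // With the nodegroup-mapping contents above, "host1" resolves to /rack1/ng1.
        System.out.println(load(args[0]).get("host1"));
      }
    }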

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * Subcommand to group replication manager related operations. - */ -@Command( - name = "replicationmanager", - description = "ReplicationManager specific operations", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class, - subcommands = { - ReplicationManagerStartSubcommand.class, - ReplicationManagerStopSubcommand.class, - ReplicationManagerStatusSubcommand.class - }) -public class ReplicationManagerCommands implements Callable { - - @ParentCommand - private SCMCLI parent; - - public SCMCLI getParent() { - return parent; - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.parent.getCmd().getSubcommands().get("replicationmanager")); - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java deleted file mode 100644 index 1adec6b0c4b..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
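ReplicationManagerCommands, removed above, is a picocli parent command: it only groups the start/stop/status subcommands and raises MissingSubcommandException when invoked on its own. A generic, self-contained sketch of that parent/subcommand shape, assuming picocli 4.x on the classpath; the command names here are invented for illustration:

    import java.util.concurrent.Callable;
    import picocli.CommandLine;
    import picocli.CommandLine.Command;

    @Command(name = "demo", mixinStandardHelpOptions = true,
        subcommands = StatusSubcommand.class)
    public class DemoCli implements Callable<Integer> {

      @Override
      public Integer call() {
        // No subcommand given: print usage instead of doing any work.
        CommandLine.usage(this, System.err);
        return 1;
      }

      public static void main(String[] args) {
        System.exit(new CommandLine(new DemoCli()).execute(args));
      }
    }

    @Command(name = "status", description = "Report whether the service is running")
    class StatusSubcommand implements Callable<Integer> {
      @Override
      public Integer call() {
        System.out.println("running");
        return 0;
      }
    }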

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * This is the handler that process safe mode check command. - */ -@Command( - name = "start", - description = "Start ReplicationManager", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStartSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - - @ParentCommand - private ReplicationManagerCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java deleted file mode 100644 index 2ebf28c8074..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * This is the handler that process safe mode check command. - */ -@Command( - name = "status", - description = "Check if ReplicationManager is running or not", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStatusSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - - @ParentCommand - private ReplicationManagerCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.getReplicationManagerStatus(); - - // Output data list - if(execReturn){ - LOG.info("ReplicationManager is Running."); - } else { - LOG.info("ReplicationManager is Not Running."); - } - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java deleted file mode 100644 index 7cafd01b12d..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * This is the handler that process safe mode check command. - */ -@Command( - name = "stop", - description = "Stop ReplicationManager", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ReplicationManagerStopSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - - @ParentCommand - private ReplicationManagerCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - LOG.info("Requested SCM to stop ReplicationManager, " + - "it might take sometime for the ReplicationManager to stop."); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java deleted file mode 100644 index 0b5c18e8205..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.cli.container.ContainerCommands; -import org.apache.hadoop.hdds.scm.cli.pipeline.PipelineCommands; -import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.NativeCodeLoader; - -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.hdds.HddsUtils.getScmSecurityClient; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; - -/** - * This class is the CLI of SCM. - */ - -/** - * Container subcommand. - */ -@Command(name = "ozone scmcli", hidden = true, description = - "Developer tools to handle SCM specific " - + "operations.", - versionProvider = HddsVersionProvider.class, - subcommands = { - SafeModeCommands.class, - ContainerCommands.class, - PipelineCommands.class, - TopologySubcommand.class, - ReplicationManagerCommands.class - }, - mixinStandardHelpOptions = true) -public class SCMCLI extends GenericCli { - - @Option(names = {"--scm"}, description = "The destination scm (host:port)") - private String scm = ""; - - /** - * Main for the scm shell Command handling. 
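The handlers deleted above share one wiring pattern: each is a picocli Callable<Void> that reaches the top-level SCMCLI through @ParentCommand, opens an ScmClient in try-with-resources, and issues a single RPC. The sketch below is illustrative only; the PingSubcommand name and its description are invented for the example, and only calls that appear in the deleted sources (createScmClient, inSafeMode) are used.

import java.util.concurrent.Callable;

import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.cli.SCMCLI;
import org.apache.hadoop.hdds.scm.client.ScmClient;

import picocli.CommandLine.Command;
import picocli.CommandLine.ParentCommand;

/**
 * Hypothetical handler illustrating the shared subcommand pattern.
 */
@Command(
    name = "ping",
    description = "Example only: report whether SCM is in safe mode",
    mixinStandardHelpOptions = true,
    versionProvider = HddsVersionProvider.class)
public class PingSubcommand implements Callable<Void> {

  // Direct children of SCMCLI receive the CLI itself as parent; nested
  // groups (container, pipeline, safemode) go through parent.getParent().
  @ParentCommand
  private SCMCLI parent;

  @Override
  public Void call() throws Exception {
    // try-with-resources ensures the RPC proxy and XceiverClientManager
    // created by createScmClient() are closed even when the call fails.
    try (ScmClient scmClient = parent.createScmClient()) {
      System.out.println(scmClient.inSafeMode()
          ? "SCM is in safe mode." : "SCM is out of safe mode.");
      return null;
    }
  }
}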
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - - new SCMCLI().run(argv); - } - - public ScmClient createScmClient() - throws IOException { - - OzoneConfiguration ozoneConf = createOzoneConfiguration(); - if (StringUtils.isNotEmpty(scm)) { - ozoneConf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm); - } - if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { - - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY - + " should be set in ozone-site.xml or with the --scm option"); - } - - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddress = - getScmAddressForClients(ozoneConf); - int containerSizeGB = (int) ozoneConf.getStorageSize( - OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.GB); - ContainerOperationClient - .setContainerSizeB(containerSizeGB * OzoneConsts.GB); - - RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - StorageContainerLocationProtocol client = - TracingUtil.createProxy( - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - scmAddress, UserGroupInformation.getCurrentUser(), ozoneConf, - NetUtils.getDefaultSocketFactory(ozoneConf), - Client.getRpcTimeout(ozoneConf))), - StorageContainerLocationProtocol.class, ozoneConf); - - XceiverClientManager xceiverClientManager = null; - if (OzoneSecurityUtil.isSecurityEnabled(ozoneConf)) { - SecurityConfig securityConfig = new SecurityConfig(ozoneConf); - SCMSecurityProtocol scmSecurityProtocolClient = getScmSecurityClient( - (OzoneConfiguration) securityConfig.getConfiguration()); - String caCertificate = - scmSecurityProtocolClient.getCACertificate(); - xceiverClientManager = new XceiverClientManager(ozoneConf, - OzoneConfiguration.of(ozoneConf).getObject(XceiverClientManager - .ScmClientConfig.class), caCertificate); - } else { - xceiverClientManager = new XceiverClientManager(ozoneConf); - } - return new ContainerOperationClient(client, xceiverClientManager); - } - - public void checkContainerExists(ScmClient scmClient, long containerId) - throws IOException { - ContainerInfo container = scmClient.getContainer(containerId); - if (container == null) { - throw new IllegalArgumentException("No such container " + containerId); - } - } - -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java deleted file mode 100644 index f969f4c2dad..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process safe mode check command. - */ -@Command( - name = "status", - description = "Check if SCM is in safe mode", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class SafeModeCheckSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - - @ParentCommand - private SafeModeCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.inSafeMode(); - - // Output data list - if(execReturn){ - LOG.info("SCM is in safe mode."); - } else { - LOG.info("SCM is out of safe mode."); - } - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java deleted file mode 100644 index 3a9a63c5a6a..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * Subcommand to group safe mode related operations. - */ -@Command( - name = "safemode", - description = "Safe mode specific operations", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class, - subcommands = { - SafeModeCheckSubcommand.class, - SafeModeExitSubcommand.class, - }) -public class SafeModeCommands implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCommands.class); - - @ParentCommand - private SCMCLI parent; - - public SCMCLI getParent() { - return parent; - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.parent.getCmd().getSubcommands().get("safemode")); - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java deleted file mode 100644 index 9f1db45bb4e..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process safe mode exit command. - */ -@Command( - name = "exit", - description = "Force SCM out of safe mode", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class SafeModeExitSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeExitSubcommand.class); - - @ParentCommand - private SafeModeCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - boolean execReturn = scmClient.forceExitSafeMode(); - if(execReturn){ - LOG.info("SCM exit safe mode successfully."); - } - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java deleted file mode 100644 index 7de2e4be9c4..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/TopologySubcommand.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import picocli.CommandLine; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONED; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DECOMMISSIONING; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.TreeSet; -import java.util.concurrent.Callable; - -/** - * Handler of printTopology command. - */ -@CommandLine.Command( - name = "printTopology", - description = "Print a tree of the network topology as reported by SCM", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class TopologySubcommand implements Callable { - - @CommandLine.ParentCommand - private SCMCLI parent; - - private static List stateArray = new ArrayList<>(); - - static { - stateArray.add(HEALTHY); - stateArray.add(STALE); - stateArray.add(DEAD); - stateArray.add(DECOMMISSIONING); - stateArray.add(DECOMMISSIONED); - } - - @CommandLine.Option(names = {"-o", "--order"}, - description = "Print Topology ordered by network location") - private boolean order; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.createScmClient()) { - for (HddsProtos.NodeState state : stateArray) { - List nodes = scmClient.queryNode(state, - HddsProtos.QueryScope.CLUSTER, ""); - if (nodes != null && nodes.size() > 0) { - // show node state - System.out.println("State = " + state.toString()); - if (order) { - printOrderedByLocation(nodes); - } else { - printNodesWithLocation(nodes); - } - } - } - return null; - } - } - - // Format - // Location: rack1 - // ipAddress(hostName) - private void printOrderedByLocation(List nodes) { - HashMap> tree = - new HashMap<>(); - for (HddsProtos.Node node : nodes) { - String location = node.getNodeID().getNetworkLocation(); - if (location != null && !tree.containsKey(location)) { - tree.put(location, new TreeSet<>()); - } - tree.get(location).add(DatanodeDetails.getFromProtoBuf(node.getNodeID())); - } - ArrayList locations = new ArrayList<>(tree.keySet()); - Collections.sort(locations); - - locations.forEach(location -> { - System.out.println("Location: " + location); - tree.get(location).forEach(node -> { - System.out.println(" " + node.getIpAddress() + "(" + node.getHostName() - + ")"); - }); - }); - } - - - // Format "ipAddress(hostName) networkLocation" - private void printNodesWithLocation(Collection nodes) { - nodes.forEach(node -> { - System.out.print(" " + node.getNodeID().getIpAddress() + "(" + - node.getNodeID().getHostName() + ")"); - System.out.println(" " + - (node.getNodeID().getNetworkLocation() != null ? 
- node.getNodeID().getNetworkLocation() : "NA")); - }); - } -} \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java deleted file mode 100644 index 4bf2013b054..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseSubcommand.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * The handler of close container command. - */ -@Command( - name = "close", - description = "close container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class CloseSubcommand implements Callable { - - @ParentCommand - private ContainerCommands parent; - - @Parameters(description = "Id of the container to close") - private long containerId; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - parent.getParent().checkContainerExists(scmClient, containerId); - scmClient.closeContainer(containerId); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java deleted file mode 100644 index bf17bfda338..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * Subcommand to group container related operations. - */ -@Command( - name = "container", - description = "Container specific operations", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class, - subcommands = { - ListSubcommand.class, - InfoSubcommand.class, - DeleteSubcommand.class, - CreateSubcommand.class, - CloseSubcommand.class - }) -public class ContainerCommands implements Callable { - - @ParentCommand - private SCMCLI parent; - - public SCMCLI getParent() { - return parent; - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.parent.getCmd().getSubcommands().get("container")); - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java deleted file mode 100644 index eb79e50506e..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process container creation command. - */ -@Command( - name = "create", - description = "Create container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class CreateSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(CreateSubcommand.class); - - @ParentCommand - private ContainerCommands parent; - - @Option(description = "Owner of the new container", defaultValue = "OZONE", - required = false, names = { - "-o", "--owner"}) - - private String owner; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", - container.getContainerInfo().getContainerID()); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java deleted file mode 100644 index 4989e03b34f..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteSubcommand.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process delete container command. - */ -@Command( - name = "delete", - description = "Delete container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class DeleteSubcommand implements Callable { - - @Parameters(description = "Id of the container to close") - private long containerId; - - @Option(names = {"-f", - "--force"}, description = "forcibly delete the container") - private boolean force; - - @ParentCommand - private ContainerCommands parent; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - parent.getParent().checkContainerExists(scmClient, containerId); - scmClient.deleteContainer(containerId, force); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java deleted file mode 100644 index 31fdb1d33c5..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.util.concurrent.Callable; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; - -import com.google.common.base.Preconditions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process container info command. - */ -@Command( - name = "info", - description = "Show information about a specific container", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class InfoSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - - @ParentCommand - private ContainerCommands parent; - - @Parameters(description = "Decimal id of the container.") - private long containerID; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - ContainerWithPipeline container = scmClient. - getContainerWithPipeline(containerID); - Preconditions.checkNotNull(container, "Container cannot be null"); - - ContainerDataProto containerData = scmClient.readContainer(container - .getContainerInfo().getContainerID(), container.getPipeline()); - - // Print container report info. - LOG.info("Container id: {}", containerID); - String openStatus = - containerData.getState() == ContainerDataProto.State.OPEN ? "OPEN" : - "CLOSED"; - LOG.info("Container State: {}", openStatus); - LOG.info("Container Path: {}", containerData.getContainerPath()); - - // Output meta data. - String metadataStr = containerData.getMetadataList().stream().map( - p -> p.getKey() + ":" + p.getValue()) - .collect(Collectors.joining(", ")); - LOG.info("Container Metadata: {}", metadataStr); - - // Print pipeline of an existing container. - String machinesStr = container.getPipeline().getNodes().stream().map( - DatanodeDetails::getHostName).collect(Collectors.joining(",")); - LOG.info("Datanodes: [{}]", machinesStr); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java deleted file mode 100644 index 5169c807799..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.container; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.Help.Visibility; -import picocli.CommandLine.Option; -import picocli.CommandLine.ParentCommand; - -/** - * This is the handler that process container list command. - */ -@Command( - name = "list", - description = "List containers", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ListSubcommand implements Callable { - - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - - @ParentCommand - private ContainerCommands parent; - - @Option(names = {"-s", "--start"}, - description = "Container id to start the iteration", required = true) - private long startId = 1; - - @Option(names = {"-c", "--count"}, - description = "Maximum number of containers to list", - defaultValue = "20", showDefaultValue = Visibility.ALWAYS) - private int count = 20; - - private void outputContainerInfo(ContainerInfo containerInfo) - throws IOException { - // Print container report info. - LOG.info("{}", containerInfo.toJsonString()); - } - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - - List containerList = - scmClient.listContainer(startId, count); - - // Output data list - for (ContainerInfo container : containerList) { - outputContainerInfo(container); - } - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java deleted file mode 100644 index ff8adbc56f1..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Contains all of the container related scm commands. 
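Taken together, the container handlers above (create, info, delete, close, list) drive a small, consistent slice of the ScmClient interface. A hedged sketch of the same calls outside picocli: the ContainerRoundTrip class name is invented, the SCMCLI instance is assumed to be initialized, and only methods used by the deleted subcommands appear.

import java.util.List;

import org.apache.hadoop.hdds.scm.cli.SCMCLI;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

/**
 * Hypothetical helper showing the ScmClient container calls end to end.
 */
public final class ContainerRoundTrip {

  private ContainerRoundTrip() {
  }

  public static void roundTrip(SCMCLI scmCli) throws Exception {
    try (ScmClient scmClient = scmCli.createScmClient()) {
      // CreateSubcommand: allocate a container for the default "OZONE" owner.
      ContainerWithPipeline created = scmClient.createContainer("OZONE");
      long id = created.getContainerInfo().getContainerID();

      // ListSubcommand: page through containers starting from a given id.
      List<ContainerInfo> containers = scmClient.listContainer(id, 20);
      for (ContainerInfo info : containers) {
        System.out.println(info.toJsonString());
      }

      // CloseSubcommand / DeleteSubcommand: close, then force-delete.
      scmClient.closeContainer(id);
      scmClient.deleteContainer(id, true);
    }
  }
}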
- */ -package org.apache.hadoop.hdds.scm.cli.container; \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java deleted file mode 100644 index d358b3cf6a5..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * SCM related cli tools. - */ -/** - * SCM related cli tools. - */ -package org.apache.hadoop.hdds.scm.cli; \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java deleted file mode 100644 index ec4b1b789e8..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ActivatePipelineSubcommand.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import picocli.CommandLine; - -import java.util.concurrent.Callable; - -/** - * Handler of activate pipeline command. - */ -@CommandLine.Command( - name = "activate", - description = "Activates the given Pipeline", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ActivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; - - @CommandLine.Parameters(description = "ID of the pipeline to activate") - private String pipelineId; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.activatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java deleted file mode 100644 index 89a280e805c..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ClosePipelineSubcommand.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import picocli.CommandLine; - -import java.util.concurrent.Callable; - -/** - * Handler of close pipeline command. - */ -@CommandLine.Command( - name = "close", - description = "Close pipeline", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ClosePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; - - @CommandLine.Parameters(description = "ID of the pipeline to close") - private String pipelineId; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.closePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java deleted file mode 100644 index 4f4f741a364..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/DeactivatePipelineSubcommand.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import picocli.CommandLine; - -import java.util.concurrent.Callable; - -/** - * Handler of deactivate pipeline command. - */ -@CommandLine.Command( - name = "deactivate", - description = "Deactivates the given Pipeline", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class DeactivatePipelineSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; - - @CommandLine.Parameters(description = "ID of the pipeline to deactivate") - private String pipelineId; - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - scmClient.deactivatePipeline( - HddsProtos.PipelineID.newBuilder().setId(pipelineId).build()); - return null; - } - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java deleted file mode 100644 index 8b3b1b3b8cb..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import picocli.CommandLine; - -import java.util.concurrent.Callable; - -/** - * Handler of list pipelines command. - */ -@CommandLine.Command( - name = "list", - description = "List all active pipelines", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ListPipelinesSubcommand implements Callable { - - @CommandLine.ParentCommand - private PipelineCommands parent; - - @CommandLine.Option(names = {"-ffc", "--filterByFactor"}, - description = "Filter listed pipelines by Factor(ONE/one)", - defaultValue = "", - required = false) - private String factor; - - @CommandLine.Option(names = {"-fst", "--filterByState"}, - description = "Filter listed pipelines by State(OPEN/CLOSE)", - defaultValue = "", - required = false) - private String state; - - - @Override - public Void call() throws Exception { - try (ScmClient scmClient = parent.getParent().createScmClient()) { - if (isNullOrEmpty(factor) && isNullOrEmpty(state)) { - scmClient.listPipelines().forEach(System.out::println); - } else { - scmClient.listPipelines().stream() - .filter(p -> ((isNullOrEmpty(factor) || - (p.getFactor().toString().compareToIgnoreCase(factor) == 0)) - && (isNullOrEmpty(state) || - (p.getPipelineState().toString().compareToIgnoreCase(state) - == 0)))) - .forEach(System.out::println); - } - return null; - } - } - - protected static boolean isNullOrEmpty(String str) { - return ((str == null) || str.trim().isEmpty()); - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java deleted file mode 100644 index 948a51a8eb5..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.cli.pipeline; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.scm.cli.SCMCLI; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * Subcommand to group pipeline related operations. - */ -@Command( - name = "pipeline", - description = "Pipeline specific operations", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class, - subcommands = { - ListPipelinesSubcommand.class, - ActivatePipelineSubcommand.class, - DeactivatePipelineSubcommand.class, - ClosePipelineSubcommand.class - }) -public class PipelineCommands implements Callable { - - @ParentCommand - private SCMCLI parent; - - public SCMCLI getParent() { - return parent; - } - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.parent.getCmd().getSubcommands().get("pipeline")); - } -} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java deleted file mode 100644 index 64924d1219f..00000000000 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Contains all of the pipeline related scm commands. - */ -package org.apache.hadoop.hdds.scm.cli.pipeline; \ No newline at end of file diff --git a/hadoop-ozone/.gitignore b/hadoop-ozone/.gitignore deleted file mode 100644 index 93c683135f1..00000000000 --- a/hadoop-ozone/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
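The pipeline handlers above differ only in the ScmClient method they call; each one first wraps the textual pipeline ID into the protobuf HddsProtos.PipelineID message. A short illustrative sketch: the PipelineOps class name and the caller-supplied scmClient/pipelineId values are assumptions, and only calls present in the deleted sources are used.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;

/**
 * Hypothetical helper mirroring the deleted pipeline subcommands.
 */
public final class PipelineOps {

  private PipelineOps() {
  }

  public static void drainPipeline(ScmClient scmClient, String pipelineId)
      throws Exception {
    // ListPipelinesSubcommand with no filter: print every known pipeline.
    scmClient.listPipelines().forEach(System.out::println);

    // Every mutation first builds the protobuf PipelineID from the string id.
    HddsProtos.PipelineID id =
        HddsProtos.PipelineID.newBuilder().setId(pipelineId).build();

    scmClient.deactivatePipeline(id);  // DeactivatePipelineSubcommand
    scmClient.closePipeline(id);       // ClosePipelineSubcommand
  }
}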
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*~ -*.pyc -.blockade -.cache -__pycache__ diff --git a/hadoop-ozone/Jenkinsfile b/hadoop-ozone/Jenkinsfile deleted file mode 100644 index 0055486368d..00000000000 --- a/hadoop-ozone/Jenkinsfile +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -node("ubuntu") { - docker.image('elek/ozone-build').pull() - docker.image('elek/ozone-build').inside("--privileged") { - - stage('Checkout') { - checkout scm - //use this for external Jenkinsfile builds - //checkout poll: false, scm: [$class: 'GitSCM', branches: [[name: env.branch]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: 'github-token', url: "https://github.com/${organization}/${repository}.git"]]] - - } - - stage('Clean') { - status = sh returnStatus: true, script: 'mvn clean -P hdds -am -pl :hadoop-ozone-dist ' - } - - stageRunner('Author', "author", {}) - - stageRunner('Licence', "rat", { - archiveArtifacts 'target/rat-aggregated.txt' - }, 'artifact/target/rat-aggregated.txt/*view*/') - - stageRunner('Build', "build", {}) - - stageRunner('Findbugs', "findbugs", { - archiveArtifacts 'target/findbugs-all.txt' - - }, 'artifact/target/findbugs-all.txt/*view*/') - - stageRunner('Checkstyle', "checkstyle", { - checkstyle canComputeNew: false, canRunOnFailed: true, defaultEncoding: '', healthy: '', pattern: '**/checkstyle-errors.xml', unHealthy: '' - }, 'checkstyleResult') - - stageRunner('Acceptance', "acceptance", { - archiveArtifacts 'hadoop-ozone/dist/target/ozone-0.4.0-SNAPSHOT/smoketest/result/**' - }) - - stageRunner('Unit test', "unit", { - junit '**/target/surefire-reports/*.xml' - }, 'testReport/') - - } - -} - -def stageRunner(name, type, processResult, url = '') { - try { - stage(name) { - prStatusStart(type) - status = sh returnStatus: true, script: 'hadoop-ozone/dev-support/checks/' + type + '.sh' - processResult() - prStatusResult(status, type, url) - } - return true - } catch (RuntimeException ex) { - currentBuild.result = "FAILED" - return false - } -} - -def githubStatus(name, status, description, url='') { - commitId = sh(returnStdout: true, script: 'git rev-parse HEAD') - context = 'ci/ozone/' + name - if (url) { - githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status, 
targetUrl: url - } else { - githubNotify account: 'apache', context: context, credentialsId: 'github-pr-ozone', description: description, repo: 'hadoop', sha: commitId, status: status - } -} -def prStatusStart(name) { - githubStatus(name, - "PENDING", - name + " is started") - - -} - -def prStatusResult(responseCode, name, url = '') { - status = "ERROR" - desc = "failed" - if (responseCode == 0) { - status = "SUCCESS" - desc = "passed" - } - message = name + " check is " + desc - if (url) { - githubStatus(name, - status, - message, - env.BUILD_URL + url) - } else { - githubStatus(name, - status, - message) - } - - if (responseCode != 0) { - throw new RuntimeException(message) - } -} diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml deleted file mode 100644 index 2fefd8b1ef8..00000000000 --- a/hadoop-ozone/client/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-client - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Client - Apache Hadoop Ozone Client - jar - - - - org.apache.hadoop - hadoop-ozone-common - - - junit - junit - test - - - diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java deleted file mode 100644 index 5bae15ddfe1..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * This class encapsulates the arguments that are - * required for creating a bucket. - */ -public final class BucketArgs { - - /** - * ACL Information. - */ - private List acls; - /** - * Bucket Version flag. - */ - private Boolean versioning; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Custom key/value metadata. - */ - private Map metadata; - - /** - * Bucket encryption key name. - */ - private String bucketEncryptionKey; - - /** - * Private constructor, constructed via builder. - * @param versioning Bucket version flag. - * @param storageType Storage type to be used. - * @param acls list of ACLs. 
- * @param metadata map of bucket metadata - * @param bucketEncryptionKey bucket encryption key name - */ - private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey) { - this.acls = acls; - this.versioning = versioning; - this.storageType = storageType; - this.metadata = metadata; - this.bucketEncryptionKey = bucketEncryptionKey; - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public Boolean getVersioning() { - return versioning; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns the ACL's associated with this bucket. - * @return {@literal List} - */ - public List getAcls() { - return acls; - } - - /** - * Custom metadata for the buckets. - * - * @return key value map - */ - public Map getMetadata() { - return metadata; - } - - /** - * Returns the bucket encryption key name. - * @return bucket encryption key - */ - public String getEncryptionKey() { - return bucketEncryptionKey; - } - - /** - * Returns new builder class that builds a OmBucketInfo. - * - * @return Builder - */ - public static BucketArgs.Builder newBuilder() { - return new BucketArgs.Builder(); - } - - /** - * Builder for OmBucketInfo. - */ - public static class Builder { - private Boolean versioning; - private StorageType storageType; - private List acls; - private Map metadata; - private String bucketEncryptionKey; - - public Builder() { - metadata = new HashMap<>(); - } - - public BucketArgs.Builder setVersioning(Boolean versionFlag) { - this.versioning = versionFlag; - return this; - } - - public BucketArgs.Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - public BucketArgs.Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; - return this; - } - - public BucketArgs.Builder addMetadata(String key, String value) { - this.metadata.put(key, value); - return this; - } - - public BucketArgs.Builder setBucketEncryptionKey(String bek) { - this.bucketEncryptionKey = bek; - return this; - } - /** - * Constructs the BucketArgs. - * @return instance of BucketArgs. - */ - public BucketArgs build() { - return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java deleted file mode 100644 index 2db4a6d5aa3..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ /dev/null @@ -1,498 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
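BucketArgs is a passive value object assembled through its nested Builder; build() simply copies the collected fields, so anything left unset stays null. A small usage sketch follows: the metadata key/value and the "bucketKey1" encryption key name are invented illustrations, not defaults from the original code.

import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.client.BucketArgs;

/**
 * Hypothetical snippet showing how BucketArgs was typically assembled.
 */
public final class BucketArgsExample {

  private BucketArgsExample() {
  }

  public static BucketArgs versionedSsdBucket() {
    return BucketArgs.newBuilder()
        .setVersioning(true)                     // enable bucket versioning
        .setStorageType(StorageType.SSD)         // one of RAM_DISK, SSD, DISK, ARCHIVE
        .addMetadata("owner-team", "analytics")  // illustrative custom metadata
        .setBucketEncryptionKey("bucketKey1")    // assumed encryption key name
        .build();
  }
}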
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.Objects; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.security.UserGroupInformation; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import org.apache.hadoop.security.token.Token; - -/** - * ObjectStore class is responsible for the client operations that can be - * performed on Ozone Object Store. - */ -public class ObjectStore { - - /** - * The proxy used for connecting to the cluster and perform - * client operations. - */ - // TODO: remove rest api and client - private final ClientProtocol proxy; - - /** - * Cache size to be used for listVolume calls. - */ - private int listCacheSize; - - /** - * Creates an instance of ObjectStore. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - */ - public ObjectStore(Configuration conf, ClientProtocol proxy) { - this.proxy = TracingUtil.createProxy(proxy, ClientProtocol.class, conf); - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - } - - @VisibleForTesting - protected ObjectStore() { - proxy = null; - } - - @VisibleForTesting - public ClientProtocol getClientProxy() { - return proxy; - } - - /** - * Creates the volume with default values. - * @param volumeName Name of the volume to be created. - * @throws IOException - */ - public void createVolume(String volumeName) throws IOException { - proxy.createVolume(volumeName); - } - - /** - * Creates the volume. - * @param volumeName Name of the volume to be created. - * @param volumeArgs Volume properties. - * @throws IOException - */ - public void createVolume(String volumeName, VolumeArgs volumeArgs) - throws IOException { - proxy.createVolume(volumeName, volumeArgs); - } - - /** - * Creates an S3 bucket inside Ozone manager and creates the mapping needed - * to access via both S3 and Ozone. - * @param userName - S3 user name. - * @param s3BucketName - S3 bucket Name. - * @throws IOException - On failure, throws an exception like Bucket exists. - */ - public void createS3Bucket(String userName, String s3BucketName) throws - IOException { - proxy.createS3Bucket(userName, s3BucketName); - } - - /** - * Deletes an s3 bucket and removes mapping of Ozone volume/bucket. - * @param bucketName - S3 Bucket Name. - * @throws IOException in case the bucket cannot be deleted. - */ - public void deleteS3Bucket(String bucketName) throws IOException { - proxy.deleteS3Bucket(bucketName); - } - - /** - * Returns the Ozone Namespace for the S3Bucket. It will return the - * OzoneVolume/OzoneBucketName. - * @param s3BucketName - S3 Bucket Name. 
- * @return String - The Ozone canonical name for this s3 bucket. This - * string is useful for mounting an OzoneFS. - * @throws IOException - Error is throw if the s3bucket does not exist. - */ - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - return proxy.getOzoneBucketMapping(s3BucketName); - } - - /** - * Returns the corresponding Ozone volume given an S3 Bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone Volume name. - * @throws IOException - Throws if the s3Bucket does not exist. - */ - @SuppressWarnings("StringSplitter") - public String getOzoneVolumeName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[0]; - - } - - /** - * Returns the corresponding Ozone bucket name for the given S3 bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone bucket Name. - * @throws IOException - Throws if the s3bucket does not exist. - */ - @SuppressWarnings("StringSplitter") - public String getOzoneBucketName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[1]; - } - - - /** - * Returns the volume information. - * @param volumeName Name of the volume. - * @return OzoneVolume - * @throws IOException - */ - public OzoneVolume getVolume(String volumeName) throws IOException { - OzoneVolume volume = proxy.getVolumeDetails(volumeName); - return volume; - } - - public S3SecretValue getS3Secret(String kerberosID) throws IOException { - return proxy.getS3Secret(kerberosID); - } - - /** - * Returns Iterator to iterate over all buckets for a user. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. - * - * @param userName user name - * @param bucketPrefix Bucket prefix to match - * @return {@code Iterator} - */ - public Iterator listS3Buckets(String userName, - String bucketPrefix) { - return listS3Buckets(userName, bucketPrefix, null); - } - - /** - * Returns Iterator to iterate over all buckets after prevBucket for a - * specific user. If prevBucket is null it returns an iterator to iterate over - * all the buckets of a user. The result can be restricted using bucket - * prefix, will return all buckets if bucket prefix is null. - * - * @param userName user name - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Buckets are listed after this bucket - * @return {@code Iterator} - */ - public Iterator listS3Buckets(String userName, - String bucketPrefix, - String prevBucket) { - return new S3BucketIterator(userName, bucketPrefix, prevBucket); - } - - /** - * Returns Iterator to iterate over all the volumes in object store. - * The result can be restricted using volume prefix, will return all - * volumes if volume prefix is null. - * - * @param volumePrefix Volume prefix to match - * @return {@code Iterator} - */ - public Iterator listVolumes(String volumePrefix) - throws IOException { - return listVolumes(volumePrefix, null); - } - - /** - * Returns Iterator to iterate over all the volumes after prevVolume in object - * store. If prevVolume is null it iterates from the first volume. - * The result can be restricted using volume prefix, will return all - * volumes if volume prefix is null. 
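A short usage sketch (illustrative only, not from the patch) for the volume APIs above. The generic type elided in this hunk is assumed to be OzoneVolume, matching the VolumeIterator implementation further down, and "client" is an already constructed OzoneClient:

    ObjectStore store = client.getObjectStore();
    store.createVolume("volume-one");                    // create with default settings
    OzoneVolume volume = store.getVolume("volume-one");  // fetch volume details

    // List volumes whose names start with "volume"; paging against the
    // OzoneManager is handled internally by the returned iterator.
    Iterator<? extends OzoneVolume> volumes = store.listVolumes("volume");
    while (volumes.hasNext()) {
        System.out.println(volumes.next().getName());
    }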
- * - * @param volumePrefix Volume prefix to match - * @param prevVolume Volumes will be listed after this volume name - * @return {@code Iterator} - */ - public Iterator listVolumes(String volumePrefix, - String prevVolume) throws IOException { - return new VolumeIterator(null, volumePrefix, prevVolume); - } - - /** - * Returns Iterator to iterate over the list of volumes after prevVolume owned - * by a specific user. The result can be restricted using volume prefix, will - * return all volumes if volume prefix is null. If user is not null, returns - * the volume of current user. - * - * @param user User Name - * @param volumePrefix Volume prefix to match - * @param prevVolume Volumes will be listed after this volume name - * @return {@code Iterator} - */ - public Iterator listVolumesByUser(String user, - String volumePrefix, String prevVolume) - throws IOException { - if(Strings.isNullOrEmpty(user)) { - user = UserGroupInformation.getCurrentUser().getShortUserName(); - } - return new VolumeIterator(user, volumePrefix, prevVolume); - } - - /** - * Deletes the volume. - * @param volumeName Name of the volume. - * @throws IOException - */ - public void deleteVolume(String volumeName) throws IOException { - proxy.deleteVolume(volumeName); - } - - public KeyProvider getKeyProvider() throws IOException { - return proxy.getKeyProvider(); - } - - public URI getKeyProviderUri() throws IOException { - return proxy.getKeyProviderUri(); - } - - /** - * An Iterator to iterate over {@link OzoneVolume} list. - */ - private class VolumeIterator implements Iterator { - - private String user = null; - private String volPrefix = null; - - private Iterator currentIterator; - private OzoneVolume currentValue; - - /** - * Creates an Iterator to iterate over all volumes after - * prevVolume of the user. If prevVolume is null it iterates from the - * first volume. The returned volumes match volume prefix. - * @param user user name - * @param volPrefix volume prefix to match - */ - VolumeIterator(String user, String volPrefix, String prevVolume) { - this.user = user; - this.volPrefix = volPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfVolumes(prevVolume).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfVolumes( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneVolume next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Returns the next set of volume list using proxy. - * @param prevVolume previous volume, this will be excluded from the result - * @return {@code List} - */ - private List getNextListOfVolumes(String prevVolume) { - try { - //if user is null, we do list of all volumes. - if(user != null) { - return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize); - } - return proxy.listVolumes(volPrefix, prevVolume, listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - /** - * An Iterator to iterate over {@link OzoneBucket} list. - */ - public class S3BucketIterator implements Iterator { - - private String bucketPrefix = null; - private String userName; - - private Iterator currentIterator; - private OzoneBucket currentValue; - - - /** - * Creates an Iterator to iterate over all buckets after prevBucket for - * a user. 
If prevBucket is null it returns an iterator which list all - * the buckets of the user. - * The returned buckets match bucket prefix. - * @param user - * @param bucketPrefix - * @param prevBucket - */ - public S3BucketIterator(String user, String bucketPrefix, String - prevBucket) { - Objects.requireNonNull(user); - this.userName = user; - this.bucketPrefix = bucketPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfS3Buckets(prevBucket).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfS3Buckets( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneBucket next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Gets the next set of bucket list using proxy. - * @param prevBucket - * @return {@code List} - */ - private List getNextListOfS3Buckets(String prevBucket) { - try { - return proxy.listS3Buckets(userName, bucketPrefix, prevBucket, - listCacheSize); - } catch (OMException e) { - if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) { - return new ArrayList<>(); - } else { - throw new RuntimeException(e); - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } - - /** - * Get a valid Delegation Token. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws IOException - */ - public Token getDelegationToken(Text renewer) - throws IOException { - return proxy.getDelegationToken(renewer); - } - - /** - * Renew an existing delegation token. - * - * @param token delegation token obtained earlier - * @return the new expiration time - * @throws IOException - */ - public long renewDelegationToken(Token token) - throws IOException { - return proxy.renewDelegationToken(token); - } - - /** - * Cancel an existing delegation token. - * - * @param token delegation token - * @throws IOException - */ - public void cancelDelegationToken(Token token) - throws IOException { - proxy.cancelDelegationToken(token); - } - - /** - * @return canonical service name of ozone delegation token. - */ - public String getCanonicalServiceName() { - return proxy.getCanonicalServiceName(); - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @return true if acl is added successfully, else false. - * @throws IOException if there is error. - * */ - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - return proxy.addAcl(obj, acl); - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @return true if acl is added successfully, else false. - * @throws IOException if there is error. - */ - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - return proxy.removeAcl(obj, acl); - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @return true if acl is added successfully, else false. - * @throws IOException if there is error. 
- */ - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - return proxy.setAcl(obj, acls); - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @return true if acl is added successfully, else false. - * @throws IOException if there is error. - */ - public List getAcl(OzoneObj obj) throws IOException { - return proxy.getAcl(obj); - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java deleted file mode 100644 index bcd7152a7d1..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ /dev/null @@ -1,624 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.WithMetadata; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; - -/** - * A class that encapsulates OzoneBucket. - */ -public class OzoneBucket extends WithMetadata { - - /** - * The proxy used for connecting to the cluster and perform - * client operations. - */ - private final ClientProtocol proxy; - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String name; - /** - * Default replication factor to be used while creating keys. - */ - private final ReplicationFactor defaultReplication; - - /** - * Default replication type to be used while creating keys. 
- */ - private final ReplicationType defaultReplicationType; - - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Bucket Version flag. - */ - private Boolean versioning; - - /** - * Cache size to be used for listKey calls. - */ - private int listCacheSize; - - /** - * Creation time of the bucket. - */ - private long creationTime; - - /** - * Bucket Encryption key name if bucket encryption is enabled. - */ - private String encryptionKeyName; - - private OzoneObj ozoneObj; - - - private OzoneBucket(Configuration conf, String volumeName, - String bucketName, ReplicationFactor defaultReplication, - ReplicationType defaultReplicationType, ClientProtocol proxy) { - Preconditions.checkNotNull(proxy, "Client proxy is not set."); - this.volumeName = volumeName; - this.name = bucketName; - if (defaultReplication == null) { - this.defaultReplication = ReplicationFactor.valueOf(conf.getInt( - OzoneConfigKeys.OZONE_REPLICATION, - OzoneConfigKeys.OZONE_REPLICATION_DEFAULT)); - } else { - this.defaultReplication = defaultReplication; - } - - if (defaultReplicationType == null) { - this.defaultReplicationType = ReplicationType.valueOf(conf.get( - OzoneConfigKeys.OZONE_REPLICATION_TYPE, - OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT)); - } else { - this.defaultReplicationType = defaultReplicationType; - } - this.proxy = proxy; - this.ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE).build(); - } - @SuppressWarnings("parameternumber") - public OzoneBucket(Configuration conf, ClientProtocol proxy, - String volumeName, String bucketName, StorageType storageType, - Boolean versioning, long creationTime, Map metadata, - String encryptionKeyName) { - this(conf, volumeName, bucketName, null, null, proxy); - this.storageType = storageType; - this.versioning = versioning; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - this.creationTime = creationTime; - this.metadata = metadata; - this.encryptionKeyName = encryptionKeyName; - } - - /** - * Constructs OzoneBucket instance. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - * @param volumeName Name of the volume the bucket belongs to. - * @param bucketName Name of the bucket. - * @param storageType StorageType of the bucket. - * @param versioning versioning status of the bucket. - * @param creationTime creation time of the bucket. 
- */ - @SuppressWarnings("parameternumber") - public OzoneBucket(Configuration conf, ClientProtocol proxy, - String volumeName, String bucketName, StorageType storageType, - Boolean versioning, long creationTime, Map metadata) { - this(conf, volumeName, bucketName, null, null, proxy); - this.storageType = storageType; - this.versioning = versioning; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - this.creationTime = creationTime; - this.metadata = metadata; - } - - @VisibleForTesting - @SuppressWarnings("parameternumber") - OzoneBucket(String volumeName, String name, - ReplicationFactor defaultReplication, - ReplicationType defaultReplicationType, StorageType storageType, - Boolean versioning, long creationTime) { - this.proxy = null; - this.volumeName = volumeName; - this.name = name; - this.defaultReplication = defaultReplication; - this.defaultReplicationType = defaultReplicationType; - this.storageType = storageType; - this.versioning = versioning; - this.creationTime = creationTime; - this.ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(name) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE).build(); - } - - - /** - * Returns Volume Name. - * - * @return volumeName - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns Bucket Name. - * - * @return bucketName - */ - public String getName() { - return name; - } - - /** - * Returns ACL's associated with the Bucket. - * - * @return acls - */ - @JsonIgnore - public List getAcls() throws IOException { - return proxy.getAcl(ozoneObj); - } - - /** - * Returns StorageType of the Bucket. - * - * @return storageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns Versioning associated with the Bucket. - * - * @return versioning - */ - public Boolean getVersioning() { - return versioning; - } - - /** - * Returns creation time of the Bucket. - * - * @return creation time of the bucket - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Return the bucket encryption key name. - * @return the bucket encryption key name - */ - public String getEncryptionKeyName() { - return encryptionKeyName; - } - - /** - * Adds ACLs to the Bucket. - * @param addAcl ACL to be added - * @return true - if acl is successfully added, false if acl already exists - * for the bucket. - * @throws IOException - */ - public boolean addAcls(OzoneAcl addAcl) throws IOException { - return proxy.addAcl(ozoneObj, addAcl); - } - - /** - * Removes ACLs from the bucket. - * @return true - if acl is successfully removed, false if acl to be - * removed does not exist for the bucket. - * @throws IOException - */ - public boolean removeAcls(OzoneAcl removeAcl) throws IOException { - return proxy.removeAcl(ozoneObj, removeAcl); - } - - /** - * Sets/Changes the storage type of the bucket. - * @param newStorageType Storage type to be set - * @throws IOException - */ - public void setStorageType(StorageType newStorageType) throws IOException { - proxy.setBucketStorageType(volumeName, name, newStorageType); - storageType = newStorageType; - } - - /** - * Enable/Disable versioning of the bucket. 
- * @param newVersioning - * @throws IOException - */ - public void setVersioning(Boolean newVersioning) throws IOException { - proxy.setBucketVersioning(volumeName, name, newVersioning); - versioning = newVersioning; - } - - /** - * Creates a new key in the bucket, with default replication type RATIS and - * with replication factor THREE. - * @param key Name of the key to be created. - * @param size Size of the data the key will point to. - * @return OzoneOutputStream to which the data has to be written. - * @throws IOException - */ - public OzoneOutputStream createKey(String key, long size) - throws IOException { - return createKey(key, size, defaultReplicationType, defaultReplication, - new HashMap<>()); - } - - /** - * Creates a new key in the bucket. - * @param key Name of the key to be created. - * @param size Size of the data the key will point to. - * @param type Replication type to be used. - * @param factor Replication factor of the key. - * @return OzoneOutputStream to which the data has to be written. - * @throws IOException - */ - public OzoneOutputStream createKey(String key, long size, - ReplicationType type, - ReplicationFactor factor, - Map keyMetadata) - throws IOException { - return proxy - .createKey(volumeName, name, key, size, type, factor, keyMetadata); - } - - /** - * Reads an existing key from the bucket. - * @param key Name of the key to be read. - * @return OzoneInputStream the stream using which the data can be read. - * @throws IOException - */ - public OzoneInputStream readKey(String key) throws IOException { - return proxy.getKey(volumeName, name, key); - } - - /** - * Returns information about the key. - * @param key Name of the key. - * @return OzoneKeyDetails Information about the key. - * @throws IOException - */ - public OzoneKeyDetails getKey(String key) throws IOException { - return proxy.getKeyDetails(volumeName, name, key); - } - - /** - * Returns Iterator to iterate over all keys in the bucket. - * The result can be restricted using key prefix, will return all - * keys if key prefix is null. - * - * @param keyPrefix Bucket prefix to match - * @return {@code Iterator} - */ - public Iterator listKeys(String keyPrefix) { - return listKeys(keyPrefix, null); - } - - /** - * Returns Iterator to iterate over all keys after prevKey in the bucket. - * If prevKey is null it iterates from the first key in the bucket. - * The result can be restricted using key prefix, will return all - * keys if key prefix is null. - * - * @param keyPrefix Bucket prefix to match - * @param prevKey Keys will be listed after this key name - * @return {@code Iterator} - */ - public Iterator listKeys(String keyPrefix, - String prevKey) { - return new KeyIterator(keyPrefix, prevKey); - } - - /** - * Deletes key from the bucket. - * @param key Name of the key to be deleted. - * @throws IOException - */ - public void deleteKey(String key) throws IOException { - proxy.deleteKey(volumeName, name, key); - } - - public void renameKey(String fromKeyName, String toKeyName) - throws IOException { - proxy.renameKey(volumeName, name, fromKeyName, toKeyName); - } - - /** - * Initiate multipart upload for a specified key. 
- * @param keyName - * @param type - * @param factor - * @return OmMultipartInfo - * @throws IOException - */ - public OmMultipartInfo initiateMultipartUpload(String keyName, - ReplicationType type, - ReplicationFactor factor) - throws IOException { - return proxy.initiateMultipartUpload(volumeName, name, keyName, type, - factor); - } - - /** - * Initiate multipart upload for a specified key, with default replication - * type RATIS and with replication factor THREE. - * @param key Name of the key to be created. - * @return OmMultipartInfo. - * @throws IOException - */ - public OmMultipartInfo initiateMultipartUpload(String key) - throws IOException { - return initiateMultipartUpload(key, defaultReplicationType, - defaultReplication); - } - - /** - * Create a part key for a multipart upload key. - * @param key - * @param size - * @param partNumber - * @param uploadID - * @return OzoneOutputStream - * @throws IOException - */ - public OzoneOutputStream createMultipartKey(String key, long size, - int partNumber, String uploadID) - throws IOException { - return proxy.createMultipartKey(volumeName, name, key, size, partNumber, - uploadID); - } - - /** - * Complete Multipart upload. This will combine all the parts and make the - * key visible in ozone. - * @param key - * @param uploadID - * @param partsMap - * @return OmMultipartUploadCompleteInfo - * @throws IOException - */ - public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, - String uploadID, Map partsMap) throws IOException { - return proxy.completeMultipartUpload(volumeName, name, key, uploadID, - partsMap); - } - - /** - * Abort multipart upload request. - * @param keyName - * @param uploadID - * @throws IOException - */ - public void abortMultipartUpload(String keyName, String uploadID) throws - IOException { - proxy.abortMultipartUpload(volumeName, name, keyName, uploadID); - } - - /** - * Returns list of parts of a multipart upload key. - * @param keyName - * @param uploadID - * @param partNumberMarker - * @param maxParts - * @return OzoneMultipartUploadPartListParts - */ - public OzoneMultipartUploadPartListParts listParts(String keyName, - String uploadID, int partNumberMarker, int maxParts) throws IOException { - // As at most we can have 10000 parts for a key, not using iterator. If - // needed, it can be done later. So, if we send 10000 as max parts at - // most in a single rpc call, we return 0.6 mb, by assuming each part - // size as 60 bytes (ignored the replication type size during calculation) - - return proxy.listParts(volumeName, name, keyName, uploadID, - partNumberMarker, maxParts); - } - - /** - * OzoneFS api to get file status for an entry. - * - * @param keyName Key name - * @throws OMException if file does not exist - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public OzoneFileStatus getFileStatus(String keyName) throws IOException { - return proxy.getOzoneFileStatus(volumeName, name, keyName); - } - - /** - * Ozone FS api to create a directory. Parent directories if do not exist - * are created for the input directory. - * - * @param keyName Key name - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public void createDirectory(String keyName) throws IOException { - proxy.createDirectory(volumeName, name, keyName); - } - - /** - * OzoneFS api to creates an input stream for a file. 
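The multipart methods above combine into a three-step flow. A hedged sketch, not part of the patch: OmMultipartInfo#getUploadID and OzoneOutputStream#getCommitUploadPartInfo are assumed from OM helper classes removed in other parts of this change, and the parts map is assumed to map part number to the committed part name:

    // 1. Initiate the upload and remember the upload id.
    OmMultipartInfo info = bucket.initiateMultipartUpload(
        "large-key", ReplicationType.RATIS, ReplicationFactor.THREE);
    String uploadID = info.getUploadID();                 // assumed accessor

    // 2. Upload each part through a dedicated output stream.
    Map<Integer, String> parts = new TreeMap<>();
    OzoneOutputStream part1 = bucket.createMultipartKey(
        "large-key", partData.length, 1, uploadID);
    part1.write(partData);
    part1.close();
    parts.put(1, part1.getCommitUploadPartInfo().getPartName());  // assumed accessor

    // 3. Stitch the parts together and make the key visible, or call
    //    bucket.abortMultipartUpload("large-key", uploadID) to give up.
    bucket.completeMultipartUpload("large-key", uploadID, parts);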
- * - * @param keyName Key name - * @throws OMException if given key is not found or it is not a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public OzoneInputStream readFile(String keyName) throws IOException { - return proxy.readFile(volumeName, name, keyName); - } - - /** - * OzoneFS api to creates an output stream for a file. - * - * @param keyName Key name - * @param overWrite if true existing file at the location will be overwritten - * @param recursive if true file would be created even if parent directories - * do not exist - * @throws OMException if given key is a directory - * if file exists and isOverwrite flag is false - * if an ancestor exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public OzoneOutputStream createFile(String keyName, long size, - ReplicationType type, ReplicationFactor factor, boolean overWrite, - boolean recursive) throws IOException { - return proxy - .createFile(volumeName, name, keyName, size, type, factor, overWrite, - recursive); - } - - /** - * List the status for a file or a directory and its contents. - * - * @param keyName Absolute path of the entry to be listed - * @param recursive For a directory if true all the descendants of a - * particular directory are listed - * @param startKey Key from which listing needs to start. If startKey exists - * its status is included in the final list. - * @param numEntries Number of entries to list from the start key - * @return list of file status - */ - public List listStatus(String keyName, boolean recursive, - String startKey, long numEntries) throws IOException { - return proxy - .listStatus(volumeName, name, keyName, recursive, startKey, numEntries); - } - - /** - * Return with the list of the in-flight multipart uploads. - * - * @param prefix Optional string to filter for the selected keys. - */ - public OzoneMultipartUploadList listMultipartUploads(String prefix) - throws IOException { - return proxy.listMultipartUploads(volumeName, getName(), prefix); - } - - /** - * An Iterator to iterate over {@link OzoneKey} list. - */ - private class KeyIterator implements Iterator { - - private String keyPrefix = null; - - private Iterator currentIterator; - private OzoneKey currentValue; - - - /** - * Creates an Iterator to iterate over all keys after prevKey in the bucket. - * If prevKey is null it iterates from the first key in the bucket. - * The returned keys match key prefix. - * @param keyPrefix - */ - KeyIterator(String keyPrefix, String prevKey) { - this.keyPrefix = keyPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfKeys(prevKey).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfKeys( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneKey next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Gets the next set of key list using proxy. 
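The OzoneFS-oriented methods above (createDirectory, createFile, readFile, getFileStatus, listStatus) give the bucket a filesystem flavour. A brief illustrative sketch based on the signatures visible in this hunk; the generic types elided by the patch are assumed to be OzoneFileStatus:

    bucket.createDirectory("app/logs");   // missing parent directories are created

    // Create a file, overwriting any existing key and creating missing parents.
    try (OzoneOutputStream out = bucket.createFile("app/logs/run-1.log", data.length,
        ReplicationType.RATIS, ReplicationFactor.THREE, true, true)) {
        out.write(data);
    }

    OzoneFileStatus status = bucket.getFileStatus("app/logs/run-1.log");

    // List up to 100 entries under the directory, non-recursively.
    List<OzoneFileStatus> children = bucket.listStatus("app/logs", false, "", 100);

    try (OzoneInputStream in = bucket.readFile("app/logs/run-1.log")) {
        // consume the stream
    }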
- * @param prevKey - * @return {@code List} - */ - private List getNextListOfKeys(String prevKey) { - try { - return proxy.listKeys(volumeName, name, keyPrefix, prevKey, - listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java deleted file mode 100644 index 0d65d73fc3b..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; - -import java.io.Closeable; -import java.io.IOException; - -import com.google.common.annotations.VisibleForTesting; - -/** - * OzoneClient connects to Ozone Cluster and - * perform basic operations. - */ -public class OzoneClient implements Closeable { - - /* - * OzoneClient connects to Ozone Cluster and - * perform basic operations. - * - * +-------------+ +---+ +-------------------------------------+ - * | OzoneClient | --> | C | | Object Store | - * |_____________| | l | | +-------------------------------+ | - * | i | | | Volume(s) | | - * | e | | | +------------------------+ | | - * | n | | | | Bucket(s) | | | - * | t | | | | +------------------+ | | | - * | | | | | | Key -> Value (s) | | | | - * | P |-->| | | | | | | | - * | r | | | | |__________________| | | | - * | o | | | | | | | - * | t | | | |________________________| | | - * | o | | | | | - * | c | | |_______________________________| | - * | o | | | - * | l | |_____________________________________| - * |___| - * Example: - * ObjectStore store = client.getObjectStore(); - * store.createVolume(“volume one”, VolumeArgs); - * volume.setQuota(“10 GB”); - * OzoneVolume volume = store.getVolume(“volume one”); - * volume.createBucket(“bucket one”, BucketArgs); - * bucket.setVersioning(true); - * OzoneOutputStream os = bucket.createKey(“key one”, 1024); - * os.write(byte[]); - * os.close(); - * OzoneInputStream is = bucket.readKey(“key one”); - * is.read(); - * is.close(); - * bucket.deleteKey(“key one”); - * volume.deleteBucket(“bucket one”); - * store.deleteVolume(“volume one”); - * client.close(); - */ - - private final ClientProtocol proxy; - private final ObjectStore objectStore; - - /** - * Creates a new OzoneClient object, generally constructed - * using {@link OzoneClientFactory}. 
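The worked example embedded in the OzoneClient class comment above does not compile as written (it uses curly quotes and elides variable declarations). A cleaned-up version of the same flow, with the caveat that OzoneVolume's createBucket/getBucket/deleteBucket methods are assumed from that comment rather than from code in this hunk:

    ObjectStore store = client.getObjectStore();
    store.createVolume("volume-one");
    OzoneVolume volume = store.getVolume("volume-one");

    volume.createBucket("bucket-one");
    OzoneBucket bucket = volume.getBucket("bucket-one");

    byte[] data = "hello ozone".getBytes(StandardCharsets.UTF_8);
    try (OzoneOutputStream out = bucket.createKey("key-one", data.length)) {
        out.write(data);                    // write the value
    }
    try (OzoneInputStream in = bucket.readKey("key-one")) {
        // read the value back
    }

    bucket.deleteKey("key-one");
    volume.deleteBucket("bucket-one");
    store.deleteVolume("volume-one");
    client.close();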
- * @param conf Configuration object - * @param proxy ClientProtocol proxy instance - */ - public OzoneClient(Configuration conf, ClientProtocol proxy) { - this.proxy = proxy; - this.objectStore = new ObjectStore(conf, this.proxy); - } - - @VisibleForTesting - protected OzoneClient(ObjectStore objectStore) { - this.objectStore = objectStore; - this.proxy = null; - } - /** - * Returns the object store associated with the Ozone Cluster. - * @return ObjectStore - */ - public ObjectStore getObjectStore() { - return objectStore; - } - - /** - * Closes the client and all the underlying resources. - * @throws IOException - */ - @Override - public void close() throws IOException { - proxy.close(); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java deleted file mode 100644 index 2e9080a66f8..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client; - -/** - * This exception is thrown by the Ozone Clients. - */ -public class OzoneClientException extends Exception { - public OzoneClientException() { - } - - public OzoneClientException(String s) { - super(s); - } - - public OzoneClientException(String s, Throwable throwable) { - super(s, throwable); - } - - public OzoneClientException(Throwable throwable) { - super(throwable); - } - - public OzoneClientException(String s, Throwable throwable, boolean b, - boolean b1) { - super(s, throwable, b, b1); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java deleted file mode 100644 index caf989ed497..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ /dev/null @@ -1,268 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.lang.reflect.Proxy; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rpc.RpcClient; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Factory class to create OzoneClients. - */ -public final class OzoneClientFactory { - - private static final Logger LOG = LoggerFactory.getLogger( - OzoneClientFactory.class); - - /** - * Private constructor, class is not meant to be initialized. - */ - private OzoneClientFactory(){} - - - /** - * Constructs and return an OzoneClient with default configuration. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getClient() throws IOException { - LOG.info("Creating OzoneClient with default configuration."); - return getClient(new OzoneConfiguration()); - } - - /** - * Constructs and return an OzoneClient based on the configuration object. - * Protocol type is decided by ozone.client.protocol. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getClient(Configuration config) - throws IOException { - Preconditions.checkNotNull(config); - return getClient(getClientProtocol(config), config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost) - throws IOException { - Configuration config = new OzoneConfiguration(); - int port = OmUtils.getOmRpcPort(config); - return getRpcClient(omHost, port, config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omRpcPort - * RPC port of OzoneManager. - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost, Integer omRpcPort) - throws IOException { - return getRpcClient(omHost, omRpcPort, new OzoneConfiguration()); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omRpcPort - * RPC port of OzoneManager. - * - * @param omServiceId - * Service ID of OzoneManager HA cluster. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, - String omServiceId, Configuration config) throws IOException { - Preconditions.checkNotNull(omHost); - Preconditions.checkNotNull(omRpcPort); - Preconditions.checkNotNull(omServiceId); - Preconditions.checkNotNull(config); - config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort); - return getRpcClient(omServiceId, config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omHost - * hostname of OzoneManager to connect. - * - * @param omRpcPort - * RPC port of OzoneManager. 
- * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, - Configuration config) - throws IOException { - Preconditions.checkNotNull(omHost); - Preconditions.checkNotNull(omRpcPort); - Preconditions.checkNotNull(config); - config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort); - return getRpcClient(config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param omServiceId - * Service ID of OzoneManager HA cluster. - * - * @param config - * Configuration to be used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(String omServiceId, - Configuration config) throws IOException { - Preconditions.checkNotNull(omServiceId); - Preconditions.checkNotNull(config); - // Won't set OZONE_OM_ADDRESS_KEY here since service id is passed directly, - // leaving OZONE_OM_ADDRESS_KEY value as is. - return getClient(getClientProtocol(config, omServiceId), config); - } - - /** - * Returns an OzoneClient which will use RPC protocol. - * - * @param config - * used for OzoneClient creation - * - * @return OzoneClient - * - * @throws IOException - */ - public static OzoneClient getRpcClient(Configuration config) - throws IOException { - Preconditions.checkNotNull(config); - return getClient(getClientProtocol(config), - config); - } - - /** - * Creates OzoneClient with the given ClientProtocol and Configuration. - * - * @param clientProtocol - * Protocol to be used by the OzoneClient - * - * @param config - * Configuration to be used for OzoneClient creation - */ - private static OzoneClient getClient(ClientProtocol clientProtocol, - Configuration config) { - OzoneClientInvocationHandler clientHandler = - new OzoneClientInvocationHandler(clientProtocol); - ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance( - OzoneClientInvocationHandler.class.getClassLoader(), - new Class[]{ClientProtocol.class}, clientHandler); - return new OzoneClient(config, proxy); - } - - /** - * Returns an instance of Protocol class. - * - * - * @param config - * Configuration used to initialize ClientProtocol. - * - * @return ClientProtocol - * - * @throws IOException - */ - private static ClientProtocol getClientProtocol(Configuration config) - throws IOException { - return getClientProtocol(config, null); - } - - /** - * Returns an instance of Protocol class. - * - * - * @param config - * Configuration used to initialize ClientProtocol. 
- * - * @return ClientProtocol - * - * @throws IOException - */ - private static ClientProtocol getClientProtocol(Configuration config, - String omServiceId) throws IOException { - try { - return new RpcClient(config, omServiceId); - } catch (Exception e) { - final String message = "Couldn't create RpcClient protocol"; - LOG.error(message + " exception: ", e); - if (e.getCause() instanceof IOException) { - throw (IOException) e.getCause(); - } else { - throw new IOException(message, e); - } - } - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java deleted file mode 100644 index cdc7702ef2e..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientInvocationHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -/** - * Invocation Handler for ozone client which dispatches the call to underlying - * ClientProtocol implementation. - */ -public class OzoneClientInvocationHandler implements InvocationHandler { - - - private static final Logger LOG = LoggerFactory.getLogger(OzoneClient.class); - private final ClientProtocol target; - - /** - * Constructs OzoneClientInvocationHandler with the proxy. - * @param target proxy to be used for method invocation. 
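In practice callers picked one of the factory entry points above depending on how the OzoneManager is addressed. A small sketch (illustrative; the host name and service id are placeholders):

    OzoneConfiguration conf = new OzoneConfiguration();

    // Build a client from the configuration; protocol and OM address come from it.
    OzoneClient client = OzoneClientFactory.getClient(conf);

    // Point at an explicit OM host, using the default OM RPC port.
    OzoneClient hostClient = OzoneClientFactory.getRpcClient("om-host.example.com");

    // HA deployment: address the OM by service id; the OM address key is left untouched.
    OzoneClient haClient = OzoneClientFactory.getRpcClient("om-service-1", conf);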
- */ - public OzoneClientInvocationHandler(ClientProtocol target) { - this.target = target; - } - - @Override - public Object invoke(Object proxy, Method method, Object[] args) - throws Throwable { - LOG.trace("Invoking method {} on target {}", method, target); - try { - long startTime = Time.monotonicNow(); - Object result = method.invoke(target, args); - LOG.debug("Call: {} took {} ms", method, - Time.monotonicNow() - startTime); - return result; - } catch(InvocationTargetException iEx) { - throw iEx.getCause(); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java deleted file mode 100644 index 8531bfbe71e..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client; - -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; - -/** A utility class for OzoneClient. */ -public final class OzoneClientUtils { - - private OzoneClientUtils() {} - - public static RetryPolicy createRetryPolicy(int maxRetryCount, - long retryInterval) { - // retry with fixed sleep between retries - return RetryPolicies.retryUpToMaximumCountWithFixedSleep( - maxRetryCount, retryInterval, TimeUnit.MILLISECONDS); - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java deleted file mode 100644 index d654a604e99..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.hdds.client.ReplicationType; - -/** - * A class that encapsulates OzoneKey. - */ -public class OzoneKey { - - /** - * Name of the Volume the Key belongs to. - */ - private final String volumeName; - /** - * Name of the Bucket the Key belongs to. - */ - private final String bucketName; - /** - * Name of the Key. - */ - private final String name; - /** - * Size of the data. - */ - private final long dataSize; - /** - * Creation time of the key. - */ - private long creationTime; - /** - * Modification time of the key. - */ - private long modificationTime; - - private ReplicationType replicationType; - - private int replicationFactor; - - /** - * Constructs OzoneKey from OmKeyInfo. - * - */ - @SuppressWarnings("parameternumber") - public OzoneKey(String volumeName, String bucketName, - String keyName, long size, long creationTime, - long modificationTime, ReplicationType type, - int replicationFactor) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.name = keyName; - this.dataSize = size; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationType = type; - this.replicationFactor = replicationFactor; - } - - /** - * Returns Volume Name associated with the Key. - * - * @return volumeName - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns Bucket Name associated with the Key. 
- * - * @return bucketName - */ - public String getBucketName(){ - return bucketName; - } - - /** - * Returns the Key Name. - * - * @return keyName - */ - public String getName() { - return name; - } - - /** - * Returns the size of the data. - * - * @return dataSize - */ - public long getDataSize() { - return dataSize; - } - - /** - * Returns the creation time of the key. - * - * @return creation time - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns the modification time of the key. - * - * @return modification time - */ - public long getModificationTime() { - return modificationTime; - } - - /** - * Returns the replication type of the key. - * - * @return replicationType - */ - public ReplicationType getReplicationType() { - return replicationType; - } - - /** - * Returns the replication factor of the key. - * - * @return replicationFactor - */ - public int getReplicationFactor() { - return replicationFactor; - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java deleted file mode 100644 index a57b663450c..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyDetails.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.client.ReplicationType; - -import java.util.List; -import java.util.Map; - -/** - * A class that encapsulates OzoneKeyLocation. - */ -public class OzoneKeyDetails extends OzoneKey { - - /** - * A list of block location information to specify replica locations. - */ - private List ozoneKeyLocations; - - private Map metadata; - - private FileEncryptionInfo feInfo; - - /** - * Constructs OzoneKeyDetails from OmKeyInfo. - */ - @SuppressWarnings("parameternumber") - public OzoneKeyDetails(String volumeName, String bucketName, String keyName, - long size, long creationTime, long modificationTime, - List ozoneKeyLocations, - ReplicationType type, Map metadata, - FileEncryptionInfo feInfo, int replicationFactor) { - super(volumeName, bucketName, keyName, size, creationTime, - modificationTime, type, replicationFactor); - this.ozoneKeyLocations = ozoneKeyLocations; - this.metadata = metadata; - this.feInfo = feInfo; - } - - /** - * Returns the location detail information of the specific Key. - */ - public List getOzoneKeyLocations() { - return ozoneKeyLocations; - } - - public Map getMetadata() { - return metadata; - } - - public FileEncryptionInfo getFileEncryptionInfo() { - return feInfo; - } - /** - * Set details of key location. 
- * @param ozoneKeyLocations - details of key location - */ - public void setOzoneKeyLocations(List ozoneKeyLocations) { - this.ozoneKeyLocations = ozoneKeyLocations; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java deleted file mode 100644 index 0ff8ba749b6..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKeyLocation.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * One key can be stored in one or more containers as one or more blocks. - * This class represents one such block instance. - */ -public class OzoneKeyLocation { - /** - * Which container this key stored. - */ - private final long containerID; - /** - * Which block this key stored inside a container. - */ - private final long localID; - /** - * Data length of this key replica. - */ - private final long length; - /** - * Offset of this key. - */ - private final long offset; - - /** - * Constructs OzoneKeyLocation. - */ - public OzoneKeyLocation(long containerID, long localID, - long length, long offset) { - this.containerID = containerID; - this.localID = localID; - this.length = length; - this.offset = offset; - } - - /** - * Returns the containerID of this Key. - */ - public long getContainerID() { - return containerID; - } - - /** - * Returns the localID of this Key. - */ - public long getLocalID() { - return localID; - } - - /** - * Returns the length of this Key. - */ - public long getLength() { - return length; - } - - /** - * Returns the offset of this Key. - */ - public long getOffset() { - return offset; - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java deleted file mode 100644 index 6eb76c4ff11..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUpload.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.time.Instant;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-/**
- * Information about one initialized upload.
- */
-public class OzoneMultipartUpload {
-
-  private String volumeName;
-
-  private String bucketName;
-
-  private String keyName;
-
-  private String uploadId;
-
-  private Instant creationTime;
-
-  private ReplicationType replicationType;
-
-  private ReplicationFactor replicationFactor;
-
-  public OzoneMultipartUpload(String volumeName, String bucketName,
-      String keyName, String uploadId, Instant creationTime,
-      ReplicationType replicationType,
-      ReplicationFactor replicationFactor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.uploadId = uploadId;
-    this.creationTime = creationTime;
-    this.replicationType = replicationType;
-    this.replicationFactor = replicationFactor;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public String getUploadId() {
-    return uploadId;
-  }
-
-  public Instant getCreationTime() {
-    return creationTime;
-  }
-
-  public void setCreationTime(Instant creationTime) {
-    this.creationTime = creationTime;
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
deleted file mode 100644
index 38377ebc176..00000000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadList.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import java.util.List;
-
-import com.google.common.base.Preconditions;
-
-/**
- * List of in-flight MPU uploads.
- */
-public class OzoneMultipartUploadList {
-
-  private List<OzoneMultipartUpload> uploads;
-
-  public OzoneMultipartUploadList(
-      List<OzoneMultipartUpload> uploads) {
-    Preconditions.checkNotNull(uploads);
-    this.uploads = uploads;
-  }
-
-  public List<OzoneMultipartUpload> getUploads() {
-    return uploads;
-  }
-
-  public void setUploads(
-      List<OzoneMultipartUpload> uploads) {
-    this.uploads = uploads;
-  }
-}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
deleted file mode 100644
index 7ce3148f01d..00000000000
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Class that represents Multipart upload List parts response.
- */
-public class OzoneMultipartUploadPartListParts {
-
-  private ReplicationType replicationType;
-
-  private ReplicationFactor replicationFactor;
-
-  //When a list is truncated, this element specifies the last part in the list,
-  // as well as the value to use for the part-number-marker request parameter
-  // in a subsequent request.
-  private int nextPartNumberMarker;
-  // Indicates whether the returned list of parts is truncated. A true value
-  // indicates that the list was truncated.
-  // A list can be truncated if the number of parts exceeds the limit
-  // returned in the MaxParts element.
-  private boolean truncated;
-  private List<PartInfo> partInfoList = new ArrayList<>();
-
-  public OzoneMultipartUploadPartListParts(ReplicationType type,
-      ReplicationFactor factor,
-      int nextMarker, boolean truncate) {
-    this.replicationType = type;
-    this.nextPartNumberMarker = nextMarker;
-    this.truncated = truncate;
-    this.replicationFactor = factor;
-  }
-
-  public void addAllParts(List<PartInfo> partInfos) {
-    partInfoList.addAll(partInfos);
-  }
-
-  public void addPart(PartInfo partInfo) {
-    this.partInfoList.add(partInfo);
-  }
-
-  public ReplicationType getReplicationType() {
-    return replicationType;
-  }
-
-  public int getNextPartNumberMarker() {
-    return nextPartNumberMarker;
-  }
-
-  public boolean isTruncated() {
-    return truncated;
-  }
-
-  public List<PartInfo> getPartInfoList() {
-    return partInfoList;
-  }
-
-  public ReplicationFactor getReplicationFactor() {
-    return replicationFactor;
-  }
-
-  /**
-   * Class that represents each Part information of a multipart upload part.
- */ - public static class PartInfo { - - private int partNumber; - private String partName; - private long modificationTime; - private long size; - - public PartInfo(int number, String name, long time, long size) { - this.partNumber = number; - this.partName = name; - this.modificationTime = time; - this.size = size; - } - - public int getPartNumber() { - return partNumber; - } - - public String getPartName() { - return partName; - } - - public long getModificationTime() { - return modificationTime; - } - - public long getSize() { - return size; - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java deleted file mode 100644 index f2bdfddbac1..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ /dev/null @@ -1,328 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.NoSuchElementException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.helpers.WithMetadata; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -/** - * A class that encapsulates OzoneVolume. - */ -public class OzoneVolume extends WithMetadata { - - /** - * The proxy used for connecting to the cluster and perform - * client operations. - */ - private final ClientProtocol proxy; - - /** - * Name of the Volume. - */ - private final String name; - - /** - * Admin Name of the Volume. - */ - private String admin; - /** - * Owner of the Volume. - */ - private String owner; - /** - * Quota allocated for the Volume. - */ - private long quotaInBytes; - /** - * Creation time of the volume. - */ - private long creationTime; - /** - * Volume ACLs. - */ - private List acls; - - private int listCacheSize; - - /** - * Constructs OzoneVolume instance. - * @param conf Configuration object. - * @param proxy ClientProtocol proxy. - * @param name Name of the volume. - * @param admin Volume admin. - * @param owner Volume owner. - * @param quotaInBytes Volume quota in bytes. - * @param creationTime creation time of the volume - * @param acls ACLs associated with the volume. - * @param metadata custom key value metadata. 
- */ - @SuppressWarnings("parameternumber") - public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, - String admin, String owner, long quotaInBytes, - long creationTime, List acls, - Map metadata) { - Preconditions.checkNotNull(proxy, "Client proxy is not set."); - this.proxy = proxy; - this.name = name; - this.admin = admin; - this.owner = owner; - this.quotaInBytes = quotaInBytes; - this.creationTime = creationTime; - this.acls = acls; - this.listCacheSize = HddsClientUtils.getListCacheSize(conf); - this.metadata = metadata; - } - - @SuppressWarnings("parameternumber") - public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, - String admin, String owner, long quotaInBytes, - long creationTime, List acls) { - this(conf, proxy, name, admin, owner, quotaInBytes, creationTime, acls, - new HashMap<>()); - } - - @VisibleForTesting - protected OzoneVolume(String name, String admin, String owner, - long quotaInBytes, - long creationTime, List acls) { - this.proxy = null; - this.name = name; - this.admin = admin; - this.owner = owner; - this.quotaInBytes = quotaInBytes; - this.creationTime = creationTime; - this.acls = acls; - this.metadata = new HashMap<>(); - } - - /** - * Returns Volume name. - * - * @return volumeName - */ - public String getName() { - return name; - } - - /** - * Returns Volume's admin name. - * - * @return adminName - */ - public String getAdmin() { - return admin; - } - - /** - * Returns Volume's owner name. - * - * @return ownerName - */ - public String getOwner() { - return owner; - } - - /** - * Returns Quota allocated for the Volume in bytes. - * - * @return quotaInBytes - */ - public long getQuota() { - return quotaInBytes; - } - - /** - * Returns creation time of the volume. - * - * @return creation time. - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns OzoneAcl list associated with the Volume. - * - * @return aclMap - */ - public List getAcls() { - return acls; - } - - /** - * Sets/Changes the owner of this Volume. - * @param owner new owner - * @throws IOException - */ - public void setOwner(String owner) throws IOException { - proxy.setVolumeOwner(name, owner); - this.owner = owner; - } - - /** - * Sets/Changes the quota of this Volume. - * @param quota new quota - * @throws IOException - */ - public void setQuota(OzoneQuota quota) throws IOException { - proxy.setVolumeQuota(name, quota); - this.quotaInBytes = quota.sizeInBytes(); - } - - /** - * Creates a new Bucket in this Volume, with default values. - * @param bucketName Name of the Bucket - * @throws IOException - */ - public void createBucket(String bucketName) - throws IOException { - proxy.createBucket(name, bucketName); - } - - /** - * Creates a new Bucket in this Volume, with properties set in bucketArgs. - * @param bucketName Name of the Bucket - * @param bucketArgs Properties to be set - * @throws IOException - */ - public void createBucket(String bucketName, BucketArgs bucketArgs) - throws IOException { - proxy.createBucket(name, bucketName, bucketArgs); - } - - /** - * Get the Bucket from this Volume. - * @param bucketName Name of the Bucket - * @return OzoneBucket - * @throws IOException - */ - public OzoneBucket getBucket(String bucketName) throws IOException { - OzoneBucket bucket = proxy.getBucketDetails(name, bucketName); - return bucket; - } - - /** - * Returns Iterator to iterate over all buckets in the volume. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. 
- * - * @param bucketPrefix Bucket prefix to match - * @return {@code Iterator} - */ - public Iterator listBuckets(String bucketPrefix) { - return listBuckets(bucketPrefix, null); - } - - /** - * Returns Iterator to iterate over all buckets after prevBucket in the - * volume. - * If prevBucket is null it iterates from the first bucket in the volume. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. - * - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Buckets are listed after this bucket - * @return {@code Iterator} - */ - public Iterator listBuckets(String bucketPrefix, - String prevBucket) { - return new BucketIterator(bucketPrefix, prevBucket); - } - - /** - * Deletes the Bucket from this Volume. - * @param bucketName Name of the Bucket - * @throws IOException - */ - public void deleteBucket(String bucketName) throws IOException { - proxy.deleteBucket(name, bucketName); - } - - - /** - * An Iterator to iterate over {@link OzoneBucket} list. - */ - private class BucketIterator implements Iterator { - - private String bucketPrefix = null; - - private Iterator currentIterator; - private OzoneBucket currentValue; - - - /** - * Creates an Iterator to iterate over all buckets after prevBucket in - * the volume. - * If prevBucket is null it iterates from the first bucket in the volume. - * The returned buckets match bucket prefix. - * @param bucketPrefix - */ - BucketIterator(String bucketPrefix, String prevBucket) { - this.bucketPrefix = bucketPrefix; - this.currentValue = null; - this.currentIterator = getNextListOfBuckets(prevBucket).iterator(); - } - - @Override - public boolean hasNext() { - if(!currentIterator.hasNext()) { - currentIterator = getNextListOfBuckets( - currentValue != null ? currentValue.getName() : null) - .iterator(); - } - return currentIterator.hasNext(); - } - - @Override - public OzoneBucket next() { - if(hasNext()) { - currentValue = currentIterator.next(); - return currentValue; - } - throw new NoSuchElementException(); - } - - /** - * Gets the next set of bucket list using proxy. - * @param prevBucket - * @return {@code List} - */ - private List getNextListOfBuckets(String prevBucket) { - try { - return proxy.listBuckets(name, bucketPrefix, prevBucket, listCacheSize); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java deleted file mode 100644 index 359e195a9f7..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.OzoneAcl; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * This class encapsulates the arguments that are - * required for creating a volume. - */ -public final class VolumeArgs { - - private final String admin; - private final String owner; - private final String quota; - private final List acls; - private Map metadata; - - /** - * Private constructor, constructed via builder. - * @param admin Administrator's name. - * @param owner Volume owner's name - * @param quota Volume Quota. - * @param acls User to access rights map. - */ - private VolumeArgs(String admin, - String owner, - String quota, - List acls, - Map metadata) { - this.admin = admin; - this.owner = owner; - this.quota = quota; - this.acls = acls; - this.metadata = metadata; - } - - /** - * Returns the Admin Name. - * @return String. - */ - public String getAdmin() { - return admin; - } - - /** - * Returns the owner Name. - * @return String - */ - public String getOwner() { - return owner; - } - - /** - * Returns Volume Quota. - * @return Quota. - */ - public String getQuota() { - return quota; - } - - /** - * Return custom key value map. - * - * @return metadata - */ - public Map getMetadata() { - return metadata; - } - - public List getAcls() { - return acls; - } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ - public static VolumeArgs.Builder newBuilder() { - return new VolumeArgs.Builder(); - } - - /** - * Builder for OmVolumeArgs. - */ - public static class Builder { - private String adminName; - private String ownerName; - private String volumeQuota; - private List listOfAcls; - private Map metadata = new HashMap<>(); - - - public VolumeArgs.Builder setAdmin(String admin) { - this.adminName = admin; - return this; - } - - public VolumeArgs.Builder setOwner(String owner) { - this.ownerName = owner; - return this; - } - - public VolumeArgs.Builder setQuota(String quota) { - this.volumeQuota = quota; - return this; - } - - public VolumeArgs.Builder addMetadata(String key, String value) { - metadata.put(key, value); - return this; - } - public VolumeArgs.Builder setAcls(List acls) - throws IOException { - this.listOfAcls = acls; - return this; - } - - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ - public VolumeArgs build() { - return new VolumeArgs(adminName, ownerName, volumeQuota, listOfAcls, - metadata); - } - } - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java deleted file mode 100644 index 8381be08da4..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.io; - -import java.io.IOException; -import java.io.OutputStream; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.hdds.scm.storage.BufferPool; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; - -import java.util.Collection; - -/** - * Helper class used inside {@link BlockOutputStream}. - * */ -public final class BlockOutputStreamEntry extends OutputStream { - - private OutputStream outputStream; - private BlockID blockID; - private final String key; - private final XceiverClientManager xceiverClientManager; - private final Pipeline pipeline; - private final ChecksumType checksumType; - private final int bytesPerChecksum; - private final int chunkSize; - // total number of bytes that should be written to this stream - private final long length; - // the current position of this stream 0 <= currentPosition < length - private long currentPosition; - private Token token; - - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private final long watchTimeout; - private BufferPool bufferPool; - - @SuppressWarnings("parameternumber") - private BlockOutputStreamEntry(BlockID blockID, String key, - XceiverClientManager xceiverClientManager, - Pipeline pipeline, String requestId, int chunkSize, - long length, long streamBufferFlushSize, long streamBufferMaxSize, - long watchTimeout, BufferPool bufferPool, - ChecksumType checksumType, int bytesPerChecksum, - Token token) { - this.outputStream = null; - this.blockID = blockID; - this.key = key; - this.xceiverClientManager = xceiverClientManager; - this.pipeline = pipeline; - this.chunkSize = chunkSize; - this.token = token; - this.length = length; - this.currentPosition = 0; - this.streamBufferFlushSize = streamBufferFlushSize; - this.streamBufferMaxSize = streamBufferMaxSize; - this.watchTimeout = watchTimeout; - this.bufferPool = bufferPool; - this.checksumType = checksumType; - this.bytesPerChecksum = bytesPerChecksum; - } - - long getLength() { - return length; - } - - Token getToken() { - return token; - } - - long getRemaining() { - return length - currentPosition; - } - - /** - * BlockOutputStream is initialized in this function. This makes sure that - * xceiverClient initialization is not done during preallocation and only - * done when data is written. 
- * @throws IOException if xceiverClient initialization fails - */ - private void checkStream() throws IOException { - if (this.outputStream == null) { - if (getToken() != null) { - UserGroupInformation.getCurrentUser().addToken(getToken()); - } - this.outputStream = - new BlockOutputStream(blockID, xceiverClientManager, - pipeline, chunkSize, streamBufferFlushSize, - streamBufferMaxSize, watchTimeout, bufferPool, checksumType, - bytesPerChecksum); - } - } - - - @Override - public void write(int b) throws IOException { - checkStream(); - outputStream.write(b); - this.currentPosition += 1; - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - checkStream(); - outputStream.write(b, off, len); - this.currentPosition += len; - } - - @Override - public void flush() throws IOException { - if (this.outputStream != null) { - this.outputStream.flush(); - } - } - - @Override - public void close() throws IOException { - if (this.outputStream != null) { - this.outputStream.close(); - // after closing the chunkOutPutStream, blockId would have been - // reconstructed with updated bcsId - this.blockID = ((BlockOutputStream) outputStream).getBlockID(); - } - } - - boolean isClosed() { - if (outputStream != null) { - return ((BlockOutputStream) outputStream).isClosed(); - } - return false; - } - - long getTotalAckDataLength() { - if (outputStream != null) { - BlockOutputStream out = (BlockOutputStream) this.outputStream; - blockID = out.getBlockID(); - return out.getTotalAckDataLength(); - } else { - // For a pre allocated block for which no write has been initiated, - // the OutputStream will be null here. - // In such cases, the default blockCommitSequenceId will be 0 - return 0; - } - } - - Collection getFailedServers() throws IOException { - if (outputStream != null) { - BlockOutputStream out = (BlockOutputStream) this.outputStream; - return out.getFailedServers(); - } - return null; - } - - long getWrittenDataLength() throws IOException { - if (outputStream != null) { - BlockOutputStream out = (BlockOutputStream) this.outputStream; - return out.getWrittenDataLength(); - } else { - // For a pre allocated block for which no write has been initiated, - // the OutputStream will be null here. - // In such cases, the default blockCommitSequenceId will be 0 - return 0; - } - } - - void cleanup(boolean invalidateClient) throws IOException { - checkStream(); - BlockOutputStream out = (BlockOutputStream) this.outputStream; - out.cleanup(invalidateClient); - - } - - void writeOnRetry(long len) throws IOException { - checkStream(); - BlockOutputStream out = (BlockOutputStream) this.outputStream; - out.writeOnRetry(len); - this.currentPosition += len; - - } - - /** - * Builder class for ChunkGroupOutputStreamEntry. 
- * */ - public static class Builder { - - private BlockID blockID; - private String key; - private XceiverClientManager xceiverClientManager; - private Pipeline pipeline; - private String requestId; - private int chunkSize; - private long length; - private long streamBufferFlushSize; - private long streamBufferMaxSize; - private long watchTimeout; - private BufferPool bufferPool; - private Token token; - private ChecksumType checksumType; - private int bytesPerChecksum; - - public Builder setChecksumType(ChecksumType type) { - this.checksumType = type; - return this; - } - - public Builder setBytesPerChecksum(int bytes) { - this.bytesPerChecksum = bytes; - return this; - } - - public Builder setBlockID(BlockID bID) { - this.blockID = bID; - return this; - } - - public Builder setKey(String keys) { - this.key = keys; - return this; - } - - public Builder setXceiverClientManager(XceiverClientManager - xClientManager) { - this.xceiverClientManager = xClientManager; - return this; - } - - public Builder setPipeline(Pipeline ppln) { - this.pipeline = ppln; - return this; - } - - public Builder setRequestId(String request) { - this.requestId = request; - return this; - } - - public Builder setChunkSize(int cSize) { - this.chunkSize = cSize; - return this; - } - - public Builder setLength(long len) { - this.length = len; - return this; - } - - public Builder setStreamBufferFlushSize(long bufferFlushSize) { - this.streamBufferFlushSize = bufferFlushSize; - return this; - } - - public Builder setStreamBufferMaxSize(long bufferMaxSize) { - this.streamBufferMaxSize = bufferMaxSize; - return this; - } - - public Builder setWatchTimeout(long timeout) { - this.watchTimeout = timeout; - return this; - } - - public Builder setbufferPool(BufferPool pool) { - this.bufferPool = pool; - return this; - } - - public Builder setToken(Token bToken) { - this.token = bToken; - return this; - } - - public BlockOutputStreamEntry build() { - return new BlockOutputStreamEntry(blockID, key, - xceiverClientManager, pipeline, requestId, chunkSize, - length, streamBufferFlushSize, streamBufferMaxSize, watchTimeout, - bufferPool, checksumType, bytesPerChecksum, token); - } - } - - @VisibleForTesting - public OutputStream getOutputStream() { - return outputStream; - } - - public BlockID getBlockID() { - return blockID; - } - - public String getKey() { - return key; - } - - public XceiverClientManager getXceiverClientManager() { - return xceiverClientManager; - } - - public Pipeline getPipeline() { - return pipeline; - } - - public int getChunkSize() { - return chunkSize; - } - - public long getCurrentPosition() { - return currentPosition; - } - - public long getStreamBufferFlushSize() { - return streamBufferFlushSize; - } - - public long getStreamBufferMaxSize() { - return streamBufferMaxSize; - } - - public long getWatchTimeout() { - return watchTimeout; - } - - public BufferPool getBufferPool() { - return bufferPool; - } - - public void setCurrentPosition(long curPosition) { - this.currentPosition = curPosition; - } -} - - diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java deleted file mode 100644 index b179ca53956..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ /dev/null @@ -1,354 +0,0 @@ - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor 
license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.io; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.storage.BufferPool; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.helpers.*; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.ListIterator; - -/** - * This class manages the stream entries list and handles block allocation - * from OzoneManager. - */ -public class BlockOutputStreamEntryPool { - - public static final Logger LOG = - LoggerFactory.getLogger(BlockOutputStreamEntryPool.class); - - private final List streamEntries; - private int currentStreamIndex; - private final OzoneManagerProtocol omClient; - private final OmKeyArgs keyArgs; - private final XceiverClientManager xceiverClientManager; - private final int chunkSize; - private final String requestID; - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private final long watchTimeout; - private final long blockSize; - private final int bytesPerChecksum; - private final ContainerProtos.ChecksumType checksumType; - private final BufferPool bufferPool; - private OmMultipartCommitUploadPartInfo commitUploadPartInfo; - private final long openID; - private ExcludeList excludeList; - - @SuppressWarnings("parameternumber") - public BlockOutputStreamEntryPool(OzoneManagerProtocol omClient, - int chunkSize, String requestId, HddsProtos.ReplicationFactor factor, - HddsProtos.ReplicationType type, long bufferFlushSize, long bufferMaxSize, - long size, long watchTimeout, ContainerProtos.ChecksumType checksumType, - int bytesPerChecksum, String uploadID, int partNumber, - boolean isMultipart, OmKeyInfo info, - XceiverClientManager xceiverClientManager, long openID) { - streamEntries = new ArrayList<>(); - currentStreamIndex = 0; - this.omClient = omClient; - this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName()) - .setBucketName(info.getBucketName()).setKeyName(info.getKeyName()) - .setType(type).setFactor(factor).setDataSize(info.getDataSize()) - .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID) - .setMultipartUploadPartNumber(partNumber).build(); - this.xceiverClientManager = xceiverClientManager; - 
this.chunkSize = chunkSize; - this.requestID = requestId; - this.streamBufferFlushSize = bufferFlushSize; - this.streamBufferMaxSize = bufferMaxSize; - this.blockSize = size; - this.watchTimeout = watchTimeout; - this.bytesPerChecksum = bytesPerChecksum; - this.checksumType = checksumType; - this.openID = openID; - this.excludeList = new ExcludeList(); - - Preconditions.checkState(chunkSize > 0); - Preconditions.checkState(streamBufferFlushSize > 0); - Preconditions.checkState(streamBufferMaxSize > 0); - Preconditions.checkState(blockSize > 0); - Preconditions.checkState(streamBufferFlushSize % chunkSize == 0); - Preconditions.checkState(streamBufferMaxSize % streamBufferFlushSize == 0); - Preconditions.checkState(blockSize % streamBufferMaxSize == 0); - this.bufferPool = - new BufferPool(chunkSize, (int) streamBufferMaxSize / chunkSize, - xceiverClientManager.byteBufferToByteStringConversion()); - } - - /** - * A constructor for testing purpose only. - * - * @see KeyOutputStream#KeyOutputStream() - */ - @VisibleForTesting - BlockOutputStreamEntryPool() { - streamEntries = new ArrayList<>(); - omClient = null; - keyArgs = null; - xceiverClientManager = null; - chunkSize = 0; - requestID = null; - streamBufferFlushSize = 0; - streamBufferMaxSize = 0; - bufferPool = new BufferPool(chunkSize, 1); - watchTimeout = 0; - blockSize = 0; - this.checksumType = ContainerProtos.ChecksumType.valueOf( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT); - this.bytesPerChecksum = OzoneConfigKeys - .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB - currentStreamIndex = 0; - openID = -1; - } - - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. 
- * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - // server may return any number of blocks, (0 to any) - // only the blocks allocated in this open session (block createVersion - // equals to open session version) - for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) { - if (subKeyInfo.getCreateVersion() == openVersion) { - addKeyLocationInfo(subKeyInfo); - } - } - } - - private void addKeyLocationInfo(OmKeyLocationInfo subKeyInfo) - throws IOException { - Preconditions.checkNotNull(subKeyInfo.getPipeline()); - UserGroupInformation.getCurrentUser().addToken(subKeyInfo.getToken()); - BlockOutputStreamEntry.Builder builder = - new BlockOutputStreamEntry.Builder() - .setBlockID(subKeyInfo.getBlockID()) - .setKey(keyArgs.getKeyName()) - .setXceiverClientManager(xceiverClientManager) - .setPipeline(subKeyInfo.getPipeline()) - .setRequestId(requestID) - .setChunkSize(chunkSize) - .setLength(subKeyInfo.getLength()) - .setStreamBufferFlushSize(streamBufferFlushSize) - .setStreamBufferMaxSize(streamBufferMaxSize) - .setWatchTimeout(watchTimeout) - .setbufferPool(bufferPool) - .setChecksumType(checksumType) - .setBytesPerChecksum(bytesPerChecksum) - .setToken(subKeyInfo.getToken()); - streamEntries.add(builder.build()); - } - - public List getLocationInfoList() { - List locationInfoList = new ArrayList<>(); - for (BlockOutputStreamEntry streamEntry : streamEntries) { - long length = streamEntry.getCurrentPosition(); - - // Commit only those blocks to OzoneManager which are not empty - if (length != 0) { - OmKeyLocationInfo info = - new OmKeyLocationInfo.Builder().setBlockID(streamEntry.getBlockID()) - .setLength(streamEntry.getCurrentPosition()).setOffset(0) - .setToken(streamEntry.getToken()) - .setPipeline(streamEntry.getPipeline()).build(); - locationInfoList.add(info); - } - if (LOG.isDebugEnabled()) { - LOG.debug( - "block written " + streamEntry.getBlockID() + ", length " + length - + " bcsID " + streamEntry.getBlockID() - .getBlockCommitSequenceId()); - } - } - return locationInfoList; - } - - /** - * Discards the subsequent pre allocated blocks and removes the streamEntries - * from the streamEntries list for the container which is closed. - * @param containerID id of the closed container - * @param pipelineId id of the associated pipeline - */ - void discardPreallocatedBlocks(long containerID, PipelineID pipelineId) { - // currentStreamIndex < streamEntries.size() signifies that, there are still - // pre allocated blocks available. - - // This will be called only to discard the next subsequent unused blocks - // in the streamEntryList. 
- if (currentStreamIndex + 1 < streamEntries.size()) { - ListIterator streamEntryIterator = - streamEntries.listIterator(currentStreamIndex + 1); - while (streamEntryIterator.hasNext()) { - BlockOutputStreamEntry streamEntry = streamEntryIterator.next(); - Preconditions.checkArgument(streamEntry.getCurrentPosition() == 0); - if ((pipelineId != null && streamEntry.getPipeline().getId() - .equals(pipelineId)) || (containerID != -1 - && streamEntry.getBlockID().getContainerID() == containerID)) { - streamEntryIterator.remove(); - } - } - } - } - - List getStreamEntries() { - return streamEntries; - } - - XceiverClientManager getXceiverClientManager() { - return xceiverClientManager; - } - - String getKeyName() { - return keyArgs.getKeyName(); - } - - long getKeyLength() { - return streamEntries.stream().mapToLong(e -> e.getCurrentPosition()).sum(); - } - /** - * Contact OM to get a new block. Set the new block with the index (e.g. - * first block has index = 0, second has index = 1 etc.) - * - * The returned block is made to new BlockOutputStreamEntry to write. - * - * @throws IOException - */ - private void allocateNewBlock() throws IOException { - OmKeyLocationInfo subKeyInfo = - omClient.allocateBlock(keyArgs, openID, excludeList); - addKeyLocationInfo(subKeyInfo); - } - - - void commitKey(long offset) throws IOException { - if (keyArgs != null) { - // in test, this could be null - long length = getKeyLength(); - Preconditions.checkArgument(offset == length); - keyArgs.setDataSize(length); - keyArgs.setLocationInfoList(getLocationInfoList()); - // When the key is multipart upload part file upload, we should not - // commit the key, as this is not an actual key, this is a just a - // partial key of a large file. - if (keyArgs.getIsMultipartKey()) { - commitUploadPartInfo = - omClient.commitMultipartUploadPart(keyArgs, openID); - } else { - omClient.commitKey(keyArgs, openID); - } - } else { - LOG.warn("Closing KeyOutputStream, but key args is null"); - } - } - - public BlockOutputStreamEntry getCurrentStreamEntry() { - if (streamEntries.isEmpty() || streamEntries.size() <= currentStreamIndex) { - return null; - } else { - return streamEntries.get(currentStreamIndex); - } - } - - BlockOutputStreamEntry allocateBlockIfNeeded() throws IOException { - BlockOutputStreamEntry streamEntry = getCurrentStreamEntry(); - if (streamEntry != null && streamEntry.isClosed()) { - // a stream entry gets closed either by : - // a. If the stream gets full - // b. it has encountered an exception - currentStreamIndex++; - } - if (streamEntries.size() <= currentStreamIndex) { - Preconditions.checkNotNull(omClient); - // allocate a new block, if a exception happens, log an error and - // throw exception to the caller directly, and the write fails. - int succeededAllocates = 0; - try { - allocateNewBlock(); - succeededAllocates += 1; - } catch (IOException ioe) { - LOG.error("Try to allocate more blocks for write failed, already " - + "allocated {} blocks for this write.", succeededAllocates, ioe); - throw ioe; - } - } - // in theory, this condition should never violate due the check above - // still do a sanity check. 
- Preconditions.checkArgument(currentStreamIndex < streamEntries.size()); - BlockOutputStreamEntry current = streamEntries.get(currentStreamIndex); - return current; - } - - long computeBufferData() { - return bufferPool.computeBufferData(); - } - - void cleanup() { - if (excludeList != null) { - excludeList.clear(); - excludeList = null; - } - if (bufferPool != null) { - bufferPool.clearBufferPool(); - } - - if (streamEntries != null) { - streamEntries.clear(); - } - } - - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return commitUploadPartInfo; - } - - public ExcludeList getExcludeList() { - return excludeList; - } - - public long getStreamBufferMaxSize() { - return streamBufferMaxSize; - } - - boolean isEmpty() { - return streamEntries.isEmpty(); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java deleted file mode 100644 index ecbb3290a7d..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ /dev/null @@ -1,294 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.io; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -/** - * Maintaining a list of BlockInputStream. Read based on offset. - */ -public class KeyInputStream extends InputStream implements Seekable { - - private static final Logger LOG = - LoggerFactory.getLogger(KeyInputStream.class); - - private static final int EOF = -1; - - private String key; - private long length = 0; - private boolean closed = false; - - // List of BlockInputStreams, one for each block in the key - private final List blockStreams; - - // blockOffsets[i] stores the index of the first data byte in - // blockStream w.r.t the key data. - // For example, let’s say the block size is 200 bytes and block[0] stores - // data from indices 0 - 199, block[1] from indices 200 - 399 and so on. - // Then, blockOffset[0] = 0 (the offset of the first byte of data in - // block[0]), blockOffset[1] = 200 and so on. 
- private long[] blockOffsets = null; - - // Index of the blockStream corresponding to the current position of the - // KeyInputStream i.e. offset of the data to be read next - private int blockIndex; - - // Tracks the blockIndex corresponding to the last seeked position so that it - // can be reset if a new position is seeked. - private int blockIndexOfPrevPosition; - - public KeyInputStream() { - blockStreams = new ArrayList<>(); - blockIndex = 0; - } - - /** - * For each block in keyInfo, add a BlockInputStream to blockStreams. - */ - public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, - XceiverClientManager xceiverClientManager, - boolean verifyChecksum) { - List keyLocationInfos = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly(); - - KeyInputStream keyInputStream = new KeyInputStream(); - keyInputStream.initialize(keyInfo.getKeyName(), keyLocationInfos, - xceiverClientManager, verifyChecksum); - - return new LengthInputStream(keyInputStream, keyInputStream.length); - } - - private synchronized void initialize(String keyName, - List blockInfos, - XceiverClientManager xceiverClientManager, - boolean verifyChecksum) { - this.key = keyName; - this.blockOffsets = new long[blockInfos.size()]; - long keyLength = 0; - for (int i = 0; i < blockInfos.size(); i++) { - OmKeyLocationInfo omKeyLocationInfo = blockInfos.get(i); - if (LOG.isDebugEnabled()) { - LOG.debug("Adding stream for accessing {}. The stream will be " + - "initialized later.", omKeyLocationInfo); - } - - addStream(omKeyLocationInfo, xceiverClientManager, - verifyChecksum); - - this.blockOffsets[i] = keyLength; - keyLength += omKeyLocationInfo.getLength(); - } - this.length = keyLength; - } - - /** - * Append another BlockInputStream to the end of the list. Note that the - * BlockInputStream is only created here and not initialized. The - * BlockInputStream is initialized when a read operation is performed on - * the block for the first time. - */ - private synchronized void addStream(OmKeyLocationInfo blockInfo, - XceiverClientManager xceiverClientMngr, - boolean verifyChecksum) { - blockStreams.add(new BlockInputStream(blockInfo.getBlockID(), - blockInfo.getLength(), blockInfo.getPipeline(), blockInfo.getToken(), - verifyChecksum, xceiverClientMngr)); - } - - @VisibleForTesting - public void addStream(BlockInputStream blockInputStream) { - blockStreams.add(blockInputStream); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read() throws IOException { - byte[] buf = new byte[1]; - if (read(buf, 0, 1) == EOF) { - return EOF; - } - return Byte.toUnsignedInt(buf[0]); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized int read(byte[] b, int off, int len) throws IOException { - checkOpen(); - if (b == null) { - throw new NullPointerException(); - } - if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return 0; - } - int totalReadLen = 0; - while (len > 0) { - // if we are at the last block and have read the entire block, return - if (blockStreams.size() == 0 || - (blockStreams.size() - 1 <= blockIndex && - blockStreams.get(blockIndex) - .getRemaining() == 0)) { - return totalReadLen == 0 ? 
EOF : totalReadLen; - } - - // Get the current blockStream and read data from it - BlockInputStream current = blockStreams.get(blockIndex); - int numBytesToRead = Math.min(len, (int)current.getRemaining()); - int numBytesRead = current.read(b, off, numBytesToRead); - if (numBytesRead != numBytesToRead) { - // This implies that there is either data loss or corruption in the - // chunk entries. Even EOF in the current stream would be covered in - // this case. - throw new IOException(String.format( - "Inconsistent read for blockID=%s length=%d numBytesRead=%d", - current.getBlockID(), current.getLength(), numBytesRead)); - } - totalReadLen += numBytesRead; - off += numBytesRead; - len -= numBytesRead; - if (current.getRemaining() <= 0 && - ((blockIndex + 1) < blockStreams.size())) { - blockIndex += 1; - } - } - return totalReadLen; - } - - /** - * Seeks the KeyInputStream to the specified position. This involves 2 steps: - * 1. Updating the blockIndex to the blockStream corresponding to the - * seeked position. - * 2. Seeking the corresponding blockStream to the adjusted position. - * - * For example, let’s say the block size is 200 bytes and block[0] stores - * data from indices 0 - 199, block[1] from indices 200 - 399 and so on. - * Let’s say we seek to position 240. In the first step, the blockIndex - * would be updated to 1 as indices 200 - 399 reside in blockStream[1]. In - * the second step, the blockStream[1] would be seeked to position 40 (= - * 240 - blockOffset[1] (= 200)). - */ - @Override - public synchronized void seek(long pos) throws IOException { - checkOpen(); - if (pos < 0 || pos >= length) { - if (pos == 0) { - // It is possible for length and pos to be zero in which case - // seek should return instead of throwing exception - return; - } - throw new EOFException( - "EOF encountered at pos: " + pos + " for key: " + key); - } - - // 1. Update the blockIndex - if (blockIndex >= blockStreams.size()) { - blockIndex = Arrays.binarySearch(blockOffsets, pos); - } else if (pos < blockOffsets[blockIndex]) { - blockIndex = - Arrays.binarySearch(blockOffsets, 0, blockIndex, pos); - } else if (pos >= blockOffsets[blockIndex] + blockStreams - .get(blockIndex).getLength()) { - blockIndex = Arrays - .binarySearch(blockOffsets, blockIndex + 1, - blockStreams.size(), pos); - } - if (blockIndex < 0) { - // Binary search returns -insertionPoint - 1 if element is not present - // in the array. insertionPoint is the point at which element would be - // inserted in the sorted array. We need to adjust the blockIndex - // accordingly so that blockIndex = insertionPoint - 1 - blockIndex = -blockIndex - 2; - } - - // Reset the previous blockStream's position - blockStreams.get(blockIndexOfPrevPosition).resetPosition(); - - // 2. Seek the blockStream to the adjusted position - blockStreams.get(blockIndex).seek(pos - blockOffsets[blockIndex]); - blockIndexOfPrevPosition = blockIndex; - } - - @Override - public synchronized long getPos() throws IOException { - return length == 0 ? 0 : blockOffsets[blockIndex] + - blockStreams.get(blockIndex).getPos(); - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public int available() throws IOException { - checkOpen(); - long remaining = length - getPos(); - return remaining <= Integer.MAX_VALUE ? 
(int) remaining : Integer.MAX_VALUE; - } - - @Override - public void close() throws IOException { - closed = true; - for (BlockInputStream blockStream : blockStreams) { - blockStream.close(); - } - } - - /** - * Verify that the input stream is open. Non blocking; this gives - * the last state of the volatile {@link #closed} field. - * @throws IOException if the connection is closed. - */ - private void checkOpen() throws IOException { - if (closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + key); - } - } - - @VisibleForTesting - public synchronized int getCurrentStreamIndex() { - return blockIndex; - } - - @VisibleForTesting - public long getRemainingOfIndex(int index) throws IOException { - return blockStreams.get(index).getRemaining(); - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java deleted file mode 100644 index fd503c344d9..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ /dev/null @@ -1,629 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.client.io; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ozone.om.helpers.*; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.ratis.protocol.AlreadyClosedException; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.io.InterruptedIOException; -import java.io.OutputStream; -import java.util.List; -import java.util.Collection; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * Maintaining a list of BlockInputStream. Write based on offset. - * - * Note that this may write to multiple containers in one write call. In case - * that first container succeeded but later ones failed, the succeeded writes - * are not rolled back. - * - * TODO : currently not support multi-thread access. - */ -public class KeyOutputStream extends OutputStream { - - /** - * Defines stream action while calling handleFlushOrClose. - */ - enum StreamAction { - FLUSH, CLOSE, FULL - } - - public static final Logger LOG = - LoggerFactory.getLogger(KeyOutputStream.class); - - private boolean closed; - private FileEncryptionInfo feInfo; - private final Map, RetryPolicy> retryPolicyMap; - private int retryCount; - private long offset; - private final BlockOutputStreamEntryPool blockOutputStreamEntryPool; - - /** - * A constructor for testing purpose only. 
- */ - @VisibleForTesting - public KeyOutputStream() { - closed = false; - this.retryPolicyMap = HddsClientUtils.getExceptionList() - .stream() - .collect(Collectors.toMap(Function.identity(), - e -> RetryPolicies.TRY_ONCE_THEN_FAIL)); - retryCount = 0; - offset = 0; - blockOutputStreamEntryPool = new BlockOutputStreamEntryPool(); - } - - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); - } - - @VisibleForTesting - public XceiverClientManager getXceiverClientManager() { - return blockOutputStreamEntryPool.getXceiverClientManager(); - } - - @VisibleForTesting - public List getLocationInfoList() { - return blockOutputStreamEntryPool.getLocationInfoList(); - } - - @VisibleForTesting - public int getRetryCount() { - return retryCount; - } - - @SuppressWarnings("parameternumber") - public KeyOutputStream(OpenKeySession handler, - XceiverClientManager xceiverClientManager, - OzoneManagerProtocol omClient, int chunkSize, - String requestId, ReplicationFactor factor, ReplicationType type, - long bufferFlushSize, long bufferMaxSize, long size, long watchTimeout, - ChecksumType checksumType, int bytesPerChecksum, - String uploadID, int partNumber, boolean isMultipart, - int maxRetryCount, long retryInterval) { - OmKeyInfo info = handler.getKeyInfo(); - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool(omClient, chunkSize, requestId, factor, - type, bufferFlushSize, bufferMaxSize, size, watchTimeout, - checksumType, bytesPerChecksum, uploadID, partNumber, isMultipart, - info, xceiverClientManager, handler.getId()); - // Retrieve the file encryption key info, null if file is not in - // encrypted bucket. - this.feInfo = info.getFileEncryptionInfo(); - this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException( - maxRetryCount, retryInterval); - this.retryCount = 0; - } - - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); - } - - @Override - public void write(int b) throws IOException { - byte[] buf = new byte[1]; - buf[0] = (byte) b; - write(buf, 0, 1); - } - - /** - * Try to write the bytes sequence b[off:off+len) to streams. - * - * NOTE: Throws exception if the data could not fit into the remaining space. - * In which case nothing will be written. - * TODO:May need to revisit this behaviour. 
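The addPreallocateBlocks() javadoc above describes keeping only the blocks allocated in the current open session and skipping blocks carried over from earlier key versions. A minimal sketch of that filtering (not taken from the deleted sources; OmKeyLocationInfoGroup#getLocationList() and OmKeyLocationInfo#getCreateVersion() are assumed accessor names):

    // Illustrative sketch only; not part of the original KeyOutputStream.
    // Keep just the blocks allocated in this open version; older-version blocks
    // must not receive new writes.
    List<OmKeyLocationInfo> freshBlocks = version.getLocationList().stream()
        .filter(block -> block.getCreateVersion() == openVersion)
        .collect(Collectors.toList());
    // Each remaining block would then become a stream entry in the
    // BlockOutputStreamEntryPool.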
- * - * @param b byte data - * @param off starting offset - * @param len length to write - * @throws IOException - */ - @Override - public void write(byte[] b, int off, int len) - throws IOException { - checkNotClosed(); - if (b == null) { - throw new NullPointerException(); - } - if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length) - || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } - if (len == 0) { - return; - } - handleWrite(b, off, len, false); - } - - private void handleWrite(byte[] b, int off, long len, boolean retry) - throws IOException { - while (len > 0) { - try { - BlockOutputStreamEntry current = - blockOutputStreamEntryPool.allocateBlockIfNeeded(); - // length(len) will be in int range if the call is happening through - // write API of blockOutputStream. Length can be in long range if it - // comes via Exception path. - int writeLen = Math.min((int) len, (int) current.getRemaining()); - long currentPos = current.getWrittenDataLength(); - try { - if (retry) { - current.writeOnRetry(len); - } else { - current.write(b, off, writeLen); - offset += writeLen; - } - } catch (IOException ioe) { - // for the current iteration, totalDataWritten - currentPos gives the - // amount of data already written to the buffer - - // In the retryPath, the total data to be written will always be equal - // to or less than the max length of the buffer allocated. - // The len specified here is the combined sum of the data length of - // the buffers - Preconditions.checkState(!retry || len <= blockOutputStreamEntryPool - .getStreamBufferMaxSize()); - int dataWritten = (int) (current.getWrittenDataLength() - currentPos); - writeLen = retry ? (int) len : dataWritten; - // In retry path, the data written is already accounted in offset. - if (!retry) { - offset += writeLen; - } - LOG.debug("writeLen {}, total len {}", writeLen, len); - handleException(current, ioe); - } - if (current.getRemaining() <= 0) { - // since the current block is already written close the stream. - handleFlushOrClose(StreamAction.FULL); - } - len -= writeLen; - off += writeLen; - } catch (Exception e) { - markStreamClosed(); - throw e; - } - } - } - - /** - * It performs following actions : - * a. Updates the committed length at datanode for the current stream in - * datanode. - * b. Reads the data from the underlying buffer and writes it the next stream. - * - * @param streamEntry StreamEntry - * @param exception actual exception that occurred - * @throws IOException Throws IOException if Write fails - */ - private void handleException(BlockOutputStreamEntry streamEntry, - IOException exception) throws IOException { - Throwable t = HddsClientUtils.checkForException(exception); - Preconditions.checkNotNull(t); - boolean retryFailure = checkForRetryFailure(t); - boolean containerExclusionException = false; - if (!retryFailure) { - containerExclusionException = checkIfContainerToExclude(t); - } - Pipeline pipeline = streamEntry.getPipeline(); - PipelineID pipelineId = pipeline.getId(); - long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength(); - //set the correct length for the current stream - streamEntry.setCurrentPosition(totalSuccessfulFlushedData); - long bufferedDataLen = blockOutputStreamEntryPool.computeBufferData(); - if (containerExclusionException) { - LOG.debug( - "Encountered exception {}. 
The last committed block length is {}, " - + "uncommitted data length is {} retry count {}", exception, - totalSuccessfulFlushedData, bufferedDataLen, retryCount); - } else { - LOG.warn( - "Encountered exception {} on the pipeline {}. " - + "The last committed block length is {}, " - + "uncommitted data length is {} retry count {}", exception, - pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount); - } - Preconditions.checkArgument( - bufferedDataLen <= blockOutputStreamEntryPool.getStreamBufferMaxSize()); - Preconditions.checkArgument( - offset - blockOutputStreamEntryPool.getKeyLength() == bufferedDataLen); - long containerId = streamEntry.getBlockID().getContainerID(); - Collection failedServers = streamEntry.getFailedServers(); - Preconditions.checkNotNull(failedServers); - ExcludeList excludeList = blockOutputStreamEntryPool.getExcludeList(); - if (!failedServers.isEmpty()) { - excludeList.addDatanodes(failedServers); - } - - // if the container needs to be excluded , add the container to the - // exclusion list , otherwise add the pipeline to the exclusion list - if (containerExclusionException) { - excludeList.addConatinerId(ContainerID.valueof(containerId)); - } else { - excludeList.addPipeline(pipelineId); - } - // just clean up the current stream. - streamEntry.cleanup(retryFailure); - - // discard all subsequent blocks the containers and pipelines which - // are in the exclude list so that, the very next retry should never - // write data on the closed container/pipeline - if (containerExclusionException) { - // discard subsequent pre allocated blocks from the streamEntries list - // from the closed container - blockOutputStreamEntryPool - .discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(), - null); - } else { - // In case there is timeoutException or Watch for commit happening over - // majority or the client connection failure to the leader in the - // pipeline, just discard all the pre allocated blocks on this pipeline. - // Next block allocation will happen with excluding this specific pipeline - // This will ensure if 2 way commit happens , it cannot span over multiple - // blocks - blockOutputStreamEntryPool - .discardPreallocatedBlocks(-1, pipelineId); - } - if (bufferedDataLen > 0) { - // If the data is still cached in the underlying stream, we need to - // allocate new block and write this data in the datanode. - handleRetry(exception, bufferedDataLen); - // reset the retryCount after handling the exception - retryCount = 0; - } - } - - private void markStreamClosed() { - blockOutputStreamEntryPool.cleanup(); - closed = true; - } - - private void handleRetry(IOException exception, long len) throws IOException { - RetryPolicy retryPolicy = retryPolicyMap - .get(HddsClientUtils.checkForException(exception).getClass()); - if (retryPolicy == null) { - retryPolicy = retryPolicyMap.get(Exception.class); - } - RetryPolicy.RetryAction action; - try { - action = retryPolicy.shouldRetry(exception, retryCount, 0, true); - } catch (Exception e) { - throw e instanceof IOException ? (IOException) e : new IOException(e); - } - if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) { - String msg = ""; - if (action.reason != null) { - msg = "Retry request failed. 
" + action.reason; - LOG.error(msg, exception); - } - throw new IOException(msg, exception); - } - - // Throw the exception if the thread is interrupted - if (Thread.currentThread().isInterrupted()) { - LOG.warn("Interrupted while trying for retry"); - throw exception; - } - Preconditions.checkArgument( - action.action == RetryPolicy.RetryAction.RetryDecision.RETRY); - if (action.delayMillis > 0) { - try { - Thread.sleep(action.delayMillis); - } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted: action=" + action + ", retry policy=" + retryPolicy) - .initCause(e); - } - } - retryCount++; - LOG.trace("Retrying Write request. Already tried " + retryCount - + " time(s); retry policy is " + retryPolicy); - handleWrite(null, 0, len, true); - } - - /** - * Checks if the provided exception signifies retry failure in ratis client. - * In case of retry failure, ratis client throws RaftRetryFailureException - * and all succeeding operations are failed with AlreadyClosedException. - */ - private boolean checkForRetryFailure(Throwable t) { - return t instanceof RaftRetryFailureException - || t instanceof AlreadyClosedException; - } - - // Every container specific exception from datatnode will be seen as - // StorageContainerException - private boolean checkIfContainerToExclude(Throwable t) { - return t instanceof StorageContainerException; - } - - @Override - public void flush() throws IOException { - checkNotClosed(); - handleFlushOrClose(StreamAction.FLUSH); - } - - /** - * Close or Flush the latest outputStream depending upon the action. - * This function gets called when while write is going on, the current stream - * gets full or explicit flush or close request is made by client. when the - * stream gets full and we try to close the stream , we might end up hitting - * an exception in the exception handling path, we write the data residing in - * in the buffer pool to a new Block. In cases, as such, when the data gets - * written to new stream , it will be at max half full. In such cases, we - * should just write the data and not close the stream as the block won't be - * completely full. - * - * @param op Flag which decides whether to call close or flush on the - * outputStream. - * @throws IOException In case, flush or close fails with exception. - */ - private void handleFlushOrClose(StreamAction op) throws IOException { - if (blockOutputStreamEntryPool.isEmpty()) { - return; - } - while (true) { - try { - BlockOutputStreamEntry entry = - blockOutputStreamEntryPool.getCurrentStreamEntry(); - if (entry != null) { - try { - Collection failedServers = - entry.getFailedServers(); - // failed servers can be null in case there is no data written in - // the stream - if (failedServers != null && !failedServers.isEmpty()) { - blockOutputStreamEntryPool.getExcludeList() - .addDatanodes(failedServers); - } - switch (op) { - case CLOSE: - entry.close(); - break; - case FULL: - if (entry.getRemaining() == 0) { - entry.close(); - } - break; - case FLUSH: - entry.flush(); - break; - default: - throw new IOException("Invalid Operation"); - } - } catch (IOException ioe) { - handleException(entry, ioe); - continue; - } - } - break; - } catch (Exception e) { - markStreamClosed(); - throw e; - } - } - } - - /** - * Commit the key to OM, this will add the blocks as the new key blocks. 
- * - * @throws IOException - */ - @Override - public void close() throws IOException { - if (closed) { - return; - } - closed = true; - try { - handleFlushOrClose(StreamAction.CLOSE); - blockOutputStreamEntryPool.commitKey(offset); - } catch (IOException ioe) { - throw ioe; - } finally { - blockOutputStreamEntryPool.cleanup(); - } - } - - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return blockOutputStreamEntryPool.getCommitUploadPartInfo(); - } - - public FileEncryptionInfo getFileEncryptionInfo() { - return feInfo; - } - - @VisibleForTesting - public ExcludeList getExcludeList() { - return blockOutputStreamEntryPool.getExcludeList(); - } - - /** - * Builder class of KeyOutputStream. - */ - public static class Builder { - private OpenKeySession openHandler; - private XceiverClientManager xceiverManager; - private OzoneManagerProtocol omClient; - private int chunkSize; - private String requestID; - private ReplicationType type; - private ReplicationFactor factor; - private long streamBufferFlushSize; - private long streamBufferMaxSize; - private long blockSize; - private long watchTimeout; - private ChecksumType checksumType; - private int bytesPerChecksum; - private String multipartUploadID; - private int multipartNumber; - private boolean isMultipartKey; - private int maxRetryCount; - private long retryInterval; - - public Builder setMultipartUploadID(String uploadID) { - this.multipartUploadID = uploadID; - return this; - } - - public Builder setMultipartNumber(int partNumber) { - this.multipartNumber = partNumber; - return this; - } - - public Builder setHandler(OpenKeySession handler) { - this.openHandler = handler; - return this; - } - - public Builder setXceiverClientManager(XceiverClientManager manager) { - this.xceiverManager = manager; - return this; - } - - public Builder setOmClient(OzoneManagerProtocol client) { - this.omClient = client; - return this; - } - - public Builder setChunkSize(int size) { - this.chunkSize = size; - return this; - } - - public Builder setRequestID(String id) { - this.requestID = id; - return this; - } - - public Builder setType(ReplicationType replicationType) { - this.type = replicationType; - return this; - } - - public Builder setFactor(ReplicationFactor replicationFactor) { - this.factor = replicationFactor; - return this; - } - - public Builder setStreamBufferFlushSize(long size) { - this.streamBufferFlushSize = size; - return this; - } - - public Builder setStreamBufferMaxSize(long size) { - this.streamBufferMaxSize = size; - return this; - } - - public Builder setBlockSize(long size) { - this.blockSize = size; - return this; - } - - public Builder setWatchTimeout(long timeout) { - this.watchTimeout = timeout; - return this; - } - - public Builder setChecksumType(ChecksumType cType) { - this.checksumType = cType; - return this; - } - - public Builder setBytesPerChecksum(int bytes) { - this.bytesPerChecksum = bytes; - return this; - } - - public Builder setIsMultipartKey(boolean isMultipart) { - this.isMultipartKey = isMultipart; - return this; - } - - public Builder setMaxRetryCount(int maxCount) { - this.maxRetryCount = maxCount; - return this; - } - - public Builder setRetryInterval(long retryIntervalInMS) { - this.retryInterval = retryIntervalInMS; - return this; - } - - public KeyOutputStream build() { - return new KeyOutputStream(openHandler, xceiverManager, omClient, - chunkSize, requestID, factor, type, streamBufferFlushSize, - streamBufferMaxSize, blockSize, watchTimeout, checksumType, - bytesPerChecksum, 
multipartUploadID, multipartNumber, isMultipartKey, - maxRetryCount, retryInterval); - } - } - - /** - * Verify that the output stream is open. Non blocking; this gives - * the last state of the volatile {@link #closed} field. - * @throws IOException if the connection is closed. - */ - private void checkNotClosed() throws IOException { - if (closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " - + blockOutputStreamEntryPool.getKeyName()); - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java deleted file mode 100644 index a69740f0795..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import java.io.IOException; -import java.io.InputStream; - -/** - * OzoneInputStream is used to read data from Ozone. - * It uses {@link KeyInputStream} for reading the data. - */ -public class OzoneInputStream extends InputStream { - - private final InputStream inputStream; - - /** - * Constructs OzoneInputStream with KeyInputStream. - * - * @param inputStream - */ - public OzoneInputStream(InputStream inputStream) { - this.inputStream = inputStream; - } - - @Override - public int read() throws IOException { - return inputStream.read(); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return inputStream.read(b, off, len); - } - - @Override - public synchronized void close() throws IOException { - inputStream.close(); - } - - @Override - public int available() throws IOException { - return inputStream.available(); - } - - public InputStream getInputStream() { - return inputStream; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java deleted file mode 100644 index e4a7d6a100c..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * OzoneOutputStream is used to write data into Ozone. - * It uses SCM's {@link KeyOutputStream} for writing the data. - */ -public class OzoneOutputStream extends OutputStream { - - private final OutputStream outputStream; - - /** - * Constructs OzoneOutputStream with KeyOutputStream. - * - * @param outputStream - */ - public OzoneOutputStream(OutputStream outputStream) { - this.outputStream = outputStream; - } - - @Override - public void write(int b) throws IOException { - outputStream.write(b); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); - } - - @Override - public synchronized void flush() throws IOException { - outputStream.flush(); - } - - @Override - public synchronized void close() throws IOException { - //commitKey can be done here, if needed. - outputStream.close(); - } - - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - if (outputStream instanceof KeyOutputStream) { - return ((KeyOutputStream) outputStream).getCommitUploadPartInfo(); - } - // Otherwise return null. - return null; - } - - public OutputStream getOutputStream() { - return outputStream; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java deleted file mode 100644 index 493ece8074e..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.io; - -/** - * This package contains Ozone I/O classes. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index 7e2591a20ee..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * This package contains Ozone Client classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java deleted file mode 100644 index 1b8f5bb6884..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.protocol; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; - -import java.io.IOException; -import java.net.URI; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.token.Token; - -/** - * An implementer of this interface is capable of connecting to Ozone Cluster - * and perform client operations. 
The protocol used for communication is - * determined by the implementation class specified by - * property ozone.client.protocol. The build-in implementation - * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and - * {@link org.apache.hadoop.ozone.client.rest.RestClient} for REST. - */ -@KerberosInfo(serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY) -public interface ClientProtocol { - - /** - * Creates a new Volume. - * @param volumeName Name of the Volume - * @throws IOException - */ - void createVolume(String volumeName) - throws IOException; - - /** - * Creates a new Volume with properties set in VolumeArgs. - * @param volumeName Name of the Volume - * @param args Properties to be set for the Volume - * @throws IOException - */ - void createVolume(String volumeName, VolumeArgs args) - throws IOException; - - /** - * Sets the owner of volume. - * @param volumeName Name of the Volume - * @param owner to be set for the Volume - * @throws IOException - */ - void setVolumeOwner(String volumeName, String owner) throws IOException; - - /** - * Set Volume Quota. - * @param volumeName Name of the Volume - * @param quota Quota to be set for the Volume - * @throws IOException - */ - void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException; - - /** - * Returns {@link OzoneVolume}. - * @param volumeName Name of the Volume - * @return {@link OzoneVolume} - * @throws IOException - * */ - OzoneVolume getVolumeDetails(String volumeName) - throws IOException; - - /** - * Checks if a Volume exists and the user with a role specified has access - * to the Volume. - * @param volumeName Name of the Volume - * @param acl requested acls which needs to be checked for access - * @return Boolean - True if the user with a role can access the volume. - * This is possible for owners of the volume and admin users - * @throws IOException - */ - boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException; - - /** - * Deletes an empty Volume. - * @param volumeName Name of the Volume - * @throws IOException - */ - void deleteVolume(String volumeName) throws IOException; - - /** - * Lists all volumes in the cluster that matches the volumePrefix, - * size of the returned list depends on maxListResult. If volume prefix - * is null, returns all the volumes. The caller has to make multiple calls - * to read all volumes. - * - * @param volumePrefix Volume prefix to match - * @param prevVolume Starting point of the list, this volume is excluded - * @param maxListResult Max number of volumes to return. - * @return {@code List} - * @throws IOException - */ - List listVolumes(String volumePrefix, String prevVolume, - int maxListResult) - throws IOException; - - /** - * Lists all volumes in the cluster that are owned by the specified - * user and matches the volumePrefix, size of the returned list depends on - * maxListResult. If the user is null, return volumes owned by current user. - * If volume prefix is null, returns all the volumes. The caller has to make - * multiple calls to read all volumes. - * - * @param user User Name - * @param volumePrefix Volume prefix to match - * @param prevVolume Starting point of the list, this volume is excluded - * @param maxListResult Max number of volumes to return. - * @return {@code List} - * @throws IOException - */ - List listVolumes(String user, String volumePrefix, - String prevVolume, int maxListResult) - throws IOException; - - /** - * Creates a new Bucket in the Volume. 
- * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void createBucket(String volumeName, String bucketName) - throws IOException; - - /** - * Creates a new Bucket in the Volume, with properties set in BucketArgs. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param bucketArgs Bucket Arguments - * @throws IOException - */ - void createBucket(String volumeName, String bucketName, - BucketArgs bucketArgs) - throws IOException; - - - /** - * Enables or disables Bucket Versioning. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param versioning True to enable Versioning, False to disable. - * @throws IOException - */ - void setBucketVersioning(String volumeName, String bucketName, - Boolean versioning) - throws IOException; - - /** - * Sets the Storage Class of a Bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param storageType StorageType to be set - * @throws IOException - */ - void setBucketStorageType(String volumeName, String bucketName, - StorageType storageType) - throws IOException; - - /** - * Deletes a bucket if it is empty. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void deleteBucket(String volumeName, String bucketName) - throws IOException; - - /** - * True if the bucket exists and user has read access - * to the bucket else throws Exception. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @throws IOException - */ - void checkBucketAccess(String volumeName, String bucketName) - throws IOException; - - /** - * Returns {@link OzoneBucket}. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @return {@link OzoneBucket} - * @throws IOException - */ - OzoneBucket getBucketDetails(String volumeName, String bucketName) - throws IOException; - - /** - * Returns the List of Buckets in the Volume that matches the bucketPrefix, - * size of the returned list depends on maxListResult. The caller has to make - * multiple calls to read all volumes. - * @param volumeName Name of the Volume - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Starting point of the list, this bucket is excluded - * @param maxListResult Max number of buckets to return. - * @return {@code List} - * @throws IOException - */ - List listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException; - - /** - * Writes a key in an existing bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @param size Size of the data - * @param metadata custom key value metadata - * @return {@link OzoneOutputStream} - * - */ - OzoneOutputStream createKey(String volumeName, String bucketName, - String keyName, long size, ReplicationType type, - ReplicationFactor factor, - Map metadata) - throws IOException; - - /** - * Reads a key from an existing bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @return {@link OzoneInputStream} - * @throws IOException - */ - OzoneInputStream getKey(String volumeName, String bucketName, String keyName) - throws IOException; - - - /** - * Deletes an existing key. 
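An end-to-end sketch (not taken from the deleted sources) of writing and reading back a small key through the createKey/getKey methods declared above; the replication settings and empty metadata map are illustrative only:

    // Illustrative sketch only.
    // Assumed imports: java.util.HashMap,
    //   org.apache.hadoop.hdds.client.ReplicationType,
    //   org.apache.hadoop.hdds.client.ReplicationFactor,
    //   org.apache.hadoop.io.IOUtils.
    byte[] roundTrip(ClientProtocol client, String volume, String bucket,
        String keyName, byte[] data) throws IOException {
      try (OzoneOutputStream out = client.createKey(volume, bucket, keyName,
          data.length, ReplicationType.RATIS, ReplicationFactor.THREE,
          new HashMap<>())) {
        out.write(data);                     // key becomes visible after close()
      }
      byte[] read = new byte[data.length];
      try (OzoneInputStream in = client.getKey(volume, bucket, keyName)) {
        IOUtils.readFully(in, read, 0, read.length);
      }
      return read;
    }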
- * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Name of the Key - * @throws IOException - */ - void deleteKey(String volumeName, String bucketName, String keyName) - throws IOException; - - /** - * Renames an existing key within a bucket. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param fromKeyName Name of the Key to be renamed - * @param toKeyName New name to be used for the Key - * @throws IOException - */ - void renameKey(String volumeName, String bucketName, String fromKeyName, - String toKeyName) throws IOException; - - /** - * Returns list of Keys in {Volume/Bucket} that matches the keyPrefix, - * size of the returned list depends on maxListResult. The caller has - * to make multiple calls to read all keys. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyPrefix Bucket prefix to match - * @param prevKey Starting point of the list, this key is excluded - * @param maxListResult Max number of buckets to return. - * @return {@code List} - * @throws IOException - */ - List listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, int maxListResult) - throws IOException; - - - /** - * Get OzoneKey. - * @param volumeName Name of the Volume - * @param bucketName Name of the Bucket - * @param keyName Key name - * @return {@link OzoneKey} - * @throws IOException - */ - OzoneKeyDetails getKeyDetails(String volumeName, String bucketName, - String keyName) - throws IOException; - - /** - * Creates an S3 bucket inside Ozone manager and creates the mapping needed - * to access via both S3 and Ozone. - * @param userName - S3 user name. - * @param s3BucketName - S3 bucket Name. - * @throws IOException - On failure, throws an exception like Bucket exists. - */ - void createS3Bucket(String userName, String s3BucketName) throws IOException; - - /** - * Deletes an s3 bucket and removes mapping of Ozone volume/bucket. - * @param bucketName - S3 Bucket Name. - * @throws IOException in case the bucket cannot be deleted. - */ - void deleteS3Bucket(String bucketName) throws IOException; - - - /** - * Returns the Ozone Namespace for the S3Bucket. It will return the - * OzoneVolume/OzoneBucketName. - * @param s3BucketName - S3 Bucket Name. - * @return String - The Ozone canonical name for this s3 bucket. This - * string is useful for mounting an OzoneFS. - * @throws IOException - Error is throw if the s3bucket does not exist. - */ - String getOzoneBucketMapping(String s3BucketName) throws IOException; - - /** - * Returns the corresponding Ozone volume given an S3 Bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone Volume name. - * @throws IOException - Throws if the s3Bucket does not exist. - */ - String getOzoneVolumeName(String s3BucketName) throws IOException; - - /** - * Returns the corresponding Ozone bucket name for the given S3 bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone bucket Name. - * @throws IOException - Throws if the s3bucket does not exist. - */ - String getOzoneBucketName(String s3BucketName) throws IOException; - - /** - * Returns Iterator to iterate over all buckets after prevBucket for a - * specific user. If prevBucket is null it returns an iterator to iterate over - * all the buckets of a user. The result can be restricted using bucket - * prefix, will return all buckets if bucket prefix is null. 
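The list* methods above are paged, so a caller keeps passing the last returned name as the next prevKey. A sketch of draining listKeys this way (not taken from the deleted sources; it assumes the returned elements are OzoneKey instances exposing getName()):

    // Illustrative sketch only.
    // Assumed import: java.util.ArrayList.
    List<OzoneKey> listAllKeys(ClientProtocol client, String volume,
        String bucket, String prefix) throws IOException {
      List<OzoneKey> all = new ArrayList<>();
      String prevKey = null;               // start from the beginning
      final int pageSize = 1000;
      while (true) {
        List<OzoneKey> page =
            client.listKeys(volume, bucket, prefix, prevKey, pageSize);
        all.addAll(page);
        if (page.size() < pageSize) {
          break;                           // short page: nothing further to fetch
        }
        prevKey = page.get(page.size() - 1).getName();  // excluded from next page
      }
      return all;
    }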
- * - * @param userName user name - * @param bucketPrefix Bucket prefix to match - * @param prevBucket Buckets are listed after this bucket - * @return {@code Iterator} - * @throws IOException - */ - List listS3Buckets(String userName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException; - - /** - * Close and release the resources. - */ - void close() throws IOException; - - /** - * Initiate Multipart upload. - * @param volumeName - * @param bucketName - * @param keyName - * @param type - * @param factor - * @return {@link OmMultipartInfo} - * @throws IOException - */ - OmMultipartInfo initiateMultipartUpload(String volumeName, String - bucketName, String keyName, ReplicationType type, ReplicationFactor - factor) throws IOException; - - /** - * Create a part key for a multipart upload key. - * @param volumeName - * @param bucketName - * @param keyName - * @param size - * @param partNumber - * @param uploadID - * @return OzoneOutputStream - * @throws IOException - */ - OzoneOutputStream createMultipartKey(String volumeName, String bucketName, - String keyName, long size, - int partNumber, String uploadID) - throws IOException; - - /** - * Complete Multipart upload. This will combine all the parts and make the - * key visible in ozone. - * @param volumeName - * @param bucketName - * @param keyName - * @param uploadID - * @param partsMap - * @return OmMultipartUploadCompleteInfo - * @throws IOException - */ - OmMultipartUploadCompleteInfo completeMultipartUpload(String volumeName, - String bucketName, String keyName, String uploadID, - Map partsMap) throws IOException; - - /** - * Abort Multipart upload request for the given key with given uploadID. - * @param volumeName - * @param bucketName - * @param keyName - * @param uploadID - * @throws IOException - */ - void abortMultipartUpload(String volumeName, - String bucketName, String keyName, String uploadID) throws IOException; - - /** - * Returns list of parts of a multipart upload key. - * @param volumeName - * @param bucketName - * @param keyName - * @param uploadID - * @param partNumberMarker - returns parts with part number which are greater - * than this partNumberMarker. - * @param maxParts - * @return OmMultipartUploadListParts - */ - OzoneMultipartUploadPartListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException; - - /** - * Return with the inflight multipart uploads. - */ - OzoneMultipartUploadList listMultipartUploads(String volumename, - String bucketName, String prefix) throws IOException; - - /** - * Get a valid Delegation Token. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws IOException - */ - Token getDelegationToken(Text renewer) - throws IOException; - - /** - * Renew an existing delegation token. - * - * @param token delegation token obtained earlier - * @return the new expiration time - * @throws IOException - */ - long renewDelegationToken(Token token) - throws IOException; - - /** - * Cancel an existing delegation token. - * - * @param token delegation token - * @throws IOException - */ - void cancelDelegationToken(Token token) - throws IOException; - - /** - * returns S3 Secret given kerberos user. - * @param kerberosID - * @return S3SecretValue - * @throws IOException - */ - S3SecretValue getS3Secret(String kerberosID) throws IOException; - - @VisibleForTesting - OMFailoverProxyProvider getOMProxyProvider(); - - /** - * Get KMS client provider. 
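A sketch tying together the multipart-upload methods declared above: initiate, upload each part, then complete with the part map. It is not taken from the deleted sources and assumes OmMultipartInfo#getUploadID() and OmMultipartCommitUploadPartInfo#getPartName() supply the upload id and the per-part name that completeMultipartUpload() expects:

    // Illustrative sketch only.
    // Assumed imports: java.util.TreeMap,
    //   org.apache.hadoop.hdds.client.ReplicationType,
    //   org.apache.hadoop.hdds.client.ReplicationFactor.
    void uploadInParts(ClientProtocol client, String volume, String bucket,
        String key, List<byte[]> parts) throws IOException {
      OmMultipartInfo info = client.initiateMultipartUpload(volume, bucket, key,
          ReplicationType.RATIS, ReplicationFactor.THREE);
      Map<Integer, String> partsMap = new TreeMap<>();
      int partNumber = 1;
      for (byte[] part : parts) {
        OzoneOutputStream out = client.createMultipartKey(volume, bucket, key,
            part.length, partNumber, info.getUploadID());
        out.write(part);
        out.close();                                 // the part is committed on close()
        partsMap.put(partNumber++, out.getCommitUploadPartInfo().getPartName());
      }
      client.completeMultipartUpload(volume, bucket, key, info.getUploadID(),
          partsMap);                                 // combines the parts; key becomes visible
    }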
- * @return KMS client provider. - * @throws IOException - */ - KeyProvider getKeyProvider() throws IOException; - - /** - * Get KMS client provider uri. - * @return KMS client provider uri. - * @throws IOException - */ - URI getKeyProviderUri() throws IOException; - - /** - * Get CanonicalServiceName for ozone delegation token. - * @return Canonical Service Name of ozone delegation token. - */ - String getCanonicalServiceName(); - - /** - * Get the Ozone File Status for a particular Ozone key. - * - * @param volumeName volume name. - * @param bucketName bucket name. - * @param keyName key name. - * @return OzoneFileStatus for the key. - * @throws OMException if file does not exist - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - OzoneFileStatus getOzoneFileStatus(String volumeName, String bucketName, - String keyName) throws IOException; - - /** - * Creates directory with keyName as the absolute path for the directory. - * - * @param volumeName Volume name - * @param bucketName Bucket name - * @param keyName Absolute path for the directory - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - void createDirectory(String volumeName, String bucketName, String keyName) - throws IOException; - - /** - * Creates an input stream for reading file contents. - * - * @param volumeName Volume name - * @param bucketName Bucket name - * @param keyName Absolute path of the file to be read - * @return Input stream for reading the file - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - OzoneInputStream readFile(String volumeName, String bucketName, - String keyName) throws IOException; - - /** - * Creates an output stream for writing to a file. - * - * @param volumeName Volume name - * @param bucketName Bucket name - * @param keyName Absolute path of the file to be written - * @param size Size of data to be written - * @param type Replication Type - * @param factor Replication Factor - * @param overWrite if true existing file at the location will be overwritten - * @param recursive if true file would be created even if parent directories - * do not exist - * @return Output stream for writing to the file - * @throws OMException if given key is a directory - * if file exists and isOverwrite flag is false - * if an ancestor exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - @SuppressWarnings("checkstyle:parameternumber") - OzoneOutputStream createFile(String volumeName, String bucketName, - String keyName, long size, ReplicationType type, ReplicationFactor factor, - boolean overWrite, boolean recursive) throws IOException; - - /** - * List the status for a file or a directory and its contents. - * - * @param volumeName Volume name - * @param bucketName Bucket name - * @param keyName Absolute path of the entry to be listed - * @param recursive For a directory if true all the descendants of a - * particular directory are listed - * @param startKey Key from which listing needs to start. If startKey exists - * its status is included in the final list. 
- * @param numEntries Number of entries to list from the start key - * @return list of file status - */ - List listStatus(String volumeName, String bucketName, - String keyName, boolean recursive, String startKey, long numEntries) - throws IOException; - - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * - * @throws IOException if there is error. - * */ - boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * - * @throws IOException if there is error. - * */ - boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Acls to be set for given Ozone object. This operations reset ACL for - * given object to list of ACLs provided in argument. - * @param obj Ozone object. - * @param acls List of acls. - * - * @throws IOException if there is error. - * */ - boolean setAcl(OzoneObj obj, List acls) throws IOException; - - /** - * Returns list of ACLs for given Ozone object. - * @param obj Ozone object. - * - * @throws IOException if there is error. - * */ - List getAcl(OzoneObj obj) throws IOException; - -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java deleted file mode 100644 index f4890a1e8b8..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.protocol; - -/** - * This package contains Ozone client protocol library classes. - */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java deleted file mode 100644 index 6be77709d4b..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  
You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoCodec; -import org.apache.hadoop.crypto.CryptoProtocolVersion; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.KMSUtil; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; - -/** - * KMS utility class for Ozone Data Encryption At-Rest. - */ -public final class OzoneKMSUtil { - - private static final String UTF8_CSN = StandardCharsets.UTF_8.name(); - private static final String O3_KMS_PREFIX = "ozone-kms-"; - private static String keyProviderUriKeyName = - "hadoop.security.key.provider.path"; - - private OzoneKMSUtil() { - } - - public static KeyProvider.KeyVersion decryptEncryptedDataEncryptionKey( - FileEncryptionInfo feInfo, KeyProvider keyProvider) throws IOException { - if (keyProvider == null) { - throw new IOException("No KeyProvider is configured, " + - "cannot access an encrypted file"); - } else { - EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption( - feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(), - feInfo.getEncryptedDataEncryptionKey()); - - try { - KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension - .createKeyProviderCryptoExtension(keyProvider); - return cryptoProvider.decryptEncryptedKey(ekv); - } catch (GeneralSecurityException gse) { - throw new IOException(gse); - } - } - } - - /** - * Returns a key to map ozone uri to key provider uri. - * Tasks will lookup this key to find key Provider. 
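A reader-side sketch (not taken from the deleted sources) of how a caller might combine decryptEncryptedDataEncryptionKey() above with getCryptoCodec() further below, roughly what RpcClient does for keys in encrypted buckets; rawStream is a placeholder for the underlying key stream:

    // Illustrative sketch only.
    // Assumed imports: java.io.InputStream,
    //   org.apache.hadoop.crypto.CryptoInputStream.
    InputStream decryptingStream(Configuration conf, KeyProvider provider,
        FileEncryptionInfo feInfo, InputStream rawStream) throws IOException {
      // 1. Ask the KMS to unwrap the encrypted data encryption key (EDEK).
      KeyProvider.KeyVersion dek =
          OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo, provider);
      // 2. Build a codec for the cipher suite recorded in the encryption info.
      CryptoCodec codec = OzoneKMSUtil.getCryptoCodec(conf, feInfo);
      // 3. Decrypt transparently while the caller reads.
      return new CryptoInputStream(rawStream, codec, dek.getMaterial(),
          feInfo.getIV());
    }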
- */ - public static Text getKeyProviderMapKey(URI namespaceUri) { - return new Text(O3_KMS_PREFIX + namespaceUri.getScheme() - +"://" + namespaceUri.getAuthority()); - } - - public static String bytes2String(byte[] bytes) { - return bytes2String(bytes, 0, bytes.length); - } - - private static String bytes2String(byte[] bytes, int offset, int length) { - try { - return new String(bytes, offset, length, UTF8_CSN); - } catch (UnsupportedEncodingException e) { - throw new IllegalArgumentException("UTF8 encoding is not supported", e); - } - } - - public static URI getKeyProviderUri(UserGroupInformation ugi, - URI namespaceUri, String kmsUriSrv, Configuration conf) - throws IOException { - URI keyProviderUri = null; - Credentials credentials = ugi.getCredentials(); - Text credsKey = null; - if (namespaceUri != null) { - // from ugi - credsKey = getKeyProviderMapKey(namespaceUri); - byte[] keyProviderUriBytes = credentials.getSecretKey(credsKey); - if (keyProviderUriBytes != null) { - keyProviderUri = URI.create(bytes2String(keyProviderUriBytes)); - } - } - if (keyProviderUri == null) { - // from client conf - if (kmsUriSrv == null) { - keyProviderUri = KMSUtil.getKeyProviderUri( - conf, keyProviderUriKeyName); - } else if (!kmsUriSrv.isEmpty()) { - // from om server - keyProviderUri = URI.create(kmsUriSrv); - } - } - // put back into UGI - if (keyProviderUri != null && credsKey != null) { - credentials.addSecretKey( - credsKey, DFSUtil.string2Bytes(keyProviderUri.toString())); - } - - return keyProviderUri; - } - - public static KeyProvider getKeyProvider(final Configuration conf, - final URI serverProviderUri) throws IOException{ - if (serverProviderUri == null) { - throw new IOException("KMS serverProviderUri is not configured."); - } - return KMSUtil.createKeyProviderFromUri(conf, serverProviderUri); - } - - public static CryptoProtocolVersion getCryptoProtocolVersion( - FileEncryptionInfo feInfo) throws IOException { - CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion(); - if (!CryptoProtocolVersion.supports(version)) { - throw new IOException("Client does not support specified " + - "CryptoProtocolVersion " + version.getDescription() + - " version number" + version.getVersion()); - } else { - return version; - } - } - - public static void checkCryptoProtocolVersion( - FileEncryptionInfo feInfo) throws IOException { - CryptoProtocolVersion version = feInfo.getCryptoProtocolVersion(); - if (!CryptoProtocolVersion.supports(version)) { - throw new IOException("Client does not support specified " + - "CryptoProtocolVersion " + version.getDescription() + - " version number" + version.getVersion()); - } - } - - public static CryptoCodec getCryptoCodec(Configuration conf, - FileEncryptionInfo feInfo) throws IOException { - CipherSuite suite = feInfo.getCipherSuite(); - if (suite.equals(CipherSuite.UNKNOWN)) { - throw new IOException("NameNode specified unknown CipherSuite with ID " + - suite.getUnknownValue() + ", cannot instantiate CryptoCodec."); - } else { - CryptoCodec codec = CryptoCodec.getInstance(conf, suite); - if (codec == null) { - throw new OMException("No configuration found for the cipher suite " + - suite.getConfigSuffix() + " prefixed with " + - "hadoop.security.crypto.codec.classes. Please see the" + - " example configuration hadoop.security.crypto.codec.classes." 
+ - "EXAMPLE CIPHER SUITE at core-default.xml for details.", - OMException.ResultCodes.UNKNOWN_CIPHER_SUITE); - } else { - return codec; - } - } - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java deleted file mode 100644 index 06351ab2c3d..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ /dev/null @@ -1,1177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.crypto.CryptoInputStream; -import org.apache.hadoop.crypto.CryptoOutputStream; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.LengthInputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.protocolPB - .OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.ozone.security.GDPRSymmetricKey; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.io.Text; -import org.apache.logging.log4j.util.Strings; -import org.apache.ratis.protocol.ClientId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.crypto.Cipher; -import javax.crypto.CipherInputStream; -import javax.crypto.CipherOutputStream; -import java.io.IOException; -import java.net.URI; -import java.security.InvalidKeyException; -import java.security.SecureRandom; -import java.util.*; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; - -/** - * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode - * to execute client calls. This uses RPC protocol for communication - * with the servers. - */ -public class RpcClient implements ClientProtocol { - - private static final Logger LOG = - LoggerFactory.getLogger(RpcClient.class); - - private final OzoneConfiguration conf; - private final OzoneManagerProtocol ozoneManagerClient; - private final XceiverClientManager xceiverClientManager; - private final int chunkSize; - private final ChecksumType checksumType; - private final int bytesPerChecksum; - private boolean verifyChecksum; - private final UserGroupInformation ugi; - private final ACLType userRights; - private final ACLType groupRights; - private final long streamBufferFlushSize; - private final long streamBufferMaxSize; - private final long blockSize; - private final long watchTimeout; - private final ClientId clientId = ClientId.randomId(); - private final int maxRetryCount; - private final long retryInterval; - private Text dtService; - private final boolean topologyAwareReadEnabled; - - /** - * Creates RpcClient instance with the given configuration. - * @param conf Configuration - * @param omServiceId OM HA Service ID, set this to null if not HA - * @throws IOException - */ - public RpcClient(Configuration conf, String omServiceId) throws IOException { - Preconditions.checkNotNull(conf); - this.conf = new OzoneConfiguration(conf); - this.ugi = UserGroupInformation.getCurrentUser(); - // Get default acl rights for user and group. 
- OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); - this.userRights = aclConfig.getUserDefaultRights(); - this.groupRights = aclConfig.getGroupDefaultRights(); - - this.ozoneManagerClient = TracingUtil.createProxy( - new OzoneManagerProtocolClientSideTranslatorPB( - this.conf, clientId.toString(), omServiceId, ugi), - OzoneManagerProtocol.class, conf - ); - - ServiceInfoEx serviceInfoEx = ozoneManagerClient.getServiceInfo(); - String caCertPem = null; - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - caCertPem = serviceInfoEx.getCaCertificate(); - } - - this.xceiverClientManager = new XceiverClientManager(conf, - OzoneConfiguration.of(conf).getObject(XceiverClientManager. - ScmClientConfig.class), caCertPem); - - int configuredChunkSize = (int) conf - .getStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); - if(configuredChunkSize > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) { - LOG.warn("The chunk size ({}) is not allowed to be more than" - + " the maximum size ({})," - + " resetting to the maximum size.", - configuredChunkSize, OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE); - chunkSize = OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE; - } else { - chunkSize = configuredChunkSize; - } - streamBufferFlushSize = (long) conf - .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE, - OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE_DEFAULT, - StorageUnit.BYTES); - streamBufferMaxSize = (long) conf - .getStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE, - OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT, - StorageUnit.BYTES); - blockSize = (long) conf.getStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, - OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - watchTimeout = - conf.getTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - - int configuredChecksumSize = (int) conf.getStorageSize( - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM, - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT, - StorageUnit.BYTES); - if(configuredChecksumSize < - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) { - LOG.warn("The checksum size ({}) is not allowed to be less than the " + - "minimum size ({}), resetting to the minimum size.", - configuredChecksumSize, - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE); - bytesPerChecksum = - OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; - } else { - bytesPerChecksum = configuredChecksumSize; - } - - String checksumTypeStr = conf.get( - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, - OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT); - checksumType = ChecksumType.valueOf(checksumTypeStr); - this.verifyChecksum = - conf.getBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM, - OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM_DEFAULT); - maxRetryCount = - conf.getInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, OzoneConfigKeys. 
- OZONE_CLIENT_MAX_RETRIES_DEFAULT); - retryInterval = OzoneUtils.getTimeDurationInMS(conf, - OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL, - OzoneConfigKeys.OZONE_CLIENT_RETRY_INTERVAL_DEFAULT); - dtService = getOMProxyProvider().getCurrentProxyDelegationToken(); - topologyAwareReadEnabled = conf.getBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_DEFAULT); - } - - @Override - public void createVolume(String volumeName) throws IOException { - createVolume(volumeName, VolumeArgs.newBuilder().build()); - } - - @Override - public void createVolume(String volumeName, VolumeArgs volArgs) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(volArgs); - - String admin = volArgs.getAdmin() == null ? - ugi.getUserName() : volArgs.getAdmin(); - String owner = volArgs.getOwner() == null ? - ugi.getUserName() : volArgs.getOwner(); - long quota = volArgs.getQuota() == null ? - OzoneConsts.MAX_QUOTA_IN_BYTES : - OzoneQuota.parseQuota(volArgs.getQuota()).sizeInBytes(); - List listOfAcls = new ArrayList<>(); - //User ACL - listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, userRights, ACCESS)); - //Group ACLs of the User - List userGroups = Arrays.asList(UserGroupInformation - .createRemoteUser(owner).getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS))); - //ACLs from VolumeArgs - if(volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); - } - - OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); - builder.setVolume(volumeName); - builder.setAdminName(admin); - builder.setOwnerName(owner); - builder.setQuotaInBytes(quota); - builder.addAllMetadata(volArgs.getMetadata()); - - //Remove duplicates and add ACLs - for (OzoneAcl ozoneAcl : - listOfAcls.stream().distinct().collect(Collectors.toList())) { - builder.addOzoneAcls(OzoneAcl.toProtobuf(ozoneAcl)); - } - - if (volArgs.getQuota() == null) { - LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner); - } else { - LOG.info("Creating Volume: {}, with {} as owner " - + "and quota set to {} bytes.", volumeName, owner, quota); - } - ozoneManagerClient.createVolume(builder.build()); - } - - @Override - public void setVolumeOwner(String volumeName, String owner) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(owner); - ozoneManagerClient.setOwner(volumeName, owner); - } - - @Override - public void setVolumeQuota(String volumeName, OzoneQuota quota) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - Preconditions.checkNotNull(quota); - long quotaInBytes = quota.sizeInBytes(); - ozoneManagerClient.setQuota(volumeName, quotaInBytes); - } - - @Override - public OzoneVolume getVolumeDetails(String volumeName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName); - return new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). 
- map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata()); - } - - @Override - public boolean checkVolumeAccess(String volumeName, OzoneAcl acl) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented."); - } - - @Override - public void deleteVolume(String volumeName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName); - ozoneManagerClient.deleteVolume(volumeName); - } - - @Override - public List listVolumes(String volumePrefix, String prevVolume, - int maxListResult) - throws IOException { - List volumes = ozoneManagerClient.listAllVolumes( - volumePrefix, prevVolume, maxListResult); - - return volumes.stream().map(volume -> new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(OzoneAcl::fromProtobuf).collect(Collectors.toList()))) - .collect(Collectors.toList()); - } - - @Override - public List listVolumes(String user, String volumePrefix, - String prevVolume, int maxListResult) - throws IOException { - List volumes = ozoneManagerClient.listVolumeByUser( - user, volumePrefix, prevVolume, maxListResult); - - return volumes.stream().map(volume -> new OzoneVolume( - conf, - this, - volume.getVolume(), - volume.getAdminName(), - volume.getOwnerName(), - volume.getQuotaInBytes(), - volume.getCreationTime(), - volume.getAclMap().ozoneAclGetProtobuf().stream(). - map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata())) - .collect(Collectors.toList()); - } - - @Override - public void createBucket(String volumeName, String bucketName) - throws IOException { - // Set acls of current user. - createBucket(volumeName, bucketName, - BucketArgs.newBuilder().build()); - } - - @Override - public void createBucket( - String volumeName, String bucketName, BucketArgs bucketArgs) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(bucketArgs); - - Boolean isVersionEnabled = bucketArgs.getVersioning() == null ? - Boolean.FALSE : bucketArgs.getVersioning(); - StorageType storageType = bucketArgs.getStorageType() == null ? - StorageType.DEFAULT : bucketArgs.getStorageType(); - BucketEncryptionKeyInfo bek = null; - if (bucketArgs.getEncryptionKey() != null) { - bek = new BucketEncryptionKeyInfo.Builder() - .setKeyName(bucketArgs.getEncryptionKey()).build(); - } - - List listOfAcls = getAclList(); - //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { - listOfAcls.addAll(bucketArgs.getAcls()); - } - - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setIsVersionEnabled(isVersionEnabled) - .addAllMetadata(bucketArgs.getMetadata()) - .setStorageType(storageType) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); - - if (bek != null) { - builder.setBucketEncryptionKey(bek); - } - - LOG.info("Creating Bucket: {}/{}, with Versioning {} and " + - "Storage Type set to {} and Encryption set to {} ", - volumeName, bucketName, isVersionEnabled, storageType, bek != null); - ozoneManagerClient.createBucket(builder.build()); - } - - /** - * Helper function to get default acl list for current user. 
- * - * @return listOfAcls - * */ - private List getAclList() { - return OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), - userRights, groupRights); - } - - /** - * Get a valid Delegation Token. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws IOException - */ - @Override - public Token getDelegationToken(Text renewer) - throws IOException { - - Token token = - ozoneManagerClient.getDelegationToken(renewer); - if (token != null) { - token.setService(dtService); - if (LOG.isDebugEnabled()) { - LOG.debug("Created token {} for dtService {}", token, dtService); - } - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Cannot get ozone delegation token for renewer {} to " + - "access service {}", renewer, dtService); - } - } - return token; - } - - /** - * Renew an existing delegation token. - * - * @param token delegation token obtained earlier - * @return the new expiration time - * @throws IOException - */ - @Override - public long renewDelegationToken(Token token) - throws IOException { - return ozoneManagerClient.renewDelegationToken(token); - } - - /** - * Cancel an existing delegation token. - * - * @param token delegation token - * @throws IOException - */ - @Override - public void cancelDelegationToken(Token token) - throws IOException { - ozoneManagerClient.cancelDelegationToken(token); - } - - /** - * Returns s3 secret given a kerberos user. - * @param kerberosID - * @return S3SecretValue - * @throws IOException - */ - @Override - public S3SecretValue getS3Secret(String kerberosID) throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(kerberosID), - "kerberosID cannot be null or empty."); - - return ozoneManagerClient.getS3Secret(kerberosID); - } - - @Override - @VisibleForTesting - public OMFailoverProxyProvider getOMProxyProvider() { - return ozoneManagerClient.getOMFailoverProxyProvider(); - } - - @Override - public void setBucketVersioning( - String volumeName, String bucketName, Boolean versioning) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(versioning); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setIsVersionEnabled(versioning); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void setBucketStorageType( - String volumeName, String bucketName, StorageType storageType) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(storageType); - OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setStorageType(storageType); - ozoneManagerClient.setBucketProperty(builder.build()); - } - - @Override - public void deleteBucket( - String volumeName, String bucketName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - ozoneManagerClient.deleteBucket(volumeName, bucketName); - } - - @Override - public void checkBucketAccess( - String volumeName, String bucketName) throws IOException { - - } - - @Override - public OzoneBucket getBucketDetails( - String volumeName, String bucketName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - OmBucketInfo bucketInfo = - ozoneManagerClient.getBucketInfo(volumeName, bucketName); - return new OzoneBucket( - conf, - this, - bucketInfo.getVolumeName(), - bucketInfo.getBucketName(), - 
bucketInfo.getStorageType(), - bucketInfo.getIsVersionEnabled(), - bucketInfo.getCreationTime(), - bucketInfo.getMetadata(), - bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo - .getEncryptionKeyInfo().getKeyName() : null); - } - - @Override - public List listBuckets(String volumeName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - List buckets = ozoneManagerClient.listBuckets( - volumeName, prevBucket, bucketPrefix, maxListResult); - - return buckets.stream().map(bucket -> new OzoneBucket( - conf, - this, - bucket.getVolumeName(), - bucket.getBucketName(), - bucket.getStorageType(), - bucket.getIsVersionEnabled(), - bucket.getCreationTime(), - bucket.getMetadata(), - bucket.getEncryptionKeyInfo() != null ? bucket - .getEncryptionKeyInfo().getKeyName() : null)) - .collect(Collectors.toList()); - } - - @Override - public OzoneOutputStream createKey( - String volumeName, String bucketName, String keyName, long size, - ReplicationType type, ReplicationFactor factor, - Map metadata) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, type, factor); - String requestId = UUID.randomUUID().toString(); - - if(Boolean.valueOf(metadata.get(OzoneConsts.GDPR_FLAG))){ - try{ - GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom()); - metadata.putAll(gKey.getKeyDetails()); - }catch (Exception e) { - if(e instanceof InvalidKeyException && - e.getMessage().contains("Illegal key size or default parameters")) { - LOG.error("Missing Unlimited Strength Policy jars. Please install " + - "Java Cryptography Extension (JCE) Unlimited Strength " + - "Jurisdiction Policy Files"); - } - throw new IOException(e); - } - } - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(size) - .setType(HddsProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .addAllMetadata(metadata) - .setAcls(getAclList()) - .build(); - - OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); - return createOutputStream(openKey, requestId, type, factor); - } - - private KeyProvider.KeyVersion getDEK(FileEncryptionInfo feInfo) - throws IOException { - // check crypto protocol version - OzoneKMSUtil.checkCryptoProtocolVersion(feInfo); - KeyProvider.KeyVersion decrypted; - decrypted = OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo, - getKeyProvider()); - return decrypted; - } - - @Override - public OzoneInputStream getKey( - String volumeName, String bucketName, String keyName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .setSortDatanodesInPipeline(topologyAwareReadEnabled) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - return createInputStream(keyInfo); - } - - @Override - public void deleteKey( - String volumeName, String bucketName, String keyName) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - Preconditions.checkNotNull(keyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - ozoneManagerClient.deleteKey(keyArgs); - } - - @Override - public void 
renameKey(String volumeName, String bucketName, - String fromKeyName, String toKeyName) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(fromKeyName, toKeyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(fromKeyName) - .build(); - ozoneManagerClient.renameKey(keyArgs, toKeyName); - } - - @Override - public List listKeys(String volumeName, String bucketName, - String keyPrefix, String prevKey, - int maxListResult) - throws IOException { - List keys = ozoneManagerClient.listKeys( - volumeName, bucketName, prevKey, keyPrefix, maxListResult); - - return keys.stream().map(key -> new OzoneKey( - key.getVolumeName(), - key.getBucketName(), - key.getKeyName(), - key.getDataSize(), - key.getCreationTime(), - key.getModificationTime(), - ReplicationType.valueOf(key.getType().toString()), - key.getFactor().getNumber())) - .collect(Collectors.toList()); - } - - @Override - public OzoneKeyDetails getKeyDetails( - String volumeName, String bucketName, String keyName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .setSortDatanodesInPipeline(topologyAwareReadEnabled) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); - - List ozoneKeyLocations = new ArrayList<>(); - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().forEach( - (a) -> ozoneKeyLocations.add(new OzoneKeyLocation(a.getContainerID(), - a.getLocalID(), a.getLength(), a.getOffset()))); - return new OzoneKeyDetails(keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), keyInfo.getDataSize(), keyInfo.getCreationTime(), - keyInfo.getModificationTime(), ozoneKeyLocations, ReplicationType - .valueOf(keyInfo.getType().toString()), keyInfo.getMetadata(), - keyInfo.getFileEncryptionInfo(), keyInfo.getFactor().getNumber()); - } - - @Override - public void createS3Bucket(String userName, String s3BucketName) - throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(userName), "user name " + - "cannot be null or empty."); - - Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " + - "name cannot be null or empty."); - ozoneManagerClient.createS3Bucket(userName, s3BucketName); - } - - @Override - public void deleteS3Bucket(String s3BucketName) - throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " + - "name cannot be null or empty."); - ozoneManagerClient.deleteS3Bucket(s3BucketName); - } - - @Override - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " + - "name cannot be null or empty."); - return ozoneManagerClient.getOzoneBucketMapping(s3BucketName); - } - - @Override - @SuppressWarnings("StringSplitter") - public String getOzoneVolumeName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[0]; - - } - - @Override - @SuppressWarnings("StringSplitter") - public String getOzoneBucketName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[1]; - } - - @Override - public List 
listS3Buckets(String userName, String bucketPrefix, - String prevBucket, int maxListResult) - throws IOException { - List buckets = ozoneManagerClient.listS3Buckets( - userName, prevBucket, bucketPrefix, maxListResult); - - return buckets.stream().map(bucket -> new OzoneBucket( - conf, - this, - bucket.getVolumeName(), - bucket.getBucketName(), - bucket.getStorageType(), - bucket.getIsVersionEnabled(), - bucket.getCreationTime(), - bucket.getMetadata(), - bucket.getEncryptionKeyInfo() != null ? - bucket.getEncryptionKeyInfo().getKeyName(): null)) - .collect(Collectors.toList()); - } - - @Override - public void close() throws IOException { - IOUtils.cleanupWithLogger(LOG, ozoneManagerClient); - IOUtils.cleanupWithLogger(LOG, xceiverClientManager); - } - - @Override - public OmMultipartInfo initiateMultipartUpload(String volumeName, - String bucketName, - String keyName, - ReplicationType type, - ReplicationFactor factor) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, type, factor); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setType(HddsProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .setAcls(getAclList()) - .build(); - OmMultipartInfo multipartInfo = ozoneManagerClient - .initiateMultipartUpload(keyArgs); - return multipartInfo; - } - - @Override - public OzoneOutputStream createMultipartKey(String volumeName, - String bucketName, - String keyName, - long size, - int partNumber, - String uploadID) - throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, uploadID); - Preconditions.checkArgument(partNumber > 0 && partNumber <=10000, "Part " + - "number should be greater than zero and less than or equal to 10000"); - Preconditions.checkArgument(size >=0, "size should be greater than or " + - "equal to zero"); - String requestId = UUID.randomUUID().toString(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(size) - .setIsMultipartKey(true) - .setMultipartUploadID(uploadID) - .setMultipartUploadPartNumber(partNumber) - .setAcls(getAclList()) - .build(); - - OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs); - KeyOutputStream keyOutputStream = - new KeyOutputStream.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setOmClient(ozoneManagerClient) - .setChunkSize(chunkSize) - .setRequestID(requestId) - .setType(openKey.getKeyInfo().getType()) - .setFactor(openKey.getKeyInfo().getFactor()) - .setStreamBufferFlushSize(streamBufferFlushSize) - .setStreamBufferMaxSize(streamBufferMaxSize) - .setWatchTimeout(watchTimeout) - .setBlockSize(blockSize) - .setBytesPerChecksum(bytesPerChecksum) - .setChecksumType(checksumType) - .setMultipartNumber(partNumber) - .setMultipartUploadID(uploadID) - .setIsMultipartKey(true) - .setMaxRetryCount(maxRetryCount) - .setRetryInterval(retryInterval) - .build(); - keyOutputStream.addPreallocateBlocks( - openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - return new OzoneOutputStream(keyOutputStream); - } - - @Override - public OmMultipartUploadCompleteInfo completeMultipartUpload( - String volumeName, String bucketName, String keyName, String uploadID, - Map partsMap) throws IOException { - 
HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, uploadID); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setMultipartUploadID(uploadID) - .setAcls(getAclList()) - .build(); - - OmMultipartUploadCompleteList - omMultipartUploadCompleteList = new OmMultipartUploadCompleteList( - partsMap); - - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = - ozoneManagerClient.completeMultipartUpload(keyArgs, - omMultipartUploadCompleteList); - - return omMultipartUploadCompleteInfo; - - } - - @Override - public void abortMultipartUpload(String volumeName, - String bucketName, String keyName, String uploadID) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(keyName, uploadID); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setMultipartUploadID(uploadID) - .build(); - ozoneManagerClient.abortMultipartUpload(omKeyArgs); - } - - @Override - public OzoneMultipartUploadPartListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException { - HddsClientUtils.verifyResourceName(volumeName, bucketName); - HddsClientUtils.checkNotNull(uploadID); - Preconditions.checkArgument(maxParts > 0, "Max Parts Should be greater " + - "than zero"); - Preconditions.checkArgument(partNumberMarker >= 0, "Part Number Marker " + - "Should be greater than or equal to zero, as part numbers starts from" + - " 1 and ranges till 10000"); - OmMultipartUploadListParts omMultipartUploadListParts = - ozoneManagerClient.listParts(volumeName, bucketName, keyName, - uploadID, partNumberMarker, maxParts); - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - new OzoneMultipartUploadPartListParts(ReplicationType - .fromProto(omMultipartUploadListParts.getReplicationType()), - ReplicationFactor - .fromProto(omMultipartUploadListParts.getReplicationFactor()), - omMultipartUploadListParts.getNextPartNumberMarker(), - omMultipartUploadListParts.isTruncated()); - - for (OmPartInfo omPartInfo : omMultipartUploadListParts.getPartInfoList()) { - ozoneMultipartUploadPartListParts.addPart( - new OzoneMultipartUploadPartListParts.PartInfo( - omPartInfo.getPartNumber(), omPartInfo.getPartName(), - omPartInfo.getModificationTime(), omPartInfo.getSize())); - } - return ozoneMultipartUploadPartListParts; - - } - - @Override - public OzoneMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException { - - OmMultipartUploadList omMultipartUploadList = - ozoneManagerClient.listMultipartUploads(volumeName, bucketName, prefix); - List uploads = omMultipartUploadList.getUploads() - .stream() - .map(upload -> new OzoneMultipartUpload(upload.getVolumeName(), - upload.getBucketName(), - upload.getKeyName(), - upload.getUploadId(), - upload.getCreationTime(), - ReplicationType.fromProto(upload.getReplicationType()), - ReplicationFactor.fromProto(upload.getReplicationFactor()))) - .collect(Collectors.toList()); - OzoneMultipartUploadList result = new OzoneMultipartUploadList(uploads); - return result; - } - - @Override - public OzoneFileStatus getOzoneFileStatus(String volumeName, - String bucketName, String keyName) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - 
.setBucketName(bucketName) - .setKeyName(keyName) - .build(); - return ozoneManagerClient.getFileStatus(keyArgs); - } - - @Override - public void createDirectory(String volumeName, String bucketName, - String keyName) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setAcls(getAclList()) - .build(); - ozoneManagerClient.createDirectory(keyArgs); - } - - @Override - public OzoneInputStream readFile(String volumeName, String bucketName, - String keyName) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setSortDatanodesInPipeline(topologyAwareReadEnabled) - .build(); - OmKeyInfo keyInfo = ozoneManagerClient.lookupFile(keyArgs); - return createInputStream(keyInfo); - } - - @Override - public OzoneOutputStream createFile(String volumeName, String bucketName, - String keyName, long size, ReplicationType type, ReplicationFactor factor, - boolean overWrite, boolean recursive) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(size) - .setType(HddsProtos.ReplicationType.valueOf(type.name())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .setAcls(getAclList()) - .build(); - OpenKeySession keySession = - ozoneManagerClient.createFile(keyArgs, overWrite, recursive); - return createOutputStream(keySession, UUID.randomUUID().toString(), type, - factor); - } - - @Override - public List listStatus(String volumeName, String bucketName, - String keyName, boolean recursive, String startKey, long numEntries) - throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build(); - return ozoneManagerClient - .listStatus(keyArgs, recursive, startKey, numEntries); - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - return ozoneManagerClient.addAcl(obj, acl); - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - return ozoneManagerClient.removeAcl(obj, acl); - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - return ozoneManagerClient.setAcl(obj, acls); - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. 
- */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - return ozoneManagerClient.getAcl(obj); - } - - private OzoneInputStream createInputStream(OmKeyInfo keyInfo) - throws IOException { - LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - verifyChecksum); - FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo(); - if (feInfo != null) { - final KeyProvider.KeyVersion decrypted = getDEK(feInfo); - final CryptoInputStream cryptoIn = - new CryptoInputStream(lengthInputStream.getWrappedStream(), - OzoneKMSUtil.getCryptoCodec(conf, feInfo), - decrypted.getMaterial(), feInfo.getIV()); - return new OzoneInputStream(cryptoIn); - } else { - try{ - GDPRSymmetricKey gk; - Map keyInfoMetadata = keyInfo.getMetadata(); - if(Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))){ - gk = new GDPRSymmetricKey( - keyInfoMetadata.get(OzoneConsts.GDPR_SECRET), - keyInfoMetadata.get(OzoneConsts.GDPR_ALGORITHM) - ); - gk.getCipher().init(Cipher.DECRYPT_MODE, gk.getSecretKey()); - return new OzoneInputStream( - new CipherInputStream(lengthInputStream, gk.getCipher())); - } - }catch (Exception ex){ - throw new IOException(ex); - } - } - return new OzoneInputStream(lengthInputStream.getWrappedStream()); - } - - private OzoneOutputStream createOutputStream(OpenKeySession openKey, - String requestId, ReplicationType type, ReplicationFactor factor) - throws IOException { - KeyOutputStream keyOutputStream = - new KeyOutputStream.Builder() - .setHandler(openKey) - .setXceiverClientManager(xceiverClientManager) - .setOmClient(ozoneManagerClient) - .setChunkSize(chunkSize) - .setRequestID(requestId) - .setType(HddsProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) - .setStreamBufferFlushSize(streamBufferFlushSize) - .setStreamBufferMaxSize(streamBufferMaxSize) - .setWatchTimeout(watchTimeout) - .setBlockSize(blockSize) - .setChecksumType(checksumType) - .setBytesPerChecksum(bytesPerChecksum) - .setMaxRetryCount(maxRetryCount) - .setRetryInterval(retryInterval) - .build(); - keyOutputStream - .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(), - openKey.getOpenVersion()); - final FileEncryptionInfo feInfo = keyOutputStream.getFileEncryptionInfo(); - if (feInfo != null) { - KeyProvider.KeyVersion decrypted = getDEK(feInfo); - final CryptoOutputStream cryptoOut = - new CryptoOutputStream(keyOutputStream, - OzoneKMSUtil.getCryptoCodec(conf, feInfo), - decrypted.getMaterial(), feInfo.getIV()); - return new OzoneOutputStream(cryptoOut); - } else { - try{ - GDPRSymmetricKey gk; - Map openKeyMetadata = - openKey.getKeyInfo().getMetadata(); - if(Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))){ - gk = new GDPRSymmetricKey( - openKeyMetadata.get(OzoneConsts.GDPR_SECRET), - openKeyMetadata.get(OzoneConsts.GDPR_ALGORITHM) - ); - gk.getCipher().init(Cipher.ENCRYPT_MODE, gk.getSecretKey()); - return new OzoneOutputStream( - new CipherOutputStream(keyOutputStream, gk.getCipher())); - } - }catch (Exception ex){ - throw new IOException(ex); - } - - return new OzoneOutputStream(keyOutputStream); - } - } - - @Override - public KeyProvider getKeyProvider() throws IOException { - return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri()); - } - - @Override - public URI getKeyProviderUri() throws IOException { - // TODO: fix me to support kms instances for difference OMs - return OzoneKMSUtil.getKeyProviderUri(ugi, - null, null, conf); - } - - @Override - 
public String getCanonicalServiceName() { - return (dtService != null) ? dtService.toString() : null; - } -} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java deleted file mode 100644 index 0fcc3fc3583..00000000000 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -/** - * This package contains Ozone rpc client library classes. - */ diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java deleted file mode 100644 index ff4aeb32f04..00000000000 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ /dev/null @@ -1,220 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.net.InetSocketAddress; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; - -/** - * This test class verifies the parsing of SCM endpoint config settings. The - * parsing logic is in - * {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}. - */ -public class TestHddsClientUtils { - @Rule - public Timeout timeout = new Timeout(300000); - - @Rule - public ExpectedException thrown= ExpectedException.none(); - - /** - * Verify client endpoint lookup failure if it is not configured. - */ - @Test - public void testMissingScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); - thrown.expect(IllegalArgumentException.class); - HddsUtils.getScmAddressForClients(conf); - } - - /** - * Verify that the client endpoint can be correctly parsed from - * configuration. - */ - @Test - public void testGetScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = HddsUtils.getScmAddressForClients(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(OZONE_SCM_CLIENT_PORT_DEFAULT)); - - // Next try a client address with a host name and port. Verify both - // are used correctly. - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - addr = HddsUtils.getScmAddressForClients(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(100)); - } - - @Test - public void testgetOmSocketAddress() { - final Configuration conf = new OzoneConfiguration(); - - // First try a client address with just a host name. Verify it falls - // back to the default port. - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = OmUtils.getOmAddress(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); - - // Next try a client address with just a host name and port. Verify the port - // is ignored and the default OM port is used. - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100"); - addr = OmUtils.getOmAddress(conf); - assertThat(addr.getHostString(), is("1.2.3.4")); - assertThat(addr.getPort(), is(100)); - - // Assert the we are able to use default configs if no value is specified. 
- conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); - addr = OmUtils.getOmAddress(conf); - assertThat(addr.getHostString(), is("0.0.0.0")); - assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT)); - } - - @Test - public void testBlockClientFallbackToClientNoPort() { - // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should - // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY. - final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); - final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( - conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - @SuppressWarnings("StringSplitter") - public void testBlockClientFallbackToClientWithPort() { - // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should - // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY. - // - // Verify that the OZONE_SCM_CLIENT_ADDRESS_KEY port number is ignored, - // if present. Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. - final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); - final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( - conf); - assertEquals(scmHost.split(":")[0], address.getHostName()); - assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - public void testBlockClientFallbackToScmNamesNoPort() { - // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY - // are undefined it should fallback to OZONE_SCM_NAMES. - final String scmHost = "host456"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( - conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - @SuppressWarnings("StringSplitter") - public void testBlockClientFallbackToScmNamesWithPort() { - // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY - // are undefined it should fallback to OZONE_SCM_NAMES. - // - // Verify that the OZONE_SCM_NAMES port number is ignored, if present. - // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. - final String scmHost = "host456:200"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( - conf); - assertEquals(scmHost.split(":")[0], address.getHostName()); - assertEquals(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - public void testClientFallbackToScmNamesNoPort() { - // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback - // to OZONE_SCM_NAMES. - final String scmHost = "host456"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); - assertEquals(scmHost, address.getHostName()); - assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - @SuppressWarnings("StringSplitter") - public void testClientFallbackToScmNamesWithPort() { - // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback - // to OZONE_SCM_NAMES. - // - // Verify that the OZONE_SCM_NAMES port number is ignored, if present. 
- // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. - final String scmHost = "host456:300"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); - assertEquals(scmHost.split(":")[0], address.getHostName()); - assertEquals(OZONE_SCM_CLIENT_PORT_DEFAULT, address.getPort()); - } - - @Test - public void testBlockClientFailsWithMultipleScmNames() { - // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY - // are undefined, fail if OZONE_SCM_NAMES has multiple SCMs. - final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - thrown.expect(IllegalArgumentException.class); - HddsUtils.getScmAddressForBlockClients(conf); - } - - @Test - public void testClientFailsWithMultipleScmNames() { - // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, fail if OZONE_SCM_NAMES - // has multiple SCMs. - final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_NAMES, scmHost); - thrown.expect(IllegalArgumentException.class); - HddsUtils.getScmAddressForClients(conf); - } -} diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index be63eab0c7a..00000000000 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * This package contains test classes for Ozone Client. - */ \ No newline at end of file diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java deleted file mode 100644 index 49fb5e33511..00000000000 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  
You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; - -import static org.junit.Assert.*; - -/** - * Test class for {@link OzoneKMSUtil}. - * */ -public class TestOzoneKMSUtil { - private OzoneConfiguration config; - - @Before - public void setUp() { - config = new OzoneConfiguration(); - config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true); - } - - @Test - public void getKeyProvider() { - try { - OzoneKMSUtil.getKeyProvider(config, null); - fail("Expected IOException."); - } catch (IOException ioe) { - assertEquals(ioe.getMessage(), "KMS serverProviderUri is " + - "not configured."); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index df58f3650a4..00000000000 --- a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml deleted file mode 100644 index 09ac27ad60e..00000000000 --- a/hadoop-ozone/common/pom.xml +++ /dev/null @@ -1,189 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-common - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Common - Apache Hadoop Ozone Common - jar - - - - - org.apache.commons - commons-compress - - - org.apache.hadoop - hadoop-common - - - org.apache.hadoop - hadoop-hdfs - - - org.apache.hadoop - hadoop-hdfs-client - - - com.squareup.okhttp - okhttp - - - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-server-framework - - - org.apache.hadoop - hadoop-hdds-container-service - - - org.apache.hadoop - hadoop-hdds-client - - - org.apache.hadoop - hadoop-hdds-tools - - - junit - junit - test - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-hdfs - test - test-jar - - - - - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - false - - - ${basedir}/src/main/resources - - ozone-version-info.properties - - true - - - - - org.apache.hadoop - hadoop-maven-plugins - - - version-info - generate-resources - - version-info - - - - ${basedir}/../ - - */src/main/java/**/*.java - */src/main/proto/*.proto - - - - - - compile-protoc - - protoc - - - ${protobuf.version} - ${protoc.path} - - - ${basedir}/../../hadoop-hdds/common/src/main/proto/ - - ${basedir}/src/main/proto - - - ${basedir}/src/main/proto - - OzoneManagerProtocol.proto - - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - - - k8s-dev - - - - io.fabric8 - docker-maven-plugin - 0.29.0 - - - - ${user.name}/ozone:${project.version} - - ${project.basedir} - - - - - - - - - - diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone deleted file mode 100755 index cd8f202c2cf..00000000000 --- 
a/hadoop-ozone/common/src/main/bin/ozone +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The name of the script being executed. -HADOOP_SHELL_EXECNAME="ozone" -MYNAME="${BASH_SOURCE-$0}" -JVM_PID="$$" -## @description build up the hdfs command's usage text. -## @audience public -## @stability stable -## @replaceable no -function hadoop_usage -{ - hadoop_add_option "--buildpaths" "attempt to add class files from build tree" - hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon" - hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode" - hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" - hadoop_add_option "--loglevel level" "set the log4j level for this command" - hadoop_add_option "--workers" "turn on worker mode" - - hadoop_add_subcommand "auditparser" client "runs audit parser tool" - hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" - hadoop_add_subcommand "datanode" daemon "run a HDDS datanode" - hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" - hadoop_add_subcommand "freon" client "runs an ozone data generator" - hadoop_add_subcommand "fs" client "run a filesystem command on Ozone file system. Equivalent to 'hadoop fs'" - hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" - hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." - hadoop_add_subcommand "getconf" client "get ozone config values from configuration" - hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." 
- hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data" - hadoop_add_subcommand "om" daemon "Ozone Manager" - hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" - hadoop_add_subcommand "s3g" daemon "run the S3 compatible REST gateway" - hadoop_add_subcommand "csi" daemon "run the standalone CSI daemon" - hadoop_add_subcommand "recon" daemon "run the Recon service" - hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager" - hadoop_add_subcommand "sh" client "command line interface for object store operations" - hadoop_add_subcommand "s3" client "command line interface for s3 related operations" - hadoop_add_subcommand "insight" client "tool to get runtime opeartion information" - hadoop_add_subcommand "version" client "print the version" - hadoop_add_subcommand "dtutil" client "operations related to delegation tokens" - hadoop_add_subcommand "upgrade" client "HDFS to Ozone in-place upgrade tool" - - hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false -} - -## @description Default command handler for hadoop command -## @audience public -## @stability stable -## @replaceable no -## @param CLI arguments -function ozonecmd_case -{ - subcmd=$1 - shift - - ozone_default_log4j="${HADOOP_CONF_DIR}/log4j.properties" - ozone_shell_log4j="${HADOOP_CONF_DIR}/ozone-shell-log4j.properties" - if [ ! -f "${ozone_shell_log4j}" ]; then - ozone_shell_log4j=${ozone_default_log4j} - fi - - case ${subcmd} in - auditparser) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.audit.parser.AuditParser - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - classpath) - if [[ "$#" -gt 0 ]]; then - OZONE_RUN_ARTIFACT_NAME="$1" - HADOOP_CLASSNAME="org.apache.hadoop.util.Classpath" - #remove the artifact name and replace it with glob - # (We need at least one argument to execute the Classpath helper class) - HADOOP_SUBCMD_ARGS[0]="--glob" - else - hadoop_finalize - echo "Usage: ozone classpath " - echo "Where the artifact name is one of:" - echo "" - ls -1 ${HADOOP_HDFS_HOME}/share/ozone/classpath/ | sed 's/.classpath//' - exit -1 - fi - ;; - datanode) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - # Add JVM parameter (org.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false) - # for disabling netty PooledByteBufAllocator thread caches for non-netty threads. - # This parameter significantly reduces GC pressure for Datanode. - # Corresponding Ratis issue https://issues.apache.org/jira/browse/RATIS-534. 
- HDDS_DN_OPTS="${HDDS_DN_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/dn-audit-log4j2.properties -Dorg.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false" - HADOOP_OPTS="${HADOOP_OPTS} ${HDDS_DN_OPTS}" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.HddsDatanodeService - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-datanode" - ;; - envvars) - echo "JAVA_HOME='${JAVA_HOME}'" - echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'" - echo "HDFS_DIR='${HDFS_DIR}'" - echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'" - echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'" - echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'" - echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'" - echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" - if [[ -n "${QATESTMODE}" ]]; then - echo "MYNAME=${MYNAME}" - echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}" - fi - exit 0 - ;; - freon) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon - OZONE_FREON_OPTS="${OZONE_FREON_OPTS} -Dhadoop.log.file=ozone-freon.log -Dlog4j.configuration=file:${ozone_shell_log4j}" - HADOOP_OPTS="${HADOOP_OPTS} ${OZONE_FREON_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - genesis) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - getconf) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf; - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - om) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManagerStarter - HDFS_OM_OPTS="${HDFS_OM_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/om-audit-log4j2.properties" - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" - ;; - sh | shell) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.OzoneShell - HDFS_OM_SH_OPTS="${HDFS_OM_SH_OPTS} -Dhadoop.log.file=ozone-shell.log - -Dlog4j.configuration=file:${ozone_shell_log4j}" - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_OM_SH_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" - ;; - s3) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.s3.S3Shell - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-ozone-manager" - ;; - scm) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.server.StorageContainerManagerStarter' - hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS" - HDFS_STORAGECONTAINERMANAGER_OPTS="${HDFS_STORAGECONTAINERMANAGER_OPTS} -Dlog4j.configurationFile=${HADOOP_CONF_DIR}/scm-audit-log4j2.properties" - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-server-scm" - ;; - s3g) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.ozone.s3.Gateway' - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-s3gateway" - ;; - csi) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.ozone.csi.CsiServer' - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-csi" - ;; - recon) - HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.ozone.recon.ReconServer' - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-recon" - ;; - fs) - HADOOP_CLASSNAME=org.apache.hadoop.fs.ozone.OzoneFsShell - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - scmcli) - HADOOP_CLASSNAME=org.apache.hadoop.hdds.scm.cli.SCMCLI - HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_SCM_CLI_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-hdds-tools" - ;; - insight) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.insight.Insight - HADOOP_OPTS="${HADOOP_OPTS} 
${HDFS_SCM_CLI_OPTS}" - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-insight" - ;; - version) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.util.OzoneVersionInfo - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-common" - ;; - genconf) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - dtutil) - HADOOP_CLASSNAME=org.apache.hadoop.security.token.DtUtilShell - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-tools" - ;; - upgrade) - HADOOP_CLASSNAME=org.apache.hadoop.ozone.upgrade.InPlaceUpgrade - OZONE_RUN_ARTIFACT_NAME="hadoop-ozone-upgrade" - ;; - *) - HADOOP_CLASSNAME="${subcmd}" - if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then - hadoop_exit_with_usage 1 - fi - ;; - esac -} - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P) - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck source=./hadoop-ozone/common/src/main/bin/ozone-config.sh - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -# now that we have support code, let's abs MYNAME so we can use it later -MYNAME=$(hadoop_abs "${MYNAME}") - -if [[ $# = 0 ]]; then - hadoop_exit_with_usage 1 -fi - -HADOOP_SUBCMD=$1 -shift - - -if hadoop_need_reexec ozone "${HADOOP_SUBCMD}"; then - hadoop_uservar_su ozone "${HADOOP_SUBCMD}" \ - "${MYNAME}" \ - "--reexec" \ - "${HADOOP_USER_PARAMS[@]}" - exit $? -fi - -hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - -HADOOP_SUBCMD_ARGS=("$@") - -if declare -f ozone_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then - hadoop_debug "Calling dynamically: ozone_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}" - "ozone_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" -else - ozonecmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}" -fi - - -# -# Setting up classpath based on the generate classpath descriptors -# -if [ ! "$OZONE_RUN_ARTIFACT_NAME" ]; then - echo "ERROR: Ozone components require to set OZONE_RUN_ARTIFACT_NAME to set the classpath" - exit -1 -fi -export HDDS_LIB_JARS_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib" -CLASSPATH_FILE="${HADOOP_HDFS_HOME}/share/ozone/classpath/${OZONE_RUN_ARTIFACT_NAME}.classpath" -if [ ! "$CLASSPATH_FILE" ]; then - echo "ERROR: Classpath file descriptor $CLASSPATH_FILE is missing" - exit -1 -fi -# shellcheck disable=SC1090,SC2086 -source $CLASSPATH_FILE -OIFS=$IFS -IFS=':' -# shellcheck disable=SC2154 -for jar in $classpath; do - hadoop_add_classpath "$jar" -done -hadoop_add_classpath "${HADOOP_HDFS_HOME}/share/ozone/web" - -#We need to add the artifact manually as it's not part the generated classpath desciptor -ARTIFACT_LIB_DIR="${HADOOP_HDFS_HOME}/share/ozone/lib" -MAIN_ARTIFACT=$(find "$ARTIFACT_LIB_DIR" -name "${OZONE_RUN_ARTIFACT_NAME}-*.jar") -if [ ! "$MAIN_ARTIFACT" ]; then - echo "ERROR: Component jar file $MAIN_ARTIFACT is missing from ${HADOOP_HDFS_HOME}/share/ozone/lib" -fi -hadoop_add_classpath "${MAIN_ARTIFACT}" -IFS=$OIFS - - -hadoop_add_client_opts - -if [[ ${HADOOP_WORKER_MODE} = true ]]; then - hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" "${HADOOP_USER_PARAMS[@]}" - exit $? 
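For reference, a minimal sketch of a generated classpath descriptor as consumed above; the file name and jar names here are illustrative, and the only format the script relies on is that sourcing the file defines a colon-separated classpath variable:

    # ${HADOOP_HDFS_HOME}/share/ozone/classpath/hadoop-ozone-tools.classpath (illustrative)
    classpath="${HDDS_LIB_JARS_DIR}/hadoop-hdds-common.jar:${HDDS_LIB_JARS_DIR}/commons-cli.jar"

Each entry is handed to hadoop_add_classpath, and the component jar itself is added separately from share/ozone/lib because it is not part of the generated descriptor.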
-fi - -hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" - -# everything is in globals at this point, so call the generic handler -hadoop_generic_java_subcmd_handler diff --git a/hadoop-ozone/common/src/main/bin/ozone-config.sh b/hadoop-ozone/common/src/main/bin/ozone-config.sh deleted file mode 100755 index 5ccb646f720..00000000000 --- a/hadoop-ozone/common/src/main/bin/ozone-config.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# included in all the ozone scripts with source command -# should not be executed directly - -function hadoop_subproject_init -{ - if [[ -z "${HADOOP_OZONE_ENV_PROCESSED}" ]]; then - if [[ -e "${HADOOP_CONF_DIR}/ozone-env.sh" ]]; then - . "${HADOOP_CONF_DIR}/ozone-env.sh" - export HADOOP_OZONE_ENV_PROCESSED=true - fi - fi - HADOOP_OZONE_HOME="${HADOOP_OZONE_HOME:-$HADOOP_HOME}" - -} - -if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then - _hd_this="${BASH_SOURCE-$0}" - HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P) -fi - -# shellcheck source=./hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh - -if [[ -n "${HADOOP_COMMON_HOME}" ]] && - [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then - . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" -elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then - . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" -elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then - . "${HADOOP_HOME}/libexec/hadoop-config.sh" -else - echo "ERROR: Hadoop common not found." 2>&1 - exit 1 -fi - -# HADOOP_OZONE_DELEGATED_CLASSES defines a list of classes which will be loaded by default -# class loader of application instead of isolated class loader. With this way we can solve -# incompatible problem when using hadoop3.x + ozone with older hadoop version. -#export HADOOP_OZONE_DELEGATED_CLASSES= - diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh deleted file mode 100755 index 9ddaab64a7f..00000000000 --- a/hadoop-ozone/common/src/main/bin/start-ozone.sh +++ /dev/null @@ -1,140 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Start hadoop hdfs and ozone daemons. -# Run this on master node. -## @description usage info -## @audience private -## @stability evolving -## @replaceable no -function hadoop_usage -{ - echo "Usage: start-ozone.sh" -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck disable=SC1090 - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -elif [[ -f "${bin}/../libexec/ozone-config.sh" ]]; then - HADOOP_HOME="${bin}/../" - HADOOP_LIBEXEC_DIR="${HADOOP_HOME}/libexec" - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -# get arguments -if [[ $# -ge 1 ]]; then - startOpt="$1" - shift - case "$startOpt" in - -upgrade) - nameStartOpt="$startOpt" - ;; - -rollback) - dataStartOpt="$startOpt" - ;; - *) - hadoop_exit_with_usage 1 - ;; - esac -fi - -#Add other possible options -nameStartOpt="$nameStartOpt $*" - -SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-) -SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-) - -#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} -# == "true" ]]; then -# echo "Ozone is not supported in a security enabled cluster." -# exit 1 -#fi - -#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-) -#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-) -#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then -# echo "Ozone is not supported in a security enabled cluster." -# exit 1 -#fi - -#--------------------------------------------------------- -# Check if ozone is enabled -OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-) -if [[ "${OZONE_ENABLED}" != "true" ]]; then - echo "Operation is not supported because ozone is not enabled." - exit -1 -fi - -#--------------------------------------------------------- -# datanodes (using default workers file) - -echo "Starting datanodes" -hadoop_uservar_su hdfs datanode "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --daemon start \ - datanode ${dataStartOpt} -(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? 
)) - -#--------------------------------------------------------- -# Ozone ozonemanager nodes -OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null) -echo "Starting Ozone Manager nodes [${OM_NODES}]" -if [[ "${OM_NODES}" == "0.0.0.0" ]]; then - OM_NODES=$(hostname) -fi - -hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${OM_NODES}" \ - --daemon start \ - om - -HADOOP_JUMBO_RETCOUNTER=$? - -#--------------------------------------------------------- -# Ozone storagecontainermanager nodes -SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -storagecontainermanagers 2>/dev/null) -echo "Starting storage container manager nodes [${SCM_NODES}]" -hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${SCM_NODES}" \ - --daemon start \ - scm - -(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? )) - -exit ${HADOOP_JUMBO_RETCOUNTER} diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh deleted file mode 100755 index c07d42b4281..00000000000 --- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Stop hdfs and ozone daemons. -# Run this on master node. -## @description usage info -## @audience private -## @stability evolving -## @replaceable no -function hadoop_usage -{ - echo "Usage: stop-ozone.sh" -} - -this="${BASH_SOURCE-$0}" -bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P) - -# let's locate libexec... -if [[ -n "${HADOOP_HOME}" ]]; then - HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec" -else - HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec" -fi - -HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}" -# shellcheck disable=SC2034 -HADOOP_NEW_CONFIG=true -if [[ -f "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" ]]; then - # shellcheck disable=SC1090 - . "${HADOOP_LIBEXEC_DIR}/ozone-config.sh" -else - echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/ozone-config.sh." 2>&1 - exit 1 -fi - -#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-) -#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-) -#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then -# echo "Ozone is not supported in a security enabled cluster." 
-# exit 1 -#fi - -#--------------------------------------------------------- -# Check if ozone is enabled -OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-) -if [[ "${OZONE_ENABLED}" != "true" ]]; then - echo "Operation is not supported because ozone is not enabled." - exit -1 -fi - -#--------------------------------------------------------- -# datanodes (using default workers file) - -echo "Stopping datanodes" - -hadoop_uservar_su ozone datanode "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --daemon stop \ - datanode - -#--------------------------------------------------------- -# Ozone Manager nodes -OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -ozonemanagers 2>/dev/null) -echo "Stopping Ozone Manager nodes [${OM_NODES}]" -if [[ "${OM_NODES}" == "0.0.0.0" ]]; then - OM_NODES=$(hostname) -fi - -hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${OM_NODES}" \ - --daemon stop \ - om - -#--------------------------------------------------------- -# Ozone storagecontainermanager nodes -SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getconf -storagecontainermanagers 2>/dev/null) -echo "Stopping storage container manager nodes [${SCM_NODES}]" -hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \ - --workers \ - --config "${HADOOP_CONF_DIR}" \ - --hostnames "${SCM_NODES}" \ - --daemon stop \ - scm \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java deleted file mode 100644 index 8d5ca6d72ee..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.protocol; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto; - -/** - * Ozone specific storage types. - */ -public enum StorageType { - RAM_DISK, - SSD, - DISK, - ARCHIVE; - - public static final StorageType DEFAULT = DISK; - - public StorageTypeProto toProto() { - switch (this) { - case DISK: - return StorageTypeProto.DISK; - case SSD: - return StorageTypeProto.SSD; - case ARCHIVE: - return StorageTypeProto.ARCHIVE; - case RAM_DISK: - return StorageTypeProto.RAM_DISK; - default: - throw new IllegalStateException( - "BUG: StorageType not found, type=" + this); - } - } - - public static StorageType valueOf(StorageTypeProto type) { - switch (type) { - case DISK: - return DISK; - case SSD: - return SSD; - case ARCHIVE: - return ARCHIVE; - case RAM_DISK: - return RAM_DISK; - default: - throw new IllegalStateException( - "BUG: StorageTypeProto not found, type=" + type); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java deleted file mode 100644 index 89d7de08899..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.protocol; -/** - * Helper classes for the hdds protocol. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java deleted file mode 100644 index 8e129c9d230..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ /dev/null @@ -1,528 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone; - -import com.google.common.base.Joiner; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.util.Collection; -import java.util.Collections; -import java.util.Optional; -import java.util.stream.Collectors; - -import com.google.common.base.Strings; - -import org.apache.commons.compress.archivers.ArchiveEntry; -import org.apache.commons.compress.archivers.ArchiveOutputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.compress.compressors.CompressorException; -import org.apache.commons.compress.compressors.CompressorOutputStream; -import org.apache.commons.compress.compressors.CompressorStreamFactory; -import org.apache.commons.compress.utils.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.HddsServerUtil; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Stateless helper functions for the server and client side of OM - * communication. - */ -public final class OmUtils { - public static final Logger LOG = LoggerFactory.getLogger(OmUtils.class); - private static final SecureRandom SRAND = new SecureRandom(); - private static byte[] randomBytes = new byte[32]; - - private OmUtils() { - } - - /** - * Retrieve the socket address that is used by OM. 
- * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static InetSocketAddress getOmAddress(Configuration conf) { - return NetUtils.createSocketAddr(getOmRpcAddress(conf)); - } - - /** - * Retrieve the socket address that is used by OM. - * @param conf - * @return Target InetSocketAddress for the SCM service endpoint. - */ - public static String getOmRpcAddress(Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - - return host.orElse(OZONE_OM_BIND_HOST_DEFAULT) + ":" + - getOmRpcPort(conf); - } - - /** - * Retrieve the socket address that is used by OM as specified by the confKey. - * Return null if the specified conf key is not set. - * @param conf configuration - * @param confKey configuration key to lookup address from - * @return Target InetSocketAddress for the OM RPC server. - */ - public static String getOmRpcAddress(Configuration conf, String confKey) { - final Optional host = getHostNameFromConfigKeys(conf, confKey); - - if (host.isPresent()) { - return host.get() + ":" + getOmRpcPort(conf, confKey); - } else { - // The specified confKey is not set - return null; - } - } - - /** - * Retrieve the socket address that should be used by clients to connect - * to OM. - * @param conf - * @return Target InetSocketAddress for the OM service endpoint. - */ - public static InetSocketAddress getOmAddressForClients( - Configuration conf) { - final Optional host = getHostNameFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - - if (!host.isPresent()) { - throw new IllegalArgumentException( - OZONE_OM_ADDRESS_KEY + " must be defined. See" + - " https://wiki.apache.org/hadoop/Ozone#Configuration for" + - " details on configuring Ozone."); - } - - return NetUtils.createSocketAddr( - host.get() + ":" + getOmRpcPort(conf)); - } - - /** - * Returns true if OZONE_OM_SERVICE_IDS_KEY is defined and not empty. - * @param conf Configuration - * @return true if OZONE_OM_SERVICE_IDS_KEY is defined and not empty; - * else false. - */ - public static boolean isServiceIdsDefined(Configuration conf) { - String val = conf.get(OZONE_OM_SERVICE_IDS_KEY); - return val != null && val.length() > 0; - } - - /** - * Returns true if HA for OzoneManager is configured for the given service id. - * @param conf Configuration - * @param serviceId OM HA cluster service ID - * @return true if HA is configured in the configuration; else false. - */ - public static boolean isOmHAServiceId(Configuration conf, String serviceId) { - Collection omServiceIds = conf.getTrimmedStringCollection( - OZONE_OM_SERVICE_IDS_KEY); - return omServiceIds.contains(serviceId); - } - - public static int getOmRpcPort(Configuration conf) { - // If no port number is specified then we'll just try the defaultBindPort. - final Optional port = getPortNumberFromConfigKeys(conf, - OZONE_OM_ADDRESS_KEY); - return port.orElse(OZONE_OM_PORT_DEFAULT); - } - - /** - * Retrieve the port that is used by OM as specified by the confKey. - * Return default port if port is not specified in the confKey. - * @param conf configuration - * @param confKey configuration key to lookup address from - * @return Port on which OM RPC server will listen on - */ - public static int getOmRpcPort(Configuration conf, String confKey) { - // If no port number is specified then we'll just try the defaultBindPort. 
- final Optional port = getPortNumberFromConfigKeys(conf, confKey); - return port.orElse(OZONE_OM_PORT_DEFAULT); - } - - public static int getOmRestPort(Configuration conf) { - // If no port number is specified then we'll just try the default - // HTTP BindPort. - final Optional port = - getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY); - return port.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT); - } - - /** - * Get the location where OM should store its metadata directories. - * Fall back to OZONE_METADATA_DIRS if not defined. - * - * @param conf - Config - * @return File path, after creating all the required Directories. - */ - public static File getOmDbDir(Configuration conf) { - return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); - } - - /** - * Checks if the OM request is read only or not. - * @param omRequest OMRequest proto - * @return True if its readOnly, false otherwise. - */ - public static boolean isReadOnly( - OzoneManagerProtocolProtos.OMRequest omRequest) { - OzoneManagerProtocolProtos.Type cmdType = omRequest.getCmdType(); - switch (cmdType) { - case CheckVolumeAccess: - case InfoVolume: - case ListVolume: - case InfoBucket: - case ListBuckets: - case LookupKey: - case ListKeys: - case InfoS3Bucket: - case ListS3Buckets: - case ServiceList: - case ListMultiPartUploadParts: - case GetFileStatus: - case LookupFile: - case ListStatus: - case GetAcl: - case DBUpdates: - case ListMultipartUploads: - return true; - case CreateVolume: - case SetVolumeProperty: - case DeleteVolume: - case CreateBucket: - case SetBucketProperty: - case DeleteBucket: - case CreateKey: - case RenameKey: - case DeleteKey: - case CommitKey: - case AllocateBlock: - case CreateS3Bucket: - case DeleteS3Bucket: - case InitiateMultiPartUpload: - case CommitMultiPartUpload: - case CompleteMultiPartUpload: - case AbortMultiPartUpload: - case GetS3Secret: - case GetDelegationToken: - case RenewDelegationToken: - case CancelDelegationToken: - case CreateDirectory: - case CreateFile: - case RemoveAcl: - case SetAcl: - case AddAcl: - case PurgeKeys: - return false; - default: - LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType); - return false; - } - } - - public static byte[] getMD5Digest(String input) throws IOException { - try { - MessageDigest md = MessageDigest.getInstance(OzoneConsts.MD5_HASH); - return md.digest(input.getBytes(StandardCharsets.UTF_8)); - } catch (NoSuchAlgorithmException ex) { - throw new IOException("Error creating an instance of MD5 digest.\n" + - "This could possibly indicate a faulty JRE"); - } - } - - public static byte[] getSHADigest() throws IOException { - try { - SRAND.nextBytes(randomBytes); - MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - return sha.digest(randomBytes); - } catch (NoSuchAlgorithmException ex) { - throw new IOException("Error creating an instance of SHA-256 digest.\n" + - "This could possibly indicate a faulty JRE"); - } - } - - /** - * Add non empty and non null suffix to a key. - */ - private static String addSuffix(String key, String suffix) { - if (suffix == null || suffix.isEmpty()) { - return key; - } - assert !suffix.startsWith(".") : - "suffix '" + suffix + "' should not already have '.' prepended."; - return key + "." + suffix; - } - - /** - * Concatenate list of suffix strings '.' separated. - */ - private static String concatSuffixes(String... 
suffixes) { - if (suffixes == null) { - return null; - } - return Joiner.on(".").skipNulls().join(suffixes); - } - - /** - * Return configuration key of format key.suffix1.suffix2...suffixN. - */ - public static String addKeySuffixes(String key, String... suffixes) { - String keySuffix = concatSuffixes(suffixes); - return addSuffix(key, keySuffix); - } - - /** - * Match input address to local address. - * Return true if it matches, false otherwsie. - */ - public static boolean isAddressLocal(InetSocketAddress addr) { - return NetUtils.isLocalAddress(addr.getAddress()); - } - - /** - * Get a collection of all omNodeIds for the given omServiceId. - */ - public static Collection getOMNodeIds(Configuration conf, - String omServiceId) { - String key = addSuffix(OZONE_OM_NODES_KEY, omServiceId); - return conf.getTrimmedStringCollection(key); - } - - /** - * @return coll if it is non-null and non-empty. Otherwise, - * returns a list with a single null value. - */ - public static Collection emptyAsSingletonNull(Collection - coll) { - if (coll == null || coll.isEmpty()) { - return Collections.singletonList(null); - } else { - return coll; - } - } - - /** - * Write OM DB Checkpoint to an output stream as a compressed file (tgz). - * @param checkpoint checkpoint file - * @param destination desination output stream. - * @throws IOException - */ - public static void writeOmDBCheckpointToStream(DBCheckpoint checkpoint, - OutputStream destination) - throws IOException { - - try (CompressorOutputStream gzippedOut = new CompressorStreamFactory() - .createCompressorOutputStream(CompressorStreamFactory.GZIP, - destination)) { - - try (ArchiveOutputStream archiveOutputStream = - new TarArchiveOutputStream(gzippedOut)) { - - Path checkpointPath = checkpoint.getCheckpointLocation(); - for (Path path : Files.list(checkpointPath) - .collect(Collectors.toList())) { - if (path != null) { - Path fileName = path.getFileName(); - if (fileName != null) { - includeFile(path.toFile(), fileName.toString(), - archiveOutputStream); - } - } - } - } - } catch (CompressorException e) { - throw new IOException( - "Can't compress the checkpoint: " + - checkpoint.getCheckpointLocation(), e); - } - } - - private static void includeFile(File file, String entryName, - ArchiveOutputStream archiveOutputStream) - throws IOException { - ArchiveEntry archiveEntry = - archiveOutputStream.createArchiveEntry(file, entryName); - archiveOutputStream.putArchiveEntry(archiveEntry); - try (FileInputStream fis = new FileInputStream(file)) { - IOUtils.copy(fis, archiveOutputStream); - } - archiveOutputStream.closeArchiveEntry(); - } - - /** - * If a OM conf is only set with key suffixed with OM Node ID, return the - * set value. - * @return if the value is set for key suffixed with OM Node ID, return the - * value, else return null. - */ - public static String getConfSuffixedWithOMNodeId(Configuration conf, - String confKey, String omServiceID, String omNodeId) { - String suffixedConfKey = OmUtils.addKeySuffixes( - confKey, omServiceID, omNodeId); - String confValue = conf.getTrimmed(suffixedConfKey); - if (StringUtils.isNotEmpty(confValue)) { - return confValue; - } - return null; - } - - /** - * Returns the http address of peer OM node. 
- * @param conf Configuration - * @param omNodeId peer OM node ID - * @param omNodeHostAddr peer OM node host address - * @return http address of peer OM node in the format : - */ - public static String getHttpAddressForOMPeerNode(Configuration conf, - String omServiceId, String omNodeId, String omNodeHostAddr) { - final Optional bindHost = getHostNameFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId)); - - final Optional addressPort = getPortNumberFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId)); - - final Optional addressHost = getHostNameFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId)); - - String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr)); - - return hostName + ":" + addressPort.orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT); - } - - /** - * Returns the https address of peer OM node. - * @param conf Configuration - * @param omNodeId peer OM node ID - * @param omNodeHostAddr peer OM node host address - * @return https address of peer OM node in the format : - */ - public static String getHttpsAddressForOMPeerNode(Configuration conf, - String omServiceId, String omNodeId, String omNodeHostAddr) { - final Optional bindHost = getHostNameFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId)); - - final Optional addressPort = getPortNumberFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId)); - - final Optional addressHost = getHostNameFromConfigKeys(conf, - addKeySuffixes(OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId)); - - String hostName = bindHost.orElse(addressHost.orElse(omNodeHostAddr)); - - return hostName + ":" + - addressPort.orElse(OZONE_OM_HTTPS_BIND_PORT_DEFAULT); - } - - /** - * Get the local directory where ratis logs will be stored. - */ - public static String getOMRatisDirectory(Configuration conf) { - String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR); - - if (Strings.isNullOrEmpty(storageDir)) { - storageDir = HddsServerUtil.getDefaultRatisDirectory(conf); - } - return storageDir; - } - - public static String getOMRatisSnapshotDirectory(Configuration conf) { - String snapshotDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR); - - if (Strings.isNullOrEmpty(snapshotDir)) { - snapshotDir = Paths.get(getOMRatisDirectory(conf), - "snapshot").toString(); - } - return snapshotDir; - } - - public static File createOMDir(String dirPath) { - File dirFile = new File(dirPath); - if (!dirFile.exists() && !dirFile.mkdirs()) { - throw new IllegalArgumentException("Unable to create path: " + dirFile); - } - return dirFile; - } - - /** - * Prepares key info to be moved to deletedTable. - * 1. It strips GDPR metadata from key info - * 2. For given object key, if the repeatedOmKeyInfo instance is null, it - * implies that no entry for the object key exists in deletedTable so we - * create a new instance to include this key, else we update the existing - * repeatedOmKeyInfo instance. - * @param keyInfo args supplied by client - * @param repeatedOmKeyInfo key details from deletedTable - * @return {@link RepeatedOmKeyInfo} - * @throws IOException if I/O Errors when checking for key - */ - public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException{ - // If this key is in a GDPR enforced bucket, then before moving - // KeyInfo to deletedTable, remove the GDPR related metadata from - // KeyInfo. 
- if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { - keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG); - keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM); - keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET); - } - - if(repeatedOmKeyInfo == null) { - //The key doesn't exist in deletedTable, so create a new instance. - repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); - } else { - //The key exists in deletedTable, so update existing instance. - repeatedOmKeyInfo.addOmKeyInfo(keyInfo); - } - - return repeatedOmKeyInfo; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java deleted file mode 100644 index 6a74342b8d2..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.ozone; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.google.protobuf.ByteString; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; - -import java.util.ArrayList; -import java.util.BitSet; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -/** - * OzoneACL classes define bucket ACLs used in OZONE. - * - * ACLs in Ozone follow this pattern. - *
- * <ul>
- * <li>user:name:rw
- * <li>group:name:rw
- * <li>world::rw
- * </ul>
- */ -@JsonIgnoreProperties(value = {"aclBitSet"}) -public class OzoneAcl { - - private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; - private ACLIdentityType type; - private String name; - private BitSet aclBitSet; - private AclScope aclScope; - private static final List EMPTY_LIST = new ArrayList<>(0); - public static final BitSet ZERO_BITSET = new BitSet(0); - - /** - * Default constructor. - */ - public OzoneAcl() { - } - - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acl - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, ACLType acl, - AclScope scope) { - this.name = name; - this.aclBitSet = new BitSet(ACLType.getNoOfAcls()); - aclBitSet.set(acl.ordinal(), true); - this.type = type; - if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { - if (!name.equals(ACLIdentityType.WORLD.name()) && - !name.equals(ACLIdentityType.ANONYMOUS.name()) && - name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. It should be WORLD & " + - "ANONYMOUS respectively."); - } - // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); - } - if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) - && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); - } - aclScope = scope; - } - - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acls - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, BitSet acls, - AclScope scope) { - Objects.requireNonNull(type); - Objects.requireNonNull(acls); - - if(acls.cardinality() > ACLType.getNoOfAcls()) { - throw new IllegalArgumentException("Acl bitset passed has unexpected " + - "size. bitset size:" + acls.cardinality() + ", bitset:" - + acls.toString()); - } - this.aclBitSet = (BitSet) acls.clone(); - - this.name = name; - this.type = type; - if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { - if (!name.equals(ACLIdentityType.WORLD.name()) && - !name.equals(ACLIdentityType.ANONYMOUS.name()) && - name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. It should be WORLD & " + - "ANONYMOUS respectively."); - } - // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); - } - if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) - && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); - } - aclScope = scope; - } - - /** - * Parses an ACL string and returns the ACL object. If acl scope is not - * passed in input string then scope is set to ACCESS. - * - * @param acl - Acl String , Ex. user:anu:rw - * - * @return - Ozone ACLs - */ - public static OzoneAcl parseAcl(String acl) - throws IllegalArgumentException { - if ((acl == null) || acl.isEmpty()) { - throw new IllegalArgumentException("ACLs cannot be null or empty"); - } - String[] parts = acl.trim().split(":"); - if (parts.length < 3) { - throw new IllegalArgumentException("ACLs are not in expected format"); - } - - ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase()); - BitSet acls = new BitSet(ACLType.getNoOfAcls()); - - String bits = parts[2]; - - // Default acl scope is ACCESS. 
- AclScope aclScope = AclScope.ACCESS; - - // Check if acl string contains scope info. - if(parts[2].matches(ACL_SCOPE_REGEX)) { - int indexOfOpenBracket = parts[2].indexOf("["); - bits = parts[2].substring(0, indexOfOpenBracket); - aclScope = AclScope.valueOf(parts[2].substring(indexOfOpenBracket + 1, - parts[2].indexOf("]"))); - } - - // Set all acl bits. - for (char ch : bits.toCharArray()) { - acls.set(ACLType.getACLRight(String.valueOf(ch)).ordinal()); - } - - // TODO : Support sanitation of these user names by calling into - // userAuth Interface. - return new OzoneAcl(aclType, parts[1], acls, aclScope); - } - - /** - * Parses an ACL string and returns the ACL object. - * - * @param acls - Acl String , Ex. user:anu:rw - * - * @return - Ozone ACLs - */ - public static List parseAcls(String acls) - throws IllegalArgumentException { - if ((acls == null) || acls.isEmpty()) { - throw new IllegalArgumentException("ACLs cannot be null or empty"); - } - String[] parts = acls.trim().split(","); - if (parts.length < 1) { - throw new IllegalArgumentException("ACLs are not in expected format"); - } - List ozAcls = new ArrayList<>(); - - for(String acl:parts) { - ozAcls.add(parseAcl(acl)); - } - return ozAcls; - } - - public static OzoneAclInfo toProtobuf(OzoneAcl acl) { - OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() - .setName(acl.getName()) - .setType(OzoneAclType.valueOf(acl.getType().name())) - .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name())) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); - return builder.build(); - } - - public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { - BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, - AclScope.valueOf(protoAcl.getAclScope().name())); - } - - /** - * Helper function to convert a proto message of type {@link OzoneAclInfo} - * to {@link OzoneAcl} with acl scope of type ACCESS. - * - * @param protoAcl - * @return OzoneAcl - * */ - public static OzoneAcl fromProtobufWithAccessType(OzoneAclInfo protoAcl) { - BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, AclScope.ACCESS); - } - - /** - * Helper function to convert an {@link OzoneAcl} to proto message of type - * {@link OzoneAclInfo} with acl scope of type ACCESS. - * - * @param acl - * @return OzoneAclInfo - * */ - public static OzoneAclInfo toProtobufWithAccessType(OzoneAcl acl) { - OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() - .setName(acl.getName()) - .setType(OzoneAclType.valueOf(acl.getType().name())) - .setAclScope(OzoneAclScope.ACCESS) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); - return builder.build(); - } - - public AclScope getAclScope() { - return aclScope; - } - - @Override - public String toString() { - return type + ":" + name + ":" + ACLType.getACLString(aclBitSet) - + "[" + aclScope + "]"; - } - - /** - * Returns a hash code value for the object. This method is - * supported for the benefit of hash tables. - * - * @return a hash code value for this object. - * - * @see Object#equals(Object) - * @see System#identityHashCode - */ - @Override - public int hashCode() { - return Objects.hash(this.getName(), this.getAclBitSet(), - this.getType().toString(), this.getAclScope()); - } - - /** - * Returns name. 
- * - * @return name - */ - public String getName() { - return name; - } - - /** - * Returns Rights. - * - * @return - Rights - */ - public BitSet getAclBitSet() { - return aclBitSet; - } - - public List getAclList() { - if(aclBitSet != null) { - return aclBitSet.stream().mapToObj(a -> - ACLType.values()[a]).collect(Collectors.toList()); - } - return EMPTY_LIST; - } - - /** - * Returns Type. - * - * @return type - */ - public ACLIdentityType getType() { - return type; - } - - /** - * Indicates whether some other object is "equal to" this one. - * - * @param obj the reference object with which to compare. - * - * @return {@code true} if this object is the same as the obj - * argument; {@code false} otherwise. - */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - OzoneAcl otherAcl = (OzoneAcl) obj; - return otherAcl.getName().equals(this.getName()) && - otherAcl.getType().equals(this.getType()) && - otherAcl.getAclBitSet().equals(this.getAclBitSet()) && - otherAcl.getAclScope().equals(this.getAclScope()); - } - - public OzoneAcl setAclScope(AclScope scope) { - this.aclScope = scope; - return this; - } - - /** - * Scope of ozone acl. - * */ - public enum AclScope { - ACCESS, - DEFAULT; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java deleted file mode 100644 index e732dc22f32..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneIllegalArgumentException.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * Indicates that a method has been passed illegal or invalid argument. This - * exception is thrown instead of IllegalArgumentException to differentiate the - * exception thrown in Hadoop implementation from the one thrown in JDK. - */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public class OzoneIllegalArgumentException extends IllegalArgumentException { - private static final long serialVersionUID = 1L; - - /** - * Constructs exception with the specified detail message. - * @param message detailed message. - */ - public OzoneIllegalArgumentException(final String message) { - super(message); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java deleted file mode 100644 index 89c5dfad6f1..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.audit; - -/** - * Enum to define Audit Action types for OzoneManager. - */ -public enum OMAction implements AuditAction { - - // WRITE Actions - ALLOCATE_BLOCK, - ALLOCATE_KEY, - COMMIT_KEY, - CREATE_VOLUME, - CREATE_BUCKET, - DELETE_VOLUME, - DELETE_BUCKET, - DELETE_KEY, - RENAME_KEY, - SET_OWNER, - SET_QUOTA, - UPDATE_VOLUME, - UPDATE_BUCKET, - UPDATE_KEY, - PURGE_KEYS, - - // S3 Bucket - CREATE_S3_BUCKET, - DELETE_S3_BUCKET, - - // READ Actions - CHECK_VOLUME_ACCESS, - LIST_BUCKETS, - LIST_VOLUMES, - LIST_KEYS, - READ_VOLUME, - READ_BUCKET, - READ_KEY, - LIST_S3BUCKETS, - INITIATE_MULTIPART_UPLOAD, - COMMIT_MULTIPART_UPLOAD_PARTKEY, - COMPLETE_MULTIPART_UPLOAD, - LIST_MULTIPART_UPLOAD_PARTS, - LIST_MULTIPART_UPLOADS, - ABORT_MULTIPART_UPLOAD, - - //ACL Actions - ADD_ACL, - GET_ACL, - SET_ACL, - REMOVE_ACL, - - //FS Actions - GET_FILE_STATUS, - CREATE_DIRECTORY, - CREATE_FILE, - LOOKUP_FILE, - LIST_STATUS, - - GET_S3_SECRET; - - @Override - public String getAction() { - return this.toString(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java deleted file mode 100644 index 0f887909d49..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit; -/** - * This package defines OMAction - an implementation of AuditAction - * OMAction defines audit action types for various actions that will be - * audited in OzoneManager. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java deleted file mode 100644 index baf1887c468..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/LengthInputStream.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.io; - -import java.io.FilterInputStream; -import java.io.InputStream; - -/** - * An input stream with length. - */ -public class LengthInputStream extends FilterInputStream { - - private final long length; - - /** - * Create an stream. - * @param in the underlying input stream. - * @param length the length of the stream. - */ - public LengthInputStream(InputStream in, long length) { - super(in); - this.length = length; - } - - /** @return the length. */ - public long getLength() { - return length; - } - - public InputStream getWrappedStream() { - return in; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java deleted file mode 100644 index ece1ff4463c..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.client.io; - -/** - * IO related ozone helper classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java deleted file mode 100644 index 3c60e5956d4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java +++ /dev/null @@ -1,271 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.freon; - -import java.io.IOException; -import java.io.PrintStream; -import java.net.InetSocketAddress; -import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -/** - * CLI utility to print out ozone related configuration. - */ -public class OzoneGetConf extends Configured implements Tool { - - private static final String DESCRIPTION = "ozone getconf is utility for " - + "getting configuration information from the config file.\n"; - - enum Command { - INCLUDE_FILE("-includeFile", - "gets the include file path that defines the datanodes " + - "that can join the cluster."), - EXCLUDE_FILE("-excludeFile", - "gets the exclude file path that defines the datanodes " + - "that need to decommissioned."), - OZONEMANAGER("-ozonemanagers", - "gets list of Ozone Manager nodes in the cluster"), - STORAGECONTAINERMANAGER("-storagecontainermanagers", - "gets list of ozone storage container manager nodes in the cluster"), - CONFKEY("-confKey [key]", "gets a specific key from the configuration"); - - private static final Map HANDLERS; - - static { - HANDLERS = new HashMap(); - HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()), - new OzoneManagersCommandHandler()); - HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()), - new StorageContainerManagersCommandHandler()); - HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()), - new PrintConfKeyCommandHandler()); - } - - private final String cmd; - private final String description; - - Command(String cmd, String description) { - this.cmd = cmd; - this.description = description; - } - - public String getName() { - return cmd.split(" ")[0]; - } - - public String getUsage() { - return cmd; - } - - public String getDescription() { - return description; - } - - public static OzoneGetConf.CommandHandler getHandler(String cmd) { - return HANDLERS.get(StringUtils.toLowerCase(cmd)); - } - } - - static final String USAGE; - static { - HdfsConfiguration.init(); - - /* Initialize USAGE based on Command values */ - StringBuilder usage = new StringBuilder(DESCRIPTION); - usage.append("\nozone getconf \n"); - for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) { - usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription() - + "\n"); - } - USAGE = usage.toString(); - } - - /** - * Handler to return value for key corresponding to the - * {@link OzoneGetConf.Command}. 
- */ - static class CommandHandler { - - @SuppressWarnings("visibilitymodifier") - protected String key; // Configuration key to lookup - - CommandHandler() { - this(null); - } - - CommandHandler(String key) { - this.key = key; - } - - final int doWork(OzoneGetConf tool, String[] args) { - try { - checkArgs(args); - - return doWorkInternal(tool, args); - } catch (Exception e) { - tool.printError(e.getMessage()); - } - return -1; - } - - protected void checkArgs(String[] args) { - if (args.length > 0) { - throw new HadoopIllegalArgumentException( - "Did not expect argument: " + args[0]); - } - } - - - /** Method to be overridden by sub classes for specific behavior. */ - int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception { - - String value = tool.getConf().getTrimmed(key); - if (value != null) { - tool.printOut(value); - return 0; - } - tool.printError("Configuration " + key + " is missing."); - return -1; - } - } - - static class PrintConfKeyCommandHandler extends OzoneGetConf.CommandHandler { - @Override - protected void checkArgs(String[] args) { - if (args.length != 1) { - throw new HadoopIllegalArgumentException( - "usage: " + OzoneGetConf.Command.CONFKEY.getUsage()); - } - } - - @Override - int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception { - this.key = args[0]; - return super.doWorkInternal(tool, args); - } - } - - private final PrintStream out; // Stream for printing command output - private final PrintStream err; // Stream for printing error - - protected OzoneGetConf(Configuration conf) { - this(conf, System.out, System.err); - } - - protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) { - super(conf); - this.out = out; - this.err = err; - } - - void printError(String message) { - err.println(message); - } - - void printOut(String message) { - out.println(message); - } - - private void printUsage() { - printError(USAGE); - } - - /** - * Main method that runs the tool for given arguments. - * @param args arguments - * @return return status of the command - */ - private int doWork(String[] args) { - if (args.length >= 1) { - OzoneGetConf.CommandHandler handler = - OzoneGetConf.Command.getHandler(args[0]); - if (handler != null) { - return handler.doWork(this, Arrays.copyOfRange(args, 1, args.length)); - } - } - printUsage(); - return -1; - } - - @Override - public int run(final String[] args) throws Exception { - return SecurityUtil.doAsCurrentUser( - new PrivilegedExceptionAction() { - @Override - public Integer run() throws Exception { - return doWork(args); - } - }); - } - - /** - * Handler for {@link Command#STORAGECONTAINERMANAGER}. - */ - static class StorageContainerManagersCommandHandler extends CommandHandler { - - @Override - public int doWorkInternal(OzoneGetConf tool, String[] args) - throws IOException { - Collection addresses = HddsUtils - .getSCMAddresses(tool.getConf()); - - for (InetSocketAddress addr : addresses) { - tool.printOut(addr.getHostName()); - } - return 0; - } - } - - /** - * Handler for {@link Command#OZONEMANAGER}. 
- */ - static class OzoneManagersCommandHandler extends CommandHandler { - @Override - public int doWorkInternal(OzoneGetConf tool, String[] args) - throws IOException { - tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName()); - return 0; - } - } - - public static void main(String[] args) throws Exception { - if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) { - System.exit(0); - } - - Configuration conf = new Configuration(); - conf.addResource(new OzoneConfiguration()); - int res = ToolRunner.run(new OzoneGetConf(conf), args); - System.exit(res); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java deleted file mode 100644 index 150c64e7d96..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.freon; -/** - * Classes related to Ozone tools. - */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java deleted file mode 100644 index dcb9b5cdeac..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ /dev/null @@ -1,244 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
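The removed OzoneGetConf tool is normally reached through the "ozone getconf" shell command, but since it implements Tool it can also be driven through ToolRunner, exactly as the deleted main() does. A sketch under two assumptions: the hypothetical driver class sits in the same package (the OzoneGetConf constructor is protected), and the queried key is only an illustration:

package org.apache.hadoop.ozone.freon;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical driver; placed in this package so the protected
// OzoneGetConf(Configuration) constructor is accessible.
public final class OzoneGetConfExample {

  private OzoneGetConfExample() {
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.addResource(new OzoneConfiguration());

    // Equivalent to: ozone getconf -confKey ozone.om.address
    int rc = ToolRunner.run(new OzoneGetConf(conf),
        new String[] {"-confKey", "ozone.om.address"});
    System.out.println("getconf exit code: " + rc);

    // Equivalent to: ozone getconf -ozonemanagers
    ToolRunner.run(new OzoneGetConf(conf), new String[] {"-ozonemanagers"});
  }
}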

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import java.util.concurrent.TimeUnit; - -import org.apache.ratis.util.TimeDuration; - -/** - * Ozone Manager Constants. - */ -public final class OMConfigKeys { - /** - * Never constructed. - */ - private OMConfigKeys() { - } - - // Location where the OM stores its DB files. In the future we may support - // multiple entries for performance (sharding).. - public static final String OZONE_OM_DB_DIRS = "ozone.om.db.dirs"; - - public static final String OZONE_OM_HANDLER_COUNT_KEY = - "ozone.om.handler.count.key"; - public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20; - - public static final String OZONE_OM_SERVICE_IDS_KEY = - "ozone.om.service.ids"; - public static final String OZONE_OM_NODES_KEY = - "ozone.om.nodes"; - public static final String OZONE_OM_NODE_ID_KEY = - "ozone.om.node.id"; - - public static final String OZONE_OM_ADDRESS_KEY = - "ozone.om.address"; - public static final String OZONE_OM_BIND_HOST_DEFAULT = - "0.0.0.0"; - public static final int OZONE_OM_PORT_DEFAULT = 9862; - - public static final String OZONE_OM_HTTP_ENABLED_KEY = - "ozone.om.http.enabled"; - public static final String OZONE_OM_HTTP_BIND_HOST_KEY = - "ozone.om.http-bind-host"; - public static final String OZONE_OM_HTTPS_BIND_HOST_KEY = - "ozone.om.https-bind-host"; - public static final String OZONE_OM_HTTP_ADDRESS_KEY = - "ozone.om.http-address"; - public static final String OZONE_OM_HTTPS_ADDRESS_KEY = - "ozone.om.https-address"; - public static final String OZONE_OM_KEYTAB_FILE = - "ozone.om.keytab.file"; - public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874; - public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875; - - // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB. - public static final String OZONE_OM_DB_CACHE_SIZE_MB = - "ozone.om.db.cache.size.mb"; - public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_OM_USER_MAX_VOLUME = - "ozone.om.user.max.volume"; - public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024; - - public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK = - "ozone.key.deleting.limit.per.task"; - public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000; - - public static final String OZONE_OM_METRICS_SAVE_INTERVAL = - "ozone.om.save.metrics.interval"; - public static final String OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT = "5m"; - - /** - * OM Ratis related configurations. 
- */ - public static final String OZONE_OM_RATIS_ENABLE_KEY - = "ozone.om.ratis.enable"; - public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT - = false; - public static final String OZONE_OM_RATIS_PORT_KEY - = "ozone.om.ratis.port"; - public static final int OZONE_OM_RATIS_PORT_DEFAULT - = 9872; - public static final String OZONE_OM_RATIS_RPC_TYPE_KEY - = "ozone.om.ratis.rpc.type"; - public static final String OZONE_OM_RATIS_RPC_TYPE_DEFAULT - = "GRPC"; - - // OM Ratis Log configurations - public static final String OZONE_OM_RATIS_STORAGE_DIR - = "ozone.om.ratis.storage.dir"; - public static final String OZONE_OM_RATIS_SEGMENT_SIZE_KEY - = "ozone.om.ratis.segment.size"; - public static final String OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT - = "16KB"; - public static final String OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = "ozone.om.ratis.segment.preallocated.size"; - public static final String OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT - = "16KB"; - - // OM Ratis Log Appender configurations - public static final String - OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "ozone.om.ratis.log.appender.queue.num-elements"; - public static final int - OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "ozone.om.ratis.log.appender.queue.byte-limit"; - public static final String - OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String OZONE_OM_RATIS_LOG_PURGE_GAP = - "ozone.om.ratis.log.purge.gap"; - public static final int OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - - // OM Snapshot configurations - public static final String OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY - = "ozone.om.ratis.snapshot.auto.trigger.threshold"; - public static final long - OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT - = 400000; - - // OM Ratis server configurations - public static final String OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_KEY - = "ozone.om.ratis.server.request.timeout"; - public static final TimeDuration - OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT - = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String - OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY - = "ozone.om.ratis.server.retry.cache.timeout"; - public static final TimeDuration - OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT - = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); - public static final String OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY - = "ozone.om.ratis.minimum.timeout"; - public static final TimeDuration OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT - = TimeDuration.valueOf(1, TimeUnit.SECONDS); - - // OM Ratis client configurations - public static final String OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY - = "ozone.om.ratis.client.request.timeout.duration"; - public static final TimeDuration - OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); - public static final String OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY - = "ozone.om.ratis.client.request.max.retries"; - public static final int OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT - = 180; - public static final String OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY - = "ozone.om.ratis.client.request.retry.interval"; - public static final TimeDuration - OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT - = TimeDuration.valueOf(100, TimeUnit.MILLISECONDS); - - // OM Ratis Leader Election configurations - public static final 
String - OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "ozone.om.leader.election.minimum.timeout.duration"; - public static final TimeDuration - OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - TimeDuration.valueOf(1, TimeUnit.SECONDS); - public static final String OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY - = "ozone.om.ratis.server.failure.timeout.duration"; - public static final TimeDuration - OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - = TimeDuration.valueOf(120, TimeUnit.SECONDS); - - // OM Leader server role check interval - public static final String OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY - = "ozone.om.ratis.server.role.check.interval"; - public static final TimeDuration - OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT - = TimeDuration.valueOf(15, TimeUnit.SECONDS); - - // OM SnapshotProvider configurations - public static final String OZONE_OM_RATIS_SNAPSHOT_DIR = - "ozone.om.ratis.snapshot.dir"; - public static final String OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY = - "ozone.om.snapshot.provider.socket.timeout"; - public static final TimeDuration - OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT = - TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); - - public static final String OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY = - "ozone.om.snapshot.provider.connection.timeout"; - public static final TimeDuration - OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT = - TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); - - public static final String OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY = - "ozone.om.snapshot.provider.request.timeout"; - public static final TimeDuration - OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT = - TimeDuration.valueOf(5000, TimeUnit.MILLISECONDS); - - public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om." - + "kerberos.keytab.file"; - public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" - + ".kerberos.principal"; - public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE = - "ozone.om.http.kerberos.keytab"; - public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY - = "ozone.om.http.kerberos.principal"; - // Delegation token related keys - public static final String DELEGATION_REMOVER_SCAN_INTERVAL_KEY = - "ozone.manager.delegation.remover.scan.interval"; - public static final long DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT = - 60*60*1000; - public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY = - "ozone.manager.delegation.token.renew-interval"; - public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = - 24*60*60*1000; // 1 day = 86400000 ms - public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY = - "ozone.manager.delegation.token.max-lifetime"; - public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = - 7*24*60*60*1000; // 7 days - - public static final String OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY = - "ozone.manager.db.checkpoint.transfer.bandwidthPerSec"; - public static final long OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT = - 0; //no throttling - - // Comma separated acls (users, groups) allowing clients accessing - // OM client protocol - // when hadoop.security.authorization is true, this needs to be set in - // hadoop-policy.xml, "*" allows all users/groups to access. 
- public static final String OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL = - "ozone.om.security.client.protocol.acl"; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java deleted file mode 100644 index 673d26ade66..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
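OMConfigKeys itself only declares key names and their paired defaults; values are set and read through OzoneConfiguration, a plain Hadoop Configuration subclass. A brief sketch, with an illustrative OM host name:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public final class OmConfigExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();

    // Point clients at a specific Ozone Manager and enable the Ratis server.
    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "om1.example.com:9862");
    conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true);

    // Each key has a companion default, so reads always have a fallback.
    int handlers = conf.getInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY,
        OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT);
    System.out.println("OM handler count: " + handlers);
    System.out.println("OM address: "
        + conf.get(OMConfigKeys.OZONE_OM_ADDRESS_KEY));
  }
}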

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.Table; - -import com.google.common.annotations.VisibleForTesting; - -/** - * OM metadata manager interface. - */ -public interface OMMetadataManager { - /** - * Start metadata manager. - * - * @param configuration - * @throws IOException - */ - void start(OzoneConfiguration configuration) throws IOException; - - /** - * Stop metadata manager. - */ - void stop() throws Exception; - - /** - * Get metadata store. - * - * @return metadata store. - */ - @VisibleForTesting - DBStore getStore(); - - /** - * Returns the OzoneManagerLock used on Metadata DB. - * - * @return OzoneManagerLock - */ - OzoneManagerLock getLock(); - - /** - * Given a volume return the corresponding DB key. - * - * @param volume - Volume name - */ - String getVolumeKey(String volume); - - /** - * Given a user return the corresponding DB key. - * - * @param user - User name - */ - String getUserKey(String user); - - /** - * Given a volume and bucket, return the corresponding DB key. - * - * @param volume - User name - * @param bucket - Bucket name - */ - String getBucketKey(String volume, String bucket); - - /** - * Given a volume, bucket and a key, return the corresponding DB key. - * - * @param volume - volume name - * @param bucket - bucket name - * @param key - key name - * @return DB key as String. - */ - - String getOzoneKey(String volume, String bucket, String key); - - /** - * Given a volume, bucket and a key, return the corresponding DB directory - * key. - * - * @param volume - volume name - * @param bucket - bucket name - * @param key - key name - * @return DB directory key as String. - */ - String getOzoneDirKey(String volume, String bucket, String key); - - - /** - * Returns the DB key name of a open key in OM metadata store. Should be - * #open# prefix followed by actual key name. - * - * @param volume - volume name - * @param bucket - bucket name - * @param key - key name - * @param id - the id for this open - * @return bytes of DB key. - */ - String getOpenKey(String volume, String bucket, String key, long id); - - /** - * Given a volume, check if it is empty, i.e there are no buckets inside it. - * - * @param volume - Volume name - */ - boolean isVolumeEmpty(String volume) throws IOException; - - /** - * Given a volume/bucket, check if it is empty, i.e there are no keys inside - * it. 
- * - * @param volume - Volume name - * @param bucket - Bucket name - * @return true if the bucket is empty - */ - boolean isBucketEmpty(String volume, String bucket) throws IOException; - - /** - * Returns a list of buckets represented by {@link OmBucketInfo} in the given - * volume. - * - * @param volumeName the name of the volume. This argument is required, this - * method returns buckets in this given volume. - * @param startBucket the start bucket name. Only the buckets whose name is - * after this value will be included in the result. This key is excluded from - * the result. - * @param bucketPrefix bucket name prefix. Only the buckets whose name has - * this prefix will be included in the result. - * @param maxNumOfBuckets the maximum number of buckets to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of buckets. - * @throws IOException - */ - List listBuckets(String volumeName, String startBucket, - String bucketPrefix, int maxNumOfBuckets) - throws IOException; - - /** - * Returns a list of keys represented by {@link OmKeyInfo} in the given - * bucket. - * - * @param volumeName the name of the volume. - * @param bucketName the name of the bucket. - * @param startKey the start key name, only the keys whose name is after this - * value will be included in the result. This key is excluded from the - * result. - * @param keyPrefix key name prefix, only the keys whose name has this prefix - * will be included in the result. - * @param maxKeys the maximum number of keys to return. It ensures the size of - * the result will not exceed this limit. - * @return a list of keys. - * @throws IOException - */ - List listKeys(String volumeName, - String bucketName, String startKey, String keyPrefix, int maxKeys) - throws IOException; - - /** - * Returns a list of volumes owned by a given user; if user is null, returns - * all volumes. - * - * @param userName volume owner - * @param prefix the volume prefix used to filter the listing result. - * @param startKey the start volume name determines where to start listing - * from, this key is excluded from the result. - * @param maxKeys the maximum number of volumes to return. - * @return a list of {@link OmVolumeArgs} - * @throws IOException - */ - List listVolumes(String userName, String prefix, - String startKey, int maxKeys) throws IOException; - - /** - * Returns a list of pending deletion key info that ups to the given count. - * Each entry is a {@link BlockGroup}, which contains the info about the key - * name and all its associated block IDs. A pending deletion key is stored - * with #deleting# prefix in OM DB. - * - * @param count max number of keys to return. - * @return a list of {@link BlockGroup} represent keys and blocks. - * @throws IOException - */ - List getPendingDeletionKeys(int count) throws IOException; - - /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. - * - * @return a list of {@link BlockGroup} representing keys and blocks. - * @throws IOException - */ - List getExpiredOpenKeys() throws IOException; - - /** - * Returns the user Table. - * - * @return UserTable. - */ - Table getUserTable(); - - /** - * Returns the Volume Table. - * - * @return VolumeTable. - */ - Table getVolumeTable(); - - /** - * Returns the BucketTable. - * - * @return BucketTable. - */ - Table getBucketTable(); - - /** - * Returns the KeyTable. - * - * @return KeyTable. 
- */ - Table getKeyTable(); - - /** - * Get Deleted Table. - * - * @return Deleted Table. - */ - Table getDeletedTable(); - - /** - * Gets the OpenKeyTable. - * - * @return Table. - */ - Table getOpenKeyTable(); - - /** - * Gets the DelegationTokenTable. - * - * @return Table. - */ - Table getDelegationTokenTable(); - - /** - * Gets the S3Bucket to Ozone Volume/bucket mapping table. - * - * @return Table. - */ - - Table getS3Table(); - - /** - * Gets the Ozone prefix path to its acl mapping table. - * @return Table. - */ - Table getPrefixTable(); - - /** - * Returns the DB key name of a multipart upload key in OM metadata store. - * - * @param volume - volume name - * @param bucket - bucket name - * @param key - key name - * @param uploadId - the upload id for this key - * @return bytes of DB key. - */ - String getMultipartKey(String volume, String bucket, String key, String - uploadId); - - - /** - * Gets the multipart info table which holds the information about - * multipart upload information of the keys. - * @return Table - */ - Table getMultipartInfoTable(); - - /** - * Gets the S3 Secrets table. - * @return Table - */ - Table getS3SecretTable(); - - /** - * Returns number of rows in a table. This should not be used for very - * large tables. - * @param table - * @return long - * @throws IOException - */ - long countRowsInTable(Table table) - throws IOException; - - /** - * Returns an estimated number of rows in a table. This is much quicker - * than {@link OMMetadataManager#countRowsInTable} but the result can be - * inaccurate. - * @param table Table - * @return long Estimated number of rows in the table. - * @throws IOException - */ - long countEstimatedRowsInTable(Table table) - throws IOException; - - /** - * Return the existing upload keys which includes volumeName, bucketName, - * keyName. - */ - List getMultipartUploadKeys(String volumeName, - String bucketName, String prefix) throws IOException; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java deleted file mode 100644 index 3ca8cbbb21f..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManager.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; - -import java.io.IOException; -/** - * Interface to manager s3 secret. - */ -public interface S3SecretManager { - - S3SecretValue getS3Secret(String kerberosID) throws IOException; - - /** - * API to get s3 secret for given awsAccessKey. 
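OMMetadataManager is only the interface over the OM metadata DB; the concrete store behind it is not part of this file. A sketch of caller-side use, assuming an implementation instance is already available, that OmKeyInfo exposes getKeyName(), and that a null start key and key prefix mean no filtering:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

public final class OmMetadataExample {

  private OmMetadataExample() {
  }

  // Lists up to ten keys of a bucket and reports whether it is empty.
  static void describeBucket(OMMetadataManager metadataManager,
      String volume, String bucket) throws IOException {
    // DB keys are built through the manager, never concatenated by hand.
    System.out.println("bucket DB key: "
        + metadataManager.getBucketKey(volume, bucket));

    List<OmKeyInfo> keys =
        metadataManager.listKeys(volume, bucket, null, null, 10);
    for (OmKeyInfo key : keys) {
      System.out.println("  key: " + key.getKeyName());
    }

    System.out.println("bucket empty: "
        + metadataManager.isBucketEmpty(volume, bucket));
  }
}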
- * @param awsAccessKey - * */ - String getS3UserSecretString(String awsAccessKey) throws IOException; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java deleted file mode 100644 index fb566582062..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om; - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.security.OzoneSecurityException; -import org.apache.logging.log4j.util.Strings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK; -import static org.apache.hadoop.ozone.security.OzoneSecurityException.ResultCodes.S3_SECRET_NOT_FOUND; - -/** - * S3 Secret manager. - */ -public class S3SecretManagerImpl implements S3SecretManager { - private static final Logger LOG = - LoggerFactory.getLogger(S3SecretManagerImpl.class); - /** - * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock. - */ - private final OMMetadataManager omMetadataManager; - private final OzoneConfiguration configuration; - - /** - * Constructs S3SecretManager. 
- * - * @param omMetadataManager - */ - public S3SecretManagerImpl(OzoneConfiguration configuration, - OMMetadataManager omMetadataManager) { - this.configuration = configuration; - this.omMetadataManager = omMetadataManager; - } - - @Override - public S3SecretValue getS3Secret(String kerberosID) throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(kerberosID), - "kerberosID cannot be null or empty."); - S3SecretValue result = null; - omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID); - try { - S3SecretValue s3Secret = - omMetadataManager.getS3SecretTable().get(kerberosID); - if(s3Secret == null) { - byte[] secret = OmUtils.getSHADigest(); - result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret)); - omMetadataManager.getS3SecretTable().put(kerberosID, result); - } else { - return s3Secret; - } - } finally { - omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); - } - if (LOG.isTraceEnabled()) { - LOG.trace("Secret for accessKey:{}, proto:{}", kerberosID, result); - } - return result; - } - - @Override - public String getS3UserSecretString(String kerberosID) - throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(kerberosID), - "awsAccessKeyId cannot be null or empty."); - LOG.trace("Get secret for awsAccessKey:{}", kerberosID); - - S3SecretValue s3Secret; - omMetadataManager.getLock().acquireLock(S3_SECRET_LOCK, kerberosID); - try { - s3Secret = omMetadataManager.getS3SecretTable().get(kerberosID); - if (s3Secret == null) { - throw new OzoneSecurityException("S3 secret not found for " + - "awsAccessKeyId " + kerberosID, S3_SECRET_NOT_FOUND); - } - } finally { - omMetadataManager.getLock().releaseLock(S3_SECRET_LOCK, kerberosID); - } - - return s3Secret.getAwsSecret(); - } - - public OMMetadataManager getOmMetadataManager() { - return omMetadataManager; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java deleted file mode 100644 index 8f4d0fc76d4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
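S3SecretManagerImpl keeps one secret per Kerberos identity in the S3 secret table: getS3Secret() creates and persists a secret on first access, while getS3UserSecretString() is a read-only lookup that fails when no secret exists. A sketch, assuming an OMMetadataManager instance is available and using getAwsSecret() as the lookup path above does:

import java.io.IOException;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.S3SecretManager;
import org.apache.hadoop.ozone.om.S3SecretManagerImpl;
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;

public final class S3SecretExample {

  private S3SecretExample() {
  }

  static void printSecret(OzoneConfiguration conf,
      OMMetadataManager metadataManager, String kerberosId)
      throws IOException {
    S3SecretManager secretManager =
        new S3SecretManagerImpl(conf, metadataManager);

    // Creates and stores a secret if this identity does not have one yet.
    S3SecretValue secret = secretManager.getS3Secret(kerberosId);
    System.out.println("secret assigned: "
        + !secret.getAwsSecret().isEmpty());

    // Read-only path; throws an OzoneSecurityException (an IOException)
    // when no secret exists for the given access key.
    String awsSecret = secretManager.getS3UserSecretString(kerberosId);
    System.out.println("secret length: " + awsSecret.length());
  }
}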

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import java.io.IOException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.hdds.utils.db.Codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Codec to encode OmBucketInfo as byte array. - */ -public class OmBucketInfoCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OmBucketInfo object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); - } - - @Override - public OmBucketInfo fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java deleted file mode 100644 index 0c52a24b015..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
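OmBucketInfoCodec and the sibling codecs that follow all share one pattern: toPersistedFormat() turns the helper object into its protobuf bytes (the value stored in the OM DB) and fromPersistedFormat() parses them back, rewrapping protobuf parse failures as IllegalArgumentException. A round-trip sketch for the bucket codec; how the OmBucketInfo instance is built is outside this excerpt, so it is taken as a parameter:

import java.io.IOException;

import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;

public final class BucketCodecExample {

  private BucketCodecExample() {
  }

  // Encodes a bucket record as the OM metadata store would and decodes it back.
  static OmBucketInfo roundTrip(OmBucketInfo bucketInfo) throws IOException {
    OmBucketInfoCodec codec = new OmBucketInfoCodec();

    byte[] persisted = codec.toPersistedFormat(bucketInfo);

    // Malformed bytes would surface here as IllegalArgumentException.
    return codec.fromPersistedFormat(persisted);
  }
}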

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import java.io.IOException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.hdds.utils.db.Codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Codec to encode OmKeyInfo as byte array. - */ -public class OmKeyInfoCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OmKeyInfo object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); - } - - @Override - public OmKeyInfo fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java deleted file mode 100644 index 4f6a7b14cff..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -import java.io.IOException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.Codec; - - -/** - * Codec Registry for OmMultipartKeyInfo. - */ -public class OmMultipartKeyInfoCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OmMultipartKeyInfo object) - throws IOException { - Preconditions.checkNotNull(object, - "Null object can't be converted to byte array."); - return object.getProto().toByteArray(); - - } - - @Override - /** - * Construct {@link OmMultipartKeyInfo} from byte[]. If unable to convert - * return null. - */ - public OmMultipartKeyInfo fromPersistedFormat(byte[] rawData) - throws IOException { - Preconditions.checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OmMultipartKeyInfo.getFromProto(OzoneManagerProtocolProtos - .MultipartKeyInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java deleted file mode 100644 index df3c90d5dbf..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo; - -import org.apache.hadoop.hdds.utils.db.Codec; - -import java.io.IOException; - -/** - * Codec to encode PrefixAcl as byte array. - */ -public class OmPrefixInfoCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OmPrefixInfo object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); - } - - @Override - public OmPrefixInfo fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OmPrefixInfo.getFromProtobuf(PrefixInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java deleted file mode 100644 index e283e92b8c4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import java.io.IOException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.hdds.utils.db.Codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Codec to encode OmVolumeArgsCodec as byte array. - */ -public class OmVolumeArgsCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OmVolumeArgs object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); - } - - @Override - public OmVolumeArgs fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OmVolumeArgs.getFromProtobuf(VolumeInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java deleted file mode 100644 index a0ef4a57538..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.hdds.utils.db.Codec; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RepeatedKeyInfo; - -import java.io.IOException; - -/** - * Codec to encode RepeatedOmKeyInfo as byte array. - */ -public class RepeatedOmKeyInfoCodec implements Codec { - @Override - public byte[] toPersistedFormat(RepeatedOmKeyInfo object) - throws IOException { - Preconditions.checkNotNull(object, - "Null object can't be converted to byte array."); - return object.getProto().toByteArray(); - } - - @Override - public RepeatedOmKeyInfo fromPersistedFormat(byte[] rawData) - throws IOException { - Preconditions.checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return RepeatedOmKeyInfo.getFromProto(RepeatedKeyInfo.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java deleted file mode 100644 index 7ea3fda9029..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.codec; - - -import java.io.IOException; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.Codec; - - -/** - * Codec to encode S3SecretValue as byte array. - */ -public class S3SecretValueCodec implements Codec { - - @Override - public byte[] toPersistedFormat(S3SecretValue object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getProtobuf().toByteArray(); - } - - @Override - public S3SecretValue fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return S3SecretValue.fromProtobuf( - OzoneManagerProtocolProtos.S3Secret.parseFrom(rawData)); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java deleted file mode 100644 index 626fa010059..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.hdds.utils.db.Codec; - -import java.io.IOException; - -/** - * Codec to encode TokenIdentifierCodec as byte array. - */ -public class TokenIdentifierCodec implements Codec { - - @Override - public byte[] toPersistedFormat(OzoneTokenIdentifier object) { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.getBytes(); - } - - @Override - public OzoneTokenIdentifier fromPersistedFormat(byte[] rawData) - throws IOException { - Preconditions.checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return OzoneTokenIdentifier.readProtoBuf(rawData); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java deleted file mode 100644 index 2545454a16f..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.codec; - -import java.io.IOException; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.UserVolumeInfo; -import org.apache.hadoop.hdds.utils.db.Codec; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; - -/** - * Codec to encode UserVolumeInfo as byte array. - */ -public class UserVolumeInfoCodec implements Codec { - - @Override - public byte[] toPersistedFormat(UserVolumeInfo object) throws IOException { - Preconditions - .checkNotNull(object, "Null object can't be converted to byte array."); - return object.toByteArray(); - } - - @Override - public UserVolumeInfo fromPersistedFormat(byte[] rawData) throws IOException { - Preconditions - .checkNotNull(rawData, - "Null byte array can't converted to real object."); - try { - return UserVolumeInfo.parseFrom(rawData); - } catch (InvalidProtocolBufferException e) { - throw new IllegalArgumentException( - "Can't encode the the raw data from the byte array", e); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java deleted file mode 100644 index df6ed9c5414..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *
- * Utility classes to encode/decode DTO objects to/from byte array. - */ - -/** - * Utility classes to encode/decode DTO objects to/from byte array. - */ -package org.apache.hadoop.ozone.om.codec; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java deleted file mode 100644 index 974ab0e5edd..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/NotLeaderException.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by - * {@link org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB} when - * a read request is received by a non leader OM node. - */ -public class NotLeaderException extends IOException { - - private final String currentPeerId; - private final String leaderPeerId; - - public NotLeaderException(String currentPeerIdStr) { - super("OM " + currentPeerIdStr + " is not the leader. Could not " + - "determine the leader node."); - this.currentPeerId = currentPeerIdStr; - this.leaderPeerId = null; - } - - public NotLeaderException(String currentPeerIdStr, - String suggestedLeaderPeerIdStr) { - super("OM " + currentPeerIdStr + " is not the leader. Suggested leader is " - + suggestedLeaderPeerIdStr); - this.currentPeerId = currentPeerIdStr; - this.leaderPeerId = suggestedLeaderPeerIdStr; - } - - public String getSuggestedLeaderNodeId() { - return leaderPeerId; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java deleted file mode 100644 index 268471a62c3..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
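NotLeaderException, removed just above, is how a follower OM tells the caller to retry against the leader, optionally naming a suggested leader node. A hedged sketch of the client-side reaction, assuming the exception reaches the caller directly and reusing performFailoverIfRequired() from the OMFailoverProxyProvider deleted further down in this patch; OmCall is a stand-in for any OM RPC, and real clients drive this through a RetryPolicy rather than a manual retry.

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.exceptions.NotLeaderException;
    import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;

    final class LeaderRetryExample {
      private LeaderRetryExample() { }

      /** Any OM call that may fail because the contacted node is not the leader. */
      interface OmCall {
        void run() throws IOException;
      }

      static void submitWithLeaderHint(OMFailoverProxyProvider provider, OmCall call)
          throws IOException {
        try {
          call.run();
        } catch (NotLeaderException nle) {
          // The hint may be null when the follower does not know the leader either;
          // the provider then simply advances to the next configured OM.
          provider.performFailoverIfRequired(nle.getSuggestedLeaderNodeId());
          call.run();   // one illustrative retry against the new proxy
        }
      }
    }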
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.exceptions; - -import java.io.IOException; - -/** - * Exception thrown by Ozone Manager. - */ -public class OMException extends IOException { - - public static final String STATUS_CODE = "STATUS_CODE="; - private final OMException.ResultCodes result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public OMException(OMException.ResultCodes result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - */ - public OMException(String message, OMException.ResultCodes result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *
- *
Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public OMException(String message, Throwable cause, - OMException.ResultCodes result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public OMException(Throwable cause, OMException.ResultCodes result) { - super(cause); - this.result = result; - } - - /** - * Returns resultCode. - * @return ResultCode - */ - public OMException.ResultCodes getResult() { - return result; - } - - @Override - public String toString() { - return result + " " + super.toString(); - } - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ResultCodes { - - OK, - - VOLUME_NOT_UNIQUE, - - VOLUME_NOT_FOUND, - - VOLUME_NOT_EMPTY, - - VOLUME_ALREADY_EXISTS, - - USER_NOT_FOUND, - - USER_TOO_MANY_VOLUMES, - - BUCKET_NOT_FOUND, - - BUCKET_NOT_EMPTY, - - BUCKET_ALREADY_EXISTS, - - KEY_ALREADY_EXISTS, - - KEY_NOT_FOUND, - - INVALID_KEY_NAME, - - ACCESS_DENIED, - - INTERNAL_ERROR, - - KEY_ALLOCATION_ERROR, - - KEY_DELETION_ERROR, - - KEY_RENAME_ERROR, - - METADATA_ERROR, - - OM_NOT_INITIALIZED, - - SCM_VERSION_MISMATCH_ERROR, - - S3_BUCKET_NOT_FOUND, - - S3_BUCKET_ALREADY_EXISTS, - - INITIATE_MULTIPART_UPLOAD_ERROR, - - MULTIPART_UPLOAD_PARTFILE_ERROR, - - NO_SUCH_MULTIPART_UPLOAD_ERROR, - - MISMATCH_MULTIPART_LIST, - - MISSING_UPLOAD_PARTS, - - COMPLETE_MULTIPART_UPLOAD_ERROR, - - ENTITY_TOO_SMALL, - - ABORT_MULTIPART_UPLOAD_FAILED, - - S3_SECRET_NOT_FOUND, - - INVALID_AUTH_METHOD, - - INVALID_TOKEN, - - TOKEN_EXPIRED, - - TOKEN_ERROR_OTHER, - - LIST_MULTIPART_UPLOAD_PARTS_FAILED, - - SCM_IN_SAFE_MODE, - - INVALID_REQUEST, - - BUCKET_ENCRYPTION_KEY_NOT_FOUND, - - UNKNOWN_CIPHER_SUITE, - - INVALID_KMS_PROVIDER, - - TOKEN_CREATION_ERROR, - - FILE_NOT_FOUND, - - DIRECTORY_NOT_FOUND, - - FILE_ALREADY_EXISTS, - - NOT_A_FILE, - - PERMISSION_DENIED, // Error codes used during acl validation - - TIMEOUT, // Error codes used during acl validation - - PREFIX_NOT_FOUND, - - S3_BUCKET_INVALID_LENGTH, - - RATIS_ERROR, // Error in Ratis server - - INVALID_PATH_IN_ACL_REQUEST, // Error code when path name is invalid during - // acl requests. - - USER_MISMATCH // Error code when requested user name passed is different - // from remote user. 
- } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java deleted file mode 100644 index 50915454338..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.exceptions; -// Exception thrown by OM. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java deleted file mode 100644 index 32684de5b73..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ /dev/null @@ -1,294 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
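OMException, deleted above, pairs the message with a ResultCodes value so callers branch on the machine-readable code rather than parse message text. A small illustrative use; the volume and bucket names are invented for the example.

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.exceptions.OMException;
    import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;

    final class OmExceptionExample {
      private OmExceptionExample() { }

      static void checkBucketExists(String volume, String bucket, boolean exists)
          throws OMException {
        if (!exists) {
          throw new OMException("Bucket not found: " + volume + "/" + bucket,
              ResultCodes.BUCKET_NOT_FOUND);
        }
      }

      public static void main(String[] args) throws IOException {
        try {
          checkBucketExists("vol1", "bucket1", false);
        } catch (OMException e) {
          // Branch on the result code, not on the message string.
          if (e.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
            System.out.println("create-if-missing path: " + e.getMessage());
          } else {
            throw e;
          }
        }
      }
    }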
- */ - -package org.apache.hadoop.ozone.om.ha; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.FailoverProxyProvider; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; - -/** - * A failover proxy provider implementation which allows clients to configure - * multiple OMs to connect to. In case of OM failover, client can try - * connecting to another OM node from the list of proxies. - */ -public class OMFailoverProxyProvider implements - FailoverProxyProvider, Closeable { - - public static final Logger LOG = - LoggerFactory.getLogger(OMFailoverProxyProvider.class); - - // Map of OMNodeID to its proxy - private Map> omProxies; - private Map omProxyInfos; - private List omNodeIDList; - - private String currentProxyOMNodeId; - private int currentProxyIndex; - - private final Configuration conf; - private final long omVersion; - private final UserGroupInformation ugi; - private final Text delegationTokenService; - - private final String omServiceId; - - public OMFailoverProxyProvider(OzoneConfiguration configuration, - UserGroupInformation ugi, String omServiceId) throws IOException { - this.conf = configuration; - this.omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - this.ugi = ugi; - this.omServiceId = omServiceId; - loadOMClientConfigs(conf, this.omServiceId); - this.delegationTokenService = computeDelegationTokenService(); - - currentProxyIndex = 0; - currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex); - } - - public OMFailoverProxyProvider(OzoneConfiguration configuration, - UserGroupInformation ugi) throws IOException { - this(configuration, ugi, null); - } - - private void loadOMClientConfigs(Configuration config, String omSvcId) - throws IOException { - this.omProxies = new HashMap<>(); - this.omProxyInfos = new HashMap<>(); - this.omNodeIDList = new ArrayList<>(); - - Collection omServiceIds = Collections.singletonList(omSvcId); - - for (String serviceId : OmUtils.emptyAsSingletonNull(omServiceIds)) { - Collection omNodeIds = OmUtils.getOMNodeIds(config, serviceId); - - for (String nodeId : OmUtils.emptyAsSingletonNull(omNodeIds)) { - - String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, - serviceId, nodeId); - String rpcAddrStr = OmUtils.getOmRpcAddress(config, rpcAddrKey); - if (rpcAddrStr == null) { - continue; - } - - OMProxyInfo omProxyInfo = new OMProxyInfo(nodeId, rpcAddrStr); - - if (omProxyInfo.getAddress() != null) { - - ProxyInfo proxyInfo = - new ProxyInfo(null, omProxyInfo.toString()); - - // For a non-HA OM setup, nodeId might be null. 
If so, we assign it - // a dummy value - if (nodeId == null) { - nodeId = OzoneConsts.OM_NODE_ID_DUMMY; - } - omProxies.put(nodeId, proxyInfo); - omProxyInfos.put(nodeId, omProxyInfo); - omNodeIDList.add(nodeId); - } else { - LOG.error("Failed to create OM proxy for {} at address {}", - nodeId, rpcAddrStr); - } - } - } - - if (omProxies.isEmpty()) { - throw new IllegalArgumentException("Could not find any configured " + - "addresses for OM. Please configure the system with " - + OZONE_OM_ADDRESS_KEY); - } - } - - @VisibleForTesting - public synchronized String getCurrentProxyOMNodeId() { - return currentProxyOMNodeId; - } - - private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress) - throws IOException { - RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi, - conf, NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf)); - } - - /** - * Get the proxy object which should be used until the next failover event - * occurs. RPC proxy object is intialized lazily. - * @return the OM proxy object to invoke methods upon - */ - @Override - public synchronized ProxyInfo getProxy() { - ProxyInfo currentProxyInfo = omProxies.get(currentProxyOMNodeId); - createOMProxyIfNeeded(currentProxyInfo, currentProxyOMNodeId); - return currentProxyInfo; - } - - /** - * Creates proxy object if it does not already exist. - */ - private void createOMProxyIfNeeded(ProxyInfo proxyInfo, - String nodeId) { - if (proxyInfo.proxy == null) { - InetSocketAddress address = omProxyInfos.get(nodeId).getAddress(); - try { - proxyInfo.proxy = createOMProxy(address); - } catch (IOException ioe) { - LOG.error("{} Failed to create RPC proxy to OM at {}", - this.getClass().getSimpleName(), address, ioe); - throw new RuntimeException(ioe); - } - } - } - - public Text getCurrentProxyDelegationToken() { - return delegationTokenService; - } - - private Text computeDelegationTokenService() { - // For HA, this will return "," separated address of all OM's. - StringBuilder rpcAddress = new StringBuilder(); - int count = 0; - for (Map.Entry omProxyInfoSet : - omProxyInfos.entrySet()) { - count++; - rpcAddress = - rpcAddress.append( - omProxyInfoSet.getValue().getDelegationTokenService()); - - if (omProxyInfos.size() != count) { - rpcAddress.append(","); - } - } - - return new Text(rpcAddress.toString()); - } - - - - /** - * Called whenever an error warrants failing over. It is determined by the - * retry policy. - */ - @Override - public void performFailover(OzoneManagerProtocolPB currentProxy) { - int newProxyIndex = incrementProxyIndex(); - if (LOG.isDebugEnabled()) { - LOG.debug("Failing over OM proxy to index: {}, nodeId: {}", - newProxyIndex, omNodeIDList.get(newProxyIndex)); - } - } - - /** - * Update the proxy index to the next proxy in the list. - * @return the new proxy index - */ - private synchronized int incrementProxyIndex() { - currentProxyIndex = (currentProxyIndex + 1) % omProxies.size(); - currentProxyOMNodeId = omNodeIDList.get(currentProxyIndex); - return currentProxyIndex; - } - - @Override - public Class getInterface() { - return OzoneManagerProtocolPB.class; - } - - /** - * Performs failover if the leaderOMNodeId returned through OMReponse does - * not match the current leaderOMNodeId cached by the proxy provider. - */ - public void performFailoverIfRequired(String newLeaderOMNodeId) { - if (newLeaderOMNodeId == null) { - LOG.debug("No suggested leader nodeId. 
Performing failover to next peer" + - " node"); - performFailover(null); - } else { - if (updateLeaderOMNodeId(newLeaderOMNodeId)) { - LOG.debug("Failing over OM proxy to nodeId: {}", newLeaderOMNodeId); - } - } - } - - /** - * Failover to the OM proxy specified by the new leader OMNodeId. - * @param newLeaderOMNodeId OMNodeId to failover to. - * @return true if failover is successful, false otherwise. - */ - synchronized boolean updateLeaderOMNodeId(String newLeaderOMNodeId) { - if (!currentProxyOMNodeId.equals(newLeaderOMNodeId)) { - if (omProxies.containsKey(newLeaderOMNodeId)) { - currentProxyOMNodeId = newLeaderOMNodeId; - currentProxyIndex = omNodeIDList.indexOf(currentProxyOMNodeId); - return true; - } - } - return false; - } - - /** - * Close all the proxy objects which have been opened over the lifetime of - * the proxy provider. - */ - @Override - public synchronized void close() throws IOException { - for (ProxyInfo proxy : omProxies.values()) { - OzoneManagerProtocolPB omProxy = proxy.proxy; - if (omProxy != null) { - RPC.stopProxy(omProxy); - } - } - } - - @VisibleForTesting - public List getOMProxies() { - return new ArrayList(omProxies.values()); - } - - @VisibleForTesting - public List getOMProxyInfos() { - return new ArrayList(omProxyInfos.values()); - } -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java deleted file mode 100644 index b429ca0044e..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMProxyInfo.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ha; - -import org.apache.hadoop.io.Text; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.SecurityUtil; - -import java.net.InetSocketAddress; - -/** - * Class to store OM proxy information. 
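Because OMFailoverProxyProvider implements Hadoop's FailoverProxyProvider, the OM client hands it to RetryProxy together with a failover-aware RetryPolicy. The sketch below shows that wiring in outline only; the retry policy values are illustrative, and the real ones come from client configuration elsewhere in the deleted tree.

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;
    import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
    import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
    import org.apache.hadoop.security.UserGroupInformation;

    final class OmRetryProxyExample {
      private OmRetryProxyExample() { }

      static OzoneManagerProtocolPB createRetryProxy(OzoneConfiguration conf,
          String omServiceId) throws IOException {
        OMFailoverProxyProvider provider = new OMFailoverProxyProvider(
            conf, UserGroupInformation.getCurrentUser(), omServiceId);
        // Illustrative policy: allow up to 10 failovers between the configured OMs.
        RetryPolicy retryPolicy = RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, 10);
        // RetryProxy invokes performFailover() on the provider whenever the
        // policy decides a call should be retried on another OM.
        return (OzoneManagerProtocolPB) RetryProxy.create(
            OzoneManagerProtocolPB.class, provider, retryPolicy);
      }
    }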
- */ -public class OMProxyInfo { - private String nodeId; - private String rpcAddrStr; - private InetSocketAddress rpcAddr; - private Text dtService; - - OMProxyInfo(String nodeID, String rpcAddress) { - this.nodeId = nodeID; - this.rpcAddrStr = rpcAddress; - this.rpcAddr = NetUtils.createSocketAddr(rpcAddrStr); - this.dtService = SecurityUtil.buildTokenService(rpcAddr); - } - - public String toString() { - StringBuilder sb = new StringBuilder() - .append("nodeId=") - .append(nodeId) - .append(",nodeAddress=") - .append(rpcAddrStr); - return sb.toString(); - } - - public InetSocketAddress getAddress() { - return rpcAddr; - } - - public Text getDelegationTokenService() { - return dtService; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java deleted file mode 100644 index a95f09fa237..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ha; - -/** - * This package contains Ozone Client's OM Proxy classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java deleted file mode 100644 index e1ae0bbfbd8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BucketEncryptionKeyInfo.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoProtocolVersion; - -/** - * Encryption key info for bucket encryption key. 
- */ -public class BucketEncryptionKeyInfo { - private final CryptoProtocolVersion version; - private final CipherSuite suite; - private final String keyName; - - public BucketEncryptionKeyInfo( - CryptoProtocolVersion version, CipherSuite suite, - String keyName) { - this.version = version; - this.suite = suite; - this.keyName = keyName; - } - - public String getKeyName() { - return keyName; - } - - public CipherSuite getSuite() { - return suite; - } - - public CryptoProtocolVersion getVersion() { - return version; - } - - /** - * Builder for BucketEncryptionKeyInfo. - */ - public static class Builder { - private CryptoProtocolVersion version; - private CipherSuite suite; - private String keyName; - - public Builder setKeyName(String name) { - this.keyName = name; - return this; - } - - public Builder setSuite(CipherSuite cs) { - this.suite = cs; - return this; - } - - public Builder setVersion(CryptoProtocolVersion ver) { - this.version = ver; - return this; - } - - public BucketEncryptionKeyInfo build() { - return new BucketEncryptionKeyInfo(version, suite, keyName); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java deleted file mode 100644 index 0f82fe51617..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/EncryptionBucketInfo.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoProtocolVersion; - -/** - * A simple class for representing an encryption bucket. Presently an encryption - * bucket only has a path (the root of the encryption zone), a key name, and a - * unique id. The id is used to implement batched listing of encryption zones. 
- */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class EncryptionBucketInfo { - - private final CipherSuite suite; - private final CryptoProtocolVersion version; - private final String keyName; - - private final long id; - private final String path; - - public EncryptionBucketInfo(long id, String path, CipherSuite suite, - CryptoProtocolVersion version, String keyName) { - this.id = id; - this.path = path; - this.suite = suite; - this.version = version; - this.keyName = keyName; - } - - public long getId() { - return id; - } - - public String getPath() { - return path; - } - - public CipherSuite getSuite() { - return suite; - } - - public CryptoProtocolVersion getVersion() { - return version; - } - - public String getKeyName() { - return keyName; - } - - @Override - public int hashCode() { - return new HashCodeBuilder(13, 31) - .append(id) - .append(path) - .append(suite) - .append(version) - .append(keyName). - toHashCode(); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj == this) { - return true; - } - if (obj.getClass() != getClass()) { - return false; - } - - EncryptionBucketInfo rhs = (EncryptionBucketInfo) obj; - return new EqualsBuilder(). - append(id, rhs.id). - append(path, rhs.path). - append(suite, rhs.suite). - append(version, rhs.version). - append(keyName, rhs.keyName). - isEquals(); - } - - @Override - public String toString() { - return "EncryptionBucketInfo [id=" + id + - ", path=" + path + - ", suite=" + suite + - ", version=" + version + - ", keyName=" + keyName + "]"; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java deleted file mode 100644 index 4da8d2b8eee..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyValueUtil.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
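BucketEncryptionKeyInfo and EncryptionBucketInfo above carry the bucket-level encryption settings (cipher suite, crypto protocol version, KMS key name). A small sketch of building the key info; the key name is invented for the example.

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.crypto.CryptoProtocolVersion;
    import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;

    final class BucketEncryptionExample {
      private BucketEncryptionExample() { }

      public static void main(String[] args) {
        // "mykey" is an illustrative KMS key name, not taken from this patch.
        BucketEncryptionKeyInfo bek = new BucketEncryptionKeyInfo.Builder()
            .setKeyName("mykey")
            .setSuite(CipherSuite.AES_CTR_NOPADDING)
            .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES)
            .build();
        System.out.println(bek.getKeyName());
      }
    }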
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; - -/** - * Convert from/to hdds KeyValue protobuf structure. - */ -public final class KeyValueUtil { - private KeyValueUtil() { - } - - /** - * Parse Key,Value map data from protobuf representation. - */ - public static Map getFromProtobuf(List metadata) { - return metadata.stream() - .collect(Collectors.toMap(KeyValue::getKey, - KeyValue::getValue)); - } - - /** - * Encode key value map to protobuf. - */ - public static List toProtobuf(Map keyValueMap) { - List metadataList = new LinkedList<>(); - for (Map.Entry entry : keyValueMap.entrySet()) { - metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()). - setValue(entry.getValue()).build()); - } - return metadataList; - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java deleted file mode 100644 index c1930c85d03..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMRatisHelper.java +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
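KeyValueUtil, removed just above, is the small bridge between the Map<String, String> metadata carried by the OM helper classes and the repeated HddsProtos.KeyValue field used on the wire. A round trip for reference; the metadata entries are made up for the example.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
    import org.apache.hadoop.ozone.om.helpers.KeyValueUtil;

    final class MetadataRoundTripExample {
      private MetadataRoundTripExample() { }

      public static void main(String[] args) {
        Map<String, String> metadata = new HashMap<>();
        metadata.put("gdprEnabled", "true");
        metadata.put("owner", "hadoop");

        // Map -> repeated KeyValue, the form stored inside the protobuf messages.
        List<KeyValue> wire = KeyValueUtil.toProtobuf(metadata);

        // repeated KeyValue -> Map, as done when rebuilding the helper objects.
        Map<String, String> decoded = KeyValueUtil.getFromProtobuf(wire);
        System.out.println(metadata.equals(decoded));   // true
      }
    }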
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.ratis.RaftConfigKeys; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.SizeInBytes; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ratis helper methods for OM Ratis server and client. - */ -public final class OMRatisHelper { - private static final Logger LOG = LoggerFactory.getLogger( - OMRatisHelper.class); - - private OMRatisHelper() { - } - - /** - * Creates a new RaftClient object. - * - * @param rpcType Replication Type - * @param omId OM id of the client - * @param group RaftGroup - * @param retryPolicy Retry policy - * @return RaftClient object - */ - public static RaftClient newRaftClient(RpcType rpcType, String omId, RaftGroup - group, RetryPolicy retryPolicy, Configuration conf) { - if (LOG.isTraceEnabled()) { - LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, omId, group); - } - final RaftProperties properties = new RaftProperties(); - RaftConfigKeys.Rpc.setType(properties, rpcType); - - final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); - GrpcConfigKeys.setMessageSizeMax( - properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); - - return RaftClient.newBuilder() - .setRaftGroup(group) - .setLeaderId(getRaftPeerId(omId)) - .setProperties(properties) - .setRetryPolicy(retryPolicy) - .build(); - } - - static RaftPeerId getRaftPeerId(String omId) { - return RaftPeerId.valueOf(omId); - } - - public static ByteString convertRequestToByteString(OMRequest request) { - byte[] requestBytes = request.toByteArray(); - return ByteString.copyFrom(requestBytes); - } - - public static OMRequest convertByteStringToOMRequest(ByteString byteString) - throws InvalidProtocolBufferException { - byte[] bytes = byteString.toByteArray(); - return OMRequest.parseFrom(bytes); - } - - public static Message convertResponseToMessage(OMResponse response) { - byte[] requestBytes = response.toByteArray(); - return Message.valueOf(ByteString.copyFrom(requestBytes)); - } - - public static OMResponse getOMResponseFromRaftClientReply( - RaftClientReply reply) throws InvalidProtocolBufferException { - byte[] bytes = reply.getMessage().getContent().toByteArray(); - return 
OMResponse.newBuilder(OMResponse.parseFrom(bytes)) - .setLeaderOMNodeId(reply.getReplierId()) - .build(); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java deleted file mode 100644 index aa6e8f52e1a..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
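OMRatisHelper, whose deletion ends just above, mostly shuttles OMRequest/OMResponse protobufs in and out of Ratis Message payloads (and stamps the replier id onto the response as a leader hint). The request-side round trip, sketched with the helper's own static methods:

    import com.google.protobuf.InvalidProtocolBufferException;

    import org.apache.hadoop.ozone.om.helpers.OMRatisHelper;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
    import org.apache.ratis.protocol.Message;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    final class OmRatisMessageExample {
      private OmRatisMessageExample() { }

      /** OMRequest (protobuf) -> Ratis Message submitted to the Raft ring. */
      static Message toRatisMessage(OMRequest request) {
        ByteString payload = OMRatisHelper.convertRequestToByteString(request);
        return Message.valueOf(payload);
      }

      /** Ratis Message -> OMRequest, as the OM state machine decodes it. */
      static OMRequest fromRatisMessage(Message message)
          throws InvalidProtocolBufferException {
        return OMRatisHelper.convertByteStringToOMRequest(message.getContent());
      }
    }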
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.util.LinkedHashMap; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; - -import com.google.common.base.Preconditions; - -/** - * A class that encapsulates Bucket Arguments. - */ -public final class OmBucketArgs extends WithMetadata implements Auditable { - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String bucketName; - /** - * Bucket Version flag. - */ - private Boolean isVersionEnabled; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - */ - private OmBucketArgs(String volumeName, String bucketName, - Boolean isVersionEnabled, StorageType storageType, - Map metadata) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.metadata = metadata; - } - - /** - * Returns the Volume Name. - * @return String. - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns the Bucket Name. - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public Boolean getIsVersionEnabled() { - return isVersionEnabled; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns new builder class that builds a OmBucketArgs. - * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); - auditMap.put(OzoneConsts.IS_VERSION_ENABLED, - String.valueOf(this.isVersionEnabled)); - if(this.storageType != null){ - auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name()); - } - return auditMap; - } - - /** - * Builder for OmBucketArgs. 
- */ - public static class Builder { - private String volumeName; - private String bucketName; - private Boolean isVersionEnabled; - private StorageType storageType; - private Map metadata; - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setIsVersionEnabled(Boolean versionFlag) { - this.isVersionEnabled = versionFlag; - return this; - } - - public Builder addMetadata(Map metadataMap) { - this.metadata = metadataMap; - return this; - } - - public Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - /** - * Constructs the OmBucketArgs. - * @return instance of OmBucketArgs. - */ - public OmBucketArgs build() { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - return new OmBucketArgs(volumeName, bucketName, isVersionEnabled, - storageType, metadata); - } - } - - /** - * Creates BucketArgs protobuf from OmBucketArgs. - */ - public BucketArgs getProtobuf() { - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName); - if(isVersionEnabled != null) { - builder.setIsVersionEnabled(isVersionEnabled); - } - if(storageType != null) { - builder.setStorageType(storageType.toProto()); - } - return builder.build(); - } - - /** - * Parses BucketInfo protobuf and creates OmBucketArgs. - * @param bucketArgs - * @return instance of OmBucketArgs - */ - public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - return new OmBucketArgs(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.hasIsVersionEnabled() ? - bucketArgs.getIsVersionEnabled() : null, - bucketArgs.hasStorageType() ? StorageType.valueOf( - bucketArgs.getStorageType()) : null, - KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList())); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java deleted file mode 100644 index eb108024c31..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ /dev/null @@ -1,369 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
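OmBucketArgs, which ends just above, is representative of the helper classes that follow in this patch (OmBucketInfo, OmKeyArgs, OmKeyInfo): a value object with a nested Builder plus getProtobuf()/getFromProtobuf() for wire conversion. One usage sketch stands in for all of them; the names are invented for the example.

    import org.apache.hadoop.hdds.protocol.StorageType;
    import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs;

    final class BucketArgsExample {
      private BucketArgsExample() { }

      public static void main(String[] args) {
        OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setIsVersionEnabled(true)
            .setStorageType(StorageType.SSD)
            .build();

        // Helper object -> protobuf for the RPC layer ...
        BucketArgs proto = bucketArgs.getProtobuf();
        // ... and back again on the receiving side.
        OmBucketArgs decoded = OmBucketArgs.getFromProtobuf(proto);
        System.out.println(decoded.getBucketName());
      }
    }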
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - - -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketInfo; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; - -import com.google.common.base.Preconditions; - -/** - * A class that encapsulates Bucket Info. - */ -public final class OmBucketInfo extends WithMetadata implements Auditable { - /** - * Name of the volume in which the bucket belongs to. - */ - private final String volumeName; - /** - * Name of the bucket. - */ - private final String bucketName; - /** - * ACL Information. - */ - private List acls; - /** - * Bucket Version flag. - */ - private Boolean isVersionEnabled; - /** - * Type of storage to be used for this bucket. - * [RAM_DISK, SSD, DISK, ARCHIVE] - */ - private StorageType storageType; - /** - * Creation time of bucket. - */ - private final long creationTime; - - /** - * Bucket encryption key info if encryption is enabled. - */ - private BucketEncryptionKeyInfo bekInfo; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param acls - list of ACLs. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - * @param creationTime - Bucket creation time. - * @param metadata - metadata. - * @param bekInfo - bucket encryption key info. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - Map metadata, - BucketEncryptionKeyInfo bekInfo) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.acls = acls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.creationTime = creationTime; - this.metadata = metadata; - this.bekInfo = bekInfo; - } - - /** - * Returns the Volume Name. - * @return String. - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Returns the Bucket Name. - * @return String - */ - public String getBucketName() { - return bucketName; - } - - /** - * Returns the ACL's associated with this bucket. - * @return {@literal List} - */ - public List getAcls() { - return acls; - } - - /** - * Add an ozoneAcl to list of existing Acl set. - * @param ozoneAcl - * @return true - if successfully added, false if not added or acl is - * already existing in the acl list. - */ - public boolean addAcl(OzoneAcl ozoneAcl) { - return OzoneAclUtil.addAcl(acls, ozoneAcl); - } - - /** - * Remove acl from existing acl list. - * @param ozoneAcl - * @return true - if successfully removed, false if not able to remove due - * to that acl is not in the existing acl list. 
- */ - public boolean removeAcl(OzoneAcl ozoneAcl) { - return OzoneAclUtil.removeAcl(acls, ozoneAcl); - } - - /** - * Reset the existing acl list. - * @param ozoneAcls - * @return true - if successfully able to reset. - */ - public boolean setAcls(List ozoneAcls) { - return OzoneAclUtil.setAcl(acls, ozoneAcls); - } - - /** - * Returns true if bucket version is enabled, else false. - * @return isVersionEnabled - */ - public boolean getIsVersionEnabled() { - return isVersionEnabled; - } - - /** - * Returns the type of storage to be used. - * @return StorageType - */ - public StorageType getStorageType() { - return storageType; - } - - /** - * Returns creation time. - * - * @return long - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns bucket encryption key info. - * @return bucket encryption key info - */ - public BucketEncryptionKeyInfo getEncryptionKeyInfo() { - return bekInfo; - } - - - /** - * Returns new builder class that builds a OmBucketInfo. - * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); - auditMap.put(OzoneConsts.ACLS, - (this.acls != null) ? this.acls.toString() : null); - auditMap.put(OzoneConsts.IS_VERSION_ENABLED, - String.valueOf(this.isVersionEnabled)); - auditMap.put(OzoneConsts.STORAGE_TYPE, - (this.storageType != null) ? this.storageType.name() : null); - auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime)); - return auditMap; - } - - /** - * Builder for OmBucketInfo. - */ - public static class Builder { - private String volumeName; - private String bucketName; - private List acls; - private Boolean isVersionEnabled; - private StorageType storageType; - private long creationTime; - private Map metadata; - private BucketEncryptionKeyInfo bekInfo; - - public Builder() { - //Default values - this.acls = new LinkedList<>(); - this.isVersionEnabled = false; - this.storageType = StorageType.DISK; - this.metadata = new HashMap<>(); - } - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setAcls(List listOfAcls) { - if (listOfAcls != null) { - this.acls.addAll(listOfAcls); - } - return this; - } - - public Builder setIsVersionEnabled(Boolean versionFlag) { - this.isVersionEnabled = versionFlag; - return this; - } - - public Builder setStorageType(StorageType storage) { - this.storageType = storage; - return this; - } - - public Builder setCreationTime(long createdOn) { - this.creationTime = createdOn; - return this; - } - - public Builder addMetadata(String key, String value) { - metadata.put(key, value); - return this; - } - - public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } - return this; - } - - public Builder setBucketEncryptionKey( - BucketEncryptionKeyInfo info) { - this.bekInfo = info; - return this; - } - - /** - * Constructs the OmBucketInfo. - * @return instance of OmBucketInfo. 
- */ - public OmBucketInfo build() { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(acls); - Preconditions.checkNotNull(isVersionEnabled); - Preconditions.checkNotNull(storageType); - - return new OmBucketInfo(volumeName, bucketName, acls, - isVersionEnabled, storageType, creationTime, metadata, bekInfo); - } - } - - /** - * Creates BucketInfo protobuf from OmBucketInfo. - */ - public BucketInfo getProtobuf() { - BucketInfo.Builder bib = BucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .addAllAcls(OzoneAclUtil.toProtobuf(acls)) - .setIsVersionEnabled(isVersionEnabled) - .setStorageType(storageType.toProto()) - .setCreationTime(creationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); - if (bekInfo != null && bekInfo.getKeyName() != null) { - bib.setBeinfo(OMPBHelper.convert(bekInfo)); - } - return bib.build(); - } - - /** - * Parses BucketInfo protobuf and creates OmBucketInfo. - * @param bucketInfo - * @return instance of OmBucketInfo - */ - public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { - OmBucketInfo.Builder obib = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setAcls(bucketInfo.getAclsList().stream().map( - OzoneAcl::fromProtobuf).collect(Collectors.toList())) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setStorageType(StorageType.valueOf(bucketInfo.getStorageType())) - .setCreationTime(bucketInfo.getCreationTime()); - if (bucketInfo.getMetadataList() != null) { - obib.addAllMetadata(KeyValueUtil - .getFromProtobuf(bucketInfo.getMetadataList())); - } - if (bucketInfo.hasBeinfo()) { - obib.setBucketEncryptionKey(OMPBHelper.convert(bucketInfo.getBeinfo())); - } - return obib.build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - OmBucketInfo that = (OmBucketInfo) o; - return creationTime == that.creationTime && - volumeName.equals(that.volumeName) && - bucketName.equals(that.bucketName) && - Objects.equals(acls, that.acls) && - Objects.equals(isVersionEnabled, that.isVersionEnabled) && - storageType == that.storageType && - Objects.equals(metadata, that.metadata) && - Objects.equals(bekInfo, that.bekInfo); - } - - @Override - public int hashCode() { - return Objects.hash(volumeName, bucketName); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java deleted file mode 100644 index 6bca3aaa8e5..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ /dev/null @@ -1,269 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * Args for key. Client use this to specify key's attributes on key creation - * (putKey()). - */ -public final class OmKeyArgs implements Auditable { - private final String volumeName; - private final String bucketName; - private final String keyName; - private long dataSize; - private final ReplicationType type; - private final ReplicationFactor factor; - private List locationInfoList; - private final boolean isMultipartKey; - private final String multipartUploadID; - private final int multipartUploadPartNumber; - private Map metadata; - private boolean refreshPipeline; - private boolean sortDatanodesInPipeline; - private List acls; - - @SuppressWarnings("parameternumber") - private OmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationType type, ReplicationFactor factor, - List locationInfoList, boolean isMultipart, - String uploadID, int partNumber, - Map metadataMap, boolean refreshPipeline, - List acls, boolean sortDatanode) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.type = type; - this.factor = factor; - this.locationInfoList = locationInfoList; - this.isMultipartKey = isMultipart; - this.multipartUploadID = uploadID; - this.multipartUploadPartNumber = partNumber; - this.metadata = metadataMap; - this.refreshPipeline = refreshPipeline; - this.acls = acls; - this.sortDatanodesInPipeline = sortDatanode; - } - - public boolean getIsMultipartKey() { - return isMultipartKey; - } - - public String getMultipartUploadID() { - return multipartUploadID; - } - - public int getMultipartUploadPartNumber() { - return multipartUploadPartNumber; - } - - public ReplicationType getType() { - return type; - } - - public ReplicationFactor getFactor() { - return factor; - } - - public List getAcls() { - return acls; - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public String getKeyName() { - return keyName; - } - - public long getDataSize() { - return dataSize; - } - - public void setDataSize(long size) { - dataSize = size; - } - - public Map getMetadata() { - return metadata; - } - - public void setMetadata(Map metadata) { - this.metadata = metadata; - } - - public void setLocationInfoList(List locationInfoList) { - this.locationInfoList = locationInfoList; - } - - public List getLocationInfoList() { - return locationInfoList; - } - - public boolean getRefreshPipeline() { - return refreshPipeline; - } - - public boolean getSortDatanodes() { - return sortDatanodesInPipeline; - } - - @Override - public Map toAuditMap() { - Map auditMap = new 
LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, this.volumeName); - auditMap.put(OzoneConsts.BUCKET, this.bucketName); - auditMap.put(OzoneConsts.KEY, this.keyName); - auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(this.dataSize)); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (this.type != null) ? this.type.name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (this.factor != null) ? this.factor.name() : null); - auditMap.put(OzoneConsts.KEY_LOCATION_INFO, - (this.locationInfoList != null) ? locationInfoList.toString() : null); - return auditMap; - } - - @VisibleForTesting - public void addLocationInfo(OmKeyLocationInfo locationInfo) { - if (this.locationInfoList == null) { - locationInfoList = new ArrayList<>(); - } - locationInfoList.add(locationInfo); - } - - /** - * Builder class of OmKeyArgs. - */ - public static class Builder { - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private ReplicationType type; - private ReplicationFactor factor; - private List locationInfoList; - private boolean isMultipartKey; - private String multipartUploadID; - private int multipartUploadPartNumber; - private Map metadata = new HashMap<>(); - private boolean refreshPipeline; - private boolean sortDatanodesInPipeline; - private List acls; - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setKeyName(String key) { - this.keyName = key; - return this; - } - - public Builder setDataSize(long size) { - this.dataSize = size; - return this; - } - - public Builder setType(ReplicationType replicationType) { - this.type = replicationType; - return this; - } - - public Builder setFactor(ReplicationFactor replicationFactor) { - this.factor = replicationFactor; - return this; - } - - public Builder setLocationInfoList(List locationInfos) { - this.locationInfoList = locationInfos; - return this; - } - - public Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; - return this; - } - - public Builder setIsMultipartKey(boolean isMultipart) { - this.isMultipartKey = isMultipart; - return this; - } - - public Builder setMultipartUploadID(String uploadID) { - this.multipartUploadID = uploadID; - return this; - } - - public Builder setMultipartUploadPartNumber(int partNumber) { - this.multipartUploadPartNumber = partNumber; - return this; - } - - public Builder addMetadata(String key, String value) { - this.metadata.put(key, value); - return this; - } - - public Builder addAllMetadata(Map metadatamap) { - this.metadata.putAll(metadatamap); - return this; - } - - public Builder setRefreshPipeline(boolean refresh) { - this.refreshPipeline = refresh; - return this; - } - - public Builder setSortDatanodesInPipeline(boolean sort) { - this.sortDatanodesInPipeline = sort; - return this; - } - - public OmKeyArgs build() { - return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, type, - factor, locationInfoList, isMultipartKey, multipartUploadID, - multipartUploadPartNumber, metadata, refreshPipeline, acls, - sortDatanodesInPipeline); - } - - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java deleted file mode 100644 index 83adee980a8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java 
+++ /dev/null @@ -1,421 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.stream.Collectors; - -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.util.Time; - -import com.google.common.base.Preconditions; - -/** - * Args for key block. The block instance for the key requested in putKey. - * This is returned from OM to client, and client use class to talk to - * datanode. Also, this is the metadata written to om.db on server side. - */ -public final class OmKeyInfo extends WithMetadata { - private final String volumeName; - private final String bucketName; - // name of key client specified - private String keyName; - private long dataSize; - private List keyLocationVersions; - private final long creationTime; - private long modificationTime; - private HddsProtos.ReplicationType type; - private HddsProtos.ReplicationFactor factor; - private FileEncryptionInfo encInfo; - /** - * ACL Information. - */ - private List acls; - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, - HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, - Map metadata, - FileEncryptionInfo encInfo, List acls) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - // it is important that the versions are ordered from old to new. - // Do this sanity check when versions got loaded on creating OmKeyInfo. - // TODO : this is not necessary, here only because versioning is still a - // work in-progress, remove this following check when versioning is - // complete and prove correctly functioning - long currentVersion = -1; - for (OmKeyLocationInfoGroup version : versions) { - Preconditions.checkArgument( - currentVersion + 1 == version.getVersion()); - currentVersion = version.getVersion(); - } - this.keyLocationVersions = versions; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.factor = factor; - this.type = type; - this.metadata = metadata; - this.encInfo = encInfo; - this.acls = acls; - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public HddsProtos.ReplicationType getType() { - return type; - } - - public HddsProtos.ReplicationFactor getFactor() { - return factor; - } - - public String getKeyName() { - return keyName; - } - - public void setKeyName(String keyName) { - this.keyName = keyName; - } - - public long getDataSize() { - return dataSize; - } - - public void setDataSize(long size) { - this.dataSize = size; - } - - public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { - return keyLocationVersions.size() == 0? 
null : - keyLocationVersions.get(keyLocationVersions.size() - 1); - } - - public List getKeyLocationVersions() { - return keyLocationVersions; - } - - public void updateModifcationTime() { - this.modificationTime = Time.monotonicNow(); - } - - /** - * updates the length of the each block in the list given. - * This will be called when the key is being committed to OzoneManager. - * - * @param locationInfoList list of locationInfo - */ - public void updateLocationInfoList(List locationInfoList) { - long latestVersion = getLatestVersionLocations().getVersion(); - OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations(); - List currentList = - keyLocationInfoGroup.getLocationList(); - List latestVersionList = - keyLocationInfoGroup.getBlocksLatestVersionOnly(); - // Updates the latest locationList in the latest version only with - // given locationInfoList here. - // TODO : The original allocated list and the updated list here may vary - // as the containers on the Datanode on which the blocks were pre allocated - // might get closed. The diff of blocks between these two lists here - // need to be garbage collected in case the ozone client dies. - currentList.removeAll(latestVersionList); - // set each of the locationInfo object to the latest version - locationInfoList.stream().forEach(omKeyLocationInfo -> omKeyLocationInfo - .setCreateVersion(latestVersion)); - currentList.addAll(locationInfoList); - } - - /** - * Append a set of blocks to the latest version. Note that these blocks are - * part of the latest version, not a new version. - * - * @param newLocationList the list of new blocks to be added. - * @param updateTime if true, will update modification time. - * @throws IOException - */ - public synchronized void appendNewBlocks( - List newLocationList, boolean updateTime) - throws IOException { - if (keyLocationVersions.size() == 0) { - throw new IOException("Appending new block, but no version exist"); - } - OmKeyLocationInfoGroup currentLatestVersion = - keyLocationVersions.get(keyLocationVersions.size() - 1); - currentLatestVersion.appendNewBlocks(newLocationList); - if (updateTime) { - setModificationTime(Time.now()); - } - } - - /** - * Add a new set of blocks. The new blocks will be added as appending a new - * version to the all version list. - * - * @param newLocationList the list of new blocks to be added. - * @param updateTime - if true, updates modification time. - * @throws IOException - */ - public synchronized long addNewVersion( - List newLocationList, boolean updateTime) - throws IOException { - long latestVersionNum; - if (keyLocationVersions.size() == 0) { - // no version exist, these blocks are the very first version. 
- keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList)); - latestVersionNum = 0; - } else { - // it is important that the new version are always at the tail of the list - OmKeyLocationInfoGroup currentLatestVersion = - keyLocationVersions.get(keyLocationVersions.size() - 1); - // the new version is created based on the current latest version - OmKeyLocationInfoGroup newVersion = - currentLatestVersion.generateNextVersion(newLocationList); - keyLocationVersions.add(newVersion); - latestVersionNum = newVersion.getVersion(); - } - - if (updateTime) { - setModificationTime(Time.now()); - } - return latestVersionNum; - } - - public long getCreationTime() { - return creationTime; - } - - public long getModificationTime() { - return modificationTime; - } - - public void setModificationTime(long modificationTime) { - this.modificationTime = modificationTime; - } - - public FileEncryptionInfo getFileEncryptionInfo() { - return encInfo; - } - - public List getAcls() { - return acls; - } - - public boolean addAcl(OzoneAcl acl) { - return OzoneAclUtil.addAcl(acls, acl); - } - - public boolean removeAcl(OzoneAcl acl) { - return OzoneAclUtil.removeAcl(acls, acl); - } - - public boolean setAcls(List newAcls) { - return OzoneAclUtil.setAcl(acls, newAcls); - } - - /** - * Builder of OmKeyInfo. - */ - public static class Builder { - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private List omKeyLocationInfoGroups = - new ArrayList<>(); - private long creationTime; - private long modificationTime; - private HddsProtos.ReplicationType type; - private HddsProtos.ReplicationFactor factor; - private Map metadata; - private FileEncryptionInfo encInfo; - private List acls; - - public Builder() { - this.metadata = new HashMap<>(); - omKeyLocationInfoGroups = new ArrayList<>(); - acls = new ArrayList<>(); - } - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setKeyName(String key) { - this.keyName = key; - return this; - } - - public Builder setOmKeyLocationInfos( - List omKeyLocationInfoList) { - this.omKeyLocationInfoGroups = omKeyLocationInfoList; - return this; - } - - public Builder setDataSize(long size) { - this.dataSize = size; - return this; - } - - public Builder setCreationTime(long crTime) { - this.creationTime = crTime; - return this; - } - - public Builder setModificationTime(long mTime) { - this.modificationTime = mTime; - return this; - } - - public Builder setReplicationFactor(HddsProtos.ReplicationFactor replFact) { - this.factor = replFact; - return this; - } - - public Builder setReplicationType(HddsProtos.ReplicationType replType) { - this.type = replType; - return this; - } - - public Builder addMetadata(String key, String value) { - metadata.put(key, value); - return this; - } - - public Builder addAllMetadata(Map newMetadata) { - metadata.putAll(newMetadata); - return this; - } - - public Builder setFileEncryptionInfo(FileEncryptionInfo feInfo) { - this.encInfo = feInfo; - return this; - } - - public Builder setAcls(List listOfAcls) { - if (listOfAcls != null) { - this.acls.addAll(listOfAcls); - } - return this; - } - - public OmKeyInfo build() { - return new OmKeyInfo( - volumeName, bucketName, keyName, omKeyLocationInfoGroups, - dataSize, creationTime, modificationTime, type, factor, metadata, - encInfo, acls); - } - } - - public KeyInfo 
getProtobuf() { - long latestVersion = keyLocationVersions.size() == 0 ? -1 : - keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion(); - KeyInfo.Builder kb = KeyInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(dataSize) - .setFactor(factor) - .setType(type) - .addAllKeyLocationList(keyLocationVersions.stream() - .map(OmKeyLocationInfoGroup::getProtobuf) - .collect(Collectors.toList())) - .setLatestVersion(latestVersion) - .setCreationTime(creationTime) - .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .addAllAcls(OzoneAclUtil.toProtobuf(acls)); - if (encInfo != null) { - kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo)); - } - return kb.build(); - } - - public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) { - return new OmKeyInfo.Builder() - .setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()) - .setKeyName(keyInfo.getKeyName()) - .setOmKeyLocationInfos(keyInfo.getKeyLocationListList().stream() - .map(OmKeyLocationInfoGroup::getFromProtobuf) - .collect(Collectors.toList())) - .setDataSize(keyInfo.getDataSize()) - .setCreationTime(keyInfo.getCreationTime()) - .setModificationTime(keyInfo.getModificationTime()) - .setReplicationType(keyInfo.getType()) - .setReplicationFactor(keyInfo.getFactor()) - .addAllMetadata(KeyValueUtil.getFromProtobuf(keyInfo.getMetadataList())) - .setFileEncryptionInfo(keyInfo.hasFileEncryptionInfo() ? - OMPBHelper.convert(keyInfo.getFileEncryptionInfo()): null) - .setAcls(OzoneAclUtil.fromProtobuf(keyInfo.getAclsList())) - .build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - OmKeyInfo omKeyInfo = (OmKeyInfo) o; - return dataSize == omKeyInfo.dataSize && - creationTime == omKeyInfo.creationTime && - modificationTime == omKeyInfo.modificationTime && - volumeName.equals(omKeyInfo.volumeName) && - bucketName.equals(omKeyInfo.bucketName) && - keyName.equals(omKeyInfo.keyName) && - Objects - .equals(keyLocationVersions, omKeyInfo.keyLocationVersions) && - type == omKeyInfo.type && - factor == omKeyInfo.factor && - Objects.equals(metadata, omKeyInfo.metadata) && - Objects.equals(acls, omKeyInfo.acls); - } - - @Override - public int hashCode() { - return Objects.hash(volumeName, bucketName, keyName); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java deleted file mode 100644 index b81fcd03483..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; -import org.apache.hadoop.security.token.Token; - -import java.util.Objects; - -/** - * One key can be too huge to fit in one container. In which case it gets split - * into a number of subkeys. This class represents one such subkey instance. - */ -public final class OmKeyLocationInfo { - private final BlockID blockID; - // the id of this subkey in all the subkeys. - private long length; - private final long offset; - // Block token, required for client authentication when security is enabled. - private Token token; - // the version number indicating when this block was added - private long createVersion; - - private Pipeline pipeline; - - private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length, - long offset) { - this.blockID = blockID; - this.pipeline = pipeline; - this.length = length; - this.offset = offset; - } - - private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length, - long offset, Token token) { - this.blockID = blockID; - this.pipeline = pipeline; - this.length = length; - this.offset = offset; - this.token = token; - } - - public void setCreateVersion(long version) { - createVersion = version; - } - - public long getCreateVersion() { - return createVersion; - } - - public BlockID getBlockID() { - return blockID; - } - - public long getContainerID() { - return blockID.getContainerID(); - } - - public long getLocalID() { - return blockID.getLocalID(); - } - - public Pipeline getPipeline() { - return pipeline; - } - - public long getLength() { - return length; - } - - public void setLength(long length) { - this.length = length; - } - - public long getOffset() { - return offset; - } - - public long getBlockCommitSequenceId() { - return blockID.getBlockCommitSequenceId(); - } - - public Token getToken() { - return token; - } - - public void setToken(Token token) { - this.token = token; - } - - public void setPipeline(Pipeline pipeline) { - this.pipeline = pipeline; - } - - /** - * Builder of OmKeyLocationInfo. 
- */ - public static class Builder { - private BlockID blockID; - private long length; - private long offset; - private Token token; - private Pipeline pipeline; - - public Builder setBlockID(BlockID blockId) { - this.blockID = blockId; - return this; - } - - @SuppressWarnings("checkstyle:hiddenfield") - public Builder setPipeline(Pipeline pipeline) { - this.pipeline = pipeline; - return this; - } - - public Builder setLength(long len) { - this.length = len; - return this; - } - - public Builder setOffset(long off) { - this.offset = off; - return this; - } - - public Builder setToken(Token bToken) { - this.token = bToken; - return this; - } - - public OmKeyLocationInfo build() { - if (token == null) { - return new OmKeyLocationInfo(blockID, pipeline, length, offset); - } else { - return new OmKeyLocationInfo(blockID, pipeline, length, offset, token); - } - } - } - - public KeyLocation getProtobuf() { - KeyLocation.Builder builder = KeyLocation.newBuilder() - .setBlockID(blockID.getProtobuf()) - .setLength(length) - .setOffset(offset) - .setCreateVersion(createVersion); - if (this.token != null) { - builder.setToken(this.token.toTokenProto()); - } - try { - builder.setPipeline(pipeline.getProtobufMessage()); - } catch (UnknownPipelineStateException e) { - //TODO: fix me: we should not return KeyLocation without pipeline. - } - return builder.build(); - } - - private static Pipeline getPipeline(KeyLocation keyLocation) { - try { - return keyLocation.hasPipeline() ? - Pipeline.getFromProtobuf(keyLocation.getPipeline()) : null; - } catch (UnknownPipelineStateException e) { - return null; - } - } - - public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { - OmKeyLocationInfo info = new OmKeyLocationInfo( - BlockID.getFromProtobuf(keyLocation.getBlockID()), - getPipeline(keyLocation), - keyLocation.getLength(), - keyLocation.getOffset()); - if(keyLocation.hasToken()) { - info.token = new Token<>(keyLocation.getToken()); - } - info.setCreateVersion(keyLocation.getCreateVersion()); - return info; - } - - @Override - public String toString() { - return "{blockID={containerID=" + blockID.getContainerID() + - ", localID=" + blockID.getLocalID() + "}" + - ", length=" + length + - ", offset=" + offset + - ", token=" + token + - ", pipeline=" + pipeline + - ", createVersion=" + createVersion + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - OmKeyLocationInfo that = (OmKeyLocationInfo) o; - return length == that.length && - offset == that.offset && - createVersion == that.createVersion && - Objects.equals(blockID, that.blockID) && - Objects.equals(token, that.token) && - Objects.equals(pipeline, that.pipeline); - } - - @Override - public int hashCode() { - return Objects.hash(blockID, length, offset, token, createVersion, - pipeline); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java deleted file mode 100644 index 8bdcee3803c..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ /dev/null @@ -1,118 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -/** - * A list of key locations. This class represents one single version of the - * blocks of a key. - */ -public class OmKeyLocationInfoGroup { - private final long version; - private final List locationList; - - public OmKeyLocationInfoGroup(long version, - List locations) { - this.version = version; - this.locationList = locations; - } - - /** - * Return only the blocks that are created in the most recent version. - * - * @return the list of blocks that are created in the latest version. - */ - public List getBlocksLatestVersionOnly() { - List list = new ArrayList<>(); - locationList.stream().filter(x -> x.getCreateVersion() == version) - .forEach(list::add); - return list; - } - - public long getVersion() { - return version; - } - - public List getLocationList() { - return locationList; - } - - public KeyLocationList getProtobuf() { - return KeyLocationList.newBuilder() - .setVersion(version) - .addAllKeyLocations( - locationList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())) - .build(); - } - - public static OmKeyLocationInfoGroup getFromProtobuf( - KeyLocationList keyLocationList) { - return new OmKeyLocationInfoGroup( - keyLocationList.getVersion(), - keyLocationList.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList())); - } - - /** - * Given a new block location, generate a new version list based upon this - * one. - * - * @param newLocationList a list of new location to be added. - * @return - */ - OmKeyLocationInfoGroup generateNextVersion( - List newLocationList) throws IOException { - // TODO : revisit if we can do this method more efficiently - // one potential inefficiency here is that later version always include - // older ones. e.g. 
v1 has B1, then v2, v3...will all have B1 and only add - // more - List newList = new ArrayList<>(); - newList.addAll(locationList); - for (OmKeyLocationInfo newInfo : newLocationList) { - // all these new blocks will have addVersion of current version + 1 - newInfo.setCreateVersion(version + 1); - newList.add(newInfo); - } - return new OmKeyLocationInfoGroup(version + 1, newList); - } - - void appendNewBlocks(List newLocationList) - throws IOException { - for (OmKeyLocationInfo info : newLocationList) { - info.setCreateVersion(version); - locationList.add(info); - } - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("version:").append(version).append(" "); - for (OmKeyLocationInfo kli : locationList) { - sb.append(kli.getLocalID()).append(" || "); - } - return sb.toString(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java deleted file mode 100644 index 646cb421e43..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -/** - * This class holds information about the response from commit multipart - * upload part request. - */ -public class OmMultipartCommitUploadPartInfo { - - private final String partName; - - public OmMultipartCommitUploadPartInfo(String name) { - this.partName = name; - } - - public String getPartName() { - return partName; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java deleted file mode 100644 index 98913d3ff70..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartInfo.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -/** - * Class which holds information about the response of initiate multipart - * upload request. - */ -public class OmMultipartInfo { - - private String volumeName; - private String bucketName; - private String keyName; - private String uploadID; - - /** - * Construct OmMultipartInfo object which holds information about the - * response from initiate multipart upload request. - * @param volume - * @param bucket - * @param key - * @param id - */ - public OmMultipartInfo(String volume, String bucket, String key, String id) { - this.volumeName = volume; - this.bucketName = bucket; - this.keyName = key; - this.uploadID = id; - } - - /** - * Return volume name. - * @return volumeName - */ - public String getVolumeName() { - return volumeName; - } - - /** - * Return bucket name. - * @return bucketName - */ - public String getBucketName() { - return bucketName; - } - - /** - * Return key name. - * @return keyName - */ - public String getKeyName() { - return keyName; - } - - /** - * Return uploadID. - * @return uploadID - */ - public String getUploadID() { - return uploadID; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java deleted file mode 100644 index 80123fd37d7..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .PartKeyInfo; - -import java.util.HashMap; -import java.util.Map; -import java.util.TreeMap; - -/** - * This class represents multipart upload information for a key, which holds - * upload part information of the key. - */ -public class OmMultipartKeyInfo { - private String uploadID; - private TreeMap partKeyInfoList; - - /** - * Construct OmMultipartKeyInfo object which holds multipart upload - * information for a key. - */ - public OmMultipartKeyInfo(String id, Map list) { - this.uploadID = id; - this.partKeyInfoList = new TreeMap<>(list); - } - - /** - * Returns the uploadID for this multi part upload of a key. - * @return uploadID - */ - public String getUploadID() { - return uploadID; - } - - public TreeMap getPartKeyInfoMap() { - return partKeyInfoList; - } - - public void addPartKeyInfo(int partNumber, PartKeyInfo partKeyInfo) { - this.partKeyInfoList.put(partNumber, partKeyInfo); - } - - public PartKeyInfo getPartKeyInfo(int partNumber) { - return partKeyInfoList.get(partNumber); - } - - - /** - * Construct OmMultipartInfo from MultipartKeyInfo proto object. - * @param multipartKeyInfo - * @return OmMultipartKeyInfo - */ - public static OmMultipartKeyInfo getFromProto(MultipartKeyInfo - multipartKeyInfo) { - Map list = new HashMap<>(); - multipartKeyInfo.getPartKeyInfoListList().stream().forEach(partKeyInfo - -> list.put(partKeyInfo.getPartNumber(), partKeyInfo)); - return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(), list); - } - - /** - * Construct MultipartKeyInfo from this object. - * @return MultipartKeyInfo - */ - public MultipartKeyInfo getProto() { - MultipartKeyInfo.Builder builder = MultipartKeyInfo.newBuilder() - .setUploadID(uploadID); - partKeyInfoList.forEach((key, value) -> builder.addPartKeyInfoList(value)); - return builder.build(); - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - return other instanceof OmMultipartKeyInfo && uploadID.equals( - ((OmMultipartKeyInfo)other).getUploadID()); - } - - @Override - public int hashCode() { - return uploadID.hashCode(); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java deleted file mode 100644 index 9d2d2ae81d0..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUpload.java +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import java.time.Instant; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; - -/** - * Information about one initialized upload. - */ -public class OmMultipartUpload { - - private String volumeName; - - private String bucketName; - - private String keyName; - - private String uploadId; - - private Instant creationTime; - - private HddsProtos.ReplicationType replicationType; - - private HddsProtos.ReplicationFactor replicationFactor; - - public OmMultipartUpload(String volumeName, String bucketName, - String keyName, String uploadId) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.uploadId = uploadId; - } - - public OmMultipartUpload(String volumeName, String bucketName, - String keyName, String uploadId, Instant creationDate) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.uploadId = uploadId; - this.creationTime = creationDate; - } - - public OmMultipartUpload(String volumeName, String bucketName, - String keyName, String uploadId, Instant creationTime, - ReplicationType replicationType, - ReplicationFactor replicationFactor) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.uploadId = uploadId; - this.creationTime = creationTime; - this.replicationType = replicationType; - this.replicationFactor = replicationFactor; - } - - public static OmMultipartUpload from(String key) { - String[] split = key.split(OM_KEY_PREFIX); - if (split.length < 5) { - throw new IllegalArgumentException("Key " + key - + " doesn't have enough segments to be a valid multipart upload key"); - } - String uploadId = split[split.length - 1]; - String volume = split[1]; - String bucket = split[2]; - return new OmMultipartUpload(volume, bucket, - key.substring(volume.length() + bucket.length() + 3, - key.length() - uploadId.length() - 1), uploadId); - } - - public String getDbKey() { - return OmMultipartUpload - .getDbKey(volumeName, bucketName, keyName, uploadId); - } - - public static String getDbKey(String volume, String bucket, String key, - String uploadId) { - return getDbKey(volume, bucket, key) + OM_KEY_PREFIX + uploadId; - - } - - public static String getDbKey(String volume, String bucket, String key) { - return OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + - OM_KEY_PREFIX + key; - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public String getKeyName() { - return keyName; - } - - public String getUploadId() { - return uploadId; - } - - public Instant getCreationTime() { - return creationTime; - } - - public void setCreationTime(Instant creationTime) { - this.creationTime = creationTime; - } - - public ReplicationType getReplicationType() { - return replicationType; - } - - public void 
setReplicationType( - ReplicationType replicationType) { - this.replicationType = replicationType; - } - - public ReplicationFactor getReplicationFactor() { - return replicationFactor; - } - - public void setReplicationFactor( - ReplicationFactor replicationFactor) { - this.replicationFactor = replicationFactor; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java deleted file mode 100644 index 71ce882c6f8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteInfo.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -/** - * This class holds information about the response of complete Multipart - * upload request. - */ -public class OmMultipartUploadCompleteInfo { - - private String volume; - private String bucket; - private String key; - private String hash; // this is used as ETag for S3. - - public OmMultipartUploadCompleteInfo(String volumeName, String bucketName, - String keyName, String md5) { - this.volume = volumeName; - this.bucket = bucketName; - this.key = keyName; - this.hash = md5; - } - - public String getVolume() { - return volume; - } - - public void setVolume(String volume) { - this.volume = volume; - } - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getHash() { - return hash; - } - - public void setHash(String hash) { - this.hash = hash; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java deleted file mode 100644 index 50c0a4706fc..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -/** - * This class represents multipart list, which is required for - * CompleteMultipart upload request. - */ -public class OmMultipartUploadCompleteList { - - private final TreeMap multipartMap; - - /** - * Construct OmMultipartUploadCompleteList which holds multipart map which - * contains part number and part name. - * @param partMap - */ - public OmMultipartUploadCompleteList(Map partMap) { - this.multipartMap = new TreeMap<>(partMap); - } - - /** - * Return multipartMap which is a map of part number and part name. - * @return multipartMap - */ - public TreeMap getMultipartMap() { - return multipartMap; - } - - /** - * Construct Part list from the multipartMap. - * @return List - */ - public List getPartsList() { - List partList = new ArrayList<>(); - multipartMap.forEach((partNumber, partName) -> partList.add(Part - .newBuilder().setPartName(partName).setPartNumber(partNumber).build())); - return partList; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java deleted file mode 100644 index 0c13a0d4a92..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadList.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import java.util.List; - -/** - * List of in-flight MPU uploads. - */ -public class OmMultipartUploadList { - - private List uploads; - - public OmMultipartUploadList( - List uploads) { - this.uploads = uploads; - } - - public List getUploads() { - return uploads; - } - - public void setUploads( - List uploads) { - this.uploads = uploads; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java deleted file mode 100644 index ba0cd426586..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .PartInfo; - -import java.util.ArrayList; -import java.util.List; - -/** - * Class which is response for the list parts of a multipart upload key. - */ -public class OmMultipartUploadListParts { - - private HddsProtos.ReplicationType replicationType; - - private HddsProtos.ReplicationFactor replicationFactor; - - //When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - private int nextPartNumberMarker; - // Indicates whether the returned list of parts is truncated. A true value - // indicates that the list was truncated. - // A list can be truncated if the number of parts exceeds the limit - // returned in the MaxParts element. 
- private boolean truncated; - - private final List partInfoList = new ArrayList<>(); - - public OmMultipartUploadListParts(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, - int nextMarker, boolean truncate) { - this.replicationType = type; - this.replicationFactor = factor; - - this.nextPartNumberMarker = nextMarker; - this.truncated = truncate; - } - - public void addPart(OmPartInfo partInfo) { - partInfoList.add(partInfo); - } - - public HddsProtos.ReplicationType getReplicationType() { - return replicationType; - } - - public int getNextPartNumberMarker() { - return nextPartNumberMarker; - } - - public boolean isTruncated() { - return truncated; - } - - public void setReplicationType(HddsProtos.ReplicationType replicationType) { - this.replicationType = replicationType; - } - - public List getPartInfoList() { - return partInfoList; - } - - public ReplicationFactor getReplicationFactor() { - return replicationFactor; - } - - public void addPartList(List partInfos) { - this.partInfoList.addAll(partInfos); - } - - public void addProtoPartList(List partInfos) { - partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo( - partInfo.getPartNumber(), partInfo.getPartName(), - partInfo.getModificationTime(), partInfo.getSize()))); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java deleted file mode 100644 index b4f0d1679f8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.om.helpers; - -import com.google.protobuf.ByteString; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.security.UserGroupInformation; - -import java.util.BitSet; -import java.util.Collection; -import java.util.List; -import java.util.LinkedList; -import java.util.Map; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Objects; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; - -/** - * This helper class keeps a map of all user and their permissions. - */ -@SuppressWarnings("ProtocolBufferOrdinal") -public class OmOzoneAclMap { - // per Acl Type user:rights map - private ArrayList> accessAclMap; - private List defaultAclList; - - OmOzoneAclMap() { - accessAclMap = new ArrayList<>(); - defaultAclList = new ArrayList<>(); - for (OzoneAclType aclType : OzoneAclType.values()) { - accessAclMap.add(aclType.ordinal(), new HashMap<>()); - } - } - - private Map getAccessAclMap(OzoneAclType type) { - return accessAclMap.get(type.ordinal()); - } - - // For a given acl type and user, get the stored acl - private BitSet getAcl(OzoneAclType type, String user) { - return getAccessAclMap(type).get(user); - } - - public List getAcl() { - List acls = new ArrayList<>(); - - acls.addAll(getAccessAcls()); - acls.addAll(defaultAclList.stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())); - return acls; - } - - private Collection getAccessAcls() { - List acls = new ArrayList<>(); - for (OzoneAclType type : OzoneAclType.values()) { - accessAclMap.get(type.ordinal()).entrySet().stream(). - forEach(entry -> acls.add(new OzoneAcl(ACLIdentityType. - valueOf(type.name()), entry.getKey(), entry.getValue(), - OzoneAcl.AclScope.ACCESS))); - } - return acls; - } - - // Add a new acl to the map - public void addAcl(OzoneAcl acl) throws OMException { - Objects.requireNonNull(acl, "Acl should not be null."); - if (acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)) { - defaultAclList.add(OzoneAcl.toProtobuf(acl)); - return; - } - - OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); - if (!getAccessAclMap(aclType).containsKey(acl.getName())) { - getAccessAclMap(aclType).put(acl.getName(), acl.getAclBitSet()); - } else { - // Check if we are adding new rights to existing acl. - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - BitSet curRights = (BitSet) getAccessAclMap(aclType). - get(acl.getName()).clone(); - temp.or(curRights); - - if (temp.equals(curRights)) { - // throw exception if acl is already added. 
- throw new OMException("Acl " + acl + " already exist.", - INVALID_REQUEST); - } - getAccessAclMap(aclType).replace(acl.getName(), temp); - } - } - - // Add a new acl to the map - public void setAcls(List acls) throws OMException { - Objects.requireNonNull(acls, "Acls should not be null."); - // Remove all Acls. - for (OzoneAclType type : OzoneAclType.values()) { - accessAclMap.get(type.ordinal()).clear(); - } - // Add acls. - for (OzoneAcl acl : acls) { - addAcl(acl); - } - } - - // Add a new acl to the map - public void removeAcl(OzoneAcl acl) throws OMException { - Objects.requireNonNull(acl, "Acl should not be null."); - if (acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)) { - defaultAclList.remove(OzoneAcl.toProtobuf(acl)); - return; - } - - OzoneAclType aclType = OzoneAclType.valueOf(acl.getType().name()); - if (getAccessAclMap(aclType).containsKey(acl.getName())) { - BitSet aclRights = getAccessAclMap(aclType).get(acl.getName()); - BitSet bits = (BitSet) acl.getAclBitSet().clone(); - bits.and(aclRights); - - if (bits.equals(ZERO_BITSET)) { - // throw exception if acl doesn't exist. - throw new OMException("Acl [" + acl + "] doesn't exist.", - INVALID_REQUEST); - } - - acl.getAclBitSet().and(aclRights); - aclRights.xor(acl.getAclBitSet()); - - // Remove the acl as all rights are already set to 0. - if (aclRights.equals(ZERO_BITSET)) { - getAccessAclMap(aclType).remove(acl.getName()); - } - } else { - // throw exception if acl doesn't exist. - throw new OMException("Acl [" + acl + "] doesn't exist.", - INVALID_REQUEST); - } - } - - // Add a new acl to the map - public void addAcl(OzoneAclInfo acl) throws OMException { - Objects.requireNonNull(acl, "Acl should not be null."); - if (acl.getAclScope().equals(OzoneAclInfo.OzoneAclScope.DEFAULT)) { - defaultAclList.add(acl); - return; - } - - if (!getAccessAclMap(acl.getType()).containsKey(acl.getName())) { - BitSet acls = BitSet.valueOf(acl.getRights().toByteArray()); - getAccessAclMap(acl.getType()).put(acl.getName(), acls); - } else { - // throw exception if acl is already added. - - throw new OMException("Acl " + acl + " already exist.", INVALID_REQUEST); - } - } - - // for a given acl, check if the user has access rights - public boolean hasAccess(OzoneAclInfo acl) { - if (acl == null) { - return false; - } - - BitSet aclBitSet = getAcl(acl.getType(), acl.getName()); - if (aclBitSet == null) { - return false; - } - BitSet result = BitSet.valueOf(acl.getRights().toByteArray()); - result.and(aclBitSet); - return (!result.equals(ZERO_BITSET) || aclBitSet.get(ALL.ordinal())) - && !aclBitSet.get(NONE.ordinal()); - } - - /** - * For a given acl, check if the user has access rights. - * Acl's are checked in followoing order: - * 1. Acls for USER. - * 2. Acls for GROUPS. - * 3. Acls for WORLD. - * 4. Acls for ANONYMOUS. - * @param acl - * @param ugi - * - * @return true if given ugi has acl set, else false. - * */ - public boolean hasAccess(ACLType acl, UserGroupInformation ugi) { - if (acl == null) { - return false; - } - if (ugi == null) { - return false; - } - - // Check acls in user acl list. - return checkAccessForOzoneAclType(OzoneAclType.USER, acl, ugi) - || checkAccessForOzoneAclType(OzoneAclType.GROUP, acl, ugi) - || checkAccessForOzoneAclType(OzoneAclType.WORLD, acl, ugi) - || checkAccessForOzoneAclType(OzoneAclType.ANONYMOUS, acl, ugi); - } - - /** - * Helper function to check acl access for OzoneAclType. 
- * */ - private boolean checkAccessForOzoneAclType(OzoneAclType identityType, - ACLType acl, UserGroupInformation ugi) { - - switch (identityType) { - case USER: - return OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, - ugi.getUserName())); - case GROUP: - // Check access for user groups. - for (String userGroup : ugi.getGroupNames()) { - if (OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, - userGroup))) { - // Return true if any user group has required permission. - return true; - } - } - break; - default: - // For type WORLD and ANONYMOUS we set acl type as name. - if(OzoneAclUtil.checkIfAclBitIsSet(acl, getAcl(identityType, - identityType.name()))) { - return true; - } - - } - return false; - } - - // Convert this map to OzoneAclInfo Protobuf List - public List ozoneAclGetProtobuf() { - List aclList = new LinkedList<>(); - for (OzoneAclType type : OzoneAclType.values()) { - for (Map.Entry entry : - accessAclMap.get(type.ordinal()).entrySet()) { - OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() - .setName(entry.getKey()) - .setType(type) - .setAclScope(OzoneAclScope.ACCESS) - .setRights(ByteString.copyFrom(entry.getValue().toByteArray())); - - aclList.add(builder.build()); - } - } - aclList.addAll(defaultAclList); - return aclList; - } - - // Create map from list of OzoneAclInfos - public static OmOzoneAclMap ozoneAclGetFromProtobuf( - List aclList) throws OMException { - OmOzoneAclMap aclMap = new OmOzoneAclMap(); - for (OzoneAclInfo acl : aclList) { - aclMap.addAcl(acl); - } - return aclMap; - } - - public Collection getAclsByScope(OzoneAclScope scope) { - if (scope.equals(OzoneAclScope.DEFAULT)) { - return defaultAclList.stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList()); - } else { - return getAcl(); - } - } - - public List getDefaultAclList() { - return defaultAclList; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java deleted file mode 100644 index 2d753a5caa5..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartInfo; - -/** - * Class that defines information about each part of a multipart upload key. 
- */ -public class OmPartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; - - public OmPartInfo(int number, String name, long time, long size) { - this.partNumber = number; - this.partName = name; - this.modificationTime = time; - this.size = size; - } - - public int getPartNumber() { - return partNumber; - } - - public String getPartName() { - return partName; - } - - public long getModificationTime() { - return modificationTime; - } - - public long getSize() { - return size; - } - - public PartInfo getProto() { - return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) - .setModificationTime(modificationTime) - .setSize(size).build(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java deleted file mode 100644 index 26b5b1d7c7d..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrefixInfo; - -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -/** - * Wrapper class for Ozone prefix path info, currently mainly target for ACL but - * can be extended for other OzFS optimizations in future. - */ -// TODO: support Auditable interface -public final class OmPrefixInfo extends WithMetadata { - - private String name; - private List acls; - - public OmPrefixInfo(String name, List acls, - Map metadata) { - this.name = name; - this.acls = acls; - this.metadata = metadata; - } - - /** - * Returns the ACL's associated with this prefix. - * @return {@literal List} - */ - public List getAcls() { - return acls; - } - - public boolean addAcl(OzoneAcl acl) { - return OzoneAclUtil.addAcl(acls, acl); - } - - public boolean removeAcl(OzoneAcl acl) { - return OzoneAclUtil.removeAcl(acls, acl); - } - - public boolean setAcls(List newAcls) { - return OzoneAclUtil.setAcl(acls, newAcls); - } - - /** - * Returns the name of the prefix path. - * @return name of the prefix path. - */ - public String getName() { - return name; - } - - /** - * Returns new builder class that builds a OmPrefixInfo. - * - * @return Builder - */ - public static OmPrefixInfo.Builder newBuilder() { - return new OmPrefixInfo.Builder(); - } - - /** - * Builder for OmPrefixInfo. - */ - public static class Builder { - private String name; - private List acls; - private Map metadata; - - public Builder() { - //Default values - this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); - } - - public Builder setAcls(List listOfAcls) { - if (listOfAcls != null) { - acls.addAll(listOfAcls); - } - return this; - } - - public Builder setName(String n) { - this.name = n; - return this; - } - - public OmPrefixInfo.Builder addMetadata(String key, String value) { - metadata.put(key, value); - return this; - } - - public OmPrefixInfo.Builder addAllMetadata( - Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } - return this; - } - - /** - * Constructs the OmPrefixInfo. - * @return instance of OmPrefixInfo. - */ - public OmPrefixInfo build() { - Preconditions.checkNotNull(name); - return new OmPrefixInfo(name, acls, metadata); - } - } - - /** - * Creates PrefixInfo protobuf from OmPrefixInfo. - */ - public PrefixInfo getProtobuf() { - PrefixInfo.Builder pib = PrefixInfo.newBuilder().setName(name) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); - if (acls != null) { - pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); - } - return pib.build(); - } - - /** - * Parses PrefixInfo protobuf and creates OmPrefixInfo. 
- * @param prefixInfo - * @return instance of OmPrefixInfo - */ - public static OmPrefixInfo getFromProtobuf(PrefixInfo prefixInfo) { - OmPrefixInfo.Builder opib = OmPrefixInfo.newBuilder() - .setName(prefixInfo.getName()); - if (prefixInfo.getMetadataList() != null) { - opib.addAllMetadata(KeyValueUtil - .getFromProtobuf(prefixInfo.getMetadataList())); - } - if (prefixInfo.getAclsList() != null) { - opib.setAcls(OzoneAclUtil.fromProtobuf(prefixInfo.getAclsList())); - } - return opib.build(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - OmPrefixInfo that = (OmPrefixInfo) o; - return name.equals(that.name) && - Objects.equals(acls, that.acls) && - Objects.equals(metadata, that.metadata); - } - - @Override - public int hashCode() { - return Objects.hash(name); - } -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java deleted file mode 100644 index 6453e8e443b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ /dev/null @@ -1,359 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.io.IOException; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.Auditable; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; - -import com.google.common.base.Preconditions; - - -/** - * A class that encapsulates the OmVolumeArgs Args. - */ -public final class OmVolumeArgs extends WithMetadata implements Auditable { - private final String adminName; - private String ownerName; - private final String volume; - private long creationTime; - private long quotaInBytes; - private final OmOzoneAclMap aclMap; - private long objectID; - private long updateID; - - /** - * Set the Object ID. If this value is already set then this function throws. - * There is a reason why we cannot use the final here. The OmVolumeArgs is - * deserialized from the protobuf in many places in code. We need to set - * this object ID, after it is deserialized. - * - * @param obId - long - */ - public void setObjectID(long obId) { - if(this.objectID != 0) { - throw new UnsupportedOperationException("Attempt to modify object ID " + - "which is not zero. Current Object ID is " + this.objectID); - } - this.objectID = obId; - } - - /** - * Returns a monotonically increasing ID, that denotes the last update. - * Each time an update happens, this ID is incremented. - * @return long - */ - public long getUpdateID() { - return updateID; - } - - /** - * Sets the update ID. For each modification of this object, we will set - * this to a value greater than the current value. - * @param updateID long - */ - public void setUpdateID(long updateID) { - this.updateID = updateID; - } - - /** - * A immutable identity field for this object. - * @return long. - */ - public long getObjectID() { - return objectID; - } - - /** - * Private constructor, constructed via builder. - * @param adminName - Administrator's name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param metadata - metadata map for custom key/value data. - * @param aclMap - User to access rights map. - * @param creationTime - Volume creation time. - * @param objectID - ID of this object. - * @param updateID - A sequence number that denotes the last update on this - * object. This is a monotonically increasing number. 
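The set-once rule for objectID described in this class can be restated as a short usage sketch; the values and the deserialization scenario are assumed for illustration and are not taken from this patch:

// OmVolumeArgs args = OmVolumeArgs.getFromProtobuf(volumeInfo); // assume objectID is still 0 here
// args.setObjectID(42L);   // first assignment after deserialization succeeds
// args.setObjectID(43L);   // a second assignment throws UnsupportedOperationException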
- */ - @SuppressWarnings({"checkstyle:ParameterNumber", "This is invoked from a " + - "builder."}) - private OmVolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map metadata, - OmOzoneAclMap aclMap, long creationTime, long objectID, - long updateID) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.metadata = metadata; - this.aclMap = aclMap; - this.creationTime = creationTime; - this.objectID = objectID; - this.updateID = updateID; - } - - - public void setOwnerName(String newOwner) { - this.ownerName = newOwner; - } - - public void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytes = quotaInBytes; - } - - public void setCreationTime(long time) { - this.creationTime = time; - } - - public void addAcl(OzoneAcl acl) throws OMException { - this.aclMap.addAcl(acl); - } - - public void setAcls(List acls) throws OMException { - this.aclMap.setAcls(acls); - } - - public void removeAcl(OzoneAcl acl) throws OMException { - this.aclMap.removeAcl(acl); - } - - /** - * Returns the Admin Name. - * @return String. - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns the owner Name. - * @return String - */ - public String getOwnerName() { - return ownerName; - } - - /** - * Returns the volume Name. - * @return String - */ - public String getVolume() { - return volume; - } - - /** - * Returns creation time. - * @return long - */ - public long getCreationTime() { - return creationTime; - } - - /** - * Returns Quota in Bytes. - * @return long, Quota in bytes. - */ - public long getQuotaInBytes() { - return quotaInBytes; - } - - public OmOzoneAclMap getAclMap() { - return aclMap; - } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ - public static Builder newBuilder() { - return new Builder(); - } - - @Override - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.ADMIN, this.adminName); - auditMap.put(OzoneConsts.OWNER, this.ownerName); - auditMap.put(OzoneConsts.VOLUME, this.volume); - auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime)); - auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes)); - auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID())); - auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID())); - return auditMap; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - OmVolumeArgs that = (OmVolumeArgs) o; - return Objects.equals(this.objectID, that.objectID); - } - - @Override - public int hashCode() { - return Objects.hash(this.objectID); - } - - /** - * Builder for OmVolumeArgs. - */ - public static class Builder { - private String adminName; - private String ownerName; - private String volume; - private long creationTime; - private long quotaInBytes; - private Map metadata; - private OmOzoneAclMap aclMap; - private long objectID; - private long updateID; - - /** - * Sets the Object ID for this Object. - * Object ID are unique and immutable identifier for each object in the - * System. - * @param objectID - long - */ - public void setObjectID(long objectID) { - this.objectID = objectID; - } - - /** - * Sets the update ID for this Object. Update IDs are monotonically - * increasing values which are updated each time there is an update. 
- * @param updateID - long - */ - public void setUpdateID(long updateID) { - this.updateID = updateID; - } - - - - /** - * Constructs a builder. - */ - public Builder() { - metadata = new HashMap<>(); - aclMap = new OmOzoneAclMap(); - } - - public Builder setAdminName(String admin) { - this.adminName = admin; - return this; - } - - public Builder setOwnerName(String owner) { - this.ownerName = owner; - return this; - } - - public Builder setVolume(String volumeName) { - this.volume = volumeName; - return this; - } - - public Builder setCreationTime(long createdOn) { - this.creationTime = createdOn; - return this; - } - - public Builder setQuotaInBytes(long quota) { - this.quotaInBytes = quota; - return this; - } - - public Builder addMetadata(String key, String value) { - metadata.put(key, value); // overwrite if present. - return this; - } - - public Builder addAllMetadata(Map additionalMetaData) { - if (additionalMetaData != null) { - metadata.putAll(additionalMetaData); - } - return this; - } - - public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException { - aclMap.addAcl(acl); - return this; - } - - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ - public OmVolumeArgs build() { - Preconditions.checkNotNull(adminName); - Preconditions.checkNotNull(ownerName); - Preconditions.checkNotNull(volume); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - metadata, aclMap, creationTime, objectID, updateID); - } - - } - - public VolumeInfo getProtobuf() { - List aclList = aclMap.ozoneAclGetProtobuf(); - return VolumeInfo.newBuilder() - .setAdminName(adminName) - .setOwnerName(ownerName) - .setVolume(volume) - .setQuotaInBytes(quotaInBytes) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .addAllVolumeAcls(aclList) - .setCreationTime( - creationTime == 0 ? System.currentTimeMillis() : creationTime) - .setObjectID(objectID) - .setUpdateID(updateID) - .build(); - } - - public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) - throws OMException { - OmOzoneAclMap aclMap = - OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList()); - return new OmVolumeArgs( - volInfo.getAdminName(), - volInfo.getOwnerName(), - volInfo.getVolume(), - volInfo.getQuotaInBytes(), - KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()), - aclMap, - volInfo.getCreationTime(), - volInfo.getObjectID(), - volInfo.getUpdateID()); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java deleted file mode 100644 index 11ee622494d..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -/** - * This class represents a open key "session". A session here means a key is - * opened by a specific client, the client sends the handler to server, such - * that servers can recognize this client, and thus know how to close the key. - */ -public class OpenKeySession { - private final long id; - private final OmKeyInfo keyInfo; - // the version of the key when it is being opened in this session. - // a block that has a create version equals to open version means it will - // be committed only when this open session is closed. - private long openVersion; - - public OpenKeySession(long id, OmKeyInfo info, long version) { - this.id = id; - this.keyInfo = info; - this.openVersion = version; - } - - public long getOpenVersion() { - return this.openVersion; - } - - public OmKeyInfo getKeyInfo() { - return keyInfo; - } - - public long getId() { - return id; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java deleted file mode 100644 index fd42fea9ac0..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ /dev/null @@ -1,286 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.RequestContext; - -import java.util.ArrayList; -import java.util.BitSet; -import java.util.List; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; - -/** - * Helper class for ozone acls operations. - */ -public final class OzoneAclUtil { - - private OzoneAclUtil(){ - } - - /** - * Helper function to get access acl list for current user. - * - * @param userName - * @param userGroups - * @return list of OzoneAcls - * */ - public static List getAclList(String userName, - List userGroups, ACLType userRights, ACLType groupRights) { - - List listOfAcls = new ArrayList<>(); - - // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); - if(userGroups != null) { - // Group ACLs of the User. - userGroups.forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); - } - return listOfAcls; - } - - /** - * Check if acl right requested for given RequestContext exist - * in provided acl list. - * Acl validation rules: - * 1. If user/group has ALL bit set than all user should have all rights. - * 2. If user/group has NONE bit set than user/group will not have any right. - * 3. For all other individual rights individual bits should be set. - * - * @param acls - * @param context - * @return return true if acl list contains right requsted in context. - * */ - public static boolean checkAclRight(List acls, - RequestContext context) throws OMException { - String[] userGroups = context.getClientUgi().getGroupNames(); - String userName = context.getClientUgi().getUserName(); - ACLType aclToCheck = context.getAclRights(); - for (OzoneAcl a : acls) { - if(checkAccessInAcl(a, userGroups, userName, aclToCheck)) { - return true; - } - } - return false; - } - - private static boolean checkAccessInAcl(OzoneAcl a, String[] groups, - String username, ACLType aclToCheck) { - BitSet rights = a.getAclBitSet(); - switch (a.getType()) { - case USER: - if (a.getName().equals(username)) { - return checkIfAclBitIsSet(aclToCheck, rights); - } - break; - case GROUP: - for (String grp : groups) { - if (a.getName().equals(grp)) { - return checkIfAclBitIsSet(aclToCheck, rights); - } - } - break; - - default: - return checkIfAclBitIsSet(aclToCheck, rights); - } - return false; - } - - /** - * Check if acl right requested for given RequestContext exist - * in provided acl list. 
- * Acl validation rules: - * 1. If user/group has ALL bit set than all user should have all rights. - * 2. If user/group has NONE bit set than user/group will not have any right. - * 3. For all other individual rights individual bits should be set. - * - * @param acls - * @param context - * @return return true if acl list contains right requsted in context. - * */ - public static boolean checkAclRights(List acls, - RequestContext context) throws OMException { - String[] userGroups = context.getClientUgi().getGroupNames(); - String userName = context.getClientUgi().getUserName(); - ACLType aclToCheck = context.getAclRights(); - for (OzoneAcl acl : acls) { - if (checkAccessInAcl(acl, userGroups, userName, aclToCheck)) { - return true; - } - } - return false; - } - - /** - * Helper function to check if bit for given acl is set. - * @param acl - * @param bitset - * @return True of acl bit is set else false. - * */ - public static boolean checkIfAclBitIsSet(IAccessAuthorizer.ACLType acl, - BitSet bitset) { - if (bitset == null) { - return false; - } - - return ((bitset.get(acl.ordinal()) - || bitset.get(ALL.ordinal())) - && !bitset.get(NONE.ordinal())); - } - - /** - * Helper function to inherit default ACL as access ACL for child object. - * 1. deep copy of OzoneAcl to avoid unexpected parent default ACL change - * 2. merge inherited access ACL with existing access ACL via - * OzoneUtils.addAcl(). - * @param acls - * @param parentAcls - * @return true if acls inherited DEFAULT acls from parentAcls successfully, - * false otherwise. - */ - public static boolean inheritDefaultAcls(List acls, - List parentAcls) { - List inheritedAcls = null; - if (parentAcls != null && !parentAcls.isEmpty()) { - inheritedAcls = parentAcls.stream() - .filter(a -> a.getAclScope() == DEFAULT) - .map(acl -> new OzoneAcl(acl.getType(), acl.getName(), - acl.getAclBitSet(), OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()); - } - if (inheritedAcls != null && !inheritedAcls.isEmpty()) { - inheritedAcls.stream().forEach(acl -> addAcl(acls, acl)); - return true; - } - return false; - } - - /** - * Convert a list of OzoneAclInfo(protoc) to list of OzoneAcl(java). - * @param protoAcls - * @return list of OzoneAcl. - */ - public static List fromProtobuf(List protoAcls) { - return protoAcls.stream().map(acl->OzoneAcl.fromProtobuf(acl)) - .collect(Collectors.toList()); - } - - /** - * Convert a list of OzoneAcl(java) to list of OzoneAclInfo(protoc). - * @param protoAcls - * @return list of OzoneAclInfo. - */ - public static List toProtobuf(List protoAcls) { - return protoAcls.stream().map(acl->OzoneAcl.toProtobuf(acl)) - .collect(Collectors.toList()); - } - - /** - * Add an OzoneAcl to existing list of OzoneAcls. - * @param existingAcls - * @param acl - * @return true if current OzoneAcls are changed, false otherwise. - */ - public static boolean addAcl(List existingAcls, OzoneAcl acl) { - if (existingAcls == null || acl == null) { - return false; - } - - for (OzoneAcl a: existingAcls) { - if (a.getName().equals(acl.getName()) && - a.getType().equals(acl.getType()) && - a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.or(acl.getAclBitSet()); - if (current.equals(original)) { - return false; - } - return true; - } - } - - existingAcls.add(acl); - return true; - } - - /** - * remove OzoneAcl from existing list of OzoneAcls. - * @param existingAcls - * @param acl - * @return true if current OzoneAcls are changed, false otherwise. 
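A minimal, self-contained sketch of the three bit rules listed above, using plain java.util.BitSet; the class name and ordinal values are illustrative stand-ins, not the real IAccessAuthorizer.ACLType enum:

import java.util.BitSet;

public final class AclBitRuleSketch {
  // Illustrative ordinals only; the real ACLType enum defines its own order.
  static final int READ = 0, WRITE = 1, ALL = 8, NONE = 9;

  // A right is granted when its own bit or the ALL bit is set,
  // and it is always denied when the NONE bit is set.
  static boolean isGranted(int requested, BitSet rights) {
    if (rights == null) {
      return false;
    }
    return (rights.get(requested) || rights.get(ALL)) && !rights.get(NONE);
  }

  public static void main(String[] args) {
    BitSet readOnly = new BitSet();
    readOnly.set(READ);
    System.out.println(isGranted(READ, readOnly));   // true
    System.out.println(isGranted(WRITE, readOnly));  // false

    BitSet vetoed = new BitSet();
    vetoed.set(ALL);
    vetoed.set(NONE);
    System.out.println(isGranted(READ, vetoed));     // false: NONE vetoes even ALL
  }
}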
- */ - public static boolean removeAcl(List existingAcls, OzoneAcl acl) { - if (existingAcls == null || existingAcls.isEmpty() || acl == null) { - return false; - } - - for (OzoneAcl a: existingAcls) { - if (a.getName().equals(acl.getName()) && - a.getType().equals(acl.getType()) && - a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.andNot(acl.getAclBitSet()); - - if (current.equals(original)) { - return false; - } - - if (current.isEmpty()) { - existingAcls.remove(a); - } - return true; - } - } - return false; - } - - /** - * Set existingAcls to newAcls. - * @param existingAcls - * @param newAcls - * @return true if newAcls are set successfully, false otherwise. - */ - public static boolean setAcl(List existingAcls, - List newAcls) { - if (existingAcls == null) { - return false; - } else { - existingAcls.clear(); - if (newAcls != null) { - existingAcls.addAll(newAcls); - } - } - return true; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java deleted file mode 100644 index 07f3194c14b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.fs.Path; - -import java.nio.file.Paths; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; - -/** - * Utility class for OzoneFileSystem. - */ -public final class OzoneFSUtils { - - private OzoneFSUtils() {} - - /** - * Returns string representation of path after removing the leading slash. - */ - public static String pathToKey(Path path) { - return path.toString().substring(1); - } - - /** - * Returns string representation of the input path parent. The function adds - * a trailing slash if it does not exist and returns an empty string if the - * parent is root. - */ - public static String getParent(String keyName) { - java.nio.file.Path parentDir = Paths.get(keyName).getParent(); - if (parentDir == null) { - return ""; - } - return addTrailingSlashIfNeeded(parentDir.toString()); - } - - /** - * The function returns immediate child of given ancestor in a particular - * descendant. For example if ancestor is /a/b and descendant is /a/b/c/d/e - * the function should return /a/b/c/. If the descendant itself is the - * immediate child then it is returned as is without adding a trailing slash. - * This is done to distinguish files from a directory as in ozone files do - * not carry a trailing slash. 
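Restating the behaviour described above as a usage sketch; the first two cases come from the Javadoc example, and the third follows from the descendant check in the method:

// getImmediateChild("/a/b/c/d/e", "/a/b") -> "/a/b/c/"  (immediate child directory, trailing slash added)
// getImmediateChild("/a/b/c", "/a/b")     -> "/a/b/c"   (the descendant itself is the immediate child, no slash)
// getImmediateChild("/x/y", "/a/b")       -> null       (not a descendant of the given ancestor)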
- */ - public static String getImmediateChild(String descendant, String ancestor) { - ancestor = - !ancestor.isEmpty() ? addTrailingSlashIfNeeded(ancestor) : ancestor; - if (!descendant.startsWith(ancestor)) { - return null; - } - java.nio.file.Path descendantPath = Paths.get(descendant); - java.nio.file.Path ancestorPath = Paths.get(ancestor); - int ancestorPathNameCount = - ancestor.isEmpty() ? 0 : ancestorPath.getNameCount(); - if (descendantPath.getNameCount() - ancestorPathNameCount > 1) { - return addTrailingSlashIfNeeded( - ancestor + descendantPath.getName(ancestorPathNameCount)); - } - return descendant; - } - - public static String addTrailingSlashIfNeeded(String key) { - if (!key.endsWith(OZONE_URI_DELIMITER)) { - return key + OZONE_URI_DELIMITER; - } else { - return key; - } - } - - public static boolean isFile(String keyName) { - return !keyName.endsWith(OZONE_URI_DELIMITER); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java deleted file mode 100644 index 87179465121..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.protocolPB.PBHelper; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; - -import java.io.IOException; -import java.net.URI; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; - -/** - * File Status of the Ozone Key. - */ -public class OzoneFileStatus extends FileStatus { - - private static final long serialVersionUID = 1L; - - transient private OmKeyInfo keyInfo; - - public OzoneFileStatus(OmKeyInfo key, long blockSize, boolean isDirectory) { - super(key.getDataSize(), isDirectory, key.getFactor().getNumber(), - blockSize, key.getModificationTime(), getPath(key.getKeyName())); - keyInfo = key; - } - - public OzoneFileStatus(FileStatus status) throws IOException { - super(status); - } - - // Use this constructor only for directories - public OzoneFileStatus(String keyName) { - super(0, true, 0, 0, 0, getPath(keyName)); - } - - public OzoneFileStatusProto getProtobuf() throws IOException { - return OzoneFileStatusProto.newBuilder().setStatus(PBHelper.convert(this)) - .build(); - } - - public static OzoneFileStatus getFromProtobuf(OzoneFileStatusProto response) - throws IOException { - return new OzoneFileStatus(PBHelper.convert(response.getStatus())); - } - - public static Path getPath(String keyName) { - return new Path(OZONE_URI_DELIMITER + keyName); - } - - public FileStatus makeQualified(URI defaultUri, Path parent, - String owner, String group) { - // fully-qualify path - setPath(parent.makeQualified(defaultUri, null)); - setGroup(group); - setOwner(owner); - if (isDirectory()) { - setPermission(FsPermission.getDirDefault()); - } else { - setPermission(FsPermission.getFileDefault()); - } - return this; // API compatibility - } - - /** Get the modification time of the file/directory. - * - * o3fs uses objects as "fake" directories, which are not updated to - * reflect the accurate modification time. We choose to report the - * current time because some parts of the ecosystem (e.g. the - * HistoryServer) use modification time to ignore "old" directories. - * - * @return for files the modification time in milliseconds since January 1, - * 1970 UTC or for directories the current time. 
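The policy above amounts to the following, sketched with an assumed OzoneFileStatus instance rather than a real key:

// File:                  status.getModificationTime() returns the key's stored modification time.
// o3fs "fake" directory: status.getModificationTime() returns the current time
//                        (System.currentTimeMillis()), so directories never appear stale.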
- */ - @Override - public long getModificationTime(){ - if (isDirectory()) { - return System.currentTimeMillis(); - } else { - return super.getModificationTime(); - } - } - - public OmKeyInfo getKeyInfo() { - return keyInfo; - } - - @Override - public boolean equals(Object o) { - return super.equals(o); - } - - @Override - public int hashCode() { - return super.hashCode(); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java deleted file mode 100644 index c28c2c8abc4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RepeatedKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyInfo; - -/** - * Args for deleted keys. This is written to om metadata deletedTable. - * Once a key is deleted, it is moved to om metadata deletedTable. Having a - * {label: List} ensures that if users create & delete keys with - * exact same uri multiple times, all the delete instances are bundled under - * the same key name. This is useful as part of GDPR compliance where an - * admin wants to confirm if a given key is deleted from deletedTable metadata. - */ -public class RepeatedOmKeyInfo { - private List omKeyInfoList; - - public RepeatedOmKeyInfo(List omKeyInfos) { - this.omKeyInfoList = omKeyInfos; - } - - public RepeatedOmKeyInfo(OmKeyInfo omKeyInfos) { - this.omKeyInfoList = new ArrayList<>(); - this.omKeyInfoList.add(omKeyInfos); - } - - public void addOmKeyInfo(OmKeyInfo info) { - this.omKeyInfoList.add(info); - } - - public List getOmKeyInfoList() { - return omKeyInfoList; - } - - public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo - repeatedKeyInfo) { - List list = new ArrayList<>(); - for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) { - list.add(OmKeyInfo.getFromProtobuf(k)); - } - return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build(); - } - - public RepeatedKeyInfo getProto() { - List list = new ArrayList<>(); - for(OmKeyInfo k : omKeyInfoList) { - list.add(k.getProtobuf()); - } - - RepeatedKeyInfo.Builder builder = RepeatedKeyInfo.newBuilder() - .addAllKeyInfo(list); - return builder.build(); - } - - /** - * Builder of RepeatedOmKeyInfo. - */ - public static class Builder { - private List omKeyInfos; - - public Builder(){} - - public Builder setOmKeyInfos(List infoList) { - this.omKeyInfos = infoList; - return this; - } - - public RepeatedOmKeyInfo build() { - return new RepeatedOmKeyInfo(omKeyInfos); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java deleted file mode 100644 index 5f651144465..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import java.util.Objects; - -/** - * S3Secret to be saved in database. - */ -public class S3SecretValue { - private String kerberosID; - private String awsSecret; - - public S3SecretValue(String kerberosID, String awsSecret) { - this.kerberosID = kerberosID; - this.awsSecret = awsSecret; - } - - public String getKerberosID() { - return kerberosID; - } - - public void setKerberosID(String kerberosID) { - this.kerberosID = kerberosID; - } - - public String getAwsSecret() { - return awsSecret; - } - - public void setAwsSecret(String awsSecret) { - this.awsSecret = awsSecret; - } - - public String getAwsAccessKey() { - return kerberosID; - } - - public static S3SecretValue fromProtobuf( - OzoneManagerProtocolProtos.S3Secret s3Secret) { - return new S3SecretValue(s3Secret.getKerberosID(), s3Secret.getAwsSecret()); - } - - public OzoneManagerProtocolProtos.S3Secret getProtobuf() { - return OzoneManagerProtocolProtos.S3Secret.newBuilder() - .setAwsSecret(this.awsSecret) - .setKerberosID(this.kerberosID) - .build(); - } - - @Override - public String toString() { - return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - S3SecretValue that = (S3SecretValue) o; - return kerberosID.equals(that.kerberosID) && - awsSecret.equals(that.awsSecret); - } - - @Override - public int hashCode() { - return Objects.hash(kerberosID, awsSecret); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java deleted file mode 100644 index dce4f8e20d2..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .ServicePort; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * ServiceInfo holds the config details of Ozone services. - */ -public final class ServiceInfo { - - private static final ObjectReader READER = - new ObjectMapper().readerFor(ServiceInfo.class); - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - - /** - * Type of node/service. - */ - private NodeType nodeType; - /** - * Hostname of the node in which the service is running. - */ - private String hostname; - - /** - * List of ports the service listens to. - */ - private Map ports; - - /** - * Default constructor for JSON deserialization. - */ - public ServiceInfo() {} - - /** - * Constructs the ServiceInfo for the {@code nodeType}. - * @param nodeType type of node/service - * @param hostname hostname of the service - * @param portList list of ports the service listens to - */ - private ServiceInfo( - NodeType nodeType, String hostname, List portList) { - Preconditions.checkNotNull(nodeType); - Preconditions.checkNotNull(hostname); - this.nodeType = nodeType; - this.hostname = hostname; - this.ports = new HashMap<>(); - for (ServicePort port : portList) { - ports.put(port.getType(), port.getValue()); - } - } - - /** - * Returns the type of node/service. - * @return node type - */ - public NodeType getNodeType() { - return nodeType; - } - - /** - * Returns the hostname of the service. - * @return hostname - */ - public String getHostname() { - return hostname; - } - - /** - * Returns ServicePort.Type to port mappings. - * @return ports - */ - public Map getPorts() { - return ports; - } - - /** - * Returns the port for given type. - * - * @param type the type of port. - * ex: RPC, HTTP, HTTPS, etc.. - * @throws NullPointerException if the service doesn't support the given type - */ - @JsonIgnore - public int getPort(ServicePort.Type type) { - return ports.get(type); - } - - /** - * Returns the address of the service (hostname with port of the given type). - * @param portType the type of port, eg. RPC, HTTP, etc. - * @return service address (hostname with port of the given type) - */ - @JsonIgnore - public String getServiceAddress(ServicePort.Type portType) { - return hostname + ":" + getPort(portType); - } - - /** - * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo. 
- * - * @return OzoneManagerProtocolProtos.ServiceInfo - */ - @JsonIgnore - public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() { - OzoneManagerProtocolProtos.ServiceInfo.Builder builder = - OzoneManagerProtocolProtos.ServiceInfo.newBuilder(); - builder.setNodeType(nodeType) - .setHostname(hostname) - .addAllServicePorts( - ports.entrySet().stream() - .map( - entry -> - ServicePort.newBuilder() - .setType(entry.getKey()) - .setValue(entry.getValue()).build()) - .collect(Collectors.toList())); - return builder.build(); - } - - /** - * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}. - * - * @return {@link ServiceInfo} - */ - @JsonIgnore - public static ServiceInfo getFromProtobuf( - OzoneManagerProtocolProtos.ServiceInfo serviceInfo) { - return new ServiceInfo(serviceInfo.getNodeType(), - serviceInfo.getHostname(), - serviceInfo.getServicePortsList()); - } - - /** - * Creates a new builder to build {@link ServiceInfo}. - * @return {@link ServiceInfo.Builder} - */ - public static Builder newBuilder() { - return new Builder(); - } - - /** - * Builder used to build/construct {@link ServiceInfo}. - */ - public static class Builder { - - private NodeType node; - private String host; - private List portList = new ArrayList<>(); - - - /** - * Sets the node/service type. - * @param nodeType type of node - * @return the builder - */ - public Builder setNodeType(NodeType nodeType) { - node = nodeType; - return this; - } - - /** - * Sets the hostname of the service. - * @param hostname service hostname - * @return the builder - */ - public Builder setHostname(String hostname) { - host = hostname; - return this; - } - - /** - * Adds the service port to the service port list. - * @param servicePort RPC port - * @return the builder - */ - public Builder addServicePort(ServicePort servicePort) { - portList.add(servicePort); - return this; - } - - - /** - * Builds and returns {@link ServiceInfo} with the set values. - * @return {@link ServiceInfo} - */ - public ServiceInfo build() { - return new ServiceInfo(node, host, portList); - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java deleted file mode 100644 index a90be635687..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfoEx.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import java.util.List; - -/** - * Wrapper class for service discovery, design for broader usage such as - * security, etc. - */ -public class ServiceInfoEx { - - private List infoList; - - // PEM encoded string of SCM CA certificate. - private String caCertificate; - - public ServiceInfoEx(List infoList, - String caCertificate) { - this.infoList = infoList; - this.caCertificate = caCertificate; - } - - public List getServiceInfoList() { - return infoList; - } - - public String getCaCertificate() { - return caCertificate; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java deleted file mode 100644 index 6fc7c8fcc53..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import com.google.common.base.Preconditions; - -import java.util.HashMap; -import java.util.Map; - -/** - * A class that encapsulates the createVolume Args. - */ -public final class VolumeArgs { - private final String adminName; - private final String ownerName; - private final String volume; - private final long quotaInBytes; - private final Map extendedAttributes; - - /** - * Private constructor, constructed via builder. - * - * @param adminName - Administrator name. - * @param ownerName - Volume owner's name - * @param volume - volume name - * @param quotaInBytes - Volume Quota in bytes. - * @param keyValueMap - keyValue map. - */ - private VolumeArgs(String adminName, String ownerName, String volume, - long quotaInBytes, Map keyValueMap) { - this.adminName = adminName; - this.ownerName = ownerName; - this.volume = volume; - this.quotaInBytes = quotaInBytes; - this.extendedAttributes = keyValueMap; - } - - /** - * Returns the Admin Name. - * - * @return String. - */ - public String getAdminName() { - return adminName; - } - - /** - * Returns the owner Name. - * - * @return String - */ - public String getOwnerName() { - return ownerName; - } - - /** - * Returns the volume Name. - * - * @return String - */ - public String getVolume() { - return volume; - } - - /** - * Returns Quota in Bytes. - * - * @return long, Quota in bytes. - */ - public long getQuotaInBytes() { - return quotaInBytes; - } - - public Map getExtendedAttributes() { - return extendedAttributes; - } - - static class Builder { - private String adminName; - private String ownerName; - private String volume; - private long quotaInBytes; - private Map extendedAttributes; - - /** - * Constructs a builder. - */ - Builder() { - extendedAttributes = new HashMap<>(); - } - - public void setAdminName(String adminName) { - this.adminName = adminName; - } - - public void setOwnerName(String ownerName) { - this.ownerName = ownerName; - } - - public void setVolume(String volume) { - this.volume = volume; - } - - public void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytes = quotaInBytes; - } - - public void addMetadata(String key, String value) { - extendedAttributes.put(key, value); // overwrite if present. - } - - /** - * Constructs a CreateVolumeArgument. - * - * @return CreateVolumeArgs. - */ - public VolumeArgs build() { - Preconditions.checkNotNull(adminName); - Preconditions.checkNotNull(ownerName); - Preconditions.checkNotNull(volume); - return new VolumeArgs(adminName, ownerName, volume, quotaInBytes, - extendedAttributes); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java deleted file mode 100644 index 5c49a15a12b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import java.util.HashMap; -import java.util.Map; - -/** - * Mixin class to handle custom metadata. - */ -public class WithMetadata { - - @SuppressWarnings("visibilitymodifier") - protected Map metadata = new HashMap<>(); - - /** - * Custom key value metadata. - */ - public Map getMetadata() { - return metadata; - } - - /** - * Set custom key value metadata. - */ - public void setMetadata(Map metadata) { - this.metadata = metadata; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java deleted file mode 100644 index b1211d8cb86..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java deleted file mode 100644 index 31f09244623..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ /dev/null @@ -1,477 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.om.lock; - - -import java.util.ArrayList; -import java.util.List; -import java.util.function.Consumer; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.lock.LockManager; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; - -/** - * Provides different locks to handle concurrency in OzoneMaster. - * We also maintain lock hierarchy, based on the weight. - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
- *   WEIGHT   LOCK
- *   0        S3 Bucket Lock
- *   1        Volume Lock
- *   2        Bucket Lock
- *   3        User Lock
- *   4        S3 Secret Lock
- *   5        Prefix Lock
- *
- * One cannot obtain a lower weight lock while holding a lock with higher
- * weight. The other way around is possible.
- *
- * For example:
- * {@literal ->} acquire volume lock (will work)
- *   {@literal +->} acquire bucket lock (will work)
- *     {@literal +-->} acquire s3 bucket lock (will throw Exception)
- *
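A standalone sketch of the weight rule above; the class name and bit-mask layout are illustrative, not the Resource enum or the thread-local lockSet used in this file:

public final class LockWeightSketch {

  // Bit i of 'held' is 1 when a lock of weight i is currently held by this thread.
  static boolean canLock(short held, int weight) {
    // Refuse the lock if any strictly higher weight bit is already set.
    return (held >> (weight + 1)) == 0;
  }

  public static void main(String[] args) {
    short held = 0;
    held |= (1 << 1);                      // volume lock (weight 1) is held
    System.out.println(canLock(held, 2));  // true: a heavier bucket lock (weight 2) may still be taken
    System.out.println(canLock(held, 0));  // false: a lighter s3 bucket lock (weight 0) is refused
  }
}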
- */ - -public class OzoneManagerLock { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerLock.class); - - private static final String READ_LOCK = "read"; - private static final String WRITE_LOCK = "write"; - - private final LockManager manager; - private final ThreadLocal lockSet = ThreadLocal.withInitial( - () -> Short.valueOf((short)0)); - - - /** - * Creates new OzoneManagerLock instance. - * @param conf Configuration object - */ - public OzoneManagerLock(Configuration conf) { - boolean fair = conf.getBoolean(OZONE_MANAGER_FAIR_LOCK, - OZONE_MANAGER_FAIR_LOCK_DEFAULT); - manager = new LockManager<>(conf, fair); - } - - /** - * Acquire lock on resource. - * - * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same - * thread acquiring lock again is allowed. - * - * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread - * acquiring lock again is not allowed. - * - * Special Note for USER_LOCK: Single thread can acquire single user lock/ - * multi user lock. But not both at the same time. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - @Deprecated - public boolean acquireLock(Resource resource, String... resources) { - String resourceName = generateResourceName(resource, resources); - return lock(resource, resourceName, manager::writeLock, WRITE_LOCK); - } - - /** - * Acquire read lock on resource. - * - * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same - * thread acquiring lock again is allowed. - * - * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread - * acquiring lock again is not allowed. - * - * Special Note for USER_LOCK: Single thread can acquire single user lock/ - * multi user lock. But not both at the same time. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - public boolean acquireReadLock(Resource resource, String... resources) { - String resourceName = generateResourceName(resource, resources); - return lock(resource, resourceName, manager::readLock, READ_LOCK); - } - - - /** - * Acquire write lock on resource. - * - * For S3_BUCKET_LOCK, VOLUME_LOCK, BUCKET_LOCK type resource, same - * thread acquiring lock again is allowed. - * - * For USER_LOCK, PREFIX_LOCK, S3_SECRET_LOCK type resource, same thread - * acquiring lock again is not allowed. - * - * Special Note for USER_LOCK: Single thread can acquire single user lock/ - * multi user lock. But not both at the same time. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - public boolean acquireWriteLock(Resource resource, String... 
resources) { - String resourceName = generateResourceName(resource, resources); - return lock(resource, resourceName, manager::writeLock, WRITE_LOCK); - } - - private boolean lock(Resource resource, String resourceName, - Consumer lockFn, String lockType) { - if (!resource.canLock(lockSet.get())) { - String errorMessage = getErrorMessage(resource); - LOG.error(errorMessage); - throw new RuntimeException(errorMessage); - } else { - lockFn.accept(resourceName); - if (LOG.isDebugEnabled()) { - LOG.debug("Acquired {} {} lock on resource {}", lockType, resource.name, - resourceName); - } - lockSet.set(resource.setLock(lockSet.get())); - return true; - } - } - - /** - * Generate resource name to be locked. - * @param resource - * @param resources - */ - private String generateResourceName(Resource resource, String... resources) { - if (resources.length == 1 && resource != Resource.BUCKET_LOCK) { - return OzoneManagerLockUtil.generateResourceLockName(resource, - resources[0]); - } else if (resources.length == 2 && resource == Resource.BUCKET_LOCK) { - return OzoneManagerLockUtil.generateBucketLockName(resources[0], - resources[1]); - } else { - throw new IllegalArgumentException("acquire lock is supported on single" + - " resource for all locks except for resource bucket"); - } - } - - private String getErrorMessage(Resource resource) { - return "Thread '" + Thread.currentThread().getName() + "' cannot " + - "acquire " + resource.name + " lock while holding " + - getCurrentLocks().toString() + " lock(s)."; - - } - - private List getCurrentLocks() { - List currentLocks = new ArrayList<>(); - short lockSetVal = lockSet.get(); - for (Resource value : Resource.values()) { - if (value.isLevelLocked(lockSetVal)) { - currentLocks.add(value.getName()); - } - } - return currentLocks; - } - - /** - * Acquire lock on multiple users. - * @param firstUser - * @param secondUser - */ - public boolean acquireMultiUserLock(String firstUser, String secondUser) { - Resource resource = Resource.USER_LOCK; - firstUser = generateResourceName(resource, firstUser); - secondUser = generateResourceName(resource, secondUser); - - if (!resource.canLock(lockSet.get())) { - String errorMessage = getErrorMessage(resource); - LOG.error(errorMessage); - throw new RuntimeException(errorMessage); - } else { - // When acquiring multiple user locks, the reason for doing lexical - // order comparision is to avoid deadlock scenario. - - // Example: 1st thread acquire lock(ozone, hdfs) - // 2nd thread acquire lock(hdfs, ozone). - // If we don't acquire user locks in an order, there can be a deadlock. - // 1st thread acquired lock on ozone, waiting for lock on hdfs, 2nd - // thread acquired lock on hdfs, waiting for lock on ozone. - // To avoid this when we acquire lock on multiple users, we acquire - // locks in lexical order, which can help us to avoid dead locks. - // Now if first thread acquires lock on hdfs, 2nd thread wait for lock - // on hdfs, and first thread acquires lock on ozone. Once after first - // thread releases user locks, 2nd thread acquires them. - - int compare = firstUser.compareTo(secondUser); - String temp; - - // Order the user names in sorted order. Swap them. - if (compare > 0) { - temp = secondUser; - secondUser = firstUser; - firstUser = temp; - } - - if (compare == 0) { - // both users are equal. - manager.writeLock(firstUser); - } else { - manager.writeLock(firstUser); - try { - manager.writeLock(secondUser); - } catch (Exception ex) { - // We got an exception acquiring 2nd user lock. 
Release already - // acquired user lock, and throw exception to the user. - manager.writeUnlock(firstUser); - throw ex; - } - } - if (LOG.isDebugEnabled()) { - LOG.debug("Acquired Write {} lock on resource {} and {}", resource.name, - firstUser, secondUser); - } - lockSet.set(resource.setLock(lockSet.get())); - return true; - } - } - - - - /** - * Release lock on multiple users. - * @param firstUser - * @param secondUser - */ - public void releaseMultiUserLock(String firstUser, String secondUser) { - Resource resource = Resource.USER_LOCK; - firstUser = generateResourceName(resource, firstUser); - secondUser = generateResourceName(resource, secondUser); - - int compare = firstUser.compareTo(secondUser); - - String temp; - // Order the user names in sorted order. Swap them. - if (compare > 0) { - temp = secondUser; - secondUser = firstUser; - firstUser = temp; - } - - if (compare == 0) { - // both users are equal. - manager.writeUnlock(firstUser); - } else { - manager.writeUnlock(firstUser); - manager.writeUnlock(secondUser); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Release Write {} lock on resource {} and {}", resource.name, - firstUser, secondUser); - } - lockSet.set(resource.clearLock(lockSet.get())); - } - - /** - * Release write lock on resource. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - public void releaseWriteLock(Resource resource, String... resources) { - String resourceName = generateResourceName(resource, resources); - unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK); - } - - /** - * Release read lock on resource. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - public void releaseReadLock(Resource resource, String... resources) { - String resourceName = generateResourceName(resource, resources); - unlock(resource, resourceName, manager::readUnlock, READ_LOCK); - } - - /** - * Release write lock on resource. - * @param resource - Type of the resource. - * @param resources - Resource names on which user want to acquire lock. - * For Resource type BUCKET_LOCK, first param should be volume, second param - * should be bucket name. For remaining all resource only one param should - * be passed. - */ - @Deprecated - public void releaseLock(Resource resource, String... resources) { - String resourceName = generateResourceName(resource, resources); - unlock(resource, resourceName, manager::writeUnlock, WRITE_LOCK); - } - - private void unlock(Resource resource, String resourceName, - Consumer lockFn, String lockType) { - // TODO: Not checking release of higher order level lock happened while - // releasing lower order level lock, as for that we need counter for - // locks, as some locks support acquiring lock again. - lockFn.accept(resourceName); - // clear lock - if (LOG.isDebugEnabled()) { - LOG.debug("Release {} {}, lock on resource {}", lockType, resource.name, - resourceName); - } - lockSet.set(resource.clearLock(lockSet.get())); - } - - /** - * Resource defined in Ozone. 
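The lexical-ordering comment above is the whole deadlock-avoidance story: whichever order callers pass the two user names, the generated lock names are sorted before locking, so every thread takes the underlying write locks in the same order. A hypothetical two-thread sketch (user names are made up):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;

public class MultiUserLockExample {
  public static void main(String[] args) throws InterruptedException {
    OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration());

    Runnable updateA = () -> {
      lock.acquireMultiUserLock("ozone", "hdfs");
      try {
        // ... update both users atomically ...
      } finally {
        lock.releaseMultiUserLock("ozone", "hdfs");
      }
    };
    // Same pair of users, opposite argument order. Because the lock names are
    // compared and swapped into sorted order internally, this thread cannot
    // deadlock with the one above.
    Runnable updateB = () -> {
      lock.acquireMultiUserLock("hdfs", "ozone");
      try {
        // ... update both users atomically ...
      } finally {
        lock.releaseMultiUserLock("hdfs", "ozone");
      }
    };

    Thread a = new Thread(updateA);
    Thread b = new Thread(updateB);
    a.start();
    b.start();
    a.join();
    b.join();
  }
}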
- */ - public enum Resource { - // For S3 Bucket need to allow only for S3, that should be means only 1. - S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 - - // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) - VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 - - // For bucket we need to allow both s3 bucket, volume and bucket. Which - // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 - BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 - - // For user we need to allow s3 bucket, volume, bucket and user lock. - // Which is 8 4 + 2 + 1 = 15 - USER_LOCK((byte) 3, "USER_LOCK"), // 15 - - S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 - PREFIX_LOCK((byte) 5, "PREFIX_LOCK"); //63 - - // level of the resource - private byte lockLevel; - - // This will tell the value, till which we can allow locking. - private short mask; - - // This value will help during setLock, and also will tell whether we can - // re-acquire lock or not. - private short setMask; - - // Name of the resource. - private String name; - - Resource(byte pos, String name) { - this.lockLevel = pos; - this.mask = (short) (Math.pow(2, lockLevel + 1) - 1); - this.setMask = (short) Math.pow(2, lockLevel); - this.name = name; - } - - boolean canLock(short lockSetVal) { - - // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow - // re-acquire locks from single thread. 2nd condition is we have - // acquired one of these locks, but after that trying to acquire a lock - // with less than equal of lockLevel, we should disallow. - if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || - (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || - (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) - && setMask <= lockSetVal) { - return false; - } - - - // Our mask is the summation of bits of all previous possible locks. In - // other words it is the largest possible value for that bit position. - - // For example for Volume lock, bit position is 1, and mask is 3. Which - // is the largest value that can be represented with 2 bits is 3. - // Therefore if lockSet is larger than mask we have to return false i.e - // some other higher order lock has been acquired. - - return lockSetVal <= mask; - } - - /** - * Set Lock bits in lockSetVal. - * - * @param lockSetVal - * @return Updated value which has set lock bits. - */ - short setLock(short lockSetVal) { - return (short) (lockSetVal | setMask); - } - - /** - * Clear lock from lockSetVal. - * - * @param lockSetVal - * @return Updated value which has cleared lock bits. - */ - short clearLock(short lockSetVal) { - return (short) (lockSetVal & ~setMask); - } - - /** - * Return true, if this level is locked, else false. - * @param lockSetVal - */ - boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; - } - - String getName() { - return name; - } - - short getMask() { - return mask; - } - } - -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java deleted file mode 100644 index 78a42aa10a8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLockUtil.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
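The mask arithmetic in the Resource enum above (setMask = 2^level, mask = 2^(level+1) - 1) is easy to check in isolation. The standalone sketch below recomputes the same values and replays the canLock check; it is an illustration, not the deleted class.

public class LockMaskExample {
  public static void main(String[] args) {
    String[] names = {"S3_BUCKET_LOCK", "VOLUME_LOCK", "BUCKET_LOCK",
        "USER_LOCK", "S3_SECRET_LOCK", "PREFIX_LOCK"};
    for (int level = 0; level < names.length; level++) {
      short setMask = (short) (1 << level);          // the bit owned by this level
      short mask = (short) ((1 << (level + 1)) - 1); // all bits up to and including it
      System.out.printf("%-15s level=%d setMask=%d mask=%d%n",
          names[level], level, setMask, mask);
    }

    // Holding VOLUME_LOCK (bit 1) and BUCKET_LOCK (bit 2) gives lockSet = 6.
    // S3_BUCKET_LOCK has mask = 1 and 6 > 1, so acquiring it is refused.
    short lockSet = (short) ((1 << 1) | (1 << 2));
    short s3BucketMask = 1;
    System.out.println("can acquire S3_BUCKET_LOCK: " + (lockSet <= s3BucketMask));
  }
}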
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.OM_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_SECRET; -import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX; - -/** - * Utility class contains helper functions required for OM lock. - */ -final class OzoneManagerLockUtil { - - - private OzoneManagerLockUtil() { - } - - /** - * Generate resource lock name for the given resource name. - * - * @param resource - * @param resourceName - */ - public static String generateResourceLockName( - OzoneManagerLock.Resource resource, String resourceName) { - - if (resource == OzoneManagerLock.Resource.S3_BUCKET_LOCK) { - return OM_S3_PREFIX + resourceName; - } else if (resource == OzoneManagerLock.Resource.VOLUME_LOCK) { - return OM_KEY_PREFIX + resourceName; - } else if (resource == OzoneManagerLock.Resource.USER_LOCK) { - return OM_USER_PREFIX + resourceName; - } else if (resource == OzoneManagerLock.Resource.S3_SECRET_LOCK) { - return OM_S3_SECRET + resourceName; - } else if (resource == OzoneManagerLock.Resource.PREFIX_LOCK) { - return OM_PREFIX + resourceName; - } else { - // This is for developers who mistakenly call this method with resource - // bucket type, as for bucket type we need bucket and volumeName. - throw new IllegalArgumentException("Bucket resource type is passed, " + - "to get BucketResourceLockName, use generateBucketLockName method"); - } - - } - - /** - * Generate bucket lock name. - * @param volumeName - * @param bucketName - */ - public static String generateBucketLockName(String volumeName, - String bucketName) { - return OM_KEY_PREFIX + volumeName + OM_KEY_PREFIX + bucketName; - - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java deleted file mode 100644 index 5feac5f6c0b..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.lock; - -/** - * Classes related to ozone manager lock. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java deleted file mode 100644 index 1744cffc134..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; -/** - This package contains client side protocol library to communicate with OM. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java deleted file mode 100644 index 1434dca4c50..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocol; - -import java.io.IOException; - -/** - * Protocol to talk to OM HA. These methods are needed only called from - * OmRequestHandler. - */ -public interface OzoneManagerHAProtocol { - - /** - * Store the snapshot index i.e. the raft log index, corresponding to the - * last transaction applied to the OM RocksDB, in OM metadata dir on disk. - * @return the snapshot index - * @throws IOException - */ - long saveRatisSnapshot() throws IOException; - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java deleted file mode 100644 index a23669598f9..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ /dev/null @@ -1,530 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocol; - -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; - -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; - -import java.io.Closeable; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.token.TokenInfo; -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException; - -/** - * Protocol to talk to OM. - */ -@KerberosInfo( - serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY) -@TokenInfo(OzoneDelegationTokenSelector.class) -public interface OzoneManagerProtocol - extends OzoneManagerSecurityProtocol, Closeable { - - @SuppressWarnings("checkstyle:ConstantName") - /** - * Version 1: Initial version. - */ - long versionID = 1L; - - /** - * Creates a volume. - * @param args - Arguments to create Volume. - * @throws IOException - */ - void createVolume(OmVolumeArgs args) throws IOException; - - /** - * Changes the owner of a volume. - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - void setOwner(String volume, String owner) throws IOException; - - /** - * Changes the Quota on a volume. - * @param volume - Name of the volume. - * @param quota - Quota in bytes. - * @throws IOException - */ - void setQuota(String volume, long quota) throws IOException; - - /** - * Checks if the specified user can access this volume. 
- * @param volume - volume - * @param userAcl - user acls which needs to be checked for access - * @return true if the user has required access for the volume, - * false otherwise - * @throws IOException - */ - boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) - throws IOException; - - /** - * Gets the volume information. - * @param volume - Volume name. - * @return VolumeArgs or exception is thrown. - * @throws IOException - */ - OmVolumeArgs getVolumeInfo(String volume) throws IOException; - - /** - * Deletes an existing empty volume. - * @param volume - Name of the volume. - * @throws IOException - */ - void deleteVolume(String volume) throws IOException; - - /** - * Lists volume owned by a specific user. - * @param userName - user name - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - List listVolumeByUser(String userName, String prefix, String - prevKey, int maxKeys) throws IOException; - - /** - * Lists volume all volumes in the cluster. - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - List listAllVolumes(String prefix, String - prevKey, int maxKeys) throws IOException; - - /** - * Creates a bucket. - * @param bucketInfo - BucketInfo to create Bucket. - * @throws IOException - */ - void createBucket(OmBucketInfo bucketInfo) throws IOException; - - /** - * Gets the bucket information. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @return OmBucketInfo or exception is thrown. - * @throws IOException - */ - OmBucketInfo getBucketInfo(String volumeName, String bucketName) - throws IOException; - - /** - * Sets bucket property from args. - * @param args - BucketArgs. - * @throws IOException - */ - void setBucketProperty(OmBucketArgs args) throws IOException; - - /** - * Open the given key and return an open key session. - * - * @param args the args of the key. - * @return OpenKeySession instance that client uses to talk to container. - * @throws IOException - */ - OpenKeySession openKey(OmKeyArgs args) throws IOException; - - /** - * Commit a key. This will make the change from the client visible. The client - * is identified by the clientID. - * - * @param args the key to commit - * @param clientID the client identification - * @throws IOException - */ - void commitKey(OmKeyArgs args, long clientID) throws IOException; - - /** - * Allocate a new block, it is assumed that the client is having an open key - * session going on. This block will be appended to this open key session. - * - * @param args the key to append - * @param clientID the client identification - * @param excludeList List of datanodes/containers to exclude during block - * allocation - * @return an allocated block - * @throws IOException - */ - OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException; - - - /** - * Look up for the container of an existing key. - * - * @param args the args of the key. - * @return OmKeyInfo instance that client uses to talk to container. - * @throws IOException - */ - OmKeyInfo lookupKey(OmKeyArgs args) throws IOException; - - /** - * Rename an existing key within a bucket. 
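The openKey, allocateBlock, and commitKey declarations above form the key-write control flow of this protocol. Below is a sketch of that flow against the interface as declared here; how OmKeyArgs is constructed and the OpenKeySession accessor used for the client id (getId() below) are assumptions made for illustration.

import java.io.IOException;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

public final class KeyWriteFlowSketch {
  private KeyWriteFlowSketch() { }

  static void writeKey(OzoneManagerProtocol om, OmKeyArgs keyArgs)
      throws IOException {
    // 1. Open a key session; the session carries the client id used below.
    OpenKeySession session = om.openKey(keyArgs);
    long clientId = session.getId();   // assumed accessor

    // 2. Ask OM for another block for this open session when needed,
    //    excluding no datanodes or containers in this sketch.
    OmKeyLocationInfo block = om.allocateBlock(keyArgs, clientId, new ExcludeList());
    System.out.println("allocated block: " + block);

    // 3. Commit the key so the written data becomes visible to other clients.
    om.commitKey(keyArgs, clientId);
  }
}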
- * @param args the args of the key. - * @param toKeyName New name to be used for the Key - * @throws IOException - */ - void renameKey(OmKeyArgs args, String toKeyName) throws IOException; - - /** - * Deletes an existing key. - * - * @param args the args of the key. - * @throws IOException - */ - void deleteKey(OmKeyArgs args) throws IOException; - - /** - * Deletes an existing empty bucket from volume. - * @param volume - Name of the volume. - * @param bucket - Name of the bucket. - * @throws IOException - */ - void deleteBucket(String volume, String bucket) throws IOException; - - /** - * Returns a list of buckets represented by {@link OmBucketInfo} - * in the given volume. Argument volumeName is required, others - * are optional. - * - * @param volumeName - * the name of the volume. - * @param startBucketName - * the start bucket name, only the buckets whose name is - * after this value will be included in the result. - * @param bucketPrefix - * bucket name prefix, only the buckets whose name has - * this prefix will be included in the result. - * @param maxNumOfBuckets - * the maximum number of buckets to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of buckets. - * @throws IOException - */ - List listBuckets(String volumeName, - String startBucketName, String bucketPrefix, int maxNumOfBuckets) - throws IOException; - - /** - * Returns a list of keys represented by {@link OmKeyInfo} - * in the given bucket. Argument volumeName, bucketName is required, - * others are optional. - * - * @param volumeName - * the name of the volume. - * @param bucketName - * the name of the bucket. - * @param startKeyName - * the start key name, only the keys whose name is - * after this value will be included in the result. - * @param keyPrefix - * key name prefix, only the keys whose name has - * this prefix will be included in the result. - * @param maxKeys - * the maximum number of keys to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of keys. - * @throws IOException - */ - List listKeys(String volumeName, - String bucketName, String startKeyName, String keyPrefix, int maxKeys) - throws IOException; - - /** - * Returns list of Ozone services with its configuration details. - * - * @return list of Ozone services - * @throws IOException - */ - List getServiceList() throws IOException; - - ServiceInfoEx getServiceInfo() throws IOException; - - /* - * S3 Specific functionality that is supported by Ozone Manager. - */ - - /** - * Creates an S3 bucket inside Ozone manager and creates the mapping needed - * to access via both S3 and Ozone. - * @param userName - S3 user name. - * @param s3BucketName - S3 bucket Name. - * @throws IOException - On failure, throws an exception like Bucket exists. - */ - void createS3Bucket(String userName, String s3BucketName) throws IOException; - - /** - * Delets an S3 bucket inside Ozone manager and deletes the mapping. - * @param s3BucketName - S3 bucket Name. - * @throws IOException in case the bucket cannot be deleted. - */ - void deleteS3Bucket(String s3BucketName) throws IOException; - - /** - * Returns the Ozone Namespace for the S3Bucket. It will return the - * OzoneVolume/OzoneBucketName. - * @param s3BucketName - S3 Bucket Name. - * @return String - The Ozone canonical name for this s3 bucket. This - * string is useful for mounting an OzoneFS. - * @throws IOException - Error is throw if the s3bucket does not exist. 
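The listKeys declaration above is a paging API: only keys sorting after startKeyName are returned, capped at maxKeys. Here is a sketch of walking a whole bucket with it; the OmKeyInfo.getKeyName() accessor and the use of null for "no prefix / start from the beginning" are assumptions for illustration.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

public final class ListKeysPagingSketch {
  private ListKeysPagingSketch() { }

  static void listAllKeys(OzoneManagerProtocol om, String volume, String bucket)
      throws IOException {
    final int batchSize = 1000;
    String startKey = null;                       // assumed: start from the beginning
    while (true) {
      List<OmKeyInfo> page =
          om.listKeys(volume, bucket, startKey, null, batchSize);
      for (OmKeyInfo key : page) {
        System.out.println(key.getKeyName());     // assumed accessor
      }
      if (page.size() < batchSize) {
        break;                                    // short page means we are done
      }
      // Only keys whose names sort after startKey are returned, so the last
      // key of this page is the marker for the next call.
      startKey = page.get(page.size() - 1).getKeyName();
    }
  }
}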
- */ - String getOzoneBucketMapping(String s3BucketName) throws IOException; - - /** - * Returns a list of buckets represented by {@link OmBucketInfo} - * for the given user. Argument username is required, others - * are optional. - * - * @param userName - * user Name. - * @param startBucketName - * the start bucket name, only the buckets whose name is - * after this value will be included in the result. - * @param bucketPrefix - * bucket name prefix, only the buckets whose name has - * this prefix will be included in the result. - * @param maxNumOfBuckets - * the maximum number of buckets to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of buckets. - * @throws IOException - */ - List listS3Buckets(String userName, String startBucketName, - String bucketPrefix, int maxNumOfBuckets) - throws IOException; - - /** - * Initiate multipart upload for the specified key. - * @param keyArgs - * @return MultipartInfo - * @throws IOException - */ - OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException; - - - /** - * Commit Multipart upload part file. - * @param omKeyArgs - * @param clientID - * @return OmMultipartCommitUploadPartInfo - * @throws IOException - */ - OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientID) throws IOException; - - /** - * Complete Multipart upload Request. - * @param omKeyArgs - * @param multipartUploadList - * @return OmMultipartUploadCompleteInfo - * @throws IOException - */ - OmMultipartUploadCompleteInfo completeMultipartUpload( - OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) - throws IOException; - - /** - * Abort multipart upload. - * @param omKeyArgs - * @throws IOException - */ - void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException; - - /** - * Returns list of parts of a multipart upload key. - * @param volumeName - * @param bucketName - * @param keyName - * @param uploadID - * @param partNumberMarker - * @param maxParts - * @return OmMultipartUploadListParts - */ - OmMultipartUploadListParts listParts(String volumeName, String bucketName, - String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException; - - /** - * List in-flight uploads. - */ - OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException; - /** - * Gets s3Secret for given kerberos user. - * @param kerberosID - * @return S3SecretValue - * @throws IOException - */ - S3SecretValue getS3Secret(String kerberosID) throws IOException; - - /** - * Get the OM Client's Retry and Failover Proxy provider. - * @return OMFailoverProxyProvider - */ - OMFailoverProxyProvider getOMFailoverProxyProvider(); - - /** - * OzoneFS api to get file status for an entry. - * - * @param keyArgs Key args - * @throws OMException if file does not exist - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - OzoneFileStatus getFileStatus(OmKeyArgs keyArgs) throws IOException; - - /** - * Ozone FS api to create a directory. Parent directories if do not exist - * are created for the input directory. - * - * @param args Key args - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - void createDirectory(OmKeyArgs args) throws IOException; - - /** - * OzoneFS api to creates an output stream for a file. 
- * - * @param keyArgs Key args - * @param overWrite if true existing file at the location will be overwritten - * @param recursive if true file would be created even if parent directories - * do not exist - * @throws OMException if given key is a directory - * if file exists and isOverwrite flag is false - * if an ancestor exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - OpenKeySession createFile(OmKeyArgs keyArgs, boolean overWrite, - boolean recursive) throws IOException; - - /** - * OzoneFS api to lookup for a file. - * - * @param keyArgs Key args - * @throws OMException if given key is not found or it is not a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - OmKeyInfo lookupFile(OmKeyArgs keyArgs) throws IOException; - - /** - * List the status for a file or a directory and its contents. - * - * @param keyArgs Key args - * @param recursive For a directory if true all the descendants of a - * particular directory are listed - * @param startKey Key from which listing needs to start. If startKey exists - * its status is included in the final list. - * @param numEntries Number of entries to list from the start key - * @return list of file status - */ - List listStatus(OmKeyArgs keyArgs, boolean recursive, - String startKey, long numEntries) throws IOException; - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * - * @throws IOException if there is error. - * */ - boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * - * @throws IOException if there is error. - * */ - boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Acls to be set for given Ozone object. This operations reset ACL for - * given object to list of ACLs provided in argument. - * @param obj Ozone object. - * @param acls List of acls. - * - * @throws IOException if there is error. - * */ - boolean setAcl(OzoneObj obj, List acls) throws IOException; - - /** - * Returns list of ACLs for given Ozone object. - * @param obj Ozone object. - * - * @throws IOException if there is error. - * */ - List getAcl(OzoneObj obj) throws IOException; - - /** - * Get DB updates since a specific sequence number. - * @param dbUpdatesRequest request that encapsulates a sequence number. - * @return Wrapper containing the updates. - * @throws SequenceNumberNotFoundException if db is unable to read the data. - */ - DBUpdatesWrapper getDBUpdates( - OzoneManagerProtocolProtos.DBUpdatesRequest dbUpdatesRequest) - throws IOException; - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java deleted file mode 100644 index 3e90899bd93..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerSecurityProtocol.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocol; - -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.Idempotent; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.ozone.om.exceptions.OMException; - -/** - * Security protocol for a secure OzoneManager. - */ -@KerberosInfo( - serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY) -public interface OzoneManagerSecurityProtocol { - - /** - * Get a valid Delegation Token. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws OMException - */ - @Idempotent - Token getDelegationToken(Text renewer) - throws OMException; - - /** - * Renew an existing delegation token. - * - * @param token delegation token obtained earlier - * @return the new expiration time - * @throws OMException - */ - @Idempotent - long renewDelegationToken(Token token) - throws OMException; - - /** - * Cancel an existing delegation token. - * - * @param token delegation token - * @throws OMException - */ - @Idempotent - void cancelDelegationToken(Token token) - throws OMException; - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java deleted file mode 100644 index 6f58e2d7ebd..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerServerProtocol.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocol; - -/** - * This will be used in the OzoneManager Server, as few of the methods in - * OzoneManagerHAProtocol need not be exposed to Om clients. This interface - * extends both OzoneManagerHAProtocol and OzoneManagerProtocol. - */ -public interface OzoneManagerServerProtocol extends OzoneManagerProtocol, - OzoneManagerHAProtocol { -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java deleted file mode 100644 index 9c7f3888d31..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocol; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java deleted file mode 100644 index c9dc8ecc035..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ /dev/null @@ -1,1569 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocolPB; - -import java.io.EOFException; -import java.io.IOException; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.io.retry.RetryProxy; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.exceptions.NotLeaderException; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupFileResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; -import 
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.protobuf.ByteString; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.ACCESS_DENIED; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; - - - -/** - * The client side implementation of OzoneManagerProtocol. - */ - -@InterfaceAudience.Private -public final class OzoneManagerProtocolClientSideTranslatorPB - implements OzoneManagerProtocol, ProtocolTranslator { - - /** - * RpcController is not used and hence is set to null. - */ - private static final RpcController NULL_RPC_CONTROLLER = null; - - private final OMFailoverProxyProvider omFailoverProxyProvider; - private final OzoneManagerProtocolPB rpcProxy; - private final String clientID; - private static final Logger FAILOVER_PROXY_PROVIDER_LOG = - LoggerFactory.getLogger(OMFailoverProxyProvider.class); - - public OzoneManagerProtocolClientSideTranslatorPB( - OzoneManagerProtocolPB proxy, String clientId) { - this.rpcProxy = proxy; - this.clientID = clientId; - this.omFailoverProxyProvider = null; - } - - /** - * Constructor for OM Protocol Client. This creates a {@link RetryProxy} - * over {@link OMFailoverProxyProvider} proxy. OMFailoverProxyProvider has - * one {@link OzoneManagerProtocolPB} proxy pointing to each OM node in the - * cluster. - */ - public OzoneManagerProtocolClientSideTranslatorPB(OzoneConfiguration conf, - String clientId, String omServiceId, UserGroupInformation ugi) - throws IOException { - this.omFailoverProxyProvider = new OMFailoverProxyProvider(conf, ugi, - omServiceId); - - int maxRetries = conf.getInt( - OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY, - OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT); - int maxFailovers = conf.getInt( - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); - int sleepBase = conf.getInt( - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY, - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT); - int sleepMax = conf.getInt( - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY, - OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT); - - this.rpcProxy = - createRetryProxy(omFailoverProxyProvider, maxRetries, maxFailovers, - sleepBase, sleepMax); - this.clientID = clientId; - } - - /** - * Creates a {@link RetryProxy} encapsulating the - * {@link OMFailoverProxyProvider}. The retry proxy fails over on network - * exception or if the current proxy is not the leader OM. 
- */ - private OzoneManagerProtocolPB createRetryProxy( - OMFailoverProxyProvider failoverProxyProvider, - int maxRetries, int maxFailovers, int delayMillis, int maxDelayBase) { - - RetryPolicy retryPolicyOnNetworkException = RetryPolicies - .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, - maxFailovers, maxRetries, delayMillis, maxDelayBase); - - RetryPolicy retryPolicy = new RetryPolicy() { - @Override - public RetryAction shouldRetry(Exception exception, int retries, - int failovers, boolean isIdempotentOrAtMostOnce) - throws Exception { - - if (exception instanceof ServiceException) { - Throwable cause = exception.getCause(); - if (cause instanceof NotLeaderException) { - NotLeaderException notLeaderException = (NotLeaderException) cause; - omFailoverProxyProvider.performFailoverIfRequired( - notLeaderException.getSuggestedLeaderNodeId()); - return getRetryAction(RetryAction.RETRY, retries, failovers); - } else { - return getRetryAction(RetryAction.FAILOVER_AND_RETRY, retries, - failovers); - } - } else if (exception instanceof EOFException) { - return getRetryAction(RetryAction.FAILOVER_AND_RETRY, retries, - failovers); - } else { - return retryPolicyOnNetworkException.shouldRetry( - exception, retries, failovers, isIdempotentOrAtMostOnce); - } - } - - private RetryAction getRetryAction(RetryAction fallbackAction, - int retries, int failovers) { - if (retries < maxRetries && failovers < maxFailovers) { - return fallbackAction; - } else { - FAILOVER_PROXY_PROVIDER_LOG.error("Failed to connect to OM. " + - "Attempted {} retries and {} failovers", retries, failovers); - return RetryAction.FAIL; - } - } - }; - - OzoneManagerProtocolPB proxy = (OzoneManagerProtocolPB) RetryProxy.create( - OzoneManagerProtocolPB.class, failoverProxyProvider, retryPolicy); - return proxy; - } - - @VisibleForTesting - public OMFailoverProxyProvider getOMFailoverProxyProvider() { - return omFailoverProxyProvider; - } - - /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - *
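The retry proxy built above takes its retry and failover limits from the four OZONE_CLIENT_* keys read in the constructor. A hypothetical way to tune them on the client configuration (the numeric values are examples, not recommendations):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public final class ClientRetryConfigSketch {
  private ClientRetryConfigSketch() { }

  static OzoneConfiguration tunedConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Maximum retries against the current OM before the policy gives up.
    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 10);
    // Maximum failovers to another OM before the policy gives up.
    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 15);
    // Backoff between failover attempts: base and cap, in milliseconds.
    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY, 500);
    conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY, 15000);
    return conf;
  }
}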
- * <p>
As noted in {@link AutoCloseable#close()}, cases where the - * close may fail require careful attention. It is strongly advised - * to relinquish the underlying resources and to internally - * mark the {@code Closeable} as closed, prior to throwing - * the {@code IOException}. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. - */ - @Override - public Object getUnderlyingProxyObject() { - return rpcProxy; - } - - /** - * Returns a OMRequest builder with specified type. - * @param cmdType type of the request - */ - private OMRequest.Builder createOMRequest(Type cmdType) { - - return OMRequest.newBuilder() - .setCmdType(cmdType) - .setClientId(clientID); - } - - /** - * Submits client request to OM server. - * @param omRequest client request - * @return response from OM - * @throws IOException thrown if any Protobuf service exception occurs - */ - private OMResponse submitRequest(OMRequest omRequest) - throws IOException { - try { - OMRequest payload = OMRequest.newBuilder(omRequest) - .setTraceID(TracingUtil.exportCurrentSpan()) - .build(); - - OMResponse omResponse = - rpcProxy.submitRequest(NULL_RPC_CONTROLLER, payload); - - if (omResponse.hasLeaderOMNodeId() && omFailoverProxyProvider != null) { - String leaderOmId = omResponse.getLeaderOMNodeId(); - - // Failover to the OM node returned by OMReponse leaderOMNodeId if - // current proxy is not pointing to that node. - omFailoverProxyProvider.performFailoverIfRequired(leaderOmId); - } - - return omResponse; - } catch (ServiceException e) { - throw ProtobufHelper.getRemoteException(e); - } - } - - /** - * Creates a volume. - * - * @param args - Arguments to create Volume. - * @throws IOException - */ - @Override - public void createVolume(OmVolumeArgs args) throws IOException { - CreateVolumeRequest.Builder req = - CreateVolumeRequest.newBuilder(); - VolumeInfo volumeInfo = args.getProtobuf(); - req.setVolumeInfo(volumeInfo); - - OMRequest omRequest = createOMRequest(Type.CreateVolume) - .setCreateVolumeRequest(req) - .build(); - - OMResponse omResponse = submitRequest(omRequest); - handleError(omResponse); - } - - /** - * Changes the owner of a volume. - * - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - @Override - public void setOwner(String volume, String owner) throws IOException { - SetVolumePropertyRequest.Builder req = - SetVolumePropertyRequest.newBuilder(); - req.setVolumeName(volume).setOwnerName(owner); - - OMRequest omRequest = createOMRequest(Type.SetVolumeProperty) - .setSetVolumePropertyRequest(req) - .build(); - - OMResponse omResponse = submitRequest(omRequest); - handleError(omResponse); - } - - /** - * Changes the Quota on a volume. - * - * @param volume - Name of the volume. - * @param quota - Quota in bytes. - * @throws IOException - */ - @Override - public void setQuota(String volume, long quota) throws IOException { - SetVolumePropertyRequest.Builder req = - SetVolumePropertyRequest.newBuilder(); - req.setVolumeName(volume).setQuotaInBytes(quota); - - OMRequest omRequest = createOMRequest(Type.SetVolumeProperty) - .setSetVolumePropertyRequest(req) - .build(); - - OMResponse omResponse = submitRequest(omRequest); - handleError(omResponse); - } - - /** - * Checks if the specified user can access this volume. 
- * - * @param volume - volume - * @param userAcl - user acls which needs to be checked for access - * @return true if the user has required access for the volume, - * false otherwise - * @throws IOException - */ - @Override - public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws - IOException { - CheckVolumeAccessRequest.Builder req = - CheckVolumeAccessRequest.newBuilder(); - req.setVolumeName(volume).setUserAcl(userAcl); - - OMRequest omRequest = createOMRequest(Type.CheckVolumeAccess) - .setCheckVolumeAccessRequest(req) - .build(); - - OMResponse omResponse = submitRequest(omRequest); - - if (omResponse.getStatus() == ACCESS_DENIED) { - return false; - } else if (omResponse.getStatus() == OK) { - return true; - } else { - handleError(omResponse); - return false; - } - } - - /** - * Gets the volume information. - * - * @param volume - Volume name. - * @return OmVolumeArgs or exception is thrown. - * @throws IOException - */ - @Override - public OmVolumeArgs getVolumeInfo(String volume) throws IOException { - InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder(); - req.setVolumeName(volume); - - OMRequest omRequest = createOMRequest(Type.InfoVolume) - .setInfoVolumeRequest(req) - .build(); - - InfoVolumeResponse resp = - handleError(submitRequest(omRequest)).getInfoVolumeResponse(); - - - return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo()); - } - - /** - * Deletes an existing empty volume. - * - * @param volume - Name of the volume. - * @throws IOException - */ - @Override - public void deleteVolume(String volume) throws IOException { - DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder(); - req.setVolumeName(volume); - - OMRequest omRequest = createOMRequest(Type.DeleteVolume) - .setDeleteVolumeRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - /** - * Lists volume owned by a specific user. - * - * @param userName - user name - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the - * prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - @Override - public List listVolumeByUser(String userName, String prefix, - String prevKey, int maxKeys) - throws IOException { - ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); - if (!Strings.isNullOrEmpty(prefix)) { - builder.setPrefix(prefix); - } - if (!Strings.isNullOrEmpty(prevKey)) { - builder.setPrevKey(prevKey); - } - builder.setMaxKeys(maxKeys); - builder.setUserName(userName); - builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER); - return listVolume(builder.build()); - } - - /** - * Lists volume all volumes in the cluster. - * - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the - * prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. 
- * @throws IOException - */ - @Override - public List listAllVolumes(String prefix, String prevKey, - int maxKeys) throws IOException { - ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder(); - if (!Strings.isNullOrEmpty(prefix)) { - builder.setPrefix(prefix); - } - if (!Strings.isNullOrEmpty(prevKey)) { - builder.setPrevKey(prevKey); - } - builder.setMaxKeys(maxKeys); - builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER); - return listVolume(builder.build()); - } - - private List listVolume(ListVolumeRequest request) - throws IOException { - - OMRequest omRequest = createOMRequest(Type.ListVolume) - .setListVolumeRequest(request) - .build(); - - ListVolumeResponse resp = - handleError(submitRequest(omRequest)).getListVolumeResponse(); - List list = new ArrayList<>(resp.getVolumeInfoList().size()); - for (VolumeInfo info : resp.getVolumeInfoList()) { - list.add(OmVolumeArgs.getFromProtobuf(info)); - } - return list; - } - - /** - * Creates a bucket. - * - * @param bucketInfo - BucketInfo to create bucket. - * @throws IOException - */ - @Override - public void createBucket(OmBucketInfo bucketInfo) throws IOException { - CreateBucketRequest.Builder req = - CreateBucketRequest.newBuilder(); - BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf(); - req.setBucketInfo(bucketInfoProtobuf); - - OMRequest omRequest = createOMRequest(Type.CreateBucket) - .setCreateBucketRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - /** - * Gets the bucket information. - * - * @param volume - Volume name. - * @param bucket - Bucket name. - * @return OmBucketInfo or exception is thrown. - * @throws IOException - */ - @Override - public OmBucketInfo getBucketInfo(String volume, String bucket) - throws IOException { - InfoBucketRequest.Builder req = - InfoBucketRequest.newBuilder(); - req.setVolumeName(volume); - req.setBucketName(bucket); - - OMRequest omRequest = createOMRequest(Type.InfoBucket) - .setInfoBucketRequest(req) - .build(); - - InfoBucketResponse resp = - handleError(submitRequest(omRequest)).getInfoBucketResponse(); - - return OmBucketInfo.getFromProtobuf(resp.getBucketInfo()); - } - - /** - * Sets bucket property from args. - * @param args - BucketArgs. - * @throws IOException - */ - @Override - public void setBucketProperty(OmBucketArgs args) - throws IOException { - SetBucketPropertyRequest.Builder req = - SetBucketPropertyRequest.newBuilder(); - BucketArgs bucketArgs = args.getProtobuf(); - req.setBucketArgs(bucketArgs); - - OMRequest omRequest = createOMRequest(Type.SetBucketProperty) - .setSetBucketPropertyRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - /** - * List buckets in a volume. 
- * - * @param volumeName - * @param startKey - * @param prefix - * @param count - * @return - * @throws IOException - */ - @Override - public List listBuckets(String volumeName, - String startKey, String prefix, int count) throws IOException { - List buckets = new ArrayList<>(); - ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder(); - reqBuilder.setVolumeName(volumeName); - reqBuilder.setCount(count); - if (startKey != null) { - reqBuilder.setStartKey(startKey); - } - if (prefix != null) { - reqBuilder.setPrefix(prefix); - } - ListBucketsRequest request = reqBuilder.build(); - - OMRequest omRequest = createOMRequest(Type.ListBuckets) - .setListBucketsRequest(request) - .build(); - - ListBucketsResponse resp = handleError(submitRequest(omRequest)) - .getListBucketsResponse(); - - buckets.addAll( - resp.getBucketInfoList().stream() - .map(OmBucketInfo::getFromProtobuf) - .collect(Collectors.toList())); - return buckets; - - } - - /** - * Create a new open session of the key, then use the returned meta info to - * talk to data node to actually write the key. - * @param args the args for the key to be allocated - * @return a handler to the key, returned client - * @throws IOException - */ - @Override - public OpenKeySession openKey(OmKeyArgs args) throws IOException { - CreateKeyRequest.Builder req = CreateKeyRequest.newBuilder(); - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()); - - if(args.getAcls() != null) { - keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); - } - - if (args.getFactor() != null) { - keyArgs.setFactor(args.getFactor()); - } - - if (args.getType() != null) { - keyArgs.setType(args.getType()); - } - - if (args.getDataSize() > 0) { - keyArgs.setDataSize(args.getDataSize()); - } - - if (args.getMetadata() != null && args.getMetadata().size() > 0) { - keyArgs.addAllMetadata(KeyValueUtil.toProtobuf(args.getMetadata())); - } - req.setKeyArgs(keyArgs.build()); - - if (args.getMultipartUploadID() != null) { - keyArgs.setMultipartUploadID(args.getMultipartUploadID()); - } - - if (args.getMultipartUploadPartNumber() > 0) { - keyArgs.setMultipartNumber(args.getMultipartUploadPartNumber()); - } - - keyArgs.setIsMultipartKey(args.getIsMultipartKey()); - - - req.setKeyArgs(keyArgs.build()); - - OMRequest omRequest = createOMRequest(Type.CreateKey) - .setCreateKeyRequest(req) - .build(); - - CreateKeyResponse keyResponse = - handleError(submitRequest(omRequest)).getCreateKeyResponse(); - return new OpenKeySession(keyResponse.getID(), - OmKeyInfo.getFromProtobuf(keyResponse.getKeyInfo()), - keyResponse.getOpenVersion()); - } - - private OMResponse handleError(OMResponse resp) throws OMException { - if (resp.getStatus() != OK) { - throw new OMException(resp.getMessage(), - ResultCodes.values()[resp.getStatus().ordinal()]); - } - return resp; - } - - @Override - public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientId, - ExcludeList excludeList) throws IOException { - AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder(); - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()); - - if (args.getFactor() != null) { - keyArgs.setFactor(args.getFactor()); - } - - if (args.getType() != null) { - keyArgs.setType(args.getType()); - } - - 
req.setKeyArgs(keyArgs); - req.setClientID(clientId); - req.setExcludeList(excludeList.getProtoBuf()); - - - OMRequest omRequest = createOMRequest(Type.AllocateBlock) - .setAllocateBlockRequest(req) - .build(); - - AllocateBlockResponse resp = handleError(submitRequest(omRequest)) - .getAllocateBlockResponse(); - return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); - } - @Override - public void commitKey(OmKeyArgs args, long clientId) - throws IOException { - CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder(); - List locationInfoList = args.getLocationInfoList(); - Preconditions.checkNotNull(locationInfoList); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()) - .addAllKeyLocations( - locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())).build(); - req.setKeyArgs(keyArgs); - req.setClientID(clientId); - - OMRequest omRequest = createOMRequest(Type.CommitKey) - .setCommitKeyRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - - } - - - @Override - public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - LookupKeyRequest.Builder req = LookupKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()) - .setSortDatanodes(args.getSortDatanodes()) - .build(); - req.setKeyArgs(keyArgs); - - OMRequest omRequest = createOMRequest(Type.LookupKey) - .setLookupKeyRequest(req) - .build(); - - LookupKeyResponse resp = - handleError(submitRequest(omRequest)).getLookupKeyResponse(); - - return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); - } - - @Override - public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { - RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()).build(); - req.setKeyArgs(keyArgs); - req.setToKeyName(toKeyName); - - OMRequest omRequest = createOMRequest(Type.RenameKey) - .setRenameKeyRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - } - - /** - * Deletes an existing key. - * - * @param args the args of the key. - * @throws IOException - */ - @Override - public void deleteKey(OmKeyArgs args) throws IOException { - DeleteKeyRequest.Builder req = DeleteKeyRequest.newBuilder(); - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()).build(); - req.setKeyArgs(keyArgs); - - OMRequest omRequest = createOMRequest(Type.DeleteKey) - .setDeleteKeyRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - /** - * Deletes an existing empty bucket from volume. - * @param volume - Name of the volume. - * @param bucket - Name of the bucket. - * @throws IOException - */ - public void deleteBucket(String volume, String bucket) throws IOException { - DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder(); - req.setVolumeName(volume); - req.setBucketName(bucket); - - OMRequest omRequest = createOMRequest(Type.DeleteBucket) - .setDeleteBucketRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - /** - * List keys in a bucket. 
- */ - @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String prefix, int maxKeys) throws IOException { - List keys = new ArrayList<>(); - ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder(); - reqBuilder.setVolumeName(volumeName); - reqBuilder.setBucketName(bucketName); - reqBuilder.setCount(maxKeys); - - if (startKey != null) { - reqBuilder.setStartKey(startKey); - } - - if (prefix != null) { - reqBuilder.setPrefix(prefix); - } - - ListKeysRequest req = reqBuilder.build(); - - OMRequest omRequest = createOMRequest(Type.ListKeys) - .setListKeysRequest(req) - .build(); - - ListKeysResponse resp = - handleError(submitRequest(omRequest)).getListKeysResponse(); - keys.addAll( - resp.getKeyInfoList().stream() - .map(OmKeyInfo::getFromProtobuf) - .collect(Collectors.toList())); - return keys; - - } - - @Override - public void createS3Bucket(String userName, String s3BucketName) - throws IOException { - S3CreateBucketRequest req = S3CreateBucketRequest.newBuilder() - .setUserName(userName) - .setS3Bucketname(s3BucketName) - .build(); - - OMRequest omRequest = createOMRequest(Type.CreateS3Bucket) - .setCreateS3BucketRequest(req) - .build(); - - handleError(submitRequest(omRequest)); - - } - - @Override - public void deleteS3Bucket(String s3BucketName) throws IOException { - S3DeleteBucketRequest request = S3DeleteBucketRequest.newBuilder() - .setS3BucketName(s3BucketName) - .build(); - - OMRequest omRequest = createOMRequest(Type.DeleteS3Bucket) - .setDeleteS3BucketRequest(request) - .build(); - - handleError(submitRequest(omRequest)); - - } - - @Override - public String getOzoneBucketMapping(String s3BucketName) - throws IOException { - S3BucketInfoRequest request = S3BucketInfoRequest.newBuilder() - .setS3BucketName(s3BucketName) - .build(); - - OMRequest omRequest = createOMRequest(Type.InfoS3Bucket) - .setInfoS3BucketRequest(request) - .build(); - - S3BucketInfoResponse resp = handleError(submitRequest(omRequest)) - .getInfoS3BucketResponse(); - return resp.getOzoneMapping(); - } - - @Override - public List listS3Buckets(String userName, String startKey, - String prefix, int count) - throws IOException { - List buckets = new ArrayList<>(); - S3ListBucketsRequest.Builder reqBuilder = S3ListBucketsRequest.newBuilder(); - reqBuilder.setUserName(userName); - reqBuilder.setCount(count); - if (startKey != null) { - reqBuilder.setStartKey(startKey); - } - if (prefix != null) { - reqBuilder.setPrefix(prefix); - } - S3ListBucketsRequest request = reqBuilder.build(); - - OMRequest omRequest = createOMRequest(Type.ListS3Buckets) - .setListS3BucketsRequest(request) - .build(); - - S3ListBucketsResponse resp = handleError(submitRequest(omRequest)) - .getListS3BucketsResponse(); - - buckets.addAll( - resp.getBucketInfoList().stream() - .map(OmBucketInfo::getFromProtobuf) - .collect(Collectors.toList())); - return buckets; - - } - - @Override - public S3SecretValue getS3Secret(String kerberosID) throws IOException { - GetS3SecretRequest request = GetS3SecretRequest.newBuilder() - .setKerberosID(kerberosID) - .build(); - OMRequest omRequest = createOMRequest(Type.GetS3Secret) - .setGetS3SecretRequest(request) - .build(); - final GetS3SecretResponse resp = handleError(submitRequest(omRequest)) - .getGetS3SecretResponse(); - - return S3SecretValue.fromProtobuf(resp.getS3Secret()); - - } - - /** - * Return the proxy object underlying this protocol translator. - * - * @return the proxy object underlying this protocol translator. 
- */ - @Override - public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws - IOException { - - MultipartInfoInitiateRequest.Builder multipartInfoInitiateRequest = - MultipartInfoInitiateRequest.newBuilder(); - - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()) - .setFactor(omKeyArgs.getFactor()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .setType(omKeyArgs.getType()); - multipartInfoInitiateRequest.setKeyArgs(keyArgs.build()); - - OMRequest omRequest = createOMRequest( - Type.InitiateMultiPartUpload) - .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest.build()) - .build(); - - MultipartInfoInitiateResponse resp = handleError(submitRequest(omRequest)) - .getInitiateMultiPartUploadResponse(); - - return new OmMultipartInfo(resp.getVolumeName(), resp.getBucketName(), resp - .getKeyName(), resp.getMultipartUploadID()); - } - - @Override - public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientId) throws IOException { - - List locationInfoList = omKeyArgs.getLocationInfoList(); - Preconditions.checkNotNull(locationInfoList); - - - MultipartCommitUploadPartRequest.Builder multipartCommitUploadPartRequest - = MultipartCommitUploadPartRequest.newBuilder(); - - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()) - .setMultipartUploadID(omKeyArgs.getMultipartUploadID()) - .setIsMultipartKey(omKeyArgs.getIsMultipartKey()) - .setMultipartNumber(omKeyArgs.getMultipartUploadPartNumber()) - .setDataSize(omKeyArgs.getDataSize()) - .addAllKeyLocations( - locationInfoList.stream().map(OmKeyLocationInfo::getProtobuf) - .collect(Collectors.toList())); - multipartCommitUploadPartRequest.setClientID(clientId); - multipartCommitUploadPartRequest.setKeyArgs(keyArgs.build()); - - OMRequest omRequest = createOMRequest( - Type.CommitMultiPartUpload) - .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest - .build()) - .build(); - - MultipartCommitUploadPartResponse response = - handleError(submitRequest(omRequest)) - .getCommitMultiPartUploadResponse(); - - OmMultipartCommitUploadPartInfo info = new - OmMultipartCommitUploadPartInfo(response.getPartName()); - return info; - } - - @Override - public OmMultipartUploadCompleteInfo completeMultipartUpload( - OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) - throws IOException { - MultipartUploadCompleteRequest.Builder multipartUploadCompleteRequest = - MultipartUploadCompleteRequest.newBuilder(); - - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()) - .addAllAcls(omKeyArgs.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); - - multipartUploadCompleteRequest.setKeyArgs(keyArgs.build()); - multipartUploadCompleteRequest.addAllPartsList(multipartUploadList - .getPartsList()); - - OMRequest omRequest = createOMRequest( - Type.CompleteMultiPartUpload) - .setCompleteMultiPartUploadRequest( - multipartUploadCompleteRequest.build()).build(); - - MultipartUploadCompleteResponse response = - handleError(submitRequest(omRequest)) - 
.getCompleteMultiPartUploadResponse(); - - OmMultipartUploadCompleteInfo info = new - OmMultipartUploadCompleteInfo(response.getVolume(), response - .getBucket(), response.getKey(), response.getHash()); - return info; - } - - @Override - public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()) - .setMultipartUploadID(omKeyArgs.getMultipartUploadID()); - - MultipartUploadAbortRequest.Builder multipartUploadAbortRequest = - MultipartUploadAbortRequest.newBuilder(); - multipartUploadAbortRequest.setKeyArgs(keyArgs); - - OMRequest omRequest = createOMRequest( - Type.AbortMultiPartUpload) - .setAbortMultiPartUploadRequest(multipartUploadAbortRequest.build()) - .build(); - - handleError(submitRequest(omRequest)); - - } - - @Override - public OmMultipartUploadListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, - int partNumberMarker, int maxParts) throws IOException { - MultipartUploadListPartsRequest.Builder multipartUploadListPartsRequest = - MultipartUploadListPartsRequest.newBuilder(); - multipartUploadListPartsRequest.setVolume(volumeName) - .setBucket(bucketName).setKey(keyName).setUploadID(uploadID) - .setPartNumbermarker(partNumberMarker).setMaxParts(maxParts); - - OMRequest omRequest = createOMRequest(Type.ListMultiPartUploadParts) - .setListMultipartUploadPartsRequest( - multipartUploadListPartsRequest.build()).build(); - - MultipartUploadListPartsResponse response = - handleError(submitRequest(omRequest)) - .getListMultipartUploadPartsResponse(); - - - OmMultipartUploadListParts omMultipartUploadListParts = - new OmMultipartUploadListParts(response.getType(), response.getFactor(), - response.getNextPartNumberMarker(), response.getIsTruncated()); - omMultipartUploadListParts.addProtoPartList(response.getPartsListList()); - - return omMultipartUploadListParts; - - } - - @Override - public OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, - String prefix) throws IOException { - ListMultipartUploadsRequest request = ListMultipartUploadsRequest - .newBuilder() - .setVolume(volumeName) - .setBucket(bucketName) - .setPrefix(prefix == null ? 
"" : prefix) - .build(); - - OMRequest omRequest = createOMRequest(Type.ListMultipartUploads) - .setListMultipartUploadsRequest(request) - .build(); - - ListMultipartUploadsResponse listMultipartUploadsResponse = - handleError(submitRequest(omRequest)).getListMultipartUploadsResponse(); - - List uploadList = - listMultipartUploadsResponse.getUploadsListList() - .stream() - .map(proto -> new OmMultipartUpload( - proto.getVolumeName(), - proto.getBucketName(), - proto.getKeyName(), - proto.getUploadId(), - Instant.ofEpochMilli(proto.getCreationTime()), - proto.getType(), - proto.getFactor() - )) - .collect(Collectors.toList()); - - OmMultipartUploadList response = new OmMultipartUploadList(uploadList); - - return response; - } - - public List getServiceList() throws IOException { - ServiceListRequest req = ServiceListRequest.newBuilder().build(); - - OMRequest omRequest = createOMRequest(Type.ServiceList) - .setServiceListRequest(req) - .build(); - - final ServiceListResponse resp = handleError(submitRequest(omRequest)) - .getServiceListResponse(); - - return resp.getServiceInfoList().stream() - .map(ServiceInfo::getFromProtobuf) - .collect(Collectors.toList()); - - } - - @Override - public ServiceInfoEx getServiceInfo() throws IOException { - ServiceListRequest req = ServiceListRequest.newBuilder().build(); - - OMRequest omRequest = createOMRequest(Type.ServiceList) - .setServiceListRequest(req) - .build(); - - final ServiceListResponse resp = handleError(submitRequest(omRequest)) - .getServiceListResponse(); - - return new ServiceInfoEx( - resp.getServiceInfoList().stream() - .map(ServiceInfo::getFromProtobuf) - .collect(Collectors.toList()), - resp.getCaCertificate()); - } - - /** - * Get a valid Delegation Token. - * - * @param renewer the designated renewer for the token - * @return Token - * @throws OMException - */ - @Override - public Token getDelegationToken(Text renewer) - throws OMException { - GetDelegationTokenRequestProto req = GetDelegationTokenRequestProto - .newBuilder() - .setRenewer(renewer == null ? "" : renewer.toString()) - .build(); - - OMRequest omRequest = createOMRequest(Type.GetDelegationToken) - .setGetDelegationTokenRequest(req) - .build(); - - final GetDelegationTokenResponseProto resp; - try { - resp = - handleError(submitRequest(omRequest)).getGetDelegationTokenResponse(); - return resp.getResponse().hasToken() ? - OMPBHelper.convertToDelegationToken(resp.getResponse().getToken()) - : null; - } catch (IOException e) { - if(e instanceof OMException) { - throw (OMException)e; - } - throw new OMException("Get delegation token failed.", e, - TOKEN_ERROR_OTHER); - } - } - - /** - * Renew an existing delegation token. - * - * @param token delegation token obtained earlier - * @return the new expiration time - */ - @Override - public long renewDelegationToken(Token token) - throws OMException { - RenewDelegationTokenRequestProto req = - RenewDelegationTokenRequestProto.newBuilder(). - setToken(OMPBHelper.convertToTokenProto(token)). 
- build(); - - OMRequest omRequest = createOMRequest(Type.RenewDelegationToken) - .setRenewDelegationTokenRequest(req) - .build(); - - final RenewDelegationTokenResponseProto resp; - try { - resp = handleError(submitRequest(omRequest)) - .getRenewDelegationTokenResponse(); - return resp.getResponse().getNewExpiryTime(); - } catch (IOException e) { - if(e instanceof OMException) { - throw (OMException)e; - } - throw new OMException("Renew delegation token failed.", e, - TOKEN_ERROR_OTHER); - } - } - - /** - * Cancel an existing delegation token. - * - * @param token delegation token - */ - @Override - public void cancelDelegationToken(Token token) - throws OMException { - CancelDelegationTokenRequestProto req = CancelDelegationTokenRequestProto - .newBuilder() - .setToken(OMPBHelper.convertToTokenProto(token)) - .build(); - - OMRequest omRequest = createOMRequest(Type.CancelDelegationToken) - .setCancelDelegationTokenRequest(req) - .build(); - - final CancelDelegationTokenResponseProto resp; - try { - handleError(submitRequest(omRequest)); - } catch (IOException e) { - if(e instanceof OMException) { - throw (OMException)e; - } - throw new OMException("Cancel delegation token failed.", e, - TOKEN_ERROR_OTHER); - } - } - - /** - * Get File Status for an Ozone key. - * - * @param args - * @return OzoneFileStatus for the key. - * @throws IOException - */ - public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .build(); - GetFileStatusRequest req = - GetFileStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .build(); - - OMRequest omRequest = createOMRequest(Type.GetFileStatus) - .setGetFileStatusRequest(req) - .build(); - - final GetFileStatusResponse resp; - try { - resp = handleError(submitRequest(omRequest)).getGetFileStatusResponse(); - } catch (IOException e) { - throw e; - } - return OzoneFileStatus.getFromProtobuf(resp.getStatus()); - } - - @Override - public void createDirectory(OmKeyArgs args) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .build(); - CreateDirectoryRequest request = CreateDirectoryRequest.newBuilder() - .setKeyArgs(keyArgs) - .build(); - - OMRequest omRequest = createOMRequest(Type.CreateDirectory) - .setCreateDirectoryRequest(request) - .build(); - - handleError(submitRequest(omRequest)); - } - - @Override - public OmKeyInfo lookupFile(OmKeyArgs args) - throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setSortDatanodes(args.getSortDatanodes()) - .build(); - LookupFileRequest lookupFileRequest = LookupFileRequest.newBuilder() - .setKeyArgs(keyArgs) - .build(); - OMRequest omRequest = createOMRequest(Type.LookupFile) - .setLookupFileRequest(lookupFileRequest) - .build(); - LookupFileResponse resp = - handleError(submitRequest(omRequest)).getLookupFileResponse(); - return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. 
- * @throws IOException if there is error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - AddAclRequest req = AddAclRequest.newBuilder() - .setObj(OzoneObj.toProtobuf(obj)) - .setAcl(OzoneAcl.toProtobuf(acl)) - .build(); - - OMRequest omRequest = createOMRequest(Type.AddAcl) - .setAddAclRequest(req) - .build(); - AddAclResponse addAclResponse = - handleError(submitRequest(omRequest)).getAddAclResponse(); - - return addAclResponse.getResponse(); - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - RemoveAclRequest req = RemoveAclRequest.newBuilder() - .setObj(OzoneObj.toProtobuf(obj)) - .setAcl(OzoneAcl.toProtobuf(acl)) - .build(); - - OMRequest omRequest = createOMRequest(Type.RemoveAcl) - .setRemoveAclRequest(req) - .build(); - RemoveAclResponse response = - handleError(submitRequest(omRequest)).getRemoveAclResponse(); - - return response.getResponse(); - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - SetAclRequest.Builder builder = SetAclRequest.newBuilder() - .setObj(OzoneObj.toProtobuf(obj)); - - acls.forEach(a -> builder.addAcl(OzoneAcl.toProtobuf(a))); - - OMRequest omRequest = createOMRequest(Type.SetAcl) - .setSetAclRequest(builder.build()) - .build(); - SetAclResponse response = - handleError(submitRequest(omRequest)).getSetAclResponse(); - - return response.getResponse(); - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. 
- */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - GetAclRequest req = GetAclRequest.newBuilder() - .setObj(OzoneObj.toProtobuf(obj)) - .build(); - - OMRequest omRequest = createOMRequest(Type.GetAcl) - .setGetAclRequest(req) - .build(); - GetAclResponse response = - handleError(submitRequest(omRequest)).getGetAclResponse(); - List acls = new ArrayList<>(); - response.getAclsList().stream().forEach(a -> - acls.add(OzoneAcl.fromProtobuf(a))); - return acls; - } - - @Override - public DBUpdatesWrapper getDBUpdates(DBUpdatesRequest dbUpdatesRequest) - throws IOException { - OMRequest omRequest = createOMRequest(Type.DBUpdates) - .setDbUpdatesRequest(dbUpdatesRequest) - .build(); - - DBUpdatesResponse dbUpdatesResponse = - handleError(submitRequest(omRequest)).getDbUpdatesResponse(); - - DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper(); - for (ByteString byteString : dbUpdatesResponse.getDataList()) { - dbUpdatesWrapper.addWriteBatch(byteString.toByteArray(), 0L); - } - dbUpdatesWrapper.setCurrentSequenceNumber( - dbUpdatesResponse.getSequenceNumber()); - return dbUpdatesWrapper; - } - - @Override - public OpenKeySession createFile(OmKeyArgs args, - boolean overWrite, boolean recursive) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()) - .setType(args.getType()) - .setFactor(args.getFactor()) - .addAllAcls(args.getAcls().stream().map(a -> - OzoneAcl.toProtobuf(a)).collect(Collectors.toList())) - .build(); - CreateFileRequest createFileRequest = CreateFileRequest.newBuilder() - .setKeyArgs(keyArgs) - .setIsOverwrite(overWrite) - .setIsRecursive(recursive) - .build(); - OMRequest omRequest = createOMRequest(Type.CreateFile) - .setCreateFileRequest(createFileRequest) - .build(); - CreateFileResponse resp = - handleError(submitRequest(omRequest)).getCreateFileResponse(); - return new OpenKeySession(resp.getID(), - OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); - } - - @Override - public List listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries) throws IOException { - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .build(); - ListStatusRequest listStatusRequest = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries) - .build(); - OMRequest omRequest = createOMRequest(Type.ListStatus) - .setListStatusRequest(listStatusRequest) - .build(); - ListStatusResponse listStatusResponse = - handleError(submitRequest(omRequest)).getListStatusResponse(); - List statusList = - new ArrayList<>(listStatusResponse.getStatusesCount()); - for (OzoneFileStatusProto fileStatus : listStatusResponse - .getStatusesList()) { - statusList.add(OzoneFileStatus.getFromProtobuf(fileStatus)); - } - return statusList; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java deleted file mode 100644 index 69083dc885e..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.protocolPB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneManagerService; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.token.TokenInfo; -import org.apache.hadoop.ozone.security.OzoneDelegationTokenSelector; - -/** - * Protocol used to communicate with OM. - */ -@ProtocolInfo(protocolName = - "org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol", - protocolVersion = 1) -@KerberosInfo( - serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY) -@TokenInfo(OzoneDelegationTokenSelector.class) -@InterfaceAudience.Private -public interface OzoneManagerProtocolPB - extends OzoneManagerService.BlockingInterface { -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java deleted file mode 100644 index d595edf291a..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.protocolPB; \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 69d94b60ac4..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -/** - * Classes related to ozone REST interface. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java deleted file mode 100644 index 4ff5f6a335e..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import com.google.protobuf.ByteString; -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoProtocolVersion; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketEncryptionInfoProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CipherSuiteProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CryptoProtocolVersionProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .FileEncryptionInfoProto; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.security.token.Token; - -/** - * Utilities for converting protobuf classes. - */ -public final class OMPBHelper { - - private OMPBHelper() { - /** Hidden constructor */ - } - - /** - * Converts Ozone delegation token to @{@link TokenProto}. - * @return tokenProto - */ - public static TokenProto convertToTokenProto(Token tok) { - if(tok == null){ - throw new IllegalArgumentException("Invalid argument: token is null"); - } - - return TokenProto.newBuilder(). - setIdentifier(getByteString(tok.getIdentifier())). - setPassword(getByteString(tok.getPassword())). - setKind(tok.getKind().toString()). 
- setService(tok.getService().toString()).build(); - } - - public static ByteString getByteString(byte[] bytes) { - // return singleton to reduce object allocation - return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes); - } - - /** - * Converts @{@link TokenProto} to Ozone delegation token. - * - * @return Ozone - */ - public static Token convertToDelegationToken( - TokenProto tokenProto) { - return new Token<>(tokenProto.getIdentifier() - .toByteArray(), tokenProto.getPassword().toByteArray(), new Text( - tokenProto.getKind()), new Text(tokenProto.getService())); - } - - public static BucketEncryptionKeyInfo convert( - BucketEncryptionInfoProto beInfo) { - if (beInfo == null) { - throw new IllegalArgumentException("Invalid argument: bucket encryption" + - " info is null"); - } - - return new BucketEncryptionKeyInfo( - beInfo.hasCryptoProtocolVersion()? - convert(beInfo.getCryptoProtocolVersion()) : null, - beInfo.hasSuite()? convert(beInfo.getSuite()) : null, - beInfo.getKeyName()); - } - - - public static BucketEncryptionInfoProto convert( - BucketEncryptionKeyInfo beInfo) { - if (beInfo == null || beInfo.getKeyName() == null) { - throw new IllegalArgumentException("Invalid argument: bucket encryption" + - " info is null"); - } - - BucketEncryptionInfoProto.Builder bb = BucketEncryptionInfoProto - .newBuilder().setKeyName(beInfo.getKeyName()); - - if (beInfo.getSuite() != null) { - bb.setSuite(convert(beInfo.getSuite())); - } - if (beInfo.getVersion()!= null) { - bb.setCryptoProtocolVersion(convert(beInfo.getVersion())); - } - return bb.build(); - } - - public static FileEncryptionInfoProto convert( - FileEncryptionInfo info) { - if (info == null) { - return null; - } - return OzoneManagerProtocolProtos.FileEncryptionInfoProto.newBuilder() - .setSuite(convert(info.getCipherSuite())) - .setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion())) - .setKey(getByteString(info.getEncryptedDataEncryptionKey())) - .setIv(getByteString(info.getIV())) - .setEzKeyVersionName(info.getEzKeyVersionName()) - .setKeyName(info.getKeyName()) - .build(); - } - - public static FileEncryptionInfo convert(FileEncryptionInfoProto proto) { - if (proto == null) { - return null; - } - CipherSuite suite = convert(proto.getSuite()); - CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion()); - byte[] key = proto.getKey().toByteArray(); - byte[] iv = proto.getIv().toByteArray(); - String ezKeyVersionName = proto.getEzKeyVersionName(); - String keyName = proto.getKeyName(); - return new FileEncryptionInfo(suite, version, key, iv, keyName, - ezKeyVersionName); - } - - public static CipherSuite convert(CipherSuiteProto proto) { - switch(proto) { - case AES_CTR_NOPADDING: - return CipherSuite.AES_CTR_NOPADDING; - default: - // Set to UNKNOWN and stash the unknown enum value - CipherSuite suite = CipherSuite.UNKNOWN; - suite.setUnknownValue(proto.getNumber()); - return suite; - } - } - - public static CipherSuiteProto convert(CipherSuite suite) { - switch (suite) { - case UNKNOWN: - return CipherSuiteProto.UNKNOWN; - case AES_CTR_NOPADDING: - return CipherSuiteProto.AES_CTR_NOPADDING; - default: - return null; - } - } - - public static CryptoProtocolVersionProto convert( - CryptoProtocolVersion version) { - switch(version) { - case UNKNOWN: - return OzoneManagerProtocolProtos.CryptoProtocolVersionProto - .UNKNOWN_PROTOCOL_VERSION; - case ENCRYPTION_ZONES: - return OzoneManagerProtocolProtos.CryptoProtocolVersionProto - .ENCRYPTION_ZONES; - default: - return null; - } - } - 
- public static CryptoProtocolVersion convert( - CryptoProtocolVersionProto proto) { - switch(proto) { - case ENCRYPTION_ZONES: - return CryptoProtocolVersion.ENCRYPTION_ZONES; - default: - // Set to UNKNOWN and stash the unknown enum value - CryptoProtocolVersion version = CryptoProtocolVersion.UNKNOWN; - version.setUnknownValue(proto.getNumber()); - return version; - } - } - - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java deleted file mode 100644 index 8361bac0d06..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -/** - * Helper class for converting protobuf objects. - */ -public final class OzonePBHelper { - - private OzonePBHelper() { - /** Hidden constructor */ - } - - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 860386d9fdc..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -/** - * This package contains classes for the Protocol Buffers binding of Ozone - * protocols. 
- */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java deleted file mode 100644 index 575c9eaae8a..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/AWSV4AuthValidator.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.util.StringUtils; -import org.apache.kerby.util.Hex; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.crypto.Mac; -import javax.crypto.spec.SecretKeySpec; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -/** - * AWS v4 authentication payload validator. For more details refer to AWS - * documentation https://docs.aws.amazon.com/general/latest/gr/ - * sigv4-create-canonical-request.html. - **/ -final class AWSV4AuthValidator { - - private final static Logger LOG = - LoggerFactory.getLogger(AWSV4AuthValidator.class); - private static final String HMAC_SHA256_ALGORITHM = "HmacSHA256"; - private static final Charset UTF_8 = Charset.forName("utf-8"); - - private AWSV4AuthValidator() { - } - - private static String urlDecode(String str) { - try { - return URLDecoder.decode(str, UTF_8.name()); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - } - - public static String hash(String payload) throws NoSuchAlgorithmException { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - md.update(payload.getBytes(UTF_8)); - return String.format("%064x", new java.math.BigInteger(1, md.digest())); - } - - private static byte[] sign(byte[] key, String msg) { - try { - SecretKeySpec signingKey = new SecretKeySpec(key, HMAC_SHA256_ALGORITHM); - Mac mac = Mac.getInstance(HMAC_SHA256_ALGORITHM); - mac.init(signingKey); - return mac.doFinal(msg.getBytes(StandardCharsets.UTF_8)); - } catch (GeneralSecurityException gse) { - throw new RuntimeException(gse); - } - } - - /** - * Returns signing key. 
- * - * @param key - * @param strToSign - * - * SignatureKey = HMAC-SHA256(HMAC-SHA256(HMAC-SHA256(HMAC-SHA256("AWS4" + - * "","20130524"),"us-east-1"),"s3"),"aws4_request") - * - * For more details refer to AWS documentation: https://docs.aws.amazon - * .com/AmazonS3/latest/API/sig-v4-header-based-auth.html - * - * */ - private static byte[] getSigningKey(String key, String strToSign) { - String[] signData = StringUtils.split(StringUtils.split(strToSign, - '\n')[2], '/'); - String dateStamp = signData[0]; - String regionName = signData[1]; - String serviceName = signData[2]; - byte[] kDate = sign(("AWS4" + key).getBytes(UTF_8), dateStamp); - byte[] kRegion = sign(kDate, regionName); - byte[] kService = sign(kRegion, serviceName); - byte[] kSigning = sign(kService, "aws4_request"); - LOG.info(Hex.encode(kSigning)); - return kSigning; - } - - /** - * Validate request by comparing Signature from request. Returns true if - * aws request is legit else returns false. - * Signature = HEX(HMAC_SHA256(key, String to Sign)) - * - * For more details refer to AWS documentation: https://docs.aws.amazon.com - * /AmazonS3/latest/API/sigv4-streaming.html - */ - public static boolean validateRequest(String strToSign, String signature, - String userKey) { - String expectedSignature = Hex.encode(sign(getSigningKey(userKey, - strToSign), strToSign)); - return expectedSignature.equals(signature); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java deleted file mode 100644 index 0fd6b08bccd..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/GDPRSymmetricKey.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.security.SecureRandom; -import java.util.HashMap; -import java.util.Map; - -import javax.crypto.Cipher; -import javax.crypto.spec.SecretKeySpec; - -/** - * Symmetric Key structure for GDPR. - */ -public class GDPRSymmetricKey { - - private SecretKeySpec secretKey; - private Cipher cipher; - private String algorithm; - private String secret; - - public SecretKeySpec getSecretKey() { - return secretKey; - } - - public Cipher getCipher() { - return cipher; - } - - /** - * Default constructor creates key with default values. - * @throws Exception - */ - public GDPRSymmetricKey(SecureRandom secureRandom) throws Exception { - algorithm = OzoneConsts.GDPR_ALGORITHM_NAME; - secret = RandomStringUtils.random( - OzoneConsts.GDPR_DEFAULT_RANDOM_SECRET_LENGTH, - 0, 0, true, true, null, secureRandom); - this.secretKey = new SecretKeySpec( - secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm); - this.cipher = Cipher.getInstance(algorithm); - } - - /** - * Overloaded constructor creates key with specified values. - * @throws Exception - */ - public GDPRSymmetricKey(String secret, String algorithm) throws Exception { - Preconditions.checkNotNull(secret, "Secret cannot be null"); - //TODO: When we add feature to allow users to customize the secret length, - // we need to update this length check Precondition - Preconditions.checkArgument(secret.length() == 16, - "Secret must be exactly 16 characters"); - Preconditions.checkNotNull(algorithm, "Algorithm cannot be null"); - this.secret = secret; - this.algorithm = algorithm; - this.secretKey = new SecretKeySpec( - secret.getBytes(OzoneConsts.GDPR_CHARSET), algorithm); - this.cipher = Cipher.getInstance(algorithm); - } - - public Map getKeyDetails() { - Map keyDetail = new HashMap<>(); - keyDetail.put(OzoneConsts.GDPR_SECRET, this.secret); - keyDetail.put(OzoneConsts.GDPR_ALGORITHM, this.algorithm); - return keyDetail; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java deleted file mode 100644 index 5cc782336a8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneBlockTokenSecretManager.java +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
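
The GDPRSymmetricKey class deleted a little above builds a SecretKeySpec from a 16-character secret and obtains a Cipher for the configured GDPR algorithm. Below is a standalone sketch of that usage under the assumption that the algorithm constant resolves to AES (which the 16-byte secret length suggests); the secret shown is a placeholder, not a real key.

    import javax.crypto.Cipher;
    import javax.crypto.spec.SecretKeySpec;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public final class GdprKeySketch {

      public static void main(String[] args) throws Exception {
        // 16-character secret, mirroring the length check in the deleted constructor.
        String secret = "0123456789abcdef";
        SecretKeySpec key = new SecretKeySpec(
            secret.getBytes(StandardCharsets.UTF_8), "AES");

        // Encrypt and decrypt a payload with the same symmetric key.
        Cipher cipher = Cipher.getInstance("AES");
        cipher.init(Cipher.ENCRYPT_MODE, key);
        byte[] encrypted = cipher.doFinal("sensitive bytes".getBytes(StandardCharsets.UTF_8));

        cipher.init(Cipher.DECRYPT_MODE, key);
        byte[] decrypted = cipher.doFinal(encrypted);
        System.out.println(Arrays.equals(
            "sensitive bytes".getBytes(StandardCharsets.UTF_8), decrypted)); // true
      }
    }
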

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.EnumSet; - -/** - * SecretManager for Ozone Master block tokens. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class OzoneBlockTokenSecretManager extends - OzoneSecretManager { - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneBlockTokenSecretManager.class);; - // Will be set by grpc clients for individual datanodes. - static final Text SERVICE = new Text("HDDS_SERVICE"); - private final String omCertSerialId; - - /** - * Create a secret manager. - * - * @param conf - * @param blockTokenExpirytime token expiry time for expired tokens in - * milliseconds - */ - public OzoneBlockTokenSecretManager(SecurityConfig conf, - long blockTokenExpirytime, String omCertSerialId) { - super(conf, blockTokenExpirytime, blockTokenExpirytime, SERVICE, LOG); - this.omCertSerialId = omCertSerialId; - } - - @Override - public OzoneBlockTokenIdentifier createIdentifier() { - throw new SecurityException("Ozone block token can't be created " - + "without owner and access mode information."); - } - - public OzoneBlockTokenIdentifier createIdentifier(String owner, - String blockId, EnumSet modes, long maxLength) { - return new OzoneBlockTokenIdentifier(owner, blockId, modes, - getTokenExpiryTime(), omCertSerialId, maxLength); - } - - /** - * Generate an block token for specified user, blockId. Service field for - * token is set to blockId. - * - * @param user - * @param blockId - * @param modes - * @param maxLength - * @return token - */ - public Token generateToken(String user, - String blockId, EnumSet modes, long maxLength) { - OzoneBlockTokenIdentifier tokenIdentifier = createIdentifier(user, - blockId, modes, maxLength); - if (LOG.isTraceEnabled()) { - long expiryTime = tokenIdentifier.getExpiryDate(); - String tokenId = tokenIdentifier.toString(); - LOG.trace("Issued delegation token -> expiryTime:{}, tokenId:{}", - expiryTime, tokenId); - } - // Pass blockId as service. - return new Token<>(tokenIdentifier.getBytes(), - createPassword(tokenIdentifier), tokenIdentifier.getKind(), - new Text(blockId)); - } - - /** - * Generate an block token for current user. - * - * @param blockId - * @param modes - * @return token - */ - public Token generateToken(String blockId, - EnumSet modes, long maxLength) throws IOException { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - String userID = (ugi == null ? 
null : ugi.getShortUserName()); - return generateToken(userID, blockId, modes, maxLength); - } - - @Override - public byte[] retrievePassword(OzoneBlockTokenIdentifier identifier) - throws InvalidToken { - validateToken(identifier); - return createPassword(identifier); - } - - @Override - public long renewToken(Token token, - String renewer) throws IOException { - throw new UnsupportedOperationException("Renew token operation is not " + - "supported for ozone block tokens."); - } - - @Override - public OzoneBlockTokenIdentifier cancelToken(Token - token, String canceller) throws IOException { - throw new UnsupportedOperationException("Cancel token operation is not " + - "supported for ozone block tokens."); - } - - /** - * Find the OzoneBlockTokenInfo for the given token id, and verify that if the - * token is not expired. - */ - public boolean validateToken(OzoneBlockTokenIdentifier identifier) - throws InvalidToken { - long now = Time.now(); - if (identifier.getExpiryDate() < now) { - throw new InvalidToken("token " + formatTokenId(identifier) + " is " + - "expired, current time: " + Time.formatTime(now) + - " expiry time: " + identifier.getExpiryDate()); - } - - if (!verifySignature(identifier, createPassword(identifier))) { - throw new InvalidToken("Tampered/Invalid token."); - } - return true; - } - - /** - * Validates if given hash is valid. - * - * @param identifier - * @param password - */ - public boolean verifySignature(OzoneBlockTokenIdentifier identifier, - byte[] password) { - throw new UnsupportedOperationException("This operation is not " + - "supported for block tokens."); - } - - /** - * Should be called before this object is used. - * @param client - */ - @Override - public synchronized void start(CertificateClient client) throws IOException { - super.start(client); - } - - /** - * Returns expiry time by adding configured expiry time with current time. - * - * @return Expiry time. - */ - private long getTokenExpiryTime() { - return Time.now() + getTokenRenewInterval(); - } - - /** - * Should be called before this object is used. - */ - @Override - public synchronized void stop() throws IOException { - super.stop(); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java deleted file mode 100644 index 0de8ac63c3f..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ /dev/null @@ -1,560 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.S3SecretManager; -import org.apache.hadoop.ozone.om.S3SecretManagerImpl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.security.OzoneSecretStore.OzoneManagerSecretState; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier.TokenInfo; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.HadoopKerberosName; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN; - -/** - * SecretManager for Ozone Master. Responsible for signing identifiers with - * private key, - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class OzoneDelegationTokenSecretManager - extends OzoneSecretManager { - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneDelegationTokenSecretManager.class); - private final Map currentTokens; - private final OzoneSecretStore store; - private final S3SecretManagerImpl s3SecretManager; - private Thread tokenRemoverThread; - private final long tokenRemoverScanInterval; - private String omCertificateSerialId; - /** - * If the delegation token update thread holds this lock, it will not get - * interrupted. - */ - private Object noInterruptsLock = new Object(); - - private boolean isRatisEnabled; - - /** - * Create a secret manager. - * - * @param conf configuration. 
- * @param tokenMaxLifetime the maximum lifetime of the delegation tokens in - * milliseconds - * @param tokenRenewInterval how often the tokens must be renewed in - * milliseconds - * @param dtRemoverScanInterval how often the tokens are scanned for expired - * tokens in milliseconds - * @param certClient certificate client to SCM CA - */ - public OzoneDelegationTokenSecretManager(OzoneConfiguration conf, - long tokenMaxLifetime, long tokenRenewInterval, - long dtRemoverScanInterval, Text service, - S3SecretManager s3SecretManager, CertificateClient certClient) - throws IOException { - super(new SecurityConfig(conf), tokenMaxLifetime, tokenRenewInterval, - service, LOG); - setCertClient(certClient); - currentTokens = new ConcurrentHashMap(); - this.tokenRemoverScanInterval = dtRemoverScanInterval; - this.s3SecretManager = (S3SecretManagerImpl) s3SecretManager; - this.store = new OzoneSecretStore(conf, - this.s3SecretManager.getOmMetadataManager()); - isRatisEnabled = conf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); - loadTokenSecretState(store.loadState()); - } - - @Override - public OzoneTokenIdentifier createIdentifier() { - return OzoneTokenIdentifier.newInstance(); - } - - /** - * Create new Identifier with given,owner,renwer and realUser. - * - * @return T - */ - public OzoneTokenIdentifier createIdentifier(Text owner, Text renewer, - Text realUser) { - return OzoneTokenIdentifier.newInstance(owner, renewer, realUser); - } - - /** - * Returns {@link Token} for given identifier. - * - * @param owner - * @param renewer - * @param realUser - * @return Token - * @throws IOException to allow future exceptions to be added without breaking - * compatibility - */ - public Token createToken(Text owner, Text renewer, - Text realUser) - throws IOException { - OzoneTokenIdentifier identifier = createIdentifier(owner, renewer, - realUser); - updateIdentifierDetails(identifier); - - byte[] password = createPassword(identifier.getBytes(), - getCurrentKey().getPrivateKey()); - long expiryTime = identifier.getIssueDate() + getTokenRenewInterval(); - - // For HA ratis will take care of updating. - // This will be removed, when HA/Non-HA code is merged. - if (!isRatisEnabled) { - addToTokenStore(identifier, password, expiryTime); - } - - Token token = new Token<>(identifier.getBytes(), - password, identifier.getKind(), getService()); - if (LOG.isDebugEnabled()) { - LOG.debug("Created delegation token: {}", token); - } - return token; - } - - /** - * Add delegation token in to in-memory map of tokens. - * @param token - * @param ozoneTokenIdentifier - * @return renewTime - If updated successfully, return renewTime. - */ - public long updateToken(Token token, - OzoneTokenIdentifier ozoneTokenIdentifier) { - long renewTime = - ozoneTokenIdentifier.getIssueDate() + getTokenRenewInterval(); - TokenInfo tokenInfo = new TokenInfo(renewTime, token.getPassword(), - ozoneTokenIdentifier.getTrackingId()); - currentTokens.put(ozoneTokenIdentifier, tokenInfo); - return renewTime; - } - - /** - * Stores given identifier in token store. 
- * - * @param identifier - * @param password - * @throws IOException - */ - private void addToTokenStore(OzoneTokenIdentifier identifier, - byte[] password, long renewTime) - throws IOException { - TokenInfo tokenInfo = new TokenInfo(renewTime, password, - identifier.getTrackingId()); - currentTokens.put(identifier, tokenInfo); - store.storeToken(identifier, tokenInfo.getRenewDate()); - } - - /** - * Updates issue date, master key id and sequence number for identifier. - * - * @param identifier the identifier to validate - */ - private void updateIdentifierDetails(OzoneTokenIdentifier identifier) { - int sequenceNum; - long now = Time.now(); - sequenceNum = incrementDelegationTokenSeqNum(); - identifier.setIssueDate(now); - identifier.setMasterKeyId(getCurrentKey().getKeyId()); - identifier.setSequenceNumber(sequenceNum); - identifier.setMaxDate(now + getTokenMaxLifetime()); - identifier.setOmCertSerialId(getOmCertificateSerialId()); - } - - /** - * Get OM certificate serial id. - * */ - private String getOmCertificateSerialId() { - if (omCertificateSerialId == null) { - omCertificateSerialId = - getCertClient().getCertificate().getSerialNumber().toString(); - } - return omCertificateSerialId; - } - - /** - * Renew a delegation token. - * - * @param token the token to renew - * @param renewer the full principal name of the user doing the renewal - * @return the new expiration time - * @throws InvalidToken if the token is invalid - * @throws AccessControlException if the user can't renew token - */ - @Override - public synchronized long renewToken(Token token, - String renewer) throws IOException { - ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); - DataInputStream in = new DataInputStream(buf); - OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf(in); - if (LOG.isDebugEnabled()) { - LOG.debug("Token renewal for identifier: {}, total currentTokens: {}", - formatTokenId(id), currentTokens.size()); - } - - long now = Time.now(); - if (id.getMaxDate() < now) { - throw new OMException(renewer + " tried to renew an expired token " - + formatTokenId(id) + " max expiration date: " - + Time.formatTime(id.getMaxDate()) - + " currentTime: " + Time.formatTime(now), TOKEN_EXPIRED); - } - validateToken(id); - if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) { - throw new AccessControlException(renewer + - " tried to renew a token " + formatTokenId(id) - + " without a renewer"); - } - if (!id.getRenewer().toString().equals(renewer)) { - throw new AccessControlException(renewer - + " tries to renew a token " + formatTokenId(id) - + " with non-matching renewer " + id.getRenewer()); - } - - long renewTime = Math.min(id.getMaxDate(), now + getTokenRenewInterval()); - - // For HA ratis will take care of updating. - // This will be removed, when HA/Non-HA code is merged. - if (!isRatisEnabled) { - try { - addToTokenStore(id, token.getPassword(), renewTime); - } catch (IOException e) { - LOG.error("Unable to update token " + id.getSequenceNumber(), e); - } - } - return renewTime; - } - - public void updateRenewToken(Token token, - OzoneTokenIdentifier ozoneTokenIdentifier, long expiryTime) { - //TODO: Instead of having in-memory map inside this class, we can use - // cache from table and make this table cache clean up policy NEVER. In - // this way, we don't need to maintain seperate in-memory map. To do this - // work we need to merge HA/Non-HA code. 
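
The renewal logic deleted just above caps each renewal at the token's maximum lifetime: renewTime = min(maxDate, now + renewInterval). A standalone sketch of that arithmetic; the durations used here are illustrative, not the Ozone defaults.

    import java.time.Duration;
    import java.time.Instant;

    public final class RenewWindowSketch {

      // Mirrors the deleted renewToken(): a renewal never extends past maxDate.
      static long renewTime(long maxDateMs, long nowMs, long renewIntervalMs) {
        return Math.min(maxDateMs, nowMs + renewIntervalMs);
      }

      public static void main(String[] args) {
        long issue = Instant.now().toEpochMilli();
        long maxLifetime = Duration.ofDays(7).toMillis();   // illustrative
        long renewInterval = Duration.ofDays(1).toMillis(); // illustrative
        long maxDate = issue + maxLifetime;

        long firstRenewal = renewTime(maxDate, issue, renewInterval);
        long lateRenewal = renewTime(maxDate, maxDate - 1000, renewInterval);

        // A late renewal is clamped to maxDate instead of gaining a full interval.
        System.out.println((firstRenewal - issue) + " ms vs "
            + (lateRenewal - (maxDate - 1000)) + " ms");
      }
    }
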
- TokenInfo tokenInfo = new TokenInfo(expiryTime, token.getPassword(), - ozoneTokenIdentifier.getTrackingId()); - currentTokens.put(ozoneTokenIdentifier, tokenInfo); - } - - /** - * Cancel a token by removing it from store and cache. - * - * @return Identifier of the canceled token - * @throws InvalidToken for invalid token - * @throws AccessControlException if the user isn't allowed to cancel - */ - public OzoneTokenIdentifier cancelToken(Token token, - String canceller) throws IOException { - OzoneTokenIdentifier id = OzoneTokenIdentifier.readProtoBuf( - token.getIdentifier()); - if (LOG.isDebugEnabled()) { - LOG.debug("Token cancellation requested for identifier: {}", - formatTokenId(id)); - } - - if (id.getUser() == null) { - throw new InvalidToken("Token with no owner " + formatTokenId(id)); - } - String owner = id.getUser().getUserName(); - Text renewer = id.getRenewer(); - HadoopKerberosName cancelerKrbName = new HadoopKerberosName(canceller); - String cancelerShortName = cancelerKrbName.getShortName(); - if (!canceller.equals(owner) - && (renewer == null || renewer.toString().isEmpty() - || !cancelerShortName - .equals(renewer.toString()))) { - throw new AccessControlException(canceller - + " is not authorized to cancel the token " + formatTokenId(id)); - } - - // For HA ratis will take care of removal. - // This check will be removed, when HA/Non-HA code is merged. - if (!isRatisEnabled) { - try { - store.removeToken(id); - } catch (IOException e) { - LOG.error("Unable to remove token " + id.getSequenceNumber(), e); - } - TokenInfo info = currentTokens.remove(id); - if (info == null) { - throw new InvalidToken("Token not found " + formatTokenId(id)); - } - } else { - // Check whether token is there in-memory map of tokens or not on the - // OM leader. - TokenInfo info = currentTokens.get(id); - if (info == null) { - throw new InvalidToken("Token not found in-memory map of tokens" + - formatTokenId(id)); - } - } - return id; - } - - /** - * Remove the expired token from in-memory map. - * @param ozoneTokenIdentifier - * @throws IOException - */ - public void removeToken(OzoneTokenIdentifier ozoneTokenIdentifier) { - currentTokens.remove(ozoneTokenIdentifier); - } - - @Override - public byte[] retrievePassword(OzoneTokenIdentifier identifier) - throws InvalidToken { - if(identifier.getTokenType().equals(S3TOKEN)) { - return validateS3Token(identifier); - } - return validateToken(identifier).getPassword(); - } - - /** - * Checks if TokenInfo for the given identifier exists in database and if the - * token is expired. - */ - private TokenInfo validateToken(OzoneTokenIdentifier identifier) - throws InvalidToken { - TokenInfo info = currentTokens.get(identifier); - if (info == null) { - throw new InvalidToken("token " + formatTokenId(identifier) - + " can't be found in cache"); - } - long now = Time.now(); - if (info.getRenewDate() < now) { - throw new InvalidToken("token " + formatTokenId(identifier) + " is " + - "expired, current time: " + Time.formatTime(now) + - " expected renewal time: " + Time.formatTime(info.getRenewDate())); - } - if (!verifySignature(identifier, info.getPassword())) { - throw new InvalidToken("Tampered/Invalid token."); - } - return info; - } - - /** - * Validates if given hash is valid. 
- * - * @param identifier - * @param password - */ - public boolean verifySignature(OzoneTokenIdentifier identifier, - byte[] password) { - try { - return getCertClient().verifySignature(identifier.getBytes(), password, - getCertClient().getCertificate(identifier.getOmCertSerialId())); - } catch (CertificateException e) { - return false; - } - } - - /** - * Validates if a S3 identifier is valid or not. - * */ - private byte[] validateS3Token(OzoneTokenIdentifier identifier) - throws InvalidToken { - LOG.trace("Validating S3Token for identifier:{}", identifier); - String awsSecret; - try { - awsSecret = s3SecretManager.getS3UserSecretString(identifier - .getAwsAccessId()); - } catch (IOException e) { - LOG.error("Error while validating S3 identifier:{}", - identifier, e); - throw new InvalidToken("No S3 secret found for S3 identifier:" - + identifier); - } - - if (awsSecret == null) { - throw new InvalidToken("No S3 secret found for S3 identifier:" - + identifier); - } - - if (AWSV4AuthValidator.validateRequest(identifier.getStrToSign(), - identifier.getSignature(), awsSecret)) { - return identifier.getSignature().getBytes(UTF_8); - } - throw new InvalidToken("Invalid S3 identifier:" - + identifier); - - } - - private void loadTokenSecretState( - OzoneManagerSecretState state) throws IOException { - LOG.info("Loading token state into token manager."); - for (Map.Entry entry : - state.getTokenState().entrySet()) { - addPersistedDelegationToken(entry.getKey(), entry.getValue()); - } - } - - private void addPersistedDelegationToken(OzoneTokenIdentifier identifier, - long renewDate) throws IOException { - if (isRunning()) { - // a safety check - throw new IOException( - "Can't add persisted delegation token to a running SecretManager."); - } - - byte[] password = createPassword(identifier.getBytes(), - getCertClient().getPrivateKey()); - if (identifier.getSequenceNumber() > getDelegationTokenSeqNum()) { - setDelegationTokenSeqNum(identifier.getSequenceNumber()); - } - if (currentTokens.get(identifier) == null) { - currentTokens.put(identifier, new TokenInfo(renewDate, - password, identifier.getTrackingId())); - } else { - throw new IOException("Same delegation token being added twice: " - + formatTokenId(identifier)); - } - } - - /** - * Should be called before this object is used. - */ - @Override - public synchronized void start(CertificateClient certClient) - throws IOException { - super.start(certClient); - tokenRemoverThread = new Daemon(new ExpiredTokenRemover()); - tokenRemoverThread.start(); - } - - public void stopThreads() { - if (LOG.isDebugEnabled()) { - LOG.debug("Stopping expired delegation token remover thread"); - } - setIsRunning(false); - - if (tokenRemoverThread != null) { - synchronized (noInterruptsLock) { - tokenRemoverThread.interrupt(); - } - try { - tokenRemoverThread.join(); - } catch (InterruptedException e) { - throw new RuntimeException( - "Unable to join on token removal thread", e); - } - } - } - - /** - * Stops the OzoneDelegationTokenSecretManager. - * - * @throws IOException - */ - @Override - public void stop() throws IOException { - super.stop(); - stopThreads(); - if (this.store != null) { - this.store.close(); - } - } - - /** - * Remove expired delegation tokens from cache and persisted store. 
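
The cleanup path whose body follows below scans the in-memory token map on an interval and drops entries whose renew date has passed. A standalone sketch of that sweep, with the token identifier and TokenInfo types simplified to plain JDK classes:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public final class ExpiredTokenSweepSketch {

      public static void main(String[] args) {
        Map<String, Long> renewDates = new ConcurrentHashMap<>();
        renewDates.put("token-1", System.currentTimeMillis() - 1_000);  // already expired
        renewDates.put("token-2", System.currentTimeMillis() + 60_000); // still valid

        long now = System.currentTimeMillis();
        // Same rule as the deleted removeExpiredToken(): renewDate < now means drop it.
        renewDates.entrySet().removeIf(e -> e.getValue() < now);

        System.out.println(renewDates.keySet()); // [token-2]
      }
    }
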
- */ - private void removeExpiredToken() { - long now = Time.now(); - synchronized (this) { - Iterator> i = currentTokens.entrySet().iterator(); - while (i.hasNext()) { - Map.Entry entry = i.next(); - long renewDate = entry.getValue().getRenewDate(); - if (renewDate < now) { - i.remove(); - try { - store.removeToken(entry.getKey()); - } catch (IOException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Failed to remove expired token {}", entry.getValue()); - } - } - } - } - } - } - - private class ExpiredTokenRemover extends Thread { - - private long lastTokenCacheCleanup; - - @Override - public void run() { - LOG.info("Starting expired delegation token remover thread, " - + "tokenRemoverScanInterval=" + getTokenRemoverScanInterval() - / (60 * 1000) + " min(s)"); - try { - while (isRunning()) { - long now = Time.now(); - if (lastTokenCacheCleanup + getTokenRemoverScanInterval() - < now) { - removeExpiredToken(); - lastTokenCacheCleanup = now; - } - try { - Thread.sleep(Math.min(5000, - getTokenRemoverScanInterval())); // 5 seconds - } catch (InterruptedException ie) { - LOG.error("ExpiredTokenRemover received " + ie); - } - } - } catch (Throwable t) { - LOG.error("ExpiredTokenRemover thread received unexpected exception", - t); - Runtime.getRuntime().exit(-1); - } - } - } - - public long getTokenRemoverScanInterval() { - return tokenRemoverScanInterval; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java deleted file mode 100644 index 68afaaf52b8..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSelector.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import java.util.Collection; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A delegation token selector that is specialized for Ozone. 
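
The OzoneDelegationTokenSelector being removed here matches a credential by kind ("OzoneToken") and by whether its service string contains the requested service. A minimal standalone sketch of that matching rule; TokenStub is an invented stand-in for Hadoop's Token class, carrying only the two fields the selector inspects.

    import java.util.List;
    import java.util.Optional;

    public final class TokenSelectionSketch {

      // Stand-in for a Hadoop Token: just the fields the selector looks at.
      record TokenStub(String kind, String service) { }

      // First token of the expected kind whose service contains the requested one.
      static Optional<TokenStub> select(String service, List<TokenStub> tokens) {
        if (service == null) {
          return Optional.empty();
        }
        return tokens.stream()
            .filter(t -> "OzoneToken".equals(t.kind()))
            .filter(t -> t.service().contains(service))
            .findFirst();
      }

      public static void main(String[] args) {
        List<TokenStub> creds = List.of(
            new TokenStub("HDFS_DELEGATION_TOKEN", "nn1:8020"),
            new TokenStub("OzoneToken", "om1:9862,om2:9862"));
        System.out.println(select("om1:9862", creds).orElseThrow());
      }
    }
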
- */ -@InterfaceAudience.Private -public class OzoneDelegationTokenSelector - extends AbstractDelegationTokenSelector { - - public OzoneDelegationTokenSelector() { - super(OzoneTokenIdentifier.KIND_NAME); - } - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneDelegationTokenSelector.class); - - @Override - public Token selectToken(Text service, - Collection> tokens) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting token for service {}", service); - } - Token token = getSelectedTokens(service, tokens); - if (LOG.isDebugEnabled()) { - LOG.debug("Got tokens: {} for service {}", token, service); - } - return token; - } - - private Token getSelectedTokens(Text service, - Collection> tokens) { - if (service == null) { - return null; - } - for (Token token : tokens) { - if (OzoneTokenIdentifier.KIND_NAME.equals(token.getKind()) - && token.getService().toString().contains(service.toString())) { - return (Token) token; - } - } - return null; - } - -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java deleted file mode 100644 index 39260fe5064..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretKey.java +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security; - -import com.google.common.base.Preconditions; -import com.google.protobuf.ByteString; -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SecretKeyProto; - -/** - * Wrapper class for Ozone/Hdds secret keys. Used in delegation tokens and block - * tokens. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class OzoneSecretKey implements Writable { - - private int keyId; - private long expiryDate; - private PrivateKey privateKey; - private PublicKey publicKey; - private SecurityConfig securityConfig; - - public OzoneSecretKey(int keyId, long expiryDate, KeyPair keyPair) { - Preconditions.checkNotNull(keyId); - this.keyId = keyId; - this.expiryDate = expiryDate; - this.privateKey = keyPair.getPrivate(); - this.publicKey = keyPair.getPublic(); - } - - /* - * Create new instance using default signature algorithm and provider. 
- * */ - public OzoneSecretKey(int keyId, long expiryDate, byte[] pvtKey, - byte[] publicKey) { - Preconditions.checkNotNull(pvtKey); - Preconditions.checkNotNull(publicKey); - - this.securityConfig = new SecurityConfig(new OzoneConfiguration()); - this.keyId = keyId; - this.expiryDate = expiryDate; - this.privateKey = SecurityUtil.getPrivateKey(pvtKey, securityConfig); - this.publicKey = SecurityUtil.getPublicKey(publicKey, securityConfig); - } - - public int getKeyId() { - return keyId; - } - - public long getExpiryDate() { - return expiryDate; - } - - public PrivateKey getPrivateKey() { - return privateKey; - } - - public PublicKey getPublicKey() { - return publicKey; - } - - public byte[] getEncodedPrivateKey() { - return privateKey.getEncoded(); - } - - public byte[] getEncodedPubliceKey() { - return publicKey.getEncoded(); - } - - public void setExpiryDate(long expiryDate) { - this.expiryDate = expiryDate; - } - - @Override - public void write(DataOutput out) throws IOException { - SecretKeyProto token = SecretKeyProto.newBuilder() - .setKeyId(getKeyId()) - .setExpiryDate(getExpiryDate()) - .setPrivateKeyBytes(ByteString.copyFrom(getEncodedPrivateKey())) - .setPublicKeyBytes(ByteString.copyFrom(getEncodedPubliceKey())) - .build(); - out.write(token.toByteArray()); - } - - @Override - public void readFields(DataInput in) throws IOException { - SecretKeyProto secretKey = SecretKeyProto.parseFrom((DataInputStream) in); - expiryDate = secretKey.getExpiryDate(); - keyId = secretKey.getKeyId(); - privateKey = SecurityUtil.getPrivateKey(secretKey.getPrivateKeyBytes() - .toByteArray(), securityConfig); - publicKey = SecurityUtil.getPublicKey(secretKey.getPublicKeyBytes() - .toByteArray(), securityConfig); - } - - @Override - public int hashCode() { - HashCodeBuilder hashCodeBuilder = new HashCodeBuilder(537, 963); - hashCodeBuilder.append(getExpiryDate()) - .append(getKeyId()) - .append(getEncodedPrivateKey()) - .append(getEncodedPubliceKey()); - - return hashCodeBuilder.build(); - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (obj instanceof OzoneSecretKey) { - OzoneSecretKey that = (OzoneSecretKey) obj; - return new EqualsBuilder() - .append(this.keyId, that.keyId) - .append(this.expiryDate, that.expiryDate) - .append(this.privateKey, that.privateKey) - .append(this.publicKey, that.publicKey) - .build(); - } - return false; - } - - /** - * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}. - */ - static OzoneSecretKey readProtoBuf(DataInput in) throws IOException { - Preconditions.checkNotNull(in); - SecretKeyProto key = SecretKeyProto.parseFrom((DataInputStream) in); - return new OzoneSecretKey(key.getKeyId(), key.getExpiryDate(), - key.getPrivateKeyBytes().toByteArray(), - key.getPublicKeyBytes().toByteArray()); - } - - /** - * Reads protobuf encoded input stream to construct {@link OzoneSecretKey}. 
- */ - static OzoneSecretKey readProtoBuf(byte[] identifier) throws IOException { - Preconditions.checkNotNull(identifier); - DataInputStream in = new DataInputStream(new ByteArrayInputStream( - identifier)); - return readProtoBuf(in); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java deleted file mode 100644 index 06fc071f32d..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretManager.java +++ /dev/null @@ -1,258 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.security; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.slf4j.Logger; - -import java.io.IOException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * SecretManager for Ozone Master. Responsible for signing identifiers with - * private key, - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public abstract class OzoneSecretManager - extends SecretManager { - - private final Logger logger; - /** - * The name of the Private/Public Key based hashing algorithm. - */ - private final SecurityConfig securityConfig; - private final long tokenMaxLifetime; - private final long tokenRenewInterval; - private final Text service; - private CertificateClient certClient; - private volatile boolean running; - private OzoneSecretKey currentKey; - private AtomicInteger currentKeyId; - private AtomicInteger tokenSequenceNumber; - - /** - * Create a secret manager. - * - * @param secureConf configuration. - * @param tokenMaxLifetime the maximum lifetime of the delegation tokens in - * milliseconds - * @param tokenRenewInterval how often the tokens must be renewed in - * milliseconds - * @param service name of service - * @param logger logger for the secret manager - */ - public OzoneSecretManager(SecurityConfig secureConf, long tokenMaxLifetime, - long tokenRenewInterval, Text service, Logger logger) { - this.securityConfig = secureConf; - this.tokenMaxLifetime = tokenMaxLifetime; - this.tokenRenewInterval = tokenRenewInterval; - currentKeyId = new AtomicInteger(); - tokenSequenceNumber = new AtomicInteger(); - this.service = service; - this.logger = logger; - } - - - /** - * Compute HMAC of the identifier using the private key and return the output - * as password. 
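
Despite the "HMAC" wording in the javadoc above, the createPassword() method whose body follows signs the identifier bytes with the private key via java.security.Signature, and verification later checks that signature against the matching certificate. A JDK-only sketch of that sign/verify round trip; SHA256withRSA is an assumption here, since the real algorithm is read from SecurityConfig.

    import java.nio.charset.StandardCharsets;
    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.Signature;

    public final class SignIdentifierSketch {

      public static void main(String[] args) throws Exception {
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair pair = gen.generateKeyPair();

        byte[] identifier = "owner/blockId/READ".getBytes(StandardCharsets.UTF_8);

        // "Password" creation: sign the serialized identifier with the private key.
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(pair.getPrivate());
        signer.update(identifier);
        byte[] password = signer.sign();

        // Verification: the holder of the public key checks the signature.
        Signature verifier = Signature.getInstance("SHA256withRSA");
        verifier.initVerify(pair.getPublic());
        verifier.update(identifier);
        System.out.println(verifier.verify(password)); // true
      }
    }
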
- * - * @param identifier - * @param privateKey - * @return byte[] signed byte array - */ - public byte[] createPassword(byte[] identifier, PrivateKey privateKey) - throws OzoneSecurityException { - try { - Signature rsaSignature = Signature.getInstance( - getDefaultSignatureAlgorithm()); - rsaSignature.initSign(privateKey); - rsaSignature.update(identifier); - return rsaSignature.sign(); - } catch (InvalidKeyException | NoSuchAlgorithmException | - SignatureException ex) { - throw new OzoneSecurityException("Error while creating HMAC hash for " + - "token.", ex, OzoneSecurityException.ResultCodes - .SECRET_MANAGER_HMAC_ERROR); - } - } - - @Override - public byte[] createPassword(T identifier) { - if (logger.isDebugEnabled()) { - logger.debug("Creating password for identifier: {}, currentKey: {}", - formatTokenId(identifier), currentKey.getKeyId()); - } - byte[] password = null; - try { - password = createPassword(identifier.getBytes(), - currentKey.getPrivateKey()); - } catch (IOException ioe) { - logger.error("Could not store token {}!!", formatTokenId(identifier), - ioe); - } - return password; - } - - /** - * Renew a delegation token. - * - * @param token the token to renew - * @param renewer the full principal name of the user doing the renewal - * @return the new expiration time - * @throws InvalidToken if the token is invalid - * @throws AccessControlException if the user can't renew token - */ - public abstract long renewToken(Token token, String renewer) - throws IOException; - /** - * Cancel a token by removing it from store and cache. - * - * @return Identifier of the canceled token - * @throws InvalidToken for invalid token - * @throws AccessControlException if the user isn't allowed to cancel - */ - public abstract T cancelToken(Token token, String canceller) - throws IOException; - - public int incrementCurrentKeyId() { - return currentKeyId.incrementAndGet(); - } - - public int getDelegationTokenSeqNum() { - return tokenSequenceNumber.get(); - } - - public void setDelegationTokenSeqNum(int seqNum) { - tokenSequenceNumber.set(seqNum); - } - - public int incrementDelegationTokenSeqNum() { - return tokenSequenceNumber.incrementAndGet(); - } - - /** - * Update the current master key. This is called once by start method before - * tokenRemoverThread is created, - */ - private OzoneSecretKey updateCurrentKey(KeyPair keyPair) throws IOException { - logger.info("Updating the current master key for generating tokens"); - - // TODO: fix me based on the certificate expire time to set the key - // expire time. - int newCurrentId = incrementCurrentKeyId(); - OzoneSecretKey newKey = new OzoneSecretKey(newCurrentId, -1, - keyPair); - currentKey = newKey; - return currentKey; - } - - public String formatTokenId(T id) { - return "(" + id + ")"; - } - - /** - * Should be called before this object is used. - * - * @param client - * @throws IOException - */ - public synchronized void start(CertificateClient client) - throws IOException { - Preconditions.checkState(!isRunning()); - setCertClient(client); - updateCurrentKey(new KeyPair(certClient.getPublicKey(), - certClient.getPrivateKey())); - setIsRunning(true); - } - - /** - * Stops the OzoneDelegationTokenSecretManager. 
- * - * @throws IOException - */ - public synchronized void stop() throws IOException { - setIsRunning(false); - } - - public String getDefaultSignatureAlgorithm() { - return securityConfig.getSignatureAlgo(); - } - - public long getTokenMaxLifetime() { - return tokenMaxLifetime; - } - - public long getTokenRenewInterval() { - return tokenRenewInterval; - } - - public Text getService() { - return service; - } - - /** - * Is Secret Manager running. - * - * @return true if secret mgr is running - */ - public synchronized boolean isRunning() { - return running; - } - - public void setIsRunning(boolean val) { - running = val; - } - - public OzoneSecretKey getCurrentKey() { - return currentKey; - } - - public AtomicInteger getCurrentKeyId() { - return currentKeyId; - } - - public AtomicInteger getTokenSequenceNumber() { - return tokenSequenceNumber; - } - - public CertificateClient getCertClient() { - return certClient; - } - - public void setCertClient(CertificateClient client) { - this.certClient = client; - } -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java deleted file mode 100644 index 23c28d8d866..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -/** - * SecretStore for Ozone Master. - */ -public class OzoneSecretStore implements Closeable { - - private static final Logger LOG = LoggerFactory - .getLogger(OzoneSecretStore.class); - private OMMetadataManager omMetadataManager; - @Override - public void close() throws IOException { - if (omMetadataManager != null) { - try { - omMetadataManager.getDelegationTokenTable().close(); - } catch (Exception e) { - throw new IOException("Error while closing OzoneSecretStore.", e); - } - } - } - - - /** - * Support class to maintain state of OzoneSecretStore. - */ - public static class OzoneManagerSecretState { - private Map tokenState = new HashMap<>(); - public Map getTokenState() { - return tokenState; - } - } - - public OzoneSecretStore(OzoneConfiguration conf, - OMMetadataManager omMetadataManager) { - this.omMetadataManager = omMetadataManager; - } - - public OzoneManagerSecretState loadState() throws IOException { - OzoneManagerSecretState state = new OzoneManagerSecretState(); - int numTokens = loadTokens(state); - LOG.info("Loaded " + numTokens + " tokens"); - return state; - } - - public void storeToken(OzoneTokenIdentifier tokenId, long renewDate) - throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Storing token {}", tokenId.getSequenceNumber()); - } - - try { - omMetadataManager.getDelegationTokenTable().put(tokenId, renewDate); - } catch (IOException e) { - LOG.error("Unable to store token " + tokenId.toString(), e); - throw e; - } - } - - public void updateToken(OzoneTokenIdentifier tokenId, long renewDate) - throws IOException { - storeToken(tokenId, renewDate); - } - - public void removeToken(OzoneTokenIdentifier tokenId) throws IOException { - try { - omMetadataManager.getDelegationTokenTable().delete(tokenId); - } catch (IOException e) { - LOG.error("Unable to remove token {}", tokenId.toString(), e); - throw e; - } - } - - public int loadTokens(OzoneManagerSecretState state) throws IOException { - int loadedToken = 0; - try (TableIterator> iterator = - omMetadataManager.getDelegationTokenTable().iterator()){ - iterator.seekToFirst(); - while(iterator.hasNext()) { - KeyValue kv = iterator.next(); - state.tokenState.put(kv.getKey(), kv.getValue()); - loadedToken++; - } - } - return loadedToken; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java deleted file mode 100644 index d8a014be28c..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneSecurityException.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import java.io.IOException; - -/** - * Security exceptions thrown at Ozone layer. - */ -public class OzoneSecurityException extends IOException { - private final OzoneSecurityException.ResultCodes result; - - /** - * Constructs an {@code IOException} with {@code null} - * as its error detail message. - */ - public OzoneSecurityException(OzoneSecurityException.ResultCodes result) { - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - */ - public OzoneSecurityException(String message, - OzoneSecurityException.ResultCodes result) { - super(message); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified detail message - * and cause. - *

- *

Note that the detail message associated with {@code cause} is - * not automatically incorporated into this exception's detail - * message. - * - * @param message The detail message (which is saved for later retrieval by - * the - * {@link #getMessage()} method) - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public OzoneSecurityException(String message, Throwable cause, - OzoneSecurityException.ResultCodes result) { - super(message, cause); - this.result = result; - } - - /** - * Constructs an {@code IOException} with the specified cause and a - * detail message of {@code (cause==null ? null : cause.toString())} - * (which typically contains the class and detail message of {@code cause}). - * This constructor is useful for IO exceptions that are little more - * than wrappers for other throwables. - * - * @param cause The cause (which is saved for later retrieval by the {@link - * #getCause()} method). (A null value is permitted, and indicates that the - * cause is nonexistent or unknown.) - * @since 1.6 - */ - public OzoneSecurityException(Throwable cause, - OzoneSecurityException.ResultCodes result) { - super(cause); - this.result = result; - } - - /** - * Returns resultCode. - * @return ResultCode - */ - public OzoneSecurityException.ResultCodes getResult() { - return result; - } - - /** - * Error codes to make it easy to decode these exceptions. - */ - public enum ResultCodes { - OM_PUBLIC_PRIVATE_KEY_FILE_NOT_EXIST, - S3_SECRET_NOT_FOUND, - SECRET_MANAGER_HMAC_ERROR - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java deleted file mode 100644 index f5e114afff7..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/OzoneTokenIdentifier.java +++ /dev/null @@ -1,315 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security; - -import java.io.ByteArrayInputStream; -import java.io.DataInput; -import java.io.DataInputStream; -import java.io.DataOutput; -import java.io.IOException; -import java.util.Arrays; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type; -import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN; - -/** - * The token identifier for Ozone Master. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public class OzoneTokenIdentifier extends - AbstractDelegationTokenIdentifier { - - public final static Text KIND_NAME = new Text("OzoneToken"); - private String omCertSerialId; - private Type tokenType; - private String awsAccessId; - private String signature; - private String strToSign; - - /** - * Create an empty delegation token identifier. - */ - public OzoneTokenIdentifier() { - super(); - this.tokenType = Type.DELEGATION_TOKEN; - } - - /** - * Create a new ozone master delegation token identifier. - * - * @param owner the effective username of the token owner - * @param renewer the username of the renewer - * @param realUser the real username of the token owner - */ - public OzoneTokenIdentifier(Text owner, Text renewer, Text realUser) { - super(owner, renewer, realUser); - this.tokenType = Type.DELEGATION_TOKEN; - } - - /** - * {@inheritDoc} - */ - @Override - public Text getKind() { - return KIND_NAME; - } - - /** - * Overrides default implementation to write using Protobuf. - * - * @param out output stream - * @throws IOException - */ - @Override - public void write(DataOutput out) throws IOException { - OMTokenProto.Builder builder = OMTokenProto.newBuilder() - .setMaxDate(getMaxDate()) - .setType(getTokenType()) - .setOwner(getOwner().toString()) - .setRealUser(getRealUser().toString()) - .setRenewer(getRenewer().toString()) - .setIssueDate(getIssueDate()) - .setMaxDate(getMaxDate()) - .setSequenceNumber(getSequenceNumber()) - .setMasterKeyId(getMasterKeyId()); - - // Set s3 specific fields. - if (getTokenType().equals(S3TOKEN)) { - builder.setAccessKeyId(getAwsAccessId()) - .setSignature(getSignature()) - .setStrToSign(getStrToSign()); - } else { - builder.setOmCertSerialId(getOmCertSerialId()); - } - OMTokenProto token = builder.build(); - out.write(token.toByteArray()); - } - - /** - * Overrides default implementation to read using Protobuf. 
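
OzoneTokenIdentifier, deleted in this hunk, serializes itself through the OMTokenProto protobuf message rather than the Writable default. The sketch below shows the same write-to-bytes / parse-back pattern with plain DataOutput streams purely for illustration: the field set is abridged and protobuf is intentionally left out, so this is not the deleted wire format.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public final class IdentifierRoundTripSketch {

      // Abridged identifier: the real class also carries renewer, realUser,
      // max date, master key id and the OM certificate serial id.
      static final class Id {
        String owner;
        long issueDate;
        int sequenceNumber;

        byte[] getBytes() throws IOException {
          ByteArrayOutputStream buf = new ByteArrayOutputStream();
          try (DataOutputStream out = new DataOutputStream(buf)) {
            out.writeUTF(owner);
            out.writeLong(issueDate);
            out.writeInt(sequenceNumber);
          }
          return buf.toByteArray();
        }

        static Id readFrom(byte[] bytes) throws IOException {
          Id id = new Id();
          try (DataInputStream in =
              new DataInputStream(new ByteArrayInputStream(bytes))) {
            id.owner = in.readUTF();
            id.issueDate = in.readLong();
            id.sequenceNumber = in.readInt();
          }
          return id;
        }
      }

      public static void main(String[] args) throws IOException {
        Id id = new Id();
        id.owner = "user1";
        id.issueDate = System.currentTimeMillis();
        id.sequenceNumber = 42;

        Id copy = Id.readFrom(id.getBytes());
        System.out.println(copy.owner + " / seq " + copy.sequenceNumber);
      }
    }
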
- * - * @param in input stream - * @throws IOException - */ - @Override - public void readFields(DataInput in) throws IOException { - OMTokenProto token = OMTokenProto.parseFrom((DataInputStream) in); - setTokenType(token.getType()); - setMaxDate(token.getMaxDate()); - setOwner(new Text(token.getOwner())); - setRealUser(new Text(token.getRealUser())); - setRenewer(new Text(token.getRenewer())); - setIssueDate(token.getIssueDate()); - setMaxDate(token.getMaxDate()); - setSequenceNumber(token.getSequenceNumber()); - setMasterKeyId(token.getMasterKeyId()); - setOmCertSerialId(token.getOmCertSerialId()); - - // Set s3 specific fields. - if (getTokenType().equals(S3TOKEN)) { - setAwsAccessId(token.getAccessKeyId()); - setSignature(token.getSignature()); - setStrToSign(token.getStrToSign()); - } - } - - /** - * Reads protobuf encoded input stream to construct {@link - * OzoneTokenIdentifier}. - */ - public static OzoneTokenIdentifier readProtoBuf(DataInput in) - throws IOException { - OMTokenProto token = OMTokenProto.parseFrom((DataInputStream) in); - OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); - identifier.setTokenType(token.getType()); - identifier.setMaxDate(token.getMaxDate()); - - // Set type specific fields. - if (token.getType().equals(S3TOKEN)) { - identifier.setSignature(token.getSignature()); - identifier.setStrToSign(token.getStrToSign()); - identifier.setAwsAccessId(token.getAccessKeyId()); - } else { - identifier.setRenewer(new Text(token.getRenewer())); - identifier.setOwner(new Text(token.getOwner())); - identifier.setRealUser(new Text(token.getRealUser())); - identifier.setIssueDate(token.getIssueDate()); - identifier.setSequenceNumber(token.getSequenceNumber()); - identifier.setMasterKeyId(token.getMasterKeyId()); - } - identifier.setOmCertSerialId(token.getOmCertSerialId()); - return identifier; - } - - /** - * Reads protobuf encoded input stream to construct {@link - * OzoneTokenIdentifier}. - */ - public static OzoneTokenIdentifier readProtoBuf(byte[] identifier) - throws IOException { - DataInputStream in = new DataInputStream(new ByteArrayInputStream( - identifier)); - return readProtoBuf(in); - } - - /** - * Creates new instance. - */ - public static OzoneTokenIdentifier newInstance() { - return new OzoneTokenIdentifier(); - } - - /** - * Creates new instance. - */ - public static OzoneTokenIdentifier newInstance(Text owner, Text renewer, - Text realUser) { - return new OzoneTokenIdentifier(owner, renewer, realUser); - } - - @Override - public int hashCode() { - return super.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (!(obj instanceof OzoneTokenIdentifier)) { - return false; - } - OzoneTokenIdentifier that = (OzoneTokenIdentifier) obj; - return new EqualsBuilder() - .append(getOmCertSerialId(), that.getOmCertSerialId()) - .append(getMaxDate(), that.getMaxDate()) - .append(getIssueDate(), that.getIssueDate()) - .append(getMasterKeyId(), that.getMasterKeyId()) - .append(getOwner(), that.getOwner()) - .append(getRealUser(), that.getRealUser()) - .append(getRenewer(), that.getRenewer()) - .append(getKind(), that.getKind()) - .append(getSequenceNumber(), that.getSequenceNumber()) - .build(); - } - - /** - * Class to encapsulate a token's renew date and password. 
- */ - @InterfaceStability.Evolving - public static class TokenInfo { - - private long renewDate; - private byte[] password; - private String trackingId; - - public TokenInfo(long renewDate, byte[] password) { - this(renewDate, password, null); - } - - public TokenInfo(long renewDate, byte[] password, - String trackingId) { - this.renewDate = renewDate; - this.password = Arrays.copyOf(password, password.length); - this.trackingId = trackingId; - } - - /** - * returns renew date. - */ - public long getRenewDate() { - return renewDate; - } - - /** - * returns password. - */ - byte[] getPassword() { - return password; - } - - /** - * returns tracking id. - */ - public String getTrackingId() { - return trackingId; - } - } - - public String getOmCertSerialId() { - return omCertSerialId; - } - - public void setOmCertSerialId(String omCertSerialId) { - this.omCertSerialId = omCertSerialId; - } - - public Type getTokenType() { - return tokenType; - } - - public void setTokenType(Type tokenType) { - this.tokenType = tokenType; - } - - public String getAwsAccessId() { - return awsAccessId; - } - - public void setAwsAccessId(String awsAccessId) { - this.awsAccessId = awsAccessId; - } - - public String getSignature() { - return signature; - } - - public void setSignature(String signature) { - this.signature = signature; - } - - public String getStrToSign() { - return strToSign; - } - - public void setStrToSign(String strToSign) { - this.strToSign = strToSign; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append(getKind()) - .append(" owner=").append(getOwner()) - .append(", renewer=").append(getRenewer()) - .append(", realUser=").append(getRealUser()) - .append(", issueDate=").append(getIssueDate()) - .append(", maxDate=").append(getMaxDate()) - .append(", sequenceNumber=").append(getSequenceNumber()) - .append(", masterKeyId=").append(getMasterKeyId()) - .append(", strToSign=").append(getStrToSign()) - .append(", signature=").append(getSignature()) - .append(", awsAccessKeyId=").append(getAwsAccessId()); - return buffer.toString(); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java deleted file mode 100644 index d8a2660b0e1..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IAccessAuthorizer.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.exceptions.OMException; - -import java.util.BitSet; - -/** - * Public API for Ozone ACLs. Security providers providing support for Ozone - * ACLs should implement this. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"}) -@InterfaceStability.Evolving -public interface IAccessAuthorizer { - - /** - * Check access for given ozoneObject. - * - * @param ozoneObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @throws org.apache.hadoop.ozone.om.exceptions.OMException - * @return true if user has access else false. - */ - boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) - throws OMException; - - /** - * ACL rights. - */ - enum ACLType { - READ, - WRITE, - CREATE, - LIST, - DELETE, - READ_ACL, - WRITE_ACL, - ALL, - NONE; - private static int length = ACLType.values().length; - private static ACLType[] vals = ACLType.values(); - - public static int getNoOfAcls() { - return length; - } - - public static ACLType getAclTypeFromOrdinal(int ordinal) { - if (ordinal > length - 1 && ordinal > -1) { - throw new IllegalArgumentException("Ordinal greater than array lentgh" + - ". ordinal:" + ordinal); - } - return vals[ordinal]; - } - - /** - * Returns the ACL rights based on passed in String. - * - * @param type ACL right string - * @return ACLType - */ - public static ACLType getACLRight(String type) { - if (type == null || type.isEmpty()) { - throw new IllegalArgumentException("ACL right cannot be empty"); - } - - switch (type) { - case OzoneConsts.OZONE_ACL_READ: - return ACLType.READ; - case OzoneConsts.OZONE_ACL_WRITE: - return ACLType.WRITE; - case OzoneConsts.OZONE_ACL_CREATE: - return ACLType.CREATE; - case OzoneConsts.OZONE_ACL_DELETE: - return ACLType.DELETE; - case OzoneConsts.OZONE_ACL_LIST: - return ACLType.LIST; - case OzoneConsts.OZONE_ACL_READ_ACL: - return ACLType.READ_ACL; - case OzoneConsts.OZONE_ACL_WRITE_ACL: - return ACLType.WRITE_ACL; - case OzoneConsts.OZONE_ACL_ALL: - return ACLType.ALL; - case OzoneConsts.OZONE_ACL_NONE: - return ACLType.NONE; - default: - throw new IllegalArgumentException("[" + type + "] ACL right is not " + - "recognized"); - } - - } - - /** - * Returns String representation of ACL rights. - * - * @param acls ACLType - * @return String representation of acl - */ - public static String getACLString(BitSet acls) { - StringBuffer sb = new StringBuffer(); - acls.stream().forEach(acl -> { - sb.append(getAclString(ACLType.values()[acl])); - }); - return sb.toString(); - } - - public static String getAclString(ACLType acl) { - switch (acl) { - case READ: - return OzoneConsts.OZONE_ACL_READ; - case WRITE: - return OzoneConsts.OZONE_ACL_WRITE; - case CREATE: - return OzoneConsts.OZONE_ACL_CREATE; - case DELETE: - return OzoneConsts.OZONE_ACL_DELETE; - case LIST: - return OzoneConsts.OZONE_ACL_LIST; - case READ_ACL: - return OzoneConsts.OZONE_ACL_READ_ACL; - case WRITE_ACL: - return OzoneConsts.OZONE_ACL_WRITE_ACL; - case ALL: - return OzoneConsts.OZONE_ACL_ALL; - case NONE: - return OzoneConsts.OZONE_ACL_NONE; - default: - throw new IllegalArgumentException("ACL right is not recognized"); - } - } - - } - - /** - * Type of acl identity. 
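An illustrative sketch (not part of the deleted file) of the ACLType helpers defined just above: converting a right between its enum and string form, and packing several rights into the ordinal-indexed BitSet that getACLString(BitSet) expects.

import java.util.BitSet;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class AclTypeExample {
  public static void main(String[] args) {
    // Enum -> string -> enum round trip.
    String readStr = ACLType.getAclString(ACLType.READ);
    ACLType back = ACLType.getACLRight(readStr);
    System.out.println(back);                       // READ

    // Several rights encoded in a BitSet indexed by ordinal.
    BitSet acls = new BitSet(ACLType.getNoOfAcls());
    acls.set(ACLType.READ.ordinal());
    acls.set(ACLType.WRITE.ordinal());
    System.out.println(ACLType.getACLString(acls)); // e.g. "rw", per OzoneConsts
  }
}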
- */ - enum ACLIdentityType { - USER(OzoneConsts.OZONE_ACL_USER_TYPE), - GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE), - WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE), - ANONYMOUS(OzoneConsts.OZONE_ACL_ANONYMOUS_TYPE), - CLIENT_IP(OzoneConsts.OZONE_ACL_IP_TYPE); - - // TODO: Add support for acl checks based on CLIENT_IP. - - @Override - public String toString() { - return value; - } - /** - * String value for this Enum. - */ - private final String value; - - /** - * Init OzoneACLtypes enum. - * - * @param val String type for this enum. - */ - ACLIdentityType(String val) { - value = val; - } - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java deleted file mode 100644 index b300fcd914a..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/IOzoneObj.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security.acl; - -/** - * Marker interface for objects supported by Ozone. - * */ -public interface IOzoneObj { -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java deleted file mode 100644 index ae37bc87198..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAccessAuthorizer.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.ozone.om.exceptions.OMException; - -/** - * Default implementation for {@link IAccessAuthorizer}. 
- * */ -public class OzoneAccessAuthorizer implements IAccessAuthorizer { - - @Override - public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) - throws OMException { - return true; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java deleted file mode 100644 index b51af56a4bb..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.ConfigType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; - -/** - * Ozone ACL config pojo. - * */ -@ConfigGroup(prefix = "ozone.om") -public class OzoneAclConfig { - // OM Default user/group permissions - private ACLType userDefaultRights = ACLType.ALL; - private ACLType groupDefaultRights = ACLType.ALL; - - @Config(key = "user.rights", - defaultValue = "ALL", - type = ConfigType.STRING, - tags = {ConfigTag.OM, ConfigTag.SECURITY}, - description = "Default user permissions set for an object in " + - "OzoneManager." - ) - public void setUserDefaultRights(String userRights) { - if(userRights == null) { - userRights = "ALL"; - } - this.userDefaultRights = ACLType.valueOf(userRights); - } - - @Config(key = "group.rights", - defaultValue = "ALL", - type = ConfigType.STRING, - tags = {ConfigTag.OM, ConfigTag.SECURITY}, - description = "Default group permissions set for an object in " + - "OzoneManager." - ) - public void setGroupDefaultRights(String groupRights) { - if(groupRights == null) { - groupRights = "ALL"; - } - this.groupDefaultRights = ACLType.valueOf(groupRights); - } - - public ACLType getUserDefaultRights() { - return userDefaultRights; - } - - public ACLType getGroupDefaultRights() { - return groupDefaultRights; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java deleted file mode 100644 index 4a95e55ec72..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObj.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
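A short sketch (not part of the deleted file) of how the OzoneAclConfig POJO above behaves. The @ConfigGroup prefix "ozone.om" combined with the @Config keys corresponds to the properties ozone.om.user.rights and ozone.om.group.rights; the setters fall back to ALL when given null.

import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;

public final class AclConfigExample {
  public static void main(String[] args) {
    OzoneAclConfig aclConfig = new OzoneAclConfig();
    aclConfig.setUserDefaultRights("READ");  // any ACLType name
    aclConfig.setGroupDefaultRights(null);   // null falls back to "ALL"
    System.out.println(aclConfig.getUserDefaultRights());   // READ
    System.out.println(aclConfig.getGroupDefaultRights());  // ALL
  }
}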
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType; - -import java.util.LinkedHashMap; -import java.util.Map; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.StoreType.*; - -/** - * Class representing an unique ozone object. - * */ -public abstract class OzoneObj implements IOzoneObj { - - private final ResourceType resType; - - private final StoreType storeType; - - OzoneObj(ResourceType resType, StoreType storeType) { - - Preconditions.checkNotNull(resType); - Preconditions.checkNotNull(storeType); - this.resType = resType; - this.storeType = storeType; - } - - public static OzoneManagerProtocolProtos.OzoneObj toProtobuf(OzoneObj obj) { - return OzoneManagerProtocolProtos.OzoneObj.newBuilder() - .setResType(ObjectType.valueOf(obj.getResourceType().name())) - .setStoreType(valueOf(obj.getStoreType().name())) - .setPath(obj.getPath()).build(); - } - - public ResourceType getResourceType() { - return resType; - } - - @Override - public String toString() { - return "OzoneObj{" + - "resType=" + resType + - ", storeType=" + storeType + - ", path='" + getPath() + '\'' + - '}'; - } - - public StoreType getStoreType() { - return storeType; - } - - public abstract String getVolumeName(); - - public abstract String getBucketName(); - - public abstract String getKeyName(); - - /** - * Get PrefixName. - * A prefix name is like a key name under the bucket but - * are mainly used for ACL for now and persisted into a separate prefix table. - * - * @return prefix name. - */ - public abstract String getPrefixName(); - - /** - * Get full path of a key or prefix including volume and bucket. - * @return full path of a key or prefix. - */ - public abstract String getPath(); - - /** - * Ozone Objects supported for ACL. - */ - public enum ResourceType { - VOLUME(OzoneConsts.VOLUME), - BUCKET(OzoneConsts.BUCKET), - KEY(OzoneConsts.KEY), - PREFIX(OzoneConsts.PREFIX); - - /** - * String value for this Enum. - */ - private final String value; - - @Override - public String toString() { - return value; - } - - ResourceType(String resType) { - value = resType; - } - } - - /** - * Ozone Objects supported for ACL. - */ - public enum StoreType { - OZONE(OzoneConsts.OZONE), - S3(OzoneConsts.S3); - - /** - * String value for this Enum. 
- */ - private final String value; - - @Override - public String toString() { - return value; - } - - StoreType(String objType) { - value = objType; - } - } - - public Map toAuditMap() { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.RESOURCE_TYPE, this.getResourceType().value); - auditMap.put(OzoneConsts.STORAGE_TYPE, this.getStoreType().value); - auditMap.put(OzoneConsts.VOLUME, this.getVolumeName()); - auditMap.put(OzoneConsts.BUCKET, this.getBucketName()); - auditMap.put(OzoneConsts.KEY, this.getKeyName()); - return auditMap; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java deleted file mode 100644 index cbae18cb784..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; - -/** - * Class representing an ozone object. 
- * It can be a volume with non-null volumeName (bucketName=null & name=null) - * or a bucket with non-null volumeName and bucketName (name=null) - * or a key with non-null volumeName, bucketName and key name - * (via getKeyName) - * or a prefix with non-null volumeName, bucketName and prefix name - * (via getPrefixName) - */ -public final class OzoneObjInfo extends OzoneObj { - - private final String volumeName; - private final String bucketName; - private final String name; - - /** - * - * @param resType - * @param storeType - * @param volumeName - * @param bucketName - * @param name - keyName/PrefixName - */ - private OzoneObjInfo(ResourceType resType, StoreType storeType, - String volumeName, String bucketName, String name) { - super(resType, storeType); - this.volumeName = volumeName; - this.bucketName = bucketName; - this.name = name; - } - - @Override - public String getPath() { - switch (getResourceType()) { - case VOLUME: - return OZONE_URI_DELIMITER + getVolumeName(); - case BUCKET: - return OZONE_URI_DELIMITER + getVolumeName() - + OZONE_URI_DELIMITER + getBucketName(); - case KEY: - return OZONE_URI_DELIMITER + getVolumeName() - + OZONE_URI_DELIMITER + getBucketName() - + OZONE_URI_DELIMITER + getKeyName(); - case PREFIX: - return OZONE_URI_DELIMITER + getVolumeName() - + OZONE_URI_DELIMITER + getBucketName() - + OZONE_URI_DELIMITER + getPrefixName(); - default: - throw new IllegalArgumentException("Unknown resource " + - "type" + getResourceType()); - } - } - - @Override - public String getVolumeName() { - return volumeName; - } - - @Override - public String getBucketName() { - return bucketName; - } - - @Override - public String getKeyName() { - return name; - } - - @Override - public String getPrefixName() { - return name; - } - - - public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj - proto) { - Builder builder = new Builder() - .setResType(ResourceType.valueOf(proto.getResType().name())) - .setStoreType(StoreType.valueOf(proto.getStoreType().name())); - String[] tokens = StringUtils.split(proto.getPath(), - OZONE_URI_DELIMITER, 3); - if(tokens == null) { - throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); - } - // Set volume name. - switch (proto.getResType()) { - case VOLUME: - builder.setVolumeName(tokens[0]); - break; - case BUCKET: - if (tokens.length < 2) { - throw new IllegalArgumentException("Unexpected argument for " + - "Ozone bucket. Path:" + proto.getPath()); - } - builder.setVolumeName(tokens[0]); - builder.setBucketName(tokens[1]); - break; - case KEY: - if (tokens.length < 3) { - throw new IllegalArgumentException("Unexpected argument for " + - "Ozone key. Path:" + proto.getPath()); - } - builder.setVolumeName(tokens[0]); - builder.setBucketName(tokens[1]); - builder.setKeyName(tokens[2]); - break; - case PREFIX: - if (tokens.length < 3) { - throw new IllegalArgumentException("Unexpected argument for " + - "Ozone Prefix. Path:" + proto.getPath()); - } - builder.setVolumeName(tokens[0]); - builder.setBucketName(tokens[1]); - builder.setPrefixName(tokens[2]); - break; - default: - throw new IllegalArgumentException("Unexpected type for " + - "Ozone key. Type:" + proto.getResType()); - } - return builder.build(); - } - - /** - * Inner builder class. 
- */ - public static class Builder { - - private OzoneObj.ResourceType resType; - private OzoneObj.StoreType storeType; - private String volumeName; - private String bucketName; - private String name; - - public static Builder newBuilder() { - return new Builder(); - } - - public static Builder fromKeyArgs(OmKeyArgs args) { - return new Builder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setResType(ResourceType.KEY); - } - - public Builder setResType(OzoneObj.ResourceType res) { - this.resType = res; - return this; - } - - public Builder setStoreType(OzoneObj.StoreType store) { - this.storeType = store; - return this; - } - - public Builder setVolumeName(String volume) { - this.volumeName = volume; - return this; - } - - public Builder setBucketName(String bucket) { - this.bucketName = bucket; - return this; - } - - public Builder setKeyName(String key) { - this.name = key; - return this; - } - - public Builder setPrefixName(String prefix) { - this.name = prefix; - return this; - } - - public OzoneObjInfo build() { - return new OzoneObjInfo(resType, storeType, volumeName, bucketName, name); - } - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java deleted file mode 100644 index 329582721e0..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/RequestContext.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; - -import java.net.InetAddress; - -/** - * This class encapsulates information required for Ozone ACLs. - * */ -public class RequestContext { - private final String host; - private final InetAddress ip; - private final UserGroupInformation clientUgi; - private final String serviceId; - private final ACLIdentityType aclType; - private final ACLType aclRights; - - public RequestContext(String host, InetAddress ip, - UserGroupInformation clientUgi, String serviceId, - ACLIdentityType aclType, ACLType aclRights) { - this.host = host; - this.ip = ip; - this.clientUgi = clientUgi; - this.serviceId = serviceId; - this.aclType = aclType; - this.aclRights = aclRights; - } - - /** - * Builder class for @{@link RequestContext}. 
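An illustrative sketch (not part of the deleted file) of the OzoneObjInfo Builder shown above: building a key-scoped object, reading its path, and round-tripping it through the protobuf form defined in OzoneManagerProtocol.proto.

import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

public final class ObjInfoExample {
  public static void main(String[] args) {
    OzoneObjInfo key = OzoneObjInfo.Builder.newBuilder()
        .setResType(OzoneObj.ResourceType.KEY)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .setVolumeName("vol1")
        .setBucketName("bucket1")
        .setKeyName("dir1/file1")
        .build();

    System.out.println(key.getPath());  // /vol1/bucket1/dir1/file1

    // toProtobuf stores the path; fromProtobuf splits it back into volume,
    // bucket, and key (at most three components).
    System.out.println(
        OzoneObjInfo.fromProtobuf(OzoneObj.toProtobuf(key)).getPath());
  }
}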
- */ - public static class Builder { - private String host; - private InetAddress ip; - private UserGroupInformation clientUgi; - private String serviceId; - private IAccessAuthorizer.ACLIdentityType aclType; - private IAccessAuthorizer.ACLType aclRights; - - public Builder setHost(String bHost) { - this.host = bHost; - return this; - } - - public Builder setIp(InetAddress cIp) { - this.ip = cIp; - return this; - } - - public Builder setClientUgi(UserGroupInformation cUgi) { - this.clientUgi = cUgi; - return this; - } - - public Builder setServiceId(String sId) { - this.serviceId = sId; - return this; - } - - public Builder setAclType(ACLIdentityType acl) { - this.aclType = acl; - return this; - } - - public Builder setAclRights(ACLType aclRight) { - this.aclRights = aclRight; - return this; - } - - public RequestContext build() { - return new RequestContext(host, ip, clientUgi, serviceId, aclType, - aclRights); - } - } - - public static Builder newBuilder() { - return new Builder(); - } - - public String getHost() { - return host; - } - - public InetAddress getIp() { - return ip; - } - - public UserGroupInformation getClientUgi() { - return clientUgi; - } - - public String getServiceId() { - return serviceId; - } - - public ACLIdentityType getAclType() { - return aclType; - } - - public ACLType getAclRights() { - return aclRights; - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java deleted file mode 100644 index 5c572ef1c76..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security.acl; - -/** - * Classes related to ozone Ozone ACL. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java deleted file mode 100644 index 457f891a5d2..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
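A minimal sketch (not part of the deleted file) wiring RequestContext and IAccessAuthorizer together. A real authorizer would inspect the Ozone object and the caller's identity; this toy one only permits READ and LIST, to show how the builder and checkAccess fit.

import java.net.InetAddress;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.ozone.security.acl.IOzoneObj;
import org.apache.hadoop.ozone.security.acl.RequestContext;
import org.apache.hadoop.security.UserGroupInformation;

public final class ReadOnlyAuthorizer implements IAccessAuthorizer {

  @Override
  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context)
      throws OMException {
    ACLType requested = context.getAclRights();
    return requested == ACLType.READ || requested == ACLType.LIST;
  }

  public static void main(String[] args) throws Exception {
    RequestContext ctx = RequestContext.newBuilder()
        .setHost("client.example.com")                       // hypothetical host
        .setIp(InetAddress.getLoopbackAddress())
        .setClientUgi(UserGroupInformation.getCurrentUser())
        .setAclType(ACLIdentityType.USER)
        .setAclRights(ACLType.WRITE)
        .build();
    System.out.println(new ReadOnlyAuthorizer().checkAccess(null, ctx)); // false
  }
}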
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; -/** - * Ozone security related classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java deleted file mode 100644 index 82398b74be0..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/BooleanBiFunction.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -/** - * Defines a functional interface having two inputs and returns boolean as - * output. - */ -@FunctionalInterface -public interface BooleanBiFunction { - boolean apply(LEFT left, RIGHT right); -} - diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java deleted file mode 100644 index 69c5791e2a7..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.util.ClassUtil; -import org.apache.hadoop.hdds.utils.HddsVersionInfo; -import org.apache.hadoop.hdds.utils.VersionInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class returns build information about Hadoop components. 
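A one-line sketch (not part of the deleted file) of BooleanBiFunction above as a two-argument predicate. It assumes the interface is generic in LEFT and RIGHT; the angle brackets do not survive in the rendered patch text.

import org.apache.hadoop.ozone.util.BooleanBiFunction;

public final class BiFunctionExample {
  public static void main(String[] args) {
    // A lambda implementing apply(LEFT, RIGHT) -> boolean.
    BooleanBiFunction<String, Integer> longerThan = (s, n) -> s.length() > n;
    System.out.println(longerThan.apply("ozone", 3));  // true
  }
}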
- */ -@InterfaceAudience.Public -@InterfaceStability.Stable -public final class OzoneVersionInfo { - private static final Logger LOG = - LoggerFactory.getLogger(OzoneVersionInfo.class); - - public static final VersionInfo OZONE_VERSION_INFO = - new VersionInfo("ozone"); - - private OzoneVersionInfo() {} - - public static void main(String[] args) { - System.out.println( - " ////////////// \n" + - " //////////////////// \n" + - " //////// //////////////// \n" + - " ////// //////////////// \n" + - " ///// //////////////// / \n" + - " ///// //////// /// \n" + - " //// //////// ///// \n" + - " ///// //////////////// \n" + - " ///// //////////////// // \n" + - " //// /////////////// ///// \n" + - " ///// /////////////// //// \n" + - " ///// ////// ///// \n" + - " ////// ////// ///// \n" + - " /////////// //////// \n" + - " ////// //////////// \n" + - " /// ////////// \n" + - " / " + OZONE_VERSION_INFO.getVersion() + "(" - + OZONE_VERSION_INFO.getRelease() + ")\n"); - System.out.println( - "Source code repository " + OZONE_VERSION_INFO.getUrl() + " -r " + - OZONE_VERSION_INFO.getRevision()); - System.out.println("Compiled by " + OZONE_VERSION_INFO.getUser() + " on " - + OZONE_VERSION_INFO.getDate()); - System.out.println( - "Compiled with protoc " + OZONE_VERSION_INFO.getProtocVersion()); - System.out.println( - "From source with checksum " + OZONE_VERSION_INFO.getSrcChecksum() - + "\n"); - LOG.debug("This command was run using " + - ClassUtil.findContainingJar(OzoneVersionInfo.class)); - HddsVersionInfo.main(args); - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java deleted file mode 100644 index 3009c9a4e8c..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixNode.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.util; - -import java.util.HashMap; - -/** - * Wrapper class for Radix tree node representing Ozone prefix path segment - * separated by "/". - */ -public class RadixNode { - - public RadixNode(String name) { - this.name = name; - this.children = new HashMap<>(); - } - - public String getName() { - return name; - } - - public boolean hasChildren() { - return children.isEmpty(); - } - - public HashMap getChildren() { - return children; - } - - public void setValue(T v) { - this.value = v; - } - - public T getValue() { - return value; - } - - private HashMap children; - - private String name; - - // TODO: k/v pairs for more metadata as needed - private T value; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java deleted file mode 100644 index 597f58db3fd..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java +++ /dev/null @@ -1,220 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.util; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.util.ArrayList; -import java.util.HashMap; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.List; - -/** - * Wrapper class for handling Ozone prefix path lookup of ACL APIs - * with radix tree. - */ -public class RadixTree { - - /** - * create a empty radix tree with root only. - */ - public RadixTree() { - root = new RadixNode(PATH_DELIMITER); - } - - /** - * If the Radix tree contains root only. - * @return true if the radix tree contains root only. - */ - public boolean isEmpty() { - return root.hasChildren(); - } - - /** - * Insert prefix tree node without value, value can be ACL or other metadata - * of the prefix path. - * @param path - */ - public void insert(String path) { - insert(path, null); - } - - /** - * Insert prefix tree node with value, value can be ACL or other metadata - * of the prefix path. - * @param path - * @param val - */ - public void insert(String path, T val) { - // all prefix path inserted should end with "/" - RadixNode n = root; - Path p = Paths.get(path); - for (int level = 0; level < p.getNameCount(); level++) { - HashMap child = n.getChildren(); - String component = p.getName(level).toString(); - if (child.containsKey(component)) { - n = child.get(component); - } else { - RadixNode tmp = new RadixNode(component); - child.put(component, tmp); - n = tmp; - } - } - if (val != null) { - n.setValue(val); - } - } - - /** - * Get the last node in the exact prefix path that matches in the tree. - * @param path - prefix path - * @return last node in the prefix tree or null if non exact prefix matchl - */ - public RadixNode getLastNodeInPrefixPath(String path) { - List> lpp = getLongestPrefixPath(path); - Path p = Paths.get(path); - if (lpp.size() != p.getNameCount() + 1) { - return null; - } else { - return lpp.get(p.getNameCount()); - } - } - - /** - * Remove prefix path. - * @param path - */ - public void removePrefixPath(String path) { - Path p = Paths.get(path); - removePrefixPathInternal(root, p, 0); - } - - /** - * Recursively remove non-overlapped part of the prefix path from radix tree. - * @param current current radix tree node. - * @param path prefix path to be removed. - * @param level current recursive level. - * @return true if current radix node can be removed. - * (not overlapped with other path), - * false otherwise. - */ - private boolean removePrefixPathInternal(RadixNode current, - Path path, int level) { - // last component is processed - if (level == path.getNameCount()) { - return current.hasChildren(); - } - - // not last component, recur for next component - String name = path.getName(level).toString(); - RadixNode node = current.getChildren().get(name); - if (node == null) { - return false; - } - - if (removePrefixPathInternal(node, path, level+1)) { - current.getChildren().remove(name); - return current.hasChildren(); - } - return false; - } - - /** - * Get the longest prefix path. - * @param path - prefix path. - * @return longest prefix path as list of RadixNode. 
- */ - public List> getLongestPrefixPath(String path) { - RadixNode n = root; - Path p = Paths.get(path); - int level = 0; - List> result = new ArrayList<>(); - result.add(root); - while (level < p.getNameCount()) { - HashMap children = n.getChildren(); - if (children.isEmpty()) { - break; - } - String component = p.getName(level).toString(); - if (children.containsKey(component)) { - n = children.get(component); - result.add(n); - level++; - } else { - break; - } - } - return result; - } - - @VisibleForTesting - /** - * Convert radix path to string format for output. - * @param path - radix path represented by list of radix nodes. - * @return radix path as string separated by "/". - * Note: the path will always be normalized with and ending "/". - */ - public static String radixPathToString(List> path) { - StringBuilder sb = new StringBuilder(); - for (RadixNode n : path) { - sb.append(n.getName()); - sb.append(n.getName().equals(PATH_DELIMITER) ? "" : PATH_DELIMITER); - } - return sb.toString(); - } - - /** - * Get the longest prefix path. - * @param path - prefix path. - * @return longest prefix path as String separated by "/". - */ - public String getLongestPrefix(String path) { - RadixNode n = root; - Path p = Paths.get(path); - int level = 0; - while (level < p.getNameCount()) { - HashMap children = n.getChildren(); - if (children.isEmpty()) { - break; - } - String component = p.getName(level).toString(); - if (children.containsKey(component)) { - n = children.get(component); - level++; - } else { - break; - } - } - - if (level >= 1) { - Path longestMatch = - Paths.get(root.getName()).resolve(p.subpath(0, level)); - String ret = longestMatch.toString(); - return path.endsWith("/") ? ret + "/" : ret; - } else { - return root.getName(); - } - } - - // root of a radix tree has a name of "/" and may optionally has it value. - private RadixNode root; - - private final static String PATH_DELIMITER = OzoneConsts.OZONE_URI_DELIMITER; -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java deleted file mode 100644 index 7bc89c17b67..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
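A usage sketch (not part of the deleted file) for the RadixTree prefix lookup above. The value type parameter is assumed (written here as RadixTree<String>), since the angle brackets are lost in the rendered patch; the method calls themselves are exactly those shown.

import org.apache.hadoop.ozone.util.RadixTree;

public final class RadixTreeExample {
  public static void main(String[] args) {
    RadixTree<String> tree = new RadixTree<>();
    tree.insert("/vol1/bucket1/dir1/", "acl-on-dir1");
    tree.insert("/vol1/bucket1/dir1/dir2/");

    // Longest inserted prefix covering the given path.
    System.out.println(tree.getLongestPrefix("/vol1/bucket1/dir1/file1"));
    // -> /vol1/bucket1/dir1

    // Exact-prefix lookup returns the last node (and its value), or null.
    System.out.println(
        tree.getLastNodeInPrefixPath("/vol1/bucket1/dir1/").getValue());
    // -> acl-on-dir1
  }
}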

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.util; - -/** - * Ozone utilities. - */ \ No newline at end of file diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java deleted file mode 100644 index e146d31afd5..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.utils; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.charset.Charset; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Locale; -import java.util.TimeZone; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.ozone.OzoneConsts; - -import com.google.common.base.Preconditions; -import org.apache.ratis.util.TimeDuration; - -/** - * Set of Utility functions used in ozone. - */ -@InterfaceAudience.Private -public final class OzoneUtils { - - public static final String ENCODING_NAME = "UTF-8"; - public static final Charset ENCODING = Charset.forName(ENCODING_NAME); - - private OzoneUtils() { - // Never constructed - } - - /** - * Date format that used in ozone. Here the format is thread safe to use. - */ - private static final ThreadLocal DATE_FORMAT = - new ThreadLocal() { - @Override - protected SimpleDateFormat initialValue() { - SimpleDateFormat format = new SimpleDateFormat( - OzoneConsts.OZONE_DATE_FORMAT, Locale.US); - format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE)); - - return format; - } - }; - - /** - * Verifies that max key length is a valid value. 
- * - * @param length - * The max key length to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyMaxKeyLength(String length) - throws IllegalArgumentException { - int maxKey = 0; - try { - maxKey = Integer.parseInt(length); - } catch (NumberFormatException nfe) { - throw new IllegalArgumentException( - "Invalid max key length, the vaule should be digital."); - } - - if (maxKey <= 0) { - throw new IllegalArgumentException( - "Invalid max key length, the vaule should be a positive number."); - } - } - - /** - * Returns a random Request ID. - * - * Request ID is returned to the client as well as flows through the system - * facilitating debugging on why a certain request failed. - * - * @return String random request ID - */ - public static String getRequestID() { - return UUID.randomUUID().toString(); - } - - /** - * Return host name if possible. - * - * @return Host Name or localhost - */ - public static String getHostName() { - String host = "localhost"; - try { - host = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - // Ignore the error - } - return host; - } - - /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time - */ - public static String formatTime(long millis) { - return DATE_FORMAT.get().format(millis); - } - - /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds - */ - public static long formatDate(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return DATE_FORMAT.get().parse(date).getTime(); - } - - public static boolean isOzoneEnabled(Configuration conf) { - return HddsUtils.isHddsEnabled(conf); - } - - - /** - * verifies that bucket name / volume name is a valid DNS name. - * - * @param resName Bucket or volume Name to be validated - * - * @throws IllegalArgumentException - */ - public static void verifyResourceName(String resName) - throws IllegalArgumentException { - HddsClientUtils.verifyResourceName(resName); - } - - /** - * Return the TimeDuration configured for the given key. If not configured, - * return the default value. - */ - public static TimeDuration getTimeDuration(Configuration conf, String key, - TimeDuration defaultValue) { - TimeUnit defaultTimeUnit = defaultValue.getUnit(); - long timeDurationInDefaultUnit = conf.getTimeDuration(key, - defaultValue.getDuration(), defaultTimeUnit); - return TimeDuration.valueOf(timeDurationInDefaultUnit, defaultTimeUnit); - } - - /** - * Return the time configured for the given key in milliseconds. - */ - public static long getTimeDurationInMS(Configuration conf, String key, - TimeDuration defaultValue) { - return getTimeDuration(conf, key, defaultValue) - .toLong(TimeUnit.MILLISECONDS); - } - -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java deleted file mode 100644 index 178157fcacc..00000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
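A short sketch (not part of the deleted file) of the OzoneUtils helpers above that callers touch most: request IDs, the thread-safe Ozone date format, and DNS-style resource-name validation.

import org.apache.hadoop.ozone.web.utils.OzoneUtils;

public final class OzoneUtilsExample {
  public static void main(String[] args) throws Exception {
    // Random request id used to correlate one request through the system.
    String requestId = OzoneUtils.getRequestID();

    // Millis -> ozone date string -> millis round trip (formatDate may throw
    // ParseException for malformed input).
    long now = System.currentTimeMillis();
    String formatted = OzoneUtils.formatTime(now);
    long parsed = OzoneUtils.formatDate(formatted);

    // Bucket and volume names must be valid DNS names.
    OzoneUtils.verifyResourceName("bucket-1");

    System.out.println(requestId + " " + formatted + " " + (parsed <= now));
  }
}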
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.utils; diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto deleted file mode 100644 index d82fdf2a8e2..00000000000 --- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto +++ /dev/null @@ -1,1107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and unstable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *unstable* .proto interface. - */ - -syntax = "proto2"; -option java_package = "org.apache.hadoop.ozone.protocol.proto"; -option java_outer_classname = "OzoneManagerProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.ozone; - -/** -This file contains the protocol to communicate with -Ozone Manager. Ozone Manager manages the namespace for ozone. -This is similar to Namenode for Ozone. 
-*/ - -import "hdds.proto"; -import "Security.proto"; -import "FSProtos.proto"; - -enum Type { - CreateVolume = 11; - SetVolumeProperty = 12; - CheckVolumeAccess = 13; - InfoVolume = 14; - DeleteVolume = 15; - ListVolume = 16; - - CreateBucket = 21; - InfoBucket = 22; - SetBucketProperty = 23; - DeleteBucket = 24; - ListBuckets = 25; - - CreateKey = 31; - LookupKey = 32; - RenameKey = 33; - DeleteKey = 34; - ListKeys = 35; - CommitKey = 36; - AllocateBlock = 37; - - CreateS3Bucket = 41; - DeleteS3Bucket = 42; - InfoS3Bucket = 43; - ListS3Buckets = 44; - InitiateMultiPartUpload = 45; - CommitMultiPartUpload = 46; - CompleteMultiPartUpload = 47; - AbortMultiPartUpload = 48; - GetS3Secret = 49; - ListMultiPartUploadParts = 50; - - ServiceList = 51; - DBUpdates = 53; - - GetDelegationToken = 61; - RenewDelegationToken = 62; - CancelDelegationToken = 63; - - GetFileStatus = 70; - CreateDirectory = 71; - CreateFile = 72; - LookupFile = 73; - ListStatus = 74; - AddAcl = 75; - RemoveAcl = 76; - SetAcl = 77; - GetAcl = 78; - - PurgeKeys = 81; - - ListMultipartUploads = 82; -} - -message OMRequest { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - required string clientId = 3; - - optional UserInfo userInfo = 4; - - - optional CreateVolumeRequest createVolumeRequest = 11; - optional SetVolumePropertyRequest setVolumePropertyRequest = 12; - optional CheckVolumeAccessRequest checkVolumeAccessRequest = 13; - optional InfoVolumeRequest infoVolumeRequest = 14; - optional DeleteVolumeRequest deleteVolumeRequest = 15; - optional ListVolumeRequest listVolumeRequest = 16; - - optional CreateBucketRequest createBucketRequest = 21; - optional InfoBucketRequest infoBucketRequest = 22; - optional SetBucketPropertyRequest setBucketPropertyRequest = 23; - optional DeleteBucketRequest deleteBucketRequest = 24; - optional ListBucketsRequest listBucketsRequest = 25; - - optional CreateKeyRequest createKeyRequest = 31; - optional LookupKeyRequest lookupKeyRequest = 32; - optional RenameKeyRequest renameKeyRequest = 33; - optional DeleteKeyRequest deleteKeyRequest = 34; - optional ListKeysRequest listKeysRequest = 35; - optional CommitKeyRequest commitKeyRequest = 36; - optional AllocateBlockRequest allocateBlockRequest = 37; - - optional S3CreateBucketRequest createS3BucketRequest = 41; - optional S3DeleteBucketRequest deleteS3BucketRequest = 42; - optional S3BucketInfoRequest infoS3BucketRequest = 43; - optional S3ListBucketsRequest listS3BucketsRequest = 44; - optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45; - optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46; - optional MultipartUploadCompleteRequest completeMultiPartUploadRequest = 47; - optional MultipartUploadAbortRequest abortMultiPartUploadRequest = 48; - optional GetS3SecretRequest getS3SecretRequest = 49; - optional MultipartUploadListPartsRequest listMultipartUploadPartsRequest = 50; - - optional ServiceListRequest serviceListRequest = 51; - optional DBUpdatesRequest dbUpdatesRequest = 53; - - optional hadoop.common.GetDelegationTokenRequestProto getDelegationTokenRequest = 61; - optional hadoop.common.RenewDelegationTokenRequestProto renewDelegationTokenRequest= 62; - optional hadoop.common.CancelDelegationTokenRequestProto cancelDelegationTokenRequest = 63; - optional UpdateGetDelegationTokenRequest 
updateGetDelegationTokenRequest = 64; - optional UpdateRenewDelegationTokenRequest updatedRenewDelegationTokenRequest = 65; - - optional GetFileStatusRequest getFileStatusRequest = 70; - optional CreateDirectoryRequest createDirectoryRequest = 71; - optional CreateFileRequest createFileRequest = 72; - optional LookupFileRequest lookupFileRequest = 73; - optional ListStatusRequest listStatusRequest = 74; - optional AddAclRequest addAclRequest = 75; - optional RemoveAclRequest removeAclRequest = 76; - optional SetAclRequest setAclRequest = 77; - optional GetAclRequest getAclRequest = 78; - - optional PurgeKeysRequest purgeKeysRequest = 81; - - optional UpdateGetS3SecretRequest updateGetS3SecretRequest = 82; - optional ListMultipartUploadsRequest listMultipartUploadsRequest = 83; -} - -message OMResponse { - required Type cmdType = 1; // Type of the command - - // A string that identifies this command, we generate Trace ID in Ozone - // frontend and this allows us to trace that command all over ozone. - optional string traceID = 2; - - optional bool success = 3 [default=true]; - - optional string message = 4; - - required Status status = 5; - - optional string leaderOMNodeId = 6; - - optional CreateVolumeResponse createVolumeResponse = 11; - optional SetVolumePropertyResponse setVolumePropertyResponse = 12; - optional CheckVolumeAccessResponse checkVolumeAccessResponse = 13; - optional InfoVolumeResponse infoVolumeResponse = 14; - optional DeleteVolumeResponse deleteVolumeResponse = 15; - optional ListVolumeResponse listVolumeResponse = 16; - - optional CreateBucketResponse createBucketResponse = 21; - optional InfoBucketResponse infoBucketResponse = 22; - optional SetBucketPropertyResponse setBucketPropertyResponse = 23; - optional DeleteBucketResponse deleteBucketResponse = 24; - optional ListBucketsResponse listBucketsResponse = 25; - - optional CreateKeyResponse createKeyResponse = 31; - optional LookupKeyResponse lookupKeyResponse = 32; - optional RenameKeyResponse renameKeyResponse = 33; - optional DeleteKeyResponse deleteKeyResponse = 34; - optional ListKeysResponse listKeysResponse = 35; - optional CommitKeyResponse commitKeyResponse = 36; - optional AllocateBlockResponse allocateBlockResponse = 37; - - optional S3CreateBucketResponse createS3BucketResponse = 41; - optional S3DeleteBucketResponse deleteS3BucketResponse = 42; - optional S3BucketInfoResponse infoS3BucketResponse = 43; - optional S3ListBucketsResponse listS3BucketsResponse = 44; - optional MultipartInfoInitiateResponse initiateMultiPartUploadResponse = 45; - optional MultipartCommitUploadPartResponse commitMultiPartUploadResponse = 46; - optional MultipartUploadCompleteResponse completeMultiPartUploadResponse = 47; - optional MultipartUploadAbortResponse abortMultiPartUploadResponse = 48; - optional GetS3SecretResponse getS3SecretResponse = 49; - optional MultipartUploadListPartsResponse listMultipartUploadPartsResponse = 50; - - optional ServiceListResponse ServiceListResponse = 51; - optional DBUpdatesResponse dbUpdatesResponse = 52; - - optional GetDelegationTokenResponseProto getDelegationTokenResponse = 61; - optional RenewDelegationTokenResponseProto renewDelegationTokenResponse = 62; - optional CancelDelegationTokenResponseProto cancelDelegationTokenResponse = 63; - - optional GetFileStatusResponse getFileStatusResponse = 70; - optional CreateDirectoryResponse createDirectoryResponse = 71; - optional CreateFileResponse createFileResponse = 72; - optional LookupFileResponse lookupFileResponse = 73; - optional 
ListStatusResponse listStatusResponse = 74; - optional AddAclResponse addAclResponse = 75; - optional RemoveAclResponse removeAclResponse = 76; - optional SetAclResponse setAclResponse = 77; - optional GetAclResponse getAclResponse = 78; - - optional PurgeKeysResponse purgeKeysResponse = 81; - - optional ListMultipartUploadsResponse listMultipartUploadsResponse = 82; -} - -enum Status { - OK = 1; - VOLUME_NOT_UNIQUE = 2; - VOLUME_NOT_FOUND = 3; - VOLUME_NOT_EMPTY = 4; - VOLUME_ALREADY_EXISTS = 5; - USER_NOT_FOUND = 6; - USER_TOO_MANY_VOLUMES = 7; - BUCKET_NOT_FOUND = 8; - BUCKET_NOT_EMPTY = 9; - BUCKET_ALREADY_EXISTS = 10; - KEY_ALREADY_EXISTS = 11; - KEY_NOT_FOUND = 12; - INVALID_KEY_NAME = 13; - ACCESS_DENIED = 14; - INTERNAL_ERROR = 15; - KEY_ALLOCATION_ERROR = 16; - KEY_DELETION_ERROR = 17; - KEY_RENAME_ERROR = 18; - METADATA_ERROR = 19; - OM_NOT_INITIALIZED = 20; - SCM_VERSION_MISMATCH_ERROR = 21; - S3_BUCKET_NOT_FOUND = 22; - S3_BUCKET_ALREADY_EXISTS = 23; - - INITIATE_MULTIPART_UPLOAD_ERROR = 24; - MULTIPART_UPLOAD_PARTFILE_ERROR = 25; - NO_SUCH_MULTIPART_UPLOAD_ERROR = 26; - MISMATCH_MULTIPART_LIST = 27; - MISSING_UPLOAD_PARTS = 28; - COMPLETE_MULTIPART_UPLOAD_ERROR = 29; - ENTITY_TOO_SMALL = 30; - ABORT_MULTIPART_UPLOAD_FAILED = 31; - - S3_SECRET_NOT_FOUND = 32; - - INVALID_AUTH_METHOD = 33; - INVALID_TOKEN = 34; - TOKEN_EXPIRED = 35; - TOKEN_ERROR_OTHER = 36; - LIST_MULTIPART_UPLOAD_PARTS_FAILED = 37; - SCM_IN_SAFE_MODE = 38; - INVALID_REQUEST = 39; - - BUCKET_ENCRYPTION_KEY_NOT_FOUND = 40; - UNKNOWN_CIPHER_SUITE = 41; - INVALID_KMS_PROVIDER = 42; - TOKEN_CREATION_ERROR = 43; - - FILE_NOT_FOUND = 44; - DIRECTORY_NOT_FOUND = 45; - FILE_ALREADY_EXISTS = 46; - NOT_A_FILE = 47; - PERMISSION_DENIED = 48; - TIMEOUT = 49; - PREFIX_NOT_FOUND=50; - - S3_BUCKET_INVALID_LENGTH = 51; // s3 bucket invalid length. - - RATIS_ERROR = 52; - - INVALID_PATH_IN_ACL_REQUEST = 53; // Invalid path name in acl request. - - USER_MISMATCH = 54; // Error code when requested user name passed is - // different from remote user. -} - - -message VolumeInfo { - required string adminName = 1; - required string ownerName = 2; - required string volume = 3; - optional uint64 quotaInBytes = 4; - repeated hadoop.hdds.KeyValue metadata = 5; - repeated OzoneAclInfo volumeAcls = 6; - optional uint64 creationTime = 7; - optional uint64 objectID = 8; - optional uint64 updateID = 9; -} - -/** - User information which will be extracted during RPC context and used - during validating Acl. -*/ -message UserInfo { - optional string userName = 1; - optional string remoteAddress = 3; -} - -/** - This will be used during OM HA, once leader generates token sends this - request via ratis to persist to OM DB. This request will be internally used - by OM for replicating token across a quorum of OMs. -*/ -message UpdateGetDelegationTokenRequest { - required GetDelegationTokenResponseProto getDelegationTokenResponse = 1; -} - -/** - This will be used during OM HA, once leader renews token, sends this - request via ratis to persist to OM DB. This request will be internally used - by OM for replicating renewed token information across a quorum of OMs. 
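An illustrative sketch (not part of the deleted .proto) of how a client-side translator typically wraps a call using the Java classes protoc generates from this file. The builder and field names follow the standard protobuf-java mapping of the OMRequest, CreateVolumeRequest, and VolumeInfo messages above; the volume and user names are hypothetical.

import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;

public final class OmRequestExample {

  public static OMRequest buildCreateVolumeRequest() {
    // VolumeInfo's required fields: adminName, ownerName, volume.
    VolumeInfo volume = VolumeInfo.newBuilder()
        .setAdminName("hdfs")
        .setOwnerName("alice")
        .setVolume("vol1")
        .build();

    // OMRequest's required fields: cmdType and clientId; the payload goes in
    // the matching optional sub-request.
    return OMRequest.newBuilder()
        .setCmdType(Type.CreateVolume)
        .setClientId("client-" + java.util.UUID.randomUUID())
        .setCreateVolumeRequest(
            CreateVolumeRequest.newBuilder().setVolumeInfo(volume).build())
        .build();
  }

  public static void main(String[] args) {
    System.out.println(buildCreateVolumeRequest());
  }
}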
-*/ -message UpdateRenewDelegationTokenRequest { - required hadoop.common.RenewDelegationTokenRequestProto - renewDelegationTokenRequest = 1; - required RenewDelegationTokenResponseProto renewDelegationTokenResponse = 2; -} - -/** - Creates a volume -*/ -message CreateVolumeRequest { - required VolumeInfo volumeInfo = 1; -} - -message CreateVolumeResponse { - -} - -message UserVolumeInfo { - repeated string volumeNames = 1; - optional uint64 objectID = 2; - optional uint64 updateID = 3; -} - -/** - Changes the Volume Properties -- like ownership and quota for a volume. -*/ -message SetVolumePropertyRequest { - required string volumeName = 1; - optional string ownerName = 2; - optional uint64 quotaInBytes = 3; -} - -message SetVolumePropertyResponse { - -} - -/** - * Checks if the user has specified permissions for the volume - */ -message CheckVolumeAccessRequest { - required string volumeName = 1; - required OzoneAclInfo userAcl = 2; -} - -message CheckVolumeAccessResponse { - -} - - -/** - Returns information about a volume. -*/ - -message InfoVolumeRequest { - required string volumeName = 1; -} - -message InfoVolumeResponse { - optional VolumeInfo volumeInfo = 2; -} - -/** - Deletes an existing volume. -*/ -message DeleteVolumeRequest { - required string volumeName = 1; -} - -message DeleteVolumeResponse { - -} - - -/** - List Volumes -- List all volumes in the cluster or by user. -*/ - -message ListVolumeRequest { - enum Scope { - USER_VOLUMES = 1; // User volumes -- called by user - VOLUMES_BY_USER = 2; // User volumes - called by Admin - VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster - } - required Scope scope = 1; - optional string userName = 2; - optional string prefix = 3; - optional string prevKey = 4; - optional uint32 maxKeys = 5; -} - -message ListVolumeResponse { - repeated VolumeInfo volumeInfo = 2; -} - -message BucketInfo { - required string volumeName = 1; - required string bucketName = 2; - repeated OzoneAclInfo acls = 3; - required bool isVersionEnabled = 4 [default = false]; - required StorageTypeProto storageType = 5 [default = DISK]; - optional uint64 creationTime = 6; - repeated hadoop.hdds.KeyValue metadata = 7; - optional BucketEncryptionInfoProto beinfo = 8; -} - -enum StorageTypeProto { - DISK = 1; - SSD = 2; - ARCHIVE = 3; - RAM_DISK = 4; -} - -/** - * Cipher suite. - */ -enum CipherSuiteProto { - UNKNOWN = 1; - AES_CTR_NOPADDING = 2; -} - -/** - * Crypto protocol version used to access encrypted files. - */ -enum CryptoProtocolVersionProto { - UNKNOWN_PROTOCOL_VERSION = 1; - ENCRYPTION_ZONES = 2; -} -/** - * Encryption information for bucket (bucket key) - */ -message BucketEncryptionInfoProto { - required string keyName = 1; - optional CipherSuiteProto suite = 2; - optional CryptoProtocolVersionProto cryptoProtocolVersion = 3; -} - -/** - * Encryption information for a file. 
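
BucketInfo above ties the optional BucketEncryptionInfoProto to a bucket record, alongside the storage-type and versioning flags. Purely as an illustration of how these removed messages compose, and assuming the protobuf-generated builders rather than anything from the deleted modules, a bucket with an encryption key could be assembled like this (the key name is a hypothetical example):

    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketEncryptionInfoProto;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CipherSuiteProto;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;

    // Sketch: only the fields marked 'required' above must be populated;
    // acls and metadata are repeated and may be left empty.
    final class EncryptedBucketExample {

      static BucketInfo encryptedBucket() {
        return BucketInfo.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("enc-bucket")
            .setIsVersionEnabled(false)
            .setStorageType(StorageTypeProto.DISK)
            .setBeinfo(BucketEncryptionInfoProto.newBuilder()
                .setKeyName("bucket-kms-key")   // hypothetical KMS key name
                .setSuite(CipherSuiteProto.AES_CTR_NOPADDING)
                .build())
            .build();
      }
    }
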
- */ -message FileEncryptionInfoProto { - required CipherSuiteProto suite = 1; - required CryptoProtocolVersionProto cryptoProtocolVersion = 2; - required bytes key = 3; - required bytes iv = 4; - required string keyName = 5; - required string ezKeyVersionName = 6; -} - -/** - * Encryption information for an individual - * file within an encryption zone - */ -message PerFileEncryptionInfoProto { - required bytes key = 1; - required bytes iv = 2; - required string ezKeyVersionName = 3; -} - -message DataEncryptionKeyProto { - required uint32 keyId = 1; - required bytes nonce = 3; - required bytes encryptionKey = 4; - required uint64 expiryDate = 5; - optional string encryptionAlgorithm = 6; -} - -message BucketArgs { - required string volumeName = 1; - required string bucketName = 2; - optional bool isVersionEnabled = 5; - optional StorageTypeProto storageType = 6; - repeated hadoop.hdds.KeyValue metadata = 7; -} - -message PrefixInfo { - required string name = 1; - repeated OzoneAclInfo acls = 2; - repeated hadoop.hdds.KeyValue metadata = 3; -} - -message OzoneObj { - enum ObjectType { - VOLUME = 1; - BUCKET = 2; - KEY = 3; - PREFIX = 4; - } - - enum StoreType { - OZONE = 1; - S3 = 2; - } - required ObjectType resType = 1; - required StoreType storeType = 2 [default = S3]; - required string path = 3; -} - -message OzoneAclInfo { - enum OzoneAclType { - USER = 1; - GROUP = 2; - WORLD = 3; - ANONYMOUS = 4; - CLIENT_IP = 5; - } - - enum OzoneAclScope { - ACCESS = 0; - DEFAULT = 1; - } - - required OzoneAclType type = 1; - required string name = 2; - required bytes rights = 3; - required OzoneAclScope aclScope = 4 [default = ACCESS]; -} - -message GetAclRequest { - required OzoneObj obj = 1; -} - -message GetAclResponse { - repeated OzoneAclInfo acls = 1; -} - -message AddAclRequest { - required OzoneObj obj = 1; - required OzoneAclInfo acl = 2; -} - -message AddAclResponse { - required bool response = 1; -} - -message RemoveAclRequest { - required OzoneObj obj = 1; - required OzoneAclInfo acl = 2; -} - -message RemoveAclResponse { - required bool response = 1; -} - -message SetAclRequest { - required OzoneObj obj = 1; - repeated OzoneAclInfo acl = 2; -} - -message SetAclResponse { - required bool response = 1; -} - -message CreateBucketRequest { - required BucketInfo bucketInfo = 1; -} - -message CreateBucketResponse { -} - -message InfoBucketRequest { - required string volumeName = 1; - required string bucketName = 2; -} - -message InfoBucketResponse { - optional BucketInfo bucketInfo = 2; -} - -message SetBucketPropertyRequest { - optional BucketArgs bucketArgs = 1; -} - -message SetBucketPropertyResponse { - -} - -message DeleteBucketRequest { - required string volumeName = 1; - required string bucketName = 2; -} - -message DeleteBucketResponse { - -} - -message ListBucketsRequest { - required string volumeName = 1; - optional string startKey = 2; - optional string prefix = 3; - optional int32 count = 4; -} - -message ListBucketsResponse { - - repeated BucketInfo bucketInfo = 2; -} - -message KeyArgs { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - optional uint64 dataSize = 4; - optional hadoop.hdds.ReplicationType type = 5; - optional hadoop.hdds.ReplicationFactor factor = 6; - repeated KeyLocation keyLocations = 7; - optional bool isMultipartKey = 8; - optional string multipartUploadID = 9; - optional uint32 multipartNumber = 10; - repeated hadoop.hdds.KeyValue metadata = 11; - repeated OzoneAclInfo acls = 12; - // This will be set 
when the request is received in pre-Execute. This - // value is used in setting creation/modification time depending on the - // request type. - optional uint64 modificationTime = 13; - optional bool sortDatanodes = 14; -} - -message KeyLocation { - required hadoop.hdds.BlockID blockID = 1; - required uint64 offset = 3; - required uint64 length = 4; - // indicated at which version this block gets created. - optional uint64 createVersion = 5; - optional hadoop.common.TokenProto token = 6; - // Walk around to include pipeline info for client read/write - // without talking to scm. - // NOTE: the pipeline info may change after pipeline close. - // So eventually, we will have to change back to call scm to - // get the up to date pipeline information. This will need o3fs - // provide not only a OM delegation token but also a SCM delegation token - optional hadoop.hdds.Pipeline pipeline = 7; -} - -message KeyLocationList { - optional uint64 version = 1; - repeated KeyLocation keyLocations = 2; - optional FileEncryptionInfoProto fileEncryptionInfo = 3; -} - -message KeyInfo { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - required uint64 dataSize = 4; - required hadoop.hdds.ReplicationType type = 5; - required hadoop.hdds.ReplicationFactor factor = 6; - repeated KeyLocationList keyLocationList = 7; - required uint64 creationTime = 8; - required uint64 modificationTime = 9; - optional uint64 latestVersion = 10; - repeated hadoop.hdds.KeyValue metadata = 11; - optional FileEncryptionInfoProto fileEncryptionInfo = 12; - repeated OzoneAclInfo acls = 13; -} - -message RepeatedKeyInfo { - repeated KeyInfo keyInfo = 1; -} - -message OzoneFileStatusProto { - required hadoop.fs.FileStatusProto status = 1; -} - -message GetFileStatusRequest { - required KeyArgs keyArgs = 1; -} - -message GetFileStatusResponse { - required OzoneFileStatusProto status = 1; -} - -message CreateDirectoryRequest { - required KeyArgs keyArgs = 1; -} - -message CreateDirectoryResponse { -} - -message CreateFileRequest { - required KeyArgs keyArgs = 1; - required bool isRecursive = 2; - required bool isOverwrite = 3; - // Set in OM HA during preExecute step. This way all OM's use same ID in - // OM HA. - optional uint64 clientID = 4; -} - -message CreateFileResponse { - - optional KeyInfo keyInfo = 1; - // clients' followup request may carry this ID for stateful operations - // (similar to a cookie). - optional uint64 ID = 2; - optional uint64 openVersion = 3; -} - -message LookupFileRequest { - required KeyArgs keyArgs = 1; -} - -message LookupFileResponse { - optional KeyInfo keyInfo = 1; -} - -message ListStatusRequest { - required KeyArgs keyArgs = 1; - required bool recursive = 2; - required string startKey = 3; - required uint64 numEntries = 4; -} - -message ListStatusResponse { - repeated OzoneFileStatusProto statuses = 1; -} - -message CreateKeyRequest { - required KeyArgs keyArgs = 1; - // Set in OM HA during preExecute step. This way all OM's use same ID in - // OM HA. - optional uint64 clientID = 2; -} - -message CreateKeyResponse { - optional KeyInfo keyInfo = 2; - // clients' followup request may carry this ID for stateful operations - // (similar to a cookie). - optional uint64 ID = 3; - optional uint64 openVersion = 4; -} - -message LookupKeyRequest { - required KeyArgs keyArgs = 1; -} - -message LookupKeyResponse { - optional KeyInfo keyInfo = 2; - // clients' followup request may carry this ID for stateful operations (similar - // to a cookie). 
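
The cookie-style ID mentioned in the comments above is handed back by CreateKeyResponse and later echoed as clientID when the key is committed (CommitKeyRequest, defined a little further below). A hedged sketch of that round trip using only messages from this file; the submit methods are placeholders for the caller's transport and are not APIs from the removed code:

    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

    // Sketch of the open -> write -> commit handshake implied by the messages above.
    final class KeyCommitFlow {

      void createAndCommit() {
        KeyArgs args = KeyArgs.newBuilder()
            .setVolumeName("vol1")
            .setBucketName("bucket1")
            .setKeyName("dir1/key1")
            .setDataSize(1024L)
            .build();

        CreateKeyResponse created = submitCreateKey(
            CreateKeyRequest.newBuilder().setKeyArgs(args).build());

        // The ID acts like a cookie identifying the open key on the OM side.
        long openKeyId = created.getID();

        submitCommitKey(CommitKeyRequest.newBuilder()
            .setKeyArgs(args)
            .setClientID(openKeyId)
            .build());
      }

      // Placeholders for whatever RPC transport the caller uses.
      private CreateKeyResponse submitCreateKey(CreateKeyRequest request) {
        throw new UnsupportedOperationException("transport not shown");
      }

      private void submitCommitKey(CommitKeyRequest request) {
        throw new UnsupportedOperationException("transport not shown");
      }
    }

AllocateBlockRequest and MultipartCommitUploadPartRequest carry the same clientID field for the same reason.
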
- optional uint64 ID = 3; - // TODO : allow specifiying a particular version to read. - optional uint64 openVersion = 4; -} - -message RenameKeyRequest{ - required KeyArgs keyArgs = 1; - required string toKeyName = 2; -} - -message RenameKeyResponse{ - -} - -message DeleteKeyRequest { - required KeyArgs keyArgs = 1; -} - -message DeleteKeyResponse { - - optional KeyInfo keyInfo = 2; - // clients' followup request may carry this ID for stateful operations - // (similar to a cookie). - optional uint64 ID = 3; - optional uint64 openVersion = 4; -} - -message PurgeKeysRequest { - repeated string keys = 1; -} - -message PurgeKeysResponse { - -} - -message OMTokenProto { - enum Type { - DELEGATION_TOKEN = 1; - S3TOKEN = 2; - }; - required Type type = 1; - optional uint32 version = 2; - optional string owner = 3; - optional string renewer = 4; - optional string realUser = 5; - optional uint64 issueDate = 6; - optional uint64 maxDate = 7; - optional uint32 sequenceNumber = 8; - optional uint32 masterKeyId = 9; - optional uint64 expiryDate = 10; - optional string omCertSerialId = 11; - optional string accessKeyId = 12; - optional string signature = 13; - optional string strToSign = 14; -} - -message SecretKeyProto { - required uint32 keyId = 1; - required uint64 expiryDate = 2; - required bytes privateKeyBytes = 3; - required bytes publicKeyBytes = 4; -} - -message ListKeysRequest { - required string volumeName = 1; - required string bucketName = 2; - optional string startKey = 3; - optional string prefix = 4; - optional int32 count = 5; -} - -message ListKeysResponse { - repeated KeyInfo keyInfo = 2; -} - -message CommitKeyRequest { - required KeyArgs keyArgs = 1; - required uint64 clientID = 2; -} - -message CommitKeyResponse { - -} - -message AllocateBlockRequest { - required KeyArgs keyArgs = 1; - required uint64 clientID = 2; - optional hadoop.hdds.ExcludeListProto excludeList = 3; - // During HA on one of the OM nodes, we allocate block and send the - // AllocateBlockRequest with keyLocation set. If this is set, no need to - // call scm again in OM Ratis applyTransaction just append it to DB. - optional KeyLocation keyLocation = 4; -} - -message AllocateBlockResponse { - - optional KeyLocation keyLocation = 2; -} - -message ServiceListRequest { -} - -message DBUpdatesRequest { - required uint64 sequenceNumber = 1; -} - -message ServiceListResponse { - - repeated ServiceInfo serviceInfo = 2; - // When security is enabled, return SCM CA certificate to Ozone client - // to set up gRPC TLS for client to authenticate server(DN). - optional string caCertificate = 3; -} - -message DBUpdatesResponse { - required uint64 sequenceNumber = 1; - repeated bytes data = 2; -} - -message ServicePort { - enum Type { - RPC = 1; - HTTP = 2; - HTTPS = 3; - RATIS = 4; - }; - required Type type = 1; - required uint32 value = 2; -} - -message ServiceInfo { - required hadoop.hdds.NodeType nodeType = 1; - required string hostname = 2; - repeated ServicePort servicePorts = 3; -} - -message S3CreateBucketRequest { - required string userName = 1; - required string s3bucketname = 2; - // This will be set during OM HA by one of the OM node. In future if more - // data fields are required to create volume/bucket we can add them to - // this. This is the reason for creating a new message type for this. - // S3CreateBucket means create volume from userName and create bucket - // with s3BucketName. 
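
DBUpdatesRequest and DBUpdatesResponse above define a simple incremental feed: the caller sends the last sequence number it has applied and receives the raw update batches recorded since then, plus the sequence number to resume from. A minimal sketch of such a polling loop, with the actual RPC left as a placeholder since no client class appears in this excerpt:

    import java.util.List;

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
    import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesResponse;

    // Sketch: resume the update feed from the last sequence number seen locally.
    final class DbUpdatesPoller {

      private long lastSeenSequenceNumber;

      void pollOnce() {
        DBUpdatesRequest request = DBUpdatesRequest.newBuilder()
            .setSequenceNumber(lastSeenSequenceNumber)
            .build();

        DBUpdatesResponse response = fetch(request);

        List<ByteString> batches = response.getDataList();
        // ... applying 'batches' to a local copy of the DB is application specific ...

        // Remember where we got to so the next poll is incremental.
        lastSeenSequenceNumber = response.getSequenceNumber();
      }

      // Placeholder for the caller's RPC transport.
      private DBUpdatesResponse fetch(DBUpdatesRequest request) {
        throw new UnsupportedOperationException("transport not shown");
      }
    }
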
- optional S3CreateVolumeInfo s3CreateVolumeInfo = 3; -} - -message S3CreateVolumeInfo { - // Creation time set in preExecute on one of the OM node. - required uint64 creationTime = 1; -} - -message S3CreateBucketResponse { - -} - -message S3DeleteBucketRequest { - required string s3bucketName = 1; -} - -message S3DeleteBucketResponse { - -} - -message S3BucketInfoRequest { - required string s3bucketName = 1; -} -message S3BucketInfoResponse { - - optional string ozoneMapping = 2; -} - -message S3ListBucketsRequest { - required string userName = 1; - optional string startKey = 2; - optional string prefix = 3; - optional int32 count = 4; -} - -message S3ListBucketsResponse { - repeated BucketInfo bucketInfo = 2; -} - -message MultipartInfoInitiateRequest { - required KeyArgs keyArgs = 1; - -} - -message MultipartInfoInitiateResponse { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - required string multipartUploadID = 4; -} - -message MultipartKeyInfo { - required string uploadID = 4; - repeated PartKeyInfo partKeyInfoList = 5; -} - -message PartKeyInfo { - required string partName = 1; - required uint32 partNumber = 2; - required KeyInfo partKeyInfo = 3; -} - -message MultipartCommitUploadPartRequest { - required KeyArgs keyArgs = 1; - required uint64 clientID = 2; -} - -message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. - optional string partName = 1; -} - -message MultipartUploadCompleteRequest { - required KeyArgs keyArgs = 1; - repeated Part partsList = 2; -} - -message MultipartUploadCompleteResponse { - optional string volume = 1; - optional string bucket = 2; - optional string key = 3; - optional string hash = 4; // This will be used as etag for s3 -} - -message Part { - required uint32 partNumber = 1; - required string partName = 2; -} - -message MultipartUploadAbortRequest { - required KeyArgs keyArgs = 1; -} - -message MultipartUploadAbortResponse { - -} -message MultipartUploadListPartsRequest { - required string volume = 1; - required string bucket = 2; - required string key = 3; - required string uploadID = 4; - optional uint32 partNumbermarker = 5; - optional uint32 maxParts = 6; -} - -message MultipartUploadListPartsResponse { - optional hadoop.hdds.ReplicationType type = 2; - optional hadoop.hdds.ReplicationFactor factor = 3; - optional uint32 nextPartNumberMarker = 4; - optional bool isTruncated = 5; - repeated PartInfo partsList = 6; - -} - -message ListMultipartUploadsRequest { - required string volume = 1; - required string bucket = 2; - required string prefix = 3; -} - -message ListMultipartUploadsResponse { - optional bool isTruncated = 1; - repeated MultipartUploadInfo uploadsList = 2; -} - -message MultipartUploadInfo { - required string volumeName = 1; - required string bucketName = 2; - required string keyName = 3; - required string uploadId = 4; - required uint64 creationTime = 5; - required hadoop.hdds.ReplicationType type = 6; - required hadoop.hdds.ReplicationFactor factor = 7; - -} - -message PartInfo { - required uint32 partNumber = 1; - required string partName = 2; - required uint64 modificationTime = 3; - required uint64 size = 4; -} - -message GetDelegationTokenResponseProto { - - optional hadoop.common.GetDelegationTokenResponseProto response = 2; -} - -message RenewDelegationTokenResponseProto { - - optional hadoop.common.RenewDelegationTokenResponseProto response = 2; -} - -message CancelDelegationTokenResponseProto { - - optional 
hadoop.common.CancelDelegationTokenResponseProto response = 2; -} - -message S3Secret { - required string kerberosID = 1; - required string awsSecret = 2; -} - -message GetS3SecretRequest { - required string kerberosID = 1; -} - -message GetS3SecretResponse { - required S3Secret s3Secret = 2; -} - -/** - This will be used internally by OM to replicate S3 Secret across quorum of - OM's. -*/ -message UpdateGetS3SecretRequest { - required string kerberosID = 1; - required string awsSecret = 2; -} - -/** - The OM service that takes care of Ozone namespace. -*/ -service OzoneManagerService { - // A client-to-OM RPC to send client requests to OM Ratis server - rpc submitRequest(OMRequest) - returns(OMResponse); -} diff --git a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties b/hadoop-ozone/common/src/main/resources/ozone-version-info.properties deleted file mode 100644 index 599f14d5eca..00000000000 --- a/hadoop-ozone/common/src/main/resources/ozone-version-info.properties +++ /dev/null @@ -1,27 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -version=${declared.ozone.version} -release=${ozone.release} -revision=${version-info.scm.commit} -branch=${version-info.scm.branch} -user=${user.name} -date=${version-info.build.time} -url=${version-info.scm.uri} -srcChecksum=${version-info.source.md5} -protocVersion=${protobuf.version} diff --git a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh deleted file mode 100644 index 3fff7f5f354..00000000000 --- a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
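
The ozone-version-info.properties template shown above is populated at build time (the ${...} placeholders are substituted, presumably via Maven resource filtering) and the resulting file is read back from the classpath at runtime. The deleted tree has its own version-info classes for that; the sketch below only illustrates the general pattern with plain java.util.Properties and is not taken from those classes:

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    // Sketch: load the filtered properties file from the classpath and print
    // a couple of the keys defined in the template above.
    final class OzoneVersionInfoExample {

      static Properties load() throws IOException {
        Properties props = new Properties();
        try (InputStream in = OzoneVersionInfoExample.class.getClassLoader()
            .getResourceAsStream("ozone-version-info.properties")) {
          if (in != null) {
            props.load(in);
          }
          return props;
        }
      }

      public static void main(String[] args) throws IOException {
        Properties info = load();
        System.out.println("Ozone " + info.getProperty("version", "unknown")
            + " (revision " + info.getProperty("revision", "unknown") + ")");
      }
    }
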
- -if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then - hadoop_add_profile ozone -fi - - diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java deleted file mode 100644 index ce743fead31..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOmUtils.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.test.PathUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -/** - * Unit tests for {@link OmUtils}. - */ -public class TestOmUtils { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Rule - public Timeout timeout = new Timeout(60_000); - - @Rule - public ExpectedException thrown= ExpectedException.none(); - - /** - * Test {@link OmUtils#getOmDbDir}. - */ - @Test - public void testGetOmDbDir() { - final File testDir = PathUtils.getTestDir(TestOmUtils.class); - final File dbDir = new File(testDir, "omDbDir"); - final File metaDir = new File(testDir, "metaDir"); // should be ignored. - final Configuration conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - - try { - assertEquals(dbDir, OmUtils.getOmDbDir(conf)); - assertTrue(dbDir.exists()); // should have been created. - } finally { - FileUtils.deleteQuietly(dbDir); - } - } - - /** - * Test {@link OmUtils#getOmDbDir} with fallback to OZONE_METADATA_DIRS - * when OZONE_OM_DB_DIRS is undefined. - */ - @Test - public void testGetOmDbDirWithFallback() { - final File testDir = PathUtils.getTestDir(TestOmUtils.class); - final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); - - try { - assertEquals(metaDir, OmUtils.getOmDbDir(conf)); - assertTrue(metaDir.exists()); // should have been created. 
- } finally { - FileUtils.deleteQuietly(metaDir); - } - } - - @Test - public void testNoOmDbDirConfigured() { - thrown.expect(IllegalArgumentException.class); - OmUtils.getOmDbDir(new OzoneConfiguration()); - } - - @Test - public void testWriteCheckpointToOutputStream() throws Exception { - - FileInputStream fis = null; - FileOutputStream fos = null; - - try { - String testDirName = folder.newFolder().getAbsolutePath(); - File file = new File(testDirName + "/temp1.txt"); - FileWriter writer = new FileWriter(file); - writer.write("Test data 1"); - writer.close(); - - file = new File(testDirName + "/temp2.txt"); - writer = new FileWriter(file); - writer.write("Test data 2"); - writer.close(); - - File outputFile = - new File(Paths.get(testDirName, "output_file.tgz").toString()); - TestDBCheckpoint dbCheckpoint = new TestDBCheckpoint( - Paths.get(testDirName)); - OmUtils.writeOmDBCheckpointToStream(dbCheckpoint, - new FileOutputStream(outputFile)); - assertNotNull(outputFile); - } finally { - IOUtils.closeStream(fis); - IOUtils.closeStream(fos); - } - } - -} - -class TestDBCheckpoint implements DBCheckpoint { - - private Path checkpointFile; - - TestDBCheckpoint(Path checkpointFile) { - this.checkpointFile = checkpointFile; - } - - @Override - public Path getCheckpointLocation() { - return checkpointFile; - } - - @Override - public long getCheckpointTimestamp() { - return 0; - } - - @Override - public long getLatestSequenceNumber() { - return 0; - } - - @Override - public long checkpointCreationTimeTaken() { - return 0; - } - - @Override - public void cleanupCheckpoint() throws IOException { - FileUtils.deleteDirectory(checkpointFile.toFile()); - } - - @Override - public void setRatisSnapshotIndex(long omRatisSnapshotIndex) { - } - - @Override - public long getRatisSnapshotIndex() { - return 0; - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java deleted file mode 100644 index 17fc9b58996..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ /dev/null @@ -1,329 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; - -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Test; - -import java.util.HashMap; -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This class is to test acl storage and retrieval in ozone store. - */ -public class TestOzoneAcls { - - @Test - public void testAclParse() { - HashMap testMatrix; - testMatrix = new HashMap<>(); - - testMatrix.put("user:bilbo:r", Boolean.TRUE); - testMatrix.put("user:bilbo:w", Boolean.TRUE); - testMatrix.put("user:bilbo:rw", Boolean.TRUE); - testMatrix.put("user:bilbo:a", Boolean.TRUE); - testMatrix.put(" user:bilbo:a ", Boolean.TRUE); - - - // ACLs makes no judgement on the quality of - // user names. it is for the userAuth interface - // to determine if a user name is really a name - testMatrix.put(" user:*:rw", Boolean.TRUE); - testMatrix.put(" user:~!:rw", Boolean.TRUE); - - - testMatrix.put("", Boolean.FALSE); - testMatrix.put(null, Boolean.FALSE); - testMatrix.put(" user:bilbo:", Boolean.FALSE); - testMatrix.put(" user:bilbo:rx", Boolean.TRUE); - testMatrix.put(" user:bilbo:rwdlncxy", Boolean.TRUE); - testMatrix.put(" group:bilbo:rwdlncxy", Boolean.TRUE); - testMatrix.put(" world::rwdlncxy", Boolean.TRUE); - testMatrix.put(" user:bilbo:rncxy", Boolean.TRUE); - testMatrix.put(" group:bilbo:ncxy", Boolean.TRUE); - testMatrix.put(" world::ncxy", Boolean.TRUE); - testMatrix.put(" user:bilbo:rwcxy", Boolean.TRUE); - testMatrix.put(" group:bilbo:rwcxy", Boolean.TRUE); - testMatrix.put(" world::rwcxy", Boolean.TRUE); - testMatrix.put(" user:bilbo:mk", Boolean.FALSE); - testMatrix.put(" user::rw", Boolean.FALSE); - testMatrix.put("user11:bilbo:rw", Boolean.FALSE); - testMatrix.put(" user:::rw", Boolean.FALSE); - - testMatrix.put(" group:hobbit:r", Boolean.TRUE); - testMatrix.put(" group:hobbit:w", Boolean.TRUE); - testMatrix.put(" group:hobbit:rw", Boolean.TRUE); - testMatrix.put(" group:hobbit:a", Boolean.TRUE); - testMatrix.put(" group:*:rw", Boolean.TRUE); - testMatrix.put(" group:~!:rw", Boolean.TRUE); - - testMatrix.put(" group:hobbit:", Boolean.FALSE); - testMatrix.put(" group:hobbit:rx", Boolean.TRUE); - testMatrix.put(" group:hobbit:mk", Boolean.FALSE); - testMatrix.put(" group::", Boolean.FALSE); - testMatrix.put(" group::rw", Boolean.FALSE); - testMatrix.put(" group22:hobbit:", Boolean.FALSE); - testMatrix.put(" group:::rw", Boolean.FALSE); - - testMatrix.put("JUNK group:hobbit:r", Boolean.FALSE); - testMatrix.put("JUNK group:hobbit:w", Boolean.FALSE); - testMatrix.put("JUNK group:hobbit:rw", Boolean.FALSE); - testMatrix.put("JUNK group:hobbit:a", Boolean.FALSE); - testMatrix.put("JUNK group:*:rw", Boolean.FALSE); - testMatrix.put("JUNK group:~!:rw", Boolean.FALSE); - - testMatrix.put(" world::r", Boolean.TRUE); - testMatrix.put(" world::w", Boolean.TRUE); - testMatrix.put(" world::rw", Boolean.TRUE); - testMatrix.put(" world::a", Boolean.TRUE); - - testMatrix.put(" world:bilbo:w", Boolean.FALSE); - testMatrix.put(" world:bilbo:rw", Boolean.FALSE); - testMatrix.put(" anonymous:bilbo:w", Boolean.FALSE); - testMatrix.put(" anonymous:ANONYMOUS:w", Boolean.TRUE); - testMatrix.put(" 
anonymous::rw", Boolean.TRUE); - testMatrix.put(" world:WORLD:rw", Boolean.TRUE); - - Set keys = testMatrix.keySet(); - for (String key : keys) { - if (testMatrix.get(key)) { - OzoneAcl.parseAcl(key); - } else { - try { - OzoneAcl.parseAcl(key); - // should never get here since parseAcl will throw - fail("An exception was expected but did not happen. Key: " + key); - } catch (IllegalArgumentException e) { - // nothing to do - } - } - } - } - - @Test - public void testAclValues() throws Exception { - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertEquals(ACLIdentityType.USER, acl.getType()); - - acl = OzoneAcl.parseAcl("user:bilbo:a"); - assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); - assertEquals(ACLIdentityType.USER, acl.getType()); - - acl = OzoneAcl.parseAcl("user:bilbo:r"); - assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.USER, acl.getType()); - - acl = OzoneAcl.parseAcl("user:bilbo:w"); - assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.USER, acl.getType()); - - acl = OzoneAcl.parseAcl("group:hobbit:a"); - assertEquals(acl.getName(), "hobbit"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ.ordinal())); - assertEquals(ACLIdentityType.GROUP, acl.getType()); - - acl = OzoneAcl.parseAcl("world::a"); - assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); - assertEquals(ACLIdentityType.WORLD, acl.getType()); - - acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy"); - assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - - acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy"); - assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.GROUP, acl.getType()); - - acl = OzoneAcl.parseAcl("world::rwdlncxy"); - assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - 
assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.WORLD, acl.getType()); - - // Acls with scope info. - acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[DEFAULT]"); - assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)); - - acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.ACCESS)); - - acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy[ACCESS]"); - assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.GROUP, acl.getType()); - assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.ACCESS)); - - acl = OzoneAcl.parseAcl("world::rwdlncxy[DEFAULT]"); - assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertEquals(ACLIdentityType.WORLD, acl.getType()); - assertTrue(acl.getAclScope().equals(OzoneAcl.AclScope.DEFAULT)); - - - - LambdaTestUtils.intercept(IllegalArgumentException.class, "ACL right" + - " is not", () -> OzoneAcl.parseAcl("world::rwdlncxncxdfsfgbny" - )); - } - - @Test - public void testBitSetToListConversion() throws Exception { - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - - List rights = 
acl.getAclList(); - assertTrue(rights.size() == 2); - assertTrue(rights.contains(READ)); - assertTrue(rights.contains(WRITE)); - assertFalse(rights.contains(CREATE)); - - acl = OzoneAcl.parseAcl("user:bilbo:a"); - - rights = acl.getAclList(); - assertTrue(rights.size() == 1); - assertTrue(rights.contains(ALL)); - assertFalse(rights.contains(WRITE)); - assertFalse(rights.contains(CREATE)); - - acl = OzoneAcl.parseAcl("user:bilbo:cxy"); - rights = acl.getAclList(); - assertTrue(rights.size() == 3); - assertTrue(rights.contains(CREATE)); - assertTrue(rights.contains(READ_ACL)); - assertTrue(rights.contains(WRITE_ACL)); - assertFalse(rights.contains(WRITE)); - assertFalse(rights.contains(READ)); - - List acls = OzoneAcl.parseAcls("user:bilbo:cxy,group:hadoop:a"); - assertTrue(acls.size() == 2); - rights = acls.get(0).getAclList(); - assertTrue(rights.size() == 3); - assertTrue(rights.contains(CREATE)); - assertTrue(rights.contains(READ_ACL)); - assertTrue(rights.contains(WRITE_ACL)); - assertFalse(rights.contains(WRITE)); - assertFalse(rights.contains(READ)); - rights = acls.get(1).getAclList(); - assertTrue(rights.contains(ALL)); - - acls = OzoneAcl.parseAcls("user:bilbo:cxy[ACCESS]," + - "group:hadoop:a[DEFAULT],world::r[DEFAULT]"); - assertTrue(acls.size() == 3); - rights = acls.get(0).getAclList(); - assertTrue(rights.size() == 3); - assertTrue(rights.contains(CREATE)); - assertTrue(rights.contains(READ_ACL)); - assertTrue(rights.contains(WRITE_ACL)); - assertFalse(rights.contains(WRITE)); - assertFalse(rights.contains(READ)); - rights = acls.get(1).getAclList(); - assertTrue(rights.contains(ALL)); - - assertTrue(acls.get(0).getName().equals("bilbo")); - assertTrue(acls.get(1).getName().equals("hadoop")); - assertTrue(acls.get(2).getName().equals("WORLD")); - assertTrue(acls.get(0).getAclScope().equals(OzoneAcl.AclScope.ACCESS)); - assertTrue(acls.get(1).getAclScope().equals(OzoneAcl.AclScope.DEFAULT)); - assertTrue(acls.get(2).getAclScope().equals(OzoneAcl.AclScope.DEFAULT)); - } - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java deleted file mode 100644 index 7a537c0d502..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.codec; - -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; - -import java.util.HashMap; -import java.util.UUID; - -/** - * This class tests OmMultipartKeyInfoCodec. - */ -public class TestOmMultipartKeyInfoCodec { - - @Test - public void testOmMultipartKeyInfoCodec() { - OmMultipartKeyInfoCodec codec = new OmMultipartKeyInfoCodec(); - OmMultipartKeyInfo omMultipartKeyInfo = new OmMultipartKeyInfo(UUID - .randomUUID().toString(), new HashMap<>()); - byte[] data = new byte[0]; - try { - data = codec.toPersistedFormat(omMultipartKeyInfo); - } catch (java.io.IOException e) { - e.printStackTrace(); - } - Assert.assertNotNull(data); - - OmMultipartKeyInfo multipartKeyInfo = null; - try { - multipartKeyInfo = codec.fromPersistedFormat(data); - } catch (java.io.IOException e) { - e.printStackTrace(); - } - Assert.assertEquals(omMultipartKeyInfo, multipartKeyInfo); - - // When random byte data passed returns null. - try { - codec.fromPersistedFormat("random".getBytes()); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Can't encode the the raw " + - "data from the byte array", ex); - } catch (java.io.IOException e) { - e.printStackTrace(); - } - - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java deleted file mode 100644 index f06bf383512..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.codec; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; - -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.LinkedList; -import java.util.List; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This class test OmPrefixInfoCodec. - */ -public class TestOmPrefixInfoCodec { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - - private OmPrefixInfoCodec codec; - - @Before - public void setUp() { - codec = new OmPrefixInfoCodec(); - } - - @Test - public void testCodecWithIncorrectValues() throws Exception { - try { - codec.fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8)); - fail("testCodecWithIncorrectValues failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Can't encode the the raw " + - "data from the byte array", ex); - } - } - - @Test - public void testCodecWithNullDataFromTable() throws Exception { - thrown.expect(NullPointerException.class); - codec.fromPersistedFormat(null); - } - - - @Test - public void testCodecWithNullDataFromUser() throws Exception { - thrown.expect(NullPointerException.class); - codec.toPersistedFormat(null); - } - - @Test - public void testToAndFromPersistedFormat() throws IOException { - - List acls = new LinkedList<>(); - OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER, - "hive", ACLType.ALL, ACCESS); - acls.add(ozoneAcl); - OmPrefixInfo opiSave = OmPrefixInfo.newBuilder() - .setName("/user/hive/warehouse") - .setAcls(acls) - .addMetadata("id", "100") - .build(); - - OmPrefixInfo opiLoad = codec.fromPersistedFormat( - codec.toPersistedFormat(opiSave)); - - assertTrue("Load saved prefix info should match", - opiLoad.equals(opiSave)); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java deleted file mode 100644 index 549c374a91d..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.codec; - -import java.nio.charset.StandardCharsets; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.junit.Assert.fail; - -/** - * This class test S3SecretValueCodec. - */ -public class TestS3SecretValueCodec { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private S3SecretValueCodec codec; - - @Before - public void initialize() { - codec = new S3SecretValueCodec(); - } - @Test - public void testCodecWithCorrectData() throws Exception { - - S3SecretValue s3SecretValue = - new S3SecretValue(UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - - byte[] data = codec.toPersistedFormat(s3SecretValue); - Assert.assertNotNull(data); - - S3SecretValue docdedS3Secret = codec.fromPersistedFormat(data); - - Assert.assertEquals(s3SecretValue, docdedS3Secret); - - } - - @Test - public void testCodecWithIncorrectValues() throws Exception { - try { - codec.fromPersistedFormat("random".getBytes(StandardCharsets.UTF_8)); - fail("testCodecWithIncorrectValues failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Can't encode the the raw " + - "data from the byte array", ex); - } - } - - @Test - public void testCodecWithNullDataFromTable() throws Exception { - thrown.expect(NullPointerException.class); - codec.fromPersistedFormat(null); - } - - - @Test - public void testCodecWithNullDataFromUser() throws Exception { - thrown.expect(NullPointerException.class); - codec.toPersistedFormat(null); - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java deleted file mode 100644 index 8b5690a148b..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * Utility classes to encode/decode DTO objects to/from byte array. - */ - -/** - * Unit tests for codec's in OM. - */ -package org.apache.hadoop.ozone.om.codec; \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java deleted file mode 100644 index 24b5307138b..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/exceptions/TestResultCodes.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.exceptions; - -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test code mappping. - */ -public class TestResultCodes { - - @Test - public void codeMapping() { - Assert.assertEquals(ResultCodes.values().length, Status.values().length); - for (int i = 0; i < ResultCodes.values().length; i++) { - ResultCodes codeValue = ResultCodes.values()[i]; - Status protoBufValue = Status.values()[i]; - Assert.assertTrue(String - .format("Protobuf/Enum constant name mismatch %s %s", codeValue, - protoBufValue), sameName(codeValue.name(), protoBufValue.name())); - ResultCodes converted = ResultCodes.values()[protoBufValue.ordinal()]; - Assert.assertEquals(codeValue, converted); - - } - } - - private boolean sameName(String codeValue, String protoBufValue) { - return codeValue.equals(protoBufValue); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java deleted file mode 100644 index a3bc8ada29f..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.protocol.StorageType; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test BucketInfo. - */ -public class TestOmBucketInfo { - - @Test - public void protobufConversion() { - OmBucketInfo bucket = OmBucketInfo.newBuilder() - .setBucketName("bucket") - .setVolumeName("vol1") - .setCreationTime(1L) - .setIsVersionEnabled(false) - .setStorageType(StorageType.ARCHIVE) - .build(); - - OmBucketInfo afterSerialization = - OmBucketInfo.getFromProtobuf(bucket.getProtobuf()); - - Assert.assertEquals(bucket, afterSerialization); - - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java deleted file mode 100644 index a1fa3241242..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test OmKeyInfo. - */ -public class TestOmKeyInfo { - - @Test - public void protobufConversion() { - OmKeyInfo key = new Builder() - .setKeyName("key1") - .setBucketName("bucket") - .setVolumeName("vol1") - .setCreationTime(123L) - .setModificationTime(123L) - .setDataSize(123L) - .setReplicationFactor(ReplicationFactor.THREE) - .setReplicationType(ReplicationType.RATIS) - .addMetadata("key1", "value1") - .addMetadata("key2", "value2") - .build(); - - OmKeyInfo keyAfterSerialization = - OmKeyInfo.getFromProtobuf(key.getProtobuf()); - - Assert.assertEquals(key, keyAfterSerialization); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java deleted file mode 100644 index f321da2bccf..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmMultipartUpload.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test utilities inside OmMultipartUpload. - */ -public class TestOmMultipartUpload { - - @Test - public void from() { - String key1 = - OmMultipartUpload.getDbKey("vol1", "bucket1", "dir1/key1", "uploadId"); - OmMultipartUpload info = OmMultipartUpload.from(key1); - - Assert.assertEquals("vol1", info.getVolumeName()); - Assert.assertEquals("bucket1", info.getBucketName()); - Assert.assertEquals("dir1/key1", info.getKeyName()); - Assert.assertEquals("uploadId", info.getUploadId()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java deleted file mode 100644 index b1a4e4550c4..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.List; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test for OzoneAcls utility class. - */ -public class TestOzoneAclUtil { - - private static final List DEFAULT_ACLS = - getDefaultAcls(new OzoneConfiguration()); - - private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1", - ACLType.READ_ACL, ACCESS); - - private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2", - ACLType.WRITE, ACCESS); - - private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1", - ACLType.ALL, ACCESS); - - @Test - public void testAddAcl() throws IOException { - List currentAcls = getDefaultAcls(new OzoneConfiguration()); - assertTrue(currentAcls.size() > 0); - - // Add new permission to existing acl entry. - OzoneAcl oldAcl = currentAcls.get(0); - OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); - - addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); - // Add same permission again and verify result - addAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size()); - - // Add a new user acl entry. - addAndVerifyAcl(currentAcls, USER1, true, DEFAULT_ACLS.size() + 1); - // Add same acl entry again and verify result - addAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size() + 1); - - // Add a new group acl entry. - addAndVerifyAcl(currentAcls, GROUP1, true, DEFAULT_ACLS.size() + 2); - // Add same acl entry again and verify result - addAndVerifyAcl(currentAcls, GROUP1, false, DEFAULT_ACLS.size() + 2); - } - - @Test - public void testRemoveAcl() { - List currentAcls = null; - - // add/remove to/from null OzoneAcls - removeAndVerifyAcl(currentAcls, USER1, false, 0); - addAndVerifyAcl(currentAcls, USER1, false, 0); - removeAndVerifyAcl(currentAcls, USER1, false, 0); - - currentAcls = getDefaultAcls(new OzoneConfiguration()); - assertTrue(currentAcls.size() > 0); - - // Add new permission to existing acl entru. - OzoneAcl oldAcl = currentAcls.get(0); - OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); - - // Remove non existing acl entry - removeAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size()); - - // Remove non existing acl permission - removeAndVerifyAcl(currentAcls, newAcl, false, DEFAULT_ACLS.size()); - - // Add new permission to existing acl entry. 
- addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); - - // Remove the new permission added. - removeAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); - - removeAndVerifyAcl(currentAcls, oldAcl, true, DEFAULT_ACLS.size() - 1); - } - - private void addAndVerifyAcl(List currentAcls, OzoneAcl addedAcl, - boolean expectedResult, int expectedSize) { - assertEquals(expectedResult, OzoneAclUtil.addAcl(currentAcls, addedAcl)); - if (currentAcls != null) { - boolean verified = verifyAclAdded(currentAcls, addedAcl); - assertTrue("addedAcl: " + addedAcl + " should exist in the" + - " current acls: " + currentAcls, verified); - assertEquals(expectedSize, currentAcls.size()); - } - } - - private void removeAndVerifyAcl(List currentAcls, - OzoneAcl removedAcl, boolean expectedResult, int expectedSize) { - assertEquals(expectedResult, OzoneAclUtil.removeAcl(currentAcls, - removedAcl)); - if (currentAcls != null) { - boolean verified = verifyAclRemoved(currentAcls, removedAcl); - assertTrue("removedAcl: " + removedAcl + " should not exist in the" + - " current acls: " + currentAcls, verified); - assertEquals(expectedSize, currentAcls.size()); - } - } - - private boolean verifyAclRemoved(List acls, OzoneAcl removedAcl) { - for (OzoneAcl acl : acls) { - if (acl.getName().equals(removedAcl.getName()) && - acl.getType().equals(removedAcl.getType()) && - acl.getAclScope().equals(removedAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(removedAcl.getAclBitSet()); - return !temp.equals(removedAcl.getAclBitSet()); - } - } - return true; - } - - private boolean verifyAclAdded(List acls, OzoneAcl newAcl) { - for (OzoneAcl acl : acls) { - if (acl.getName().equals(newAcl.getName()) && - acl.getType().equals(newAcl.getType()) && - acl.getAclScope().equals(newAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(newAcl.getAclBitSet()); - return temp.equals(newAcl.getAclBitSet()); - } - } - return false; - } - - /** - * Helper function to get default acl list for current user. - * - * @return list of ozoneAcls. - * @throws IOException - * */ - private static List getDefaultAcls(OzoneConfiguration conf) { - List ozoneAcls = new ArrayList<>(); - //User ACL - UserGroupInformation ugi; - try { - ugi = UserGroupInformation.getCurrentUser(); - } catch (IOException ioe) { - ugi = UserGroupInformation.createRemoteUser("user0"); - } - - OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - IAccessAuthorizer.ACLType userRights = aclConfig.getUserDefaultRights(); - IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); - - OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); - //Group ACLs of the User - List userGroups = Arrays.asList(ugi.getGroupNames()); - userGroups.stream().forEach((group) -> OzoneAclUtil.addAcl(ozoneAcls, - new OzoneAcl(GROUP, group, groupRights, ACCESS))); - return ozoneAcls; - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java deleted file mode 100644 index e62423ac4ee..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.helpers; -/** - * Unit tests of helpers. - */ diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java deleted file mode 100644 index 8438cbf58c0..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import java.util.Stack; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import static org.junit.Assert.fail; - -/** - * Class tests OzoneManagerLock. - */ -public class TestOzoneManagerLock { - @Test - public void acquireResourceLock() { - String[] resourceName; - for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - resourceName = generateResourceName(resource); - testResourceLock(resourceName, resource); - } - } - - private void testResourceLock(String[] resourceName, - OzoneManagerLock.Resource resource) { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireLock(resource, resourceName); - lock.releaseLock(resource, resourceName); - Assert.assertTrue(true); - } - - @Test - public void reacquireResourceLock() { - String[] resourceName; - for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - resourceName = generateResourceName(resource); - testResourceReacquireLock(resourceName, resource); - } - } - - private void testResourceReacquireLock(String[] resourceName, - OzoneManagerLock.Resource resource) { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - - // Lock re-acquire not allowed by same thread. - if (resource == OzoneManagerLock.Resource.USER_LOCK || - resource == OzoneManagerLock.Resource.S3_SECRET_LOCK || - resource == OzoneManagerLock.Resource.PREFIX_LOCK){ - lock.acquireLock(resource, resourceName); - try { - lock.acquireLock(resource, resourceName); - fail("reacquireResourceLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + resource.getName() + " lock " + - "while holding [" + resource.getName() + "] lock(s)."; - Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message)); - } - lock.releaseLock(resource, resourceName); - Assert.assertTrue(true); - } else { - lock.acquireLock(resource, resourceName); - lock.acquireLock(resource, resourceName); - lock.releaseLock(resource, resourceName); - lock.releaseLock(resource, resourceName); - Assert.assertTrue(true); - } - } - - @Test - public void testLockingOrder() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - String[] resourceName; - - // What this test does is iterate all resources. For each resource - // acquire lock, and then in inner loop acquire all locks with higher - // lock level, finally release the locks. 
- for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - Stack stack = new Stack<>(); - resourceName = generateResourceName(resource); - lock.acquireLock(resource, resourceName); - stack.push(new ResourceInfo(resourceName, resource)); - for (OzoneManagerLock.Resource higherResource : - OzoneManagerLock.Resource.values()) { - if (higherResource.getMask() > resource.getMask()) { - resourceName = generateResourceName(higherResource); - lock.acquireLock(higherResource, resourceName); - stack.push(new ResourceInfo(resourceName, higherResource)); - } - } - // Now release locks - while (!stack.empty()) { - ResourceInfo resourceInfo = stack.pop(); - lock.releaseLock(resourceInfo.getResource(), - resourceInfo.getLockName()); - } - } - Assert.assertTrue(true); - } - - @Test - public void testLockViolationsWithOneHigherLevelLock() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - for (OzoneManagerLock.Resource higherResource : - OzoneManagerLock.Resource.values()) { - if (higherResource.getMask() > resource.getMask()) { - String[] resourceName = generateResourceName(higherResource); - lock.acquireLock(higherResource, resourceName); - try { - lock.acquireLock(resource, generateResourceName(resource)); - fail("testLockViolationsWithOneHigherLevelLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire " + resource.getName() + " lock " + - "while holding [" + higherResource.getName() + "] lock(s)."; - Assert.assertTrue(ex.getMessage(), - ex.getMessage().contains(message)); - } - lock.releaseLock(higherResource, resourceName); - } - } - } - } - - @Test - public void testLockViolations() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - String[] resourceName; - - // What this test does is iterate all resources. For each resource - // acquire a higher level lock above the resource, and then take the - // lock. This should fail. Like that it tries all error combinations. 
- for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - Stack stack = new Stack<>(); - List currentLocks = new ArrayList<>(); - Queue queue = new LinkedList<>(); - for (OzoneManagerLock.Resource higherResource : - OzoneManagerLock.Resource.values()) { - if (higherResource.getMask() > resource.getMask()) { - resourceName = generateResourceName(higherResource); - lock.acquireLock(higherResource, resourceName); - stack.push(new ResourceInfo(resourceName, higherResource)); - currentLocks.add(higherResource.getName()); - queue.add(new ResourceInfo(resourceName, higherResource)); - // try to acquire lower level lock - try { - resourceName = generateResourceName(resource); - lock.acquireLock(resource, resourceName); - } catch (RuntimeException ex) { - String message = "cannot acquire " + resource.getName() + " lock " + - "while holding " + currentLocks.toString() + " lock(s)."; - Assert.assertTrue(ex.getMessage(), - ex.getMessage().contains(message)); - } - } - } - - // Now release locks - while (!stack.empty()) { - ResourceInfo resourceInfo = stack.pop(); - lock.releaseLock(resourceInfo.getResource(), - resourceInfo.getLockName()); - } - } - } - - @Test - public void releaseLockWithOutAcquiringLock() { - OzoneManagerLock lock = - new OzoneManagerLock(new OzoneConfiguration()); - try { - lock.releaseLock(OzoneManagerLock.Resource.USER_LOCK, "user3"); - fail("releaseLockWithOutAcquiringLock failed"); - } catch (IllegalMonitorStateException ex) { - String message = "Releasing lock on resource $user3 without acquiring " + - "lock"; - Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message)); - } - } - - - private String[] generateResourceName(OzoneManagerLock.Resource resource) { - if (resource == OzoneManagerLock.Resource.BUCKET_LOCK) { - return new String[]{UUID.randomUUID().toString(), - UUID.randomUUID().toString()}; - } else { - return new String[]{UUID.randomUUID().toString()}; - } - } - - - /** - * Class used to store locked resource info. 
- */ - public class ResourceInfo { - private String[] lockName; - private OzoneManagerLock.Resource resource; - - ResourceInfo(String[] resourceName, OzoneManagerLock.Resource resource) { - this.lockName = resourceName; - this.resource = resource; - } - - public String[] getLockName() { - return lockName; - } - - public OzoneManagerLock.Resource getResource() { - return resource; - } - } - - @Test - public void acquireMultiUserLock() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireMultiUserLock("user1", "user2"); - lock.releaseMultiUserLock("user1", "user2"); - Assert.assertTrue(true); - } - - @Test - public void reAcquireMultiUserLock() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireMultiUserLock("user1", "user2"); - try { - lock.acquireMultiUserLock("user1", "user2"); - fail("reAcquireMultiUserLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire USER_LOCK lock while holding " + - "[USER_LOCK] lock(s)."; - Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message)); - } - lock.releaseMultiUserLock("user1", "user2"); - } - - @Test - public void acquireMultiUserLockAfterUserLock() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireLock(OzoneManagerLock.Resource.USER_LOCK, "user3"); - try { - lock.acquireMultiUserLock("user1", "user2"); - fail("acquireMultiUserLockAfterUserLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire USER_LOCK lock while holding " + - "[USER_LOCK] lock(s)."; - Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message)); - } - lock.releaseLock(OzoneManagerLock.Resource.USER_LOCK, "user3"); - } - - @Test - public void acquireUserLockAfterMultiUserLock() { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireMultiUserLock("user1", "user2"); - try { - lock.acquireLock(OzoneManagerLock.Resource.USER_LOCK, "user3"); - fail("acquireUserLockAfterMultiUserLock failed"); - } catch (RuntimeException ex) { - String message = "cannot acquire USER_LOCK lock while holding " + - "[USER_LOCK] lock(s)."; - Assert.assertTrue(ex.getMessage(), ex.getMessage().contains(message)); - } - lock.releaseMultiUserLock("user1", "user2"); - } - - @Test - public void testLockResourceParallel() throws Exception { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - - for (OzoneManagerLock.Resource resource : - OzoneManagerLock.Resource.values()) { - final String[] resourceName = generateResourceName(resource); - lock.acquireLock(resource, resourceName); - - AtomicBoolean gotLock = new AtomicBoolean(false); - new Thread(() -> { - lock.acquireLock(resource, resourceName); - gotLock.set(true); - lock.releaseLock(resource, resourceName); - }).start(); - // Let's give some time for the new thread to run - Thread.sleep(100); - // Since the new thread is trying to get lock on same resource, - // it will wait. - Assert.assertFalse(gotLock.get()); - lock.releaseLock(resource, resourceName); - // Since we have released the lock, the new thread should have the lock - // now. 
- // Let's give some time for the new thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } - - } - - @Test - public void testMultiLockResourceParallel() throws Exception { - OzoneManagerLock lock = new OzoneManagerLock(new OzoneConfiguration()); - lock.acquireMultiUserLock("user2", "user1"); - - AtomicBoolean gotLock = new AtomicBoolean(false); - new Thread(() -> { - lock.acquireMultiUserLock("user1", "user2"); - gotLock.set(true); - lock.releaseMultiUserLock("user1", "user2"); - }).start(); - // Let's give some time for the new thread to run - Thread.sleep(100); - // Since the new thread is trying to get lock on same resource, it will - // wait. - Assert.assertFalse(gotLock.get()); - lock.releaseMultiUserLock("user2", "user1"); - // Since we have released the lock, the new thread should have the lock - // now. - // Let's give some time for the new thread to run - Thread.sleep(100); - Assert.assertTrue(gotLock.get()); - } -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java deleted file mode 100644 index 149794a5c67..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.lock; -/** - * Unit tests of OzoneManager lock. - */ diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java deleted file mode 100644 index 2784b6c3e87..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestAWSV4AuthValidator.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.util.Arrays; -import java.util.Collection; - -import static org.junit.Assert.*; - -/** - * Test for {@link AWSV4AuthValidator}. 
- * */ -@RunWith(Parameterized.class) -public class TestAWSV4AuthValidator { - - private String strToSign; - private String signature; - private String awsAccessKey; - - public TestAWSV4AuthValidator(String strToSign, String signature, - String awsAccessKey) { - this.strToSign = strToSign; - this.signature = signature; - this.awsAccessKey = awsAccessKey; - } - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][]{ - { - "AWS4-HMAC-SHA256\n" + - "20190221T002037Z\n" + - "20190221/us-west-1/s3/aws4_request\n" + - "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d" + - "91851294efc47d", - "56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf" + - "4765f46a14cd745ad", - "dbaksbzljandlkandlsd" - }, - { - "AWS4-HMAC-SHA256\n" + - "20150830T123600Z\n" + - "20150830/us-east-1/iam/aws4_request\n" + - "f536975d06c0309214f805bb90ccff089219ecd68b2" + - "577efef23edd43b7e1a59", - "5d672d79c15b13162d9279b0855cfba" + - "6789a8edb4c82c400e06b5924a6f2b5d7", - "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY" - } - - }); - } - - @Test - public void testValidateRequest() { - assertTrue(AWSV4AuthValidator.validateRequest(strToSign, signature, - awsAccessKey)); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java deleted file mode 100644 index 39c622043ba..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.Assert; -import org.junit.Test; - -import java.security.SecureRandom; - -/** - * Tests GDPRSymmetricKey structure. - */ -public class TestGDPRSymmetricKey { - - @Test - public void testKeyGenerationWithDefaults() throws Exception { - GDPRSymmetricKey gkey = new GDPRSymmetricKey(new SecureRandom()); - - Assert.assertTrue(gkey.getCipher().getAlgorithm() - .equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME)); - - gkey.getKeyDetails().forEach( - (k, v) -> Assert.assertTrue(v.length() > 0)); - } - - @Test - public void testKeyGenerationWithValidInput() throws Exception { - GDPRSymmetricKey gkey = new GDPRSymmetricKey( - RandomStringUtils.randomAlphabetic(16), - OzoneConsts.GDPR_ALGORITHM_NAME); - - Assert.assertTrue(gkey.getCipher().getAlgorithm() - .equalsIgnoreCase(OzoneConsts.GDPR_ALGORITHM_NAME)); - - gkey.getKeyDetails().forEach( - (k, v) -> Assert.assertTrue(v.length() > 0)); - } - - @Test - public void testKeyGenerationWithInvalidInput() throws Exception { - GDPRSymmetricKey gkey = null; - try{ - gkey = new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), - OzoneConsts.GDPR_ALGORITHM_NAME); - } catch (IllegalArgumentException ex) { - Assert.assertTrue(ex.getMessage() - .equalsIgnoreCase("Secret must be exactly 16 characters")); - Assert.assertTrue(gkey == null); - } - } - - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java deleted file mode 100644 index 85ea03ea88d..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSelector.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.security; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.token.Token; -import org.junit.Assert; -import org.junit.Test; - -import java.nio.charset.StandardCharsets; -import java.util.Collections; - -import static org.apache.hadoop.ozone.security.OzoneTokenIdentifier.KIND_NAME; - -/** - * Class to test OzoneDelegationTokenSelector. 
- */ -public class TestOzoneDelegationTokenSelector { - - - @Test - public void testTokenSelector() { - - // set dummy details for identifier and password in token. - byte[] identifier = - RandomStringUtils.randomAlphabetic(10) - .getBytes(StandardCharsets.UTF_8); - byte[] password = - RandomStringUtils.randomAlphabetic(10) - .getBytes(StandardCharsets.UTF_8); - - Token tokenIdentifierToken = - new Token<>(identifier, password, KIND_NAME, getService()); - - OzoneDelegationTokenSelector ozoneDelegationTokenSelector = - new OzoneDelegationTokenSelector(); - - Text service = new Text("om1:9862"); - - Token selectedToken = - ozoneDelegationTokenSelector.selectToken(service, - Collections.singletonList(tokenIdentifierToken)); - - - Assert.assertNotNull(selectedToken); - - - tokenIdentifierToken.setService(new Text("om1:9863")); - selectedToken = - ozoneDelegationTokenSelector.selectToken(service, - Collections.singletonList(tokenIdentifierToken)); - - Assert.assertNull(selectedToken); - - service = new Text("om1:9863"); - selectedToken = - ozoneDelegationTokenSelector.selectToken(service, - Collections.singletonList(tokenIdentifierToken)); - - Assert.assertNotNull(selectedToken); - - } - - - private Text getService() { - return new Text("om1:9862,om2:9862,om3:9862"); - } - - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java deleted file mode 100644 index ab24b1b5925..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.junit.Test; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType.*; -import static org.junit.Assert.*; -import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; - -/** - * Test class for {@link OzoneObjInfo}. 
- * */ -public class TestOzoneObjInfo { - - private OzoneObjInfo objInfo; - private OzoneObjInfo.Builder builder; - private String volume = "vol1"; - private String bucket = "bucket1"; - private String key = "key1"; - private static final OzoneObj.StoreType STORE = OzoneObj.StoreType.OZONE; - - - @Test - public void testGetVolumeName() { - - builder = getBuilder(volume, bucket, key); - objInfo = builder.build(); - assertEquals(objInfo.getVolumeName(), volume); - - objInfo = getBuilder(null, null, null).build(); - assertEquals(objInfo.getVolumeName(), null); - - objInfo = getBuilder(volume, null, null).build(); - assertEquals(objInfo.getVolumeName(), volume); - } - - private OzoneObjInfo.Builder getBuilder(String withVolume, - String withBucket, - String withKey) { - return OzoneObjInfo.Builder.newBuilder() - .setResType(ResourceType.VOLUME) - .setStoreType(STORE) - .setVolumeName(withVolume) - .setBucketName(withBucket) - .setKeyName(withKey); - } - - @Test - public void testGetBucketName() { - objInfo = getBuilder(volume, bucket, key).build(); - assertEquals(objInfo.getBucketName(), bucket); - - objInfo =getBuilder(volume, null, null).build(); - assertEquals(objInfo.getBucketName(), null); - - objInfo =getBuilder(null, bucket, null).build(); - assertEquals(objInfo.getBucketName(), bucket); - } - - @Test - public void testGetKeyName() { - objInfo = getBuilder(volume, bucket, key).build(); - assertEquals(objInfo.getKeyName(), key); - - objInfo = getBuilder(volume, null, null).build(); - assertEquals(objInfo.getKeyName(), null); - - objInfo = getBuilder(null, bucket, null).build(); - assertEquals(objInfo.getKeyName(), null); - - objInfo = getBuilder(null, null, key).build(); - assertEquals(objInfo.getKeyName(), key); - } - - @Test - public void testFromProtobufOp() { - // Key with long path. - key = "dir1/dir2/dir3/dir4/dir5/abc.txt"; - OzoneManagerProtocolProtos.OzoneObj protoObj = OzoneManagerProtocolProtos. - OzoneObj.newBuilder() - .setResType(KEY) - .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) - .setPath(volume + OZONE_URI_DELIMITER + - bucket + OZONE_URI_DELIMITER + key) - .build(); - - objInfo = OzoneObjInfo.fromProtobuf(protoObj); - assertEquals(objInfo.getKeyName(), key); - objInfo = getBuilder(volume, null, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, bucket, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, null, key).build(); - assertEquals(objInfo.getKeyName(), key); - - // Key with long path. - key = "dir1/dir2/dir3/dir4/dir5/abc.txt"; - protoObj = OzoneManagerProtocolProtos. - OzoneObj.newBuilder() - .setResType(KEY) - .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) - .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + - bucket + OZONE_URI_DELIMITER + key) - .build(); - - objInfo = OzoneObjInfo.fromProtobuf(protoObj); - assertEquals(objInfo.getKeyName(), key); - objInfo = getBuilder(volume, null, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, bucket, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, null, key).build(); - assertEquals(objInfo.getKeyName(), key); - - // Key with long path. - key = "dir1/dir2/dir3/dir4/dir5/"; - protoObj = OzoneManagerProtocolProtos. 
- OzoneObj.newBuilder() - .setResType(KEY) - .setStoreType(OzoneManagerProtocolProtos.OzoneObj.StoreType.OZONE) - .setPath(OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + - bucket + OZONE_URI_DELIMITER + key) - .build(); - - objInfo = OzoneObjInfo.fromProtobuf(protoObj); - assertEquals(objInfo.getKeyName(), key); - objInfo = getBuilder(volume, null, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, bucket, null).build(); - assertEquals(objInfo.getKeyName(), null); - objInfo = getBuilder(null, null, key).build(); - assertEquals(objInfo.getKeyName(), key); - } -} \ No newline at end of file diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java deleted file mode 100644 index 57b02681deb..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.List; - -import static org.junit.Assert.assertEquals; - -/** - * Test Ozone Radix tree operations. - */ -public class TestRadixTree { - - final static RadixTree ROOT = new RadixTree<>(); - - @BeforeClass - public static void setupRadixTree() { - // Test prefix paths with an empty tree - assertEquals(true, ROOT.isEmpty()); - assertEquals("/", ROOT.getLongestPrefix("/a/b/c")); - assertEquals("/", RadixTree.radixPathToString( - ROOT.getLongestPrefixPath("/a/g"))); - // Build Radix tree below for testing. - // a - // | - // b - // / \ - // c e - // / \ / \ \ - // d f g dir1 dir2(1000) - // | - // g - // | - // h - ROOT.insert("/a/b/c/d"); - ROOT.insert("/a/b/c/d/g/h"); - ROOT.insert("/a/b/c/f"); - ROOT.insert("/a/b/e/g"); - ROOT.insert("/a/b/e/dir1"); - ROOT.insert("/a/b/e/dir2", 1000); - } - - /** - * Tests if insert and build prefix tree is correct. 
- */ - @Test - public void testGetLongestPrefix() { - assertEquals("/a/b/c", ROOT.getLongestPrefix("/a/b/c")); - assertEquals("/a/b", ROOT.getLongestPrefix("/a/b")); - assertEquals("/a", ROOT.getLongestPrefix("/a")); - assertEquals("/a/b/e/g", ROOT.getLongestPrefix("/a/b/e/g/h")); - - assertEquals("/", ROOT.getLongestPrefix("/d/b/c")); - assertEquals("/a/b/e", ROOT.getLongestPrefix("/a/b/e/dir3")); - assertEquals("/a/b/c/d", ROOT.getLongestPrefix("/a/b/c/d/p")); - - assertEquals("/a/b/c/f", ROOT.getLongestPrefix("/a/b/c/f/p")); - } - - @Test - public void testGetLongestPrefixPath() { - List> lpp = - ROOT.getLongestPrefixPath("/a/b/c/d/g/p"); - RadixNode lpn = lpp.get(lpp.size()-1); - assertEquals("g", lpn.getName()); - lpn.setValue(100); - - List> lpq = - ROOT.getLongestPrefixPath("/a/b/c/d/g/q"); - RadixNode lqn = lpp.get(lpq.size()-1); - System.out.print(RadixTree.radixPathToString(lpq)); - assertEquals(lpn, lqn); - assertEquals("g", lqn.getName()); - assertEquals(100, (int)lqn.getValue()); - - assertEquals("/a/", RadixTree.radixPathToString( - ROOT.getLongestPrefixPath("/a/g"))); - - } - - @Test - public void testGetLastNoeInPrefixPath() { - assertEquals(null, ROOT.getLastNodeInPrefixPath("/a/g")); - RadixNode ln = ROOT.getLastNodeInPrefixPath("/a/b/e/dir1"); - assertEquals("dir1", ln.getName()); - } - - @Test - public void testRemovePrefixPath() { - - // Remove, test and restore - // Remove partially overlapped path - ROOT.removePrefixPath("/a/b/c/d/g/h"); - assertEquals("/a/b/c", ROOT.getLongestPrefix("a/b/c/d")); - ROOT.insert("/a/b/c/d/g/h"); - - // Remove fully overlapped path - ROOT.removePrefixPath("/a/b/c/d"); - assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d")); - ROOT.insert("/a/b/c/d"); - - // Remove non existing path - ROOT.removePrefixPath("/d/a"); - assertEquals("/a/b/c/d", ROOT.getLongestPrefix("a/b/c/d")); - } - - -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java deleted file mode 100644 index a6acd30d77c..00000000000 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.util; -/** - * Unit tests of generic ozone utils. 
- */ diff --git a/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 62d72d26a83..00000000000 --- a/hadoop-ozone/csi/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml deleted file mode 100644 index 6e7b807e36a..00000000000 --- a/hadoop-ozone/csi/pom.xml +++ /dev/null @@ -1,188 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-csi - 0.5.0-SNAPSHOT - Apache Hadoop Ozone CSI service - Apache Hadoop Ozone CSI service - jar - - - 1.17.1 - - - - com.google.protobuf - protobuf-java-util - 3.5.1 - - - com.google.protobuf - protobuf-java - - - - - org.apache.hadoop - hadoop-hdds-config - - - org.apache.hadoop - hadoop-common - - - org.apache.hadoop - hadoop-hdfs - - - - - com.google.guava - guava - 26.0-android - - - com.google.protobuf - protobuf-java - 3.5.1 - - - io.grpc - grpc-netty - ${grpc.version} - - - io.netty - netty-transport-native-epoll - 4.1.30.Final - - - io.netty - netty-transport-native-unix-common - 4.1.30.Final - - - io.grpc - grpc-protobuf - ${grpc.version} - - - com.google.protobuf - protobuf-java - - - - - io.grpc - grpc-stub - ${grpc.version} - - - org.apache.hadoop - hadoop-ozone-client - - - com.google.guava - guava - - - com.google.protobuf - protobuf-java - - - io.netty - netty-all - - - io.netty - netty - - - - - - - - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - ${protobuf-maven-plugin.version} - true - - - com.google.protobuf:protoc:${protobuf-compile.version}:exe:${os.detected.classifier} - - ${basedir}/src/main/proto/ - - csi.proto - - target/generated-sources/java - false - - - - compile-protoc - - compile - test-compile - compile-custom - test-compile-custom - - - grpc-java - - io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} - - - - - - - maven-enforcer-plugin - - - depcheck - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - - diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java deleted file mode 100644 index 65b72502c67..00000000000 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/ControllerService.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.csi; - -import java.io.IOException; - -import org.apache.hadoop.ozone.client.OzoneClient; - -import csi.v1.ControllerGrpc.ControllerImplBase; -import csi.v1.Csi.CapacityRange; -import csi.v1.Csi.ControllerGetCapabilitiesRequest; -import csi.v1.Csi.ControllerGetCapabilitiesResponse; -import csi.v1.Csi.ControllerServiceCapability; -import csi.v1.Csi.ControllerServiceCapability.RPC; -import csi.v1.Csi.ControllerServiceCapability.RPC.Type; -import csi.v1.Csi.CreateVolumeRequest; -import csi.v1.Csi.CreateVolumeResponse; -import csi.v1.Csi.DeleteVolumeRequest; -import csi.v1.Csi.DeleteVolumeResponse; -import csi.v1.Csi.Volume; -import io.grpc.stub.StreamObserver; - -/** - * CSI controller service. - *
- * This service usually runs only once and responsible for the creation of - * the volume. - */ -public class ControllerService extends ControllerImplBase { - - private final String volumeOwner; - - private long defaultVolumeSize; - - private OzoneClient ozoneClient; - - public ControllerService(OzoneClient ozoneClient, long volumeSize, - String volumeOwner) { - this.volumeOwner = volumeOwner; - this.defaultVolumeSize = volumeSize; - this.ozoneClient = ozoneClient; - } - - @Override - public void createVolume(CreateVolumeRequest request, - StreamObserver responseObserver) { - try { - ozoneClient.getObjectStore() - .createS3Bucket(volumeOwner, request.getName()); - - long size = findSize(request.getCapacityRange()); - - CreateVolumeResponse response = CreateVolumeResponse.newBuilder() - .setVolume(Volume.newBuilder() - .setVolumeId(request.getName()) - .setCapacityBytes(size)) - .build(); - - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (IOException e) { - responseObserver.onError(e); - } - } - - private long findSize(CapacityRange capacityRange) { - if (capacityRange.getRequiredBytes() != 0) { - return capacityRange.getRequiredBytes(); - } else { - if (capacityRange.getLimitBytes() != 0) { - return Math.min(defaultVolumeSize, capacityRange.getLimitBytes()); - } else { - //~1 gig - return defaultVolumeSize; - } - } - } - - @Override - public void deleteVolume(DeleteVolumeRequest request, - StreamObserver responseObserver) { - try { - ozoneClient.getObjectStore().deleteS3Bucket(request.getVolumeId()); - - DeleteVolumeResponse response = DeleteVolumeResponse.newBuilder() - .build(); - - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (IOException e) { - responseObserver.onError(e); - } - } - - @Override - public void controllerGetCapabilities( - ControllerGetCapabilitiesRequest request, - StreamObserver responseObserver) { - ControllerGetCapabilitiesResponse response = - ControllerGetCapabilitiesResponse.newBuilder() - .addCapabilities( - ControllerServiceCapability.newBuilder().setRpc( - RPC.newBuilder().setType(Type.CREATE_DELETE_VOLUME))) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } -} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java deleted file mode 100644 index df5127c47b5..00000000000 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.csi; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.util.StringUtils; - -import io.grpc.Server; -import io.grpc.netty.NettyServerBuilder; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.epoll.EpollServerDomainSocketChannel; -import io.netty.channel.unix.DomainSocketAddress; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; - -/** - * CLI entrypoint of the CSI service daemon. - */ -@Command(name = "ozone csi", - hidden = true, description = "CSI service daemon.", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class CsiServer extends GenericCli implements Callable { - - private final static Logger LOG = LoggerFactory.getLogger(CsiServer.class); - - @Override - public Void call() throws Exception { - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); - CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class); - - OzoneClient rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration); - - EpollEventLoopGroup group = new EpollEventLoopGroup(); - - if (csiConfig.getVolumeOwner().isEmpty()) { - throw new IllegalArgumentException( - "ozone.csi.owner is not set. You should set this configuration " - + "variable to define which user should own all the created " - + "buckets."); - } - - Server server = - NettyServerBuilder - .forAddress(new DomainSocketAddress(csiConfig.getSocketPath())) - .channelType(EpollServerDomainSocketChannel.class) - .workerEventLoopGroup(group) - .bossEventLoopGroup(group) - .addService(new IdentitiyService()) - .addService(new ControllerService(rpcClient, - csiConfig.getDefaultVolumeSize(), csiConfig.getVolumeOwner())) - .addService(new NodeService(csiConfig)) - .build(); - - server.start(); - server.awaitTermination(); - rpcClient.close(); - return null; - } - - public static void main(String[] args) { - - StringUtils.startupShutdownMessage(CsiServer.class, args, LOG); - new CsiServer().run(args); - } - - /** - * Configuration settings specific to the CSI server. - */ - @ConfigGroup(prefix = "ozone.csi") - public static class CsiConfig { - private String socketPath; - private long defaultVolumeSize; - private String s3gAddress; - private String volumeOwner; - - public String getSocketPath() { - return socketPath; - } - - public String getVolumeOwner() { - return volumeOwner; - } - - @Config(key = "owner", - defaultValue = "", - description = - "This is the username which is used to create the requested " - + "storage. Used as a hadoop username and the generated ozone" - + " volume used to store all the buckets. 
WARNING: It can " - + "be a security hole to use CSI in a secure environments as " - + "ALL the users can request the mount of a specific bucket " - + "via the CSI interface.", - tags = ConfigTag.STORAGE) - public void setVolumeOwner(String volumeOwner) { - this.volumeOwner = volumeOwner; - } - - @Config(key = "socket", - defaultValue = "/var/lib/csi.sock", - description = - "The socket where all the CSI services will listen (file name).", - tags = ConfigTag.STORAGE) - public void setSocketPath(String socketPath) { - this.socketPath = socketPath; - } - - public long getDefaultVolumeSize() { - return defaultVolumeSize; - } - - @Config(key = "default-volume-size", - defaultValue = "1000000000", - description = - "The default size of the create volumes (if not specified).", - tags = ConfigTag.STORAGE) - public void setDefaultVolumeSize(long defaultVolumeSize) { - this.defaultVolumeSize = defaultVolumeSize; - } - - public String getS3gAddress() { - return s3gAddress; - } - - @Config(key = "s3g.address", - defaultValue = "http://localhost:9878", - description = - "The default size of the created volumes (if not specified in the" - + " requests).", - tags = ConfigTag.STORAGE) - public void setS3gAddress(String s3gAddress) { - this.s3gAddress = s3gAddress; - } - } -} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java deleted file mode 100644 index 5a0c4c8ba8a..00000000000 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.csi; - -import org.apache.hadoop.ozone.util.OzoneVersionInfo; - -import com.google.protobuf.BoolValue; -import csi.v1.Csi.GetPluginCapabilitiesResponse; -import csi.v1.Csi.GetPluginInfoResponse; -import csi.v1.Csi.PluginCapability; -import csi.v1.Csi.PluginCapability.Service; -import static csi.v1.Csi.PluginCapability.Service.Type.CONTROLLER_SERVICE; -import csi.v1.Csi.ProbeResponse; -import csi.v1.IdentityGrpc.IdentityImplBase; -import io.grpc.stub.StreamObserver; - -/** - * Implementation of the CSI identity service. - */ -public class IdentitiyService extends IdentityImplBase { - - @Override - public void getPluginInfo(csi.v1.Csi.GetPluginInfoRequest request, - StreamObserver responseObserver) { - GetPluginInfoResponse response = GetPluginInfoResponse.newBuilder() - .setName("org.apache.hadoop.ozone") - .setVendorVersion(OzoneVersionInfo.OZONE_VERSION_INFO.getVersion()) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getPluginCapabilities( - csi.v1.Csi.GetPluginCapabilitiesRequest request, - StreamObserver responseObserver) { - GetPluginCapabilitiesResponse response = - GetPluginCapabilitiesResponse.newBuilder() - .addCapabilities(PluginCapability.newBuilder().setService( - Service.newBuilder().setType(CONTROLLER_SERVICE))) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - - } - - @Override - public void probe(csi.v1.Csi.ProbeRequest request, - StreamObserver responseObserver) { - ProbeResponse response = ProbeResponse.newBuilder() - .setReady(BoolValue.of(true)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - - } -} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java deleted file mode 100644 index 8edda5923da..00000000000 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/NodeService.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.csi; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.nio.file.Files; -import java.nio.file.Paths; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.ozone.csi.CsiServer.CsiConfig; - -import csi.v1.Csi.NodeGetCapabilitiesRequest; -import csi.v1.Csi.NodeGetCapabilitiesResponse; -import csi.v1.Csi.NodeGetInfoRequest; -import csi.v1.Csi.NodeGetInfoResponse; -import csi.v1.Csi.NodePublishVolumeRequest; -import csi.v1.Csi.NodePublishVolumeResponse; -import csi.v1.Csi.NodeUnpublishVolumeRequest; -import csi.v1.Csi.NodeUnpublishVolumeResponse; -import csi.v1.NodeGrpc.NodeImplBase; -import io.grpc.stub.StreamObserver; -import org.apache.commons.io.IOUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implementation of the CSI node service. - */ -public class NodeService extends NodeImplBase { - - private static final Logger LOG = LoggerFactory.getLogger(NodeService.class); - - private String s3Endpoint; - - public NodeService(CsiConfig configuration) { - this.s3Endpoint = configuration.getS3gAddress(); - - } - - @Override - public void nodePublishVolume(NodePublishVolumeRequest request, - StreamObserver responseObserver) { - - try { - Files.createDirectories(Paths.get(request.getTargetPath())); - String mountCommand = - String.format("goofys --endpoint %s %s %s", - s3Endpoint, - request.getVolumeId(), - request.getTargetPath()); - LOG.info("Executing {}", mountCommand); - - executeCommand(mountCommand); - - responseObserver.onNext(NodePublishVolumeResponse.newBuilder() - .build()); - responseObserver.onCompleted(); - - } catch (Exception e) { - responseObserver.onError(e); - } - - } - - private void executeCommand(String mountCommand) - throws IOException, InterruptedException { - Process exec = Runtime.getRuntime().exec(mountCommand); - exec.waitFor(10, TimeUnit.SECONDS); - - LOG.info("Command is executed with stdout: {}, stderr: {}", - IOUtils.toString(exec.getInputStream(), "UTF-8"), - IOUtils.toString(exec.getErrorStream(), "UTF-8")); - if (exec.exitValue() != 0) { - throw new RuntimeException(String - .format("Return code of the command %s was %d", mountCommand, - exec.exitValue())); - } - } - - @Override - public void nodeUnpublishVolume(NodeUnpublishVolumeRequest request, - StreamObserver responseObserver) { - String umountCommand = - String.format("fusermount -u %s", request.getTargetPath()); - LOG.info("Executing {}", umountCommand); - - try { - executeCommand(umountCommand); - - responseObserver.onNext(NodeUnpublishVolumeResponse.newBuilder() - .build()); - responseObserver.onCompleted(); - - } catch (Exception e) { - responseObserver.onError(e); - } - - } - - @Override - public void nodeGetCapabilities(NodeGetCapabilitiesRequest request, - StreamObserver responseObserver) { - NodeGetCapabilitiesResponse response = - NodeGetCapabilitiesResponse.newBuilder() - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void nodeGetInfo(NodeGetInfoRequest request, - StreamObserver responseObserver) { - NodeGetInfoResponse response = null; - try { - response = 
NodeGetInfoResponse.newBuilder() - .setNodeId(InetAddress.getLocalHost().getHostName()) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (UnknownHostException e) { - responseObserver.onError(e); - } - - } -} diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java deleted file mode 100644 index 1b558dd6f40..00000000000 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.csi; - -/** - * Container Storage Interface server implementation for Ozone. - */ \ No newline at end of file diff --git a/hadoop-ozone/csi/src/main/proto/csi.proto b/hadoop-ozone/csi/src/main/proto/csi.proto deleted file mode 100644 index 3bd53a0758b..00000000000 --- a/hadoop-ozone/csi/src/main/proto/csi.proto +++ /dev/null @@ -1,1323 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Code generated by make; DO NOT EDIT. -syntax = "proto3"; -package csi.v1; - -import "google/protobuf/descriptor.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -option go_package = "csi"; - -extend google.protobuf.FieldOptions { - // Indicates that a field MAY contain information that is sensitive - // and MUST be treated as such (e.g. not logged). 
- bool csi_secret = 1059; -} -service Identity { - rpc GetPluginInfo(GetPluginInfoRequest) - returns (GetPluginInfoResponse) {} - - rpc GetPluginCapabilities(GetPluginCapabilitiesRequest) - returns (GetPluginCapabilitiesResponse) {} - - rpc Probe (ProbeRequest) - returns (ProbeResponse) {} -} - -service Controller { - rpc CreateVolume (CreateVolumeRequest) - returns (CreateVolumeResponse) {} - - rpc DeleteVolume (DeleteVolumeRequest) - returns (DeleteVolumeResponse) {} - - rpc ControllerPublishVolume (ControllerPublishVolumeRequest) - returns (ControllerPublishVolumeResponse) {} - - rpc ControllerUnpublishVolume (ControllerUnpublishVolumeRequest) - returns (ControllerUnpublishVolumeResponse) {} - - rpc ValidateVolumeCapabilities (ValidateVolumeCapabilitiesRequest) - returns (ValidateVolumeCapabilitiesResponse) {} - - rpc ListVolumes (ListVolumesRequest) - returns (ListVolumesResponse) {} - - rpc GetCapacity (GetCapacityRequest) - returns (GetCapacityResponse) {} - - rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) - returns (ControllerGetCapabilitiesResponse) {} - - rpc CreateSnapshot (CreateSnapshotRequest) - returns (CreateSnapshotResponse) {} - - rpc DeleteSnapshot (DeleteSnapshotRequest) - returns (DeleteSnapshotResponse) {} - - rpc ListSnapshots (ListSnapshotsRequest) - returns (ListSnapshotsResponse) {} - - rpc ControllerExpandVolume (ControllerExpandVolumeRequest) - returns (ControllerExpandVolumeResponse) {} -} - -service Node { - rpc NodeStageVolume (NodeStageVolumeRequest) - returns (NodeStageVolumeResponse) {} - - rpc NodeUnstageVolume (NodeUnstageVolumeRequest) - returns (NodeUnstageVolumeResponse) {} - - rpc NodePublishVolume (NodePublishVolumeRequest) - returns (NodePublishVolumeResponse) {} - - rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest) - returns (NodeUnpublishVolumeResponse) {} - - rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest) - returns (NodeGetVolumeStatsResponse) {} - - - rpc NodeExpandVolume(NodeExpandVolumeRequest) - returns (NodeExpandVolumeResponse) {} - - - rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) - returns (NodeGetCapabilitiesResponse) {} - - rpc NodeGetInfo (NodeGetInfoRequest) - returns (NodeGetInfoResponse) {} -} -message GetPluginInfoRequest { - // Intentionally empty. -} - -message GetPluginInfoResponse { - // The name MUST follow domain name notation format - // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD - // include the plugin's host company name and the plugin name, - // to minimize the possibility of collisions. It MUST be 63 - // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. This field is REQUIRED. - string name = 1; - - // This field is REQUIRED. Value of this field is opaque to the CO. - string vendor_version = 2; - - // This field is OPTIONAL. Values are opaque to the CO. - map manifest = 3; -} -message GetPluginCapabilitiesRequest { - // Intentionally empty. -} - -message GetPluginCapabilitiesResponse { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. - repeated PluginCapability capabilities = 1; -} - -// Specifies a capability of the plugin. -message PluginCapability { - message Service { - enum Type { - UNKNOWN = 0; - // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for - // the ControllerService. Plugins SHOULD provide this capability. 
- // In rare cases certain plugins MAY wish to omit the - // ControllerService entirely from their implementation, but such - // SHOULD NOT be the common case. - // The presence of this capability determines whether the CO will - // attempt to invoke the REQUIRED ControllerService RPCs, as well - // as specific RPCs as indicated by ControllerGetCapabilities. - CONTROLLER_SERVICE = 1; - - // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for - // this plugin MAY NOT be equally accessible by all nodes in the - // cluster. The CO MUST use the topology information returned by - // CreateVolumeRequest along with the topology information - // returned by NodeGetInfo to ensure that a given volume is - // accessible from a given node when scheduling workloads. - VOLUME_ACCESSIBILITY_CONSTRAINTS = 2; - } - Type type = 1; - } - - message VolumeExpansion { - enum Type { - UNKNOWN = 0; - - // ONLINE indicates that volumes may be expanded when published to - // a node. When a Plugin implements this capability it MUST - // implement either the EXPAND_VOLUME controller capability or the - // EXPAND_VOLUME node capability or both. When a plugin supports - // ONLINE volume expansion and also has the EXPAND_VOLUME - // controller capability then the plugin MUST support expansion of - // volumes currently published and available on a node. When a - // plugin supports ONLINE volume expansion and also has the - // EXPAND_VOLUME node capability then the plugin MAY support - // expansion of node-published volume via NodeExpandVolume. - // - // Example 1: Given a shared filesystem volume (e.g. GlusterFs), - // the Plugin may set the ONLINE volume expansion capability and - // implement ControllerExpandVolume but not NodeExpandVolume. - // - // Example 2: Given a block storage volume type (e.g. EBS), the - // Plugin may set the ONLINE volume expansion capability and - // implement both ControllerExpandVolume and NodeExpandVolume. - // - // Example 3: Given a Plugin that supports volume expansion only - // upon a node, the Plugin may set the ONLINE volume - // expansion capability and implement NodeExpandVolume but not - // ControllerExpandVolume. - ONLINE = 1; - - // OFFLINE indicates that volumes currently published and - // available on a node SHALL NOT be expanded via - // ControllerExpandVolume. When a plugin supports OFFLINE volume - // expansion it MUST implement either the EXPAND_VOLUME controller - // capability or both the EXPAND_VOLUME controller capability and - // the EXPAND_VOLUME node capability. - // - // Example 1: Given a block storage volume type (e.g. Azure Disk) - // that does not support expansion of "node-attached" (i.e. - // controller-published) volumes, the Plugin may indicate - // OFFLINE volume expansion support and implement both - // ControllerExpandVolume and NodeExpandVolume. - OFFLINE = 2; - } - } - - oneof type { - // Service that the plugin supports. - Service service = 1; - VolumeExpansion volume_expansion = 2; - } -} -message ProbeRequest { - // Intentionally empty. -} - -message ProbeResponse { - // Readiness allows a plugin to report its initialization status back - // to the CO. Initialization for some plugins MAY be time consuming - // and it is important for a CO to distinguish between the following - // cases: - // - // 1) The plugin is in an unhealthy state and MAY need restarting. In - // this case a gRPC error code SHALL be returned. - // 2) The plugin is still initializing, but is otherwise perfectly - // healthy. 
In this case a successful response SHALL be returned - // with a readiness value of `false`. Calls to the plugin's - // Controller and/or Node services MAY fail due to an incomplete - // initialization state. - // 3) The plugin has finished initializing and is ready to service - // calls to its Controller and/or Node services. A successful - // response is returned with a readiness value of `true`. - // - // This field is OPTIONAL. If not present, the caller SHALL assume - // that the plugin is in a ready state and is accepting calls to its - // Controller and/or Node services (according to the plugin's reported - // capabilities). - .google.protobuf.BoolValue ready = 1; -} -message CreateVolumeRequest { - // The suggested name for the storage space. This field is REQUIRED. - // It serves two purposes: - // 1) Idempotency - This name is generated by the CO to achieve - // idempotency. The Plugin SHOULD ensure that multiple - // `CreateVolume` calls for the same name do not result in more - // than one piece of storage provisioned corresponding to that - // name. If a Plugin is unable to enforce idempotency, the CO's - // error recovery logic could result in multiple (unused) volumes - // being provisioned. - // In the case of error, the CO MUST handle the gRPC error codes - // per the recovery behavior defined in the "CreateVolume Errors" - // section below. - // The CO is responsible for cleaning up volumes it provisioned - // that it no longer needs. If the CO is uncertain whether a volume - // was provisioned or not when a `CreateVolume` call fails, the CO - // MAY call `CreateVolume` again, with the same name, to ensure the - // volume exists and to retrieve the volume's `volume_id` (unless - // otherwise prohibited by "CreateVolume Errors"). - // 2) Suggested name - Some storage systems allow callers to specify - // an identifier by which to refer to the newly provisioned - // storage. If a storage system supports this, it can optionally - // use this name as the identifier for the new volume. - // Any Unicode string that conforms to the length limit is allowed - // except those containing the following banned characters: - // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. - // (These are control characters other than commonly used whitespace.) - string name = 1; - - // This field is OPTIONAL. This allows the CO to specify the capacity - // requirement of the volume to be provisioned. If not specified, the - // Plugin MAY choose an implementation-defined capacity range. If - // specified it MUST always be honored, even when creating volumes - // from a source; which MAY force some backends to internally extend - // the volume after creating it. - CapacityRange capacity_range = 2; - - // The capabilities that the provisioned volume MUST have. SP MUST - // provision a volume that will satisfy ALL of the capabilities - // specified in this list. Otherwise SP MUST return the appropriate - // gRPC error code. - // The Plugin MUST assume that the CO MAY use the provisioned volume - // with ANY of the capabilities specified in this list. - // For example, a CO MAY specify two volume capabilities: one with - // access mode SINGLE_NODE_WRITER and another with access mode - // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the - // provisioned volume can be used in either mode. - // This also enables the CO to do early validation: If ANY of the - // specified volume capabilities are not supported by the SP, the call - // MUST return the appropriate gRPC error code. 
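To make the capacity_range and volume_capabilities contract described above concrete, a CO-side caller could assemble such a request roughly as follows. This is a minimal, illustrative sketch that assumes the standard protobuf-java builders generated from this csi.proto (the same generated csi.v1.Csi classes the deleted CSI services import); the volume name and sizes are made-up values.

  import csi.v1.Csi;

  final class CreateVolumeRequestSketch {
    /** Request an exactly-1-GiB volume, mountable read/write on a single node. */
    static Csi.CreateVolumeRequest exactOneGiBSingleWriter() {
      return Csi.CreateVolumeRequest.newBuilder()
          .setName("pvc-example-0001")                    // illustrative idempotency key chosen by the CO
          .setCapacityRange(Csi.CapacityRange.newBuilder()
              .setRequiredBytes(1L << 30)                 // exact size: required_bytes == limit_bytes
              .setLimitBytes(1L << 30))
          .addVolumeCapabilities(Csi.VolumeCapability.newBuilder()
              .setMount(Csi.VolumeCapability.MountVolume.getDefaultInstance())
              .setAccessMode(Csi.VolumeCapability.AccessMode.newBuilder()
                  .setMode(Csi.VolumeCapability.AccessMode.Mode.SINGLE_NODE_WRITER)))
          .build();
    }
  }

A CO that also needs, say, MULTI_NODE_READER_ONLY access would add a second capability to the same request, and the SP would then have to provision a volume satisfying both.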
- // This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 3; - - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - map parameters = 4; - - // Secrets required by plugin to complete volume creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 5 [(csi_secret) = true]; - - // If specified, the new volume will be pre-populated with data from - // this source. This field is OPTIONAL. - VolumeContentSource volume_content_source = 6; - - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume MUST be accessible from. - // An SP SHALL advertise the requirements for topological - // accessibility information in documentation. COs SHALL only specify - // topological accessibility information supported by the SP. - // This field is OPTIONAL. - // This field SHALL NOT be specified unless the SP has the - // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. - // If this field is not specified and the SP has the - // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY - // choose where the provisioned volume is accessible from. - TopologyRequirement accessibility_requirements = 7; -} - -// Specifies what source the volume will be created from. One of the -// type fields MUST be specified. -message VolumeContentSource { - message SnapshotSource { - // Contains identity information for the existing source snapshot. - // This field is REQUIRED. Plugin is REQUIRED to support creating - // volume from snapshot if it supports the capability - // CREATE_DELETE_SNAPSHOT. - string snapshot_id = 1; - } - - message VolumeSource { - // Contains identity information for the existing source volume. - // This field is REQUIRED. Plugins reporting CLONE_VOLUME - // capability MUST support creating a volume from another volume. - string volume_id = 1; - } - - oneof type { - SnapshotSource snapshot = 1; - VolumeSource volume = 2; - } -} - -message CreateVolumeResponse { - // Contains all attributes of the newly created volume that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the volume. This field is REQUIRED. - Volume volume = 1; -} - -// Specify a capability of a volume. -message VolumeCapability { - // Indicate that the volume will be accessed via the block device API. - message BlockVolume { - // Intentionally empty, for now. - } - - // Indicate that the volume will be accessed via the filesystem API. - message MountVolume { - // The filesystem type. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - string fs_type = 1; - - // The mount options that can be used for the volume. This field is - // OPTIONAL. `mount_flags` MAY contain sensitive information. - // Therefore, the CO and the Plugin MUST NOT leak this information - // to untrusted entities. The total size of this repeated field - // SHALL NOT exceed 4 KiB. - repeated string mount_flags = 2; - } - - // Specify how a volume can be accessed. - message AccessMode { - enum Mode { - UNKNOWN = 0; - - // Can only be published once as read/write on a single node, at - // any given time. - SINGLE_NODE_WRITER = 1; - - // Can only be published once as readonly on a single node, at - // any given time. 
- SINGLE_NODE_READER_ONLY = 2; - - // Can be published as readonly at multiple nodes simultaneously. - MULTI_NODE_READER_ONLY = 3; - - // Can be published at multiple nodes simultaneously. Only one of - // the node can be used as read/write. The rest will be readonly. - MULTI_NODE_SINGLE_WRITER = 4; - - // Can be published as read/write at multiple nodes - // simultaneously. - MULTI_NODE_MULTI_WRITER = 5; - } - - // This field is REQUIRED. - Mode mode = 1; - } - - // Specifies what API the volume will be accessed using. One of the - // following fields MUST be specified. - oneof access_type { - BlockVolume block = 1; - MountVolume mount = 2; - } - - // This is a REQUIRED field. - AccessMode access_mode = 3; -} - -// The capacity of the storage space in bytes. To specify an exact size, -// `required_bytes` and `limit_bytes` SHALL be set to the same value. At -// least one of the these fields MUST be specified. -message CapacityRange { - // Volume MUST be at least this big. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - int64 required_bytes = 1; - - // Volume MUST not be bigger than this. This field is OPTIONAL. - // A value of 0 is equal to an unspecified field value. - // The value of this field MUST NOT be negative. - int64 limit_bytes = 2; -} - -// Information about a specific volume. -message Volume { - // The capacity of the volume in bytes. This field is OPTIONAL. If not - // set (value of 0), it indicates that the capacity of the volume is - // unknown (e.g., NFS share). - // The value of this field MUST NOT be negative. - int64 capacity_bytes = 1; - - // The identifier for this volume, generated by the plugin. - // This field is REQUIRED. - // This field MUST contain enough information to uniquely identify - // this specific volume vs all other volumes supported by this plugin. - // This field SHALL be used by the CO in subsequent calls to refer to - // this volume. - // The SP is NOT responsible for global uniqueness of volume_id across - // multiple SPs. - string volume_id = 2; - - // Opaque static properties of the volume. SP MAY use this field to - // ensure subsequent volume validation and publishing calls have - // contextual information. - // The contents of this field SHALL be opaque to a CO. - // The contents of this field SHALL NOT be mutable. - // The contents of this field SHALL be safe for the CO to cache. - // The contents of this field SHOULD NOT contain sensitive - // information. - // The contents of this field SHOULD NOT be used for uniquely - // identifying a volume. The `volume_id` alone SHOULD be sufficient to - // identify the volume. - // A volume uniquely identified by `volume_id` SHALL always report the - // same volume_context. - // This field is OPTIONAL and when present MUST be passed to volume - // validation and publishing calls. - map volume_context = 3; - - // If specified, indicates that the volume is not empty and is - // pre-populated with data from the specified source. - // This field is OPTIONAL. - VolumeContentSource content_source = 4; - - // Specifies where (regions, zones, racks, etc.) the provisioned - // volume is accessible from. - // A plugin that returns this field MUST also set the - // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. - // An SP MAY specify multiple topologies to indicate the volume is - // accessible from multiple locations. 
- // COs MAY use this information along with the topology information - // returned by NodeGetInfo to ensure that a given volume is accessible - // from a given node when scheduling workloads. - // This field is OPTIONAL. If it is not specified, the CO MAY assume - // the volume is equally accessible from all nodes in the cluster and - // MAY schedule workloads referencing the volume on any available - // node. - // - // Example 1: - // accessible_topology = {"region": "R1", "zone": "Z2"} - // Indicates a volume accessible only from the "region" "R1" and the - // "zone" "Z2". - // - // Example 2: - // accessible_topology = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" - // in the "region" "R1". - repeated Topology accessible_topology = 5; -} - -message TopologyRequirement { - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1. - // If x==n, then the SP MUST make the provisioned volume available to - // all topologies from the list of requisite topologies. If it is - // unable to do so, the SP MUST fail the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and both "zone" "Z2" and "zone" "Z3". - // - // If x<n, then the SP SHALL choose x unique topologies from the list - // of requisite topologies. If it is unable to do so, the SP MUST fail - // the CreateVolume call. - // - // If x>n, then the SP MUST make the provisioned volume available from - // all topologies from the list of requisite topologies and MAY choose - // the remaining x-n unique topologies from the list of all possible - // topologies. If it is unable to do so, the SP MUST fail the - // CreateVolume call. - // For example, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2" and the SP may select the second zone - // independently, e.g. "R1/Z4". - repeated Topology requisite = 1; - - // Specifies the list of topologies the CO would prefer the volume to - // be provisioned in. - // - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // An SP MUST attempt to make the provisioned volume available using - // the preferred topologies in order from first to last. - // - // If requisite is specified, all topologies in preferred list MUST - // also be present in the list of requisite topologies. - // - // If the SP is unable to make the provisioned volume available - // from any of the preferred topologies, the SP MAY choose a topology - // from the list of requisite topologies.
- // If the list of requisite topologies is not specified, then the SP - // MAY choose from the list of all possible topologies. - // If the list of requisite topologies is specified and the SP is - // unable to to make the provisioned volume available from any of the - // requisite topologies it MUST fail the CreateVolume call. - // - // Example 1: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // preferred = - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // available from "zone" "Z3" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. - // - // Example 2: - // Given a volume should be accessible from a single zone, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z2"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from "zone" "Z4" in the "region" "R1" and fall back to - // "zone" "Z2" in the "region" "R1" if that is not possible. If that - // is not possible, the SP may choose between either the "zone" - // "Z3" or "Z5" in the "region" "R1". - // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - repeated Topology preferred = 2; -} - -// Topology is a map of topological domains to topological segments. -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is REQUIRED. The prefix is OPTIONAL. -// The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". 
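Following spec Example 1 above, the requisite/preferred lists can be built like this; again a rough sketch assuming the generated csi.v1.Csi builders, with illustrative region/zone values.

  import csi.v1.Csi;

  final class TopologyRequirementSketch {
    /** The volume must land in R1/Z2 or R1/Z3; the SP should try Z3 first. */
    static Csi.TopologyRequirement preferZ3AllowZ2() {
      Csi.Topology z2 = Csi.Topology.newBuilder()
          .putSegments("region", "R1").putSegments("zone", "Z2").build();
      Csi.Topology z3 = Csi.Topology.newBuilder()
          .putSegments("region", "R1").putSegments("zone", "Z3").build();
      return Csi.TopologyRequirement.newBuilder()
          .addRequisite(z2).addRequisite(z3)   // volume MUST be accessible from at least one of these
          .addPreferred(z3)                    // SP SHOULD attempt this topology first
          .build();
    }
  }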
-// The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -message Topology { - map segments = 1; -} -message DeleteVolumeRequest { - // The ID of the volume to be deprovisioned. - // This field is REQUIRED. - string volume_id = 1; - - // Secrets required by plugin to complete volume deletion request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 2 [(csi_secret) = true]; -} - -message DeleteVolumeResponse { - // Intentionally empty. -} -message ControllerPublishVolumeRequest { - // The ID of the volume to be used on a node. - // This field is REQUIRED. - string volume_id = 1; - - // The ID of the node. This field is REQUIRED. The CO SHALL set this - // field to match the node ID returned by `NodeGetInfo`. - string node_id = 2; - - // Volume capability describing how the CO intends to use this volume. - // SP MUST ensure the CO can use the published volume as described. - // Otherwise SP MUST return the appropriate gRPC error code. - // This is a REQUIRED field. - VolumeCapability volume_capability = 3; - - // Indicates SP MUST publish the volume in readonly mode. - // CO MUST set this field to false if SP does not have the - // PUBLISH_READONLY controller capability. - // This is a REQUIRED field. - bool readonly = 4; - - // Secrets required by plugin to complete controller publish volume - // request. This field is OPTIONAL. Refer to the - // `Secrets Requirements` section on how to use this field. - map secrets = 5 [(csi_secret) = true]; - - // Volume context as returned by CO in CreateVolumeRequest. This field - // is OPTIONAL and MUST match the volume_context of the volume - // identified by `volume_id`. - map volume_context = 6; -} - -message ControllerPublishVolumeResponse { - // Opaque static publish properties of the volume. SP MAY use this - // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` - // calls calls have contextual information. - // The contents of this field SHALL be opaque to a CO. - // The contents of this field SHALL NOT be mutable. - // The contents of this field SHALL be safe for the CO to cache. - // The contents of this field SHOULD NOT contain sensitive - // information. - // The contents of this field SHOULD NOT be used for uniquely - // identifying a volume. The `volume_id` alone SHOULD be sufficient to - // identify the volume. - // This field is OPTIONAL and when present MUST be passed to - // subsequent `NodeStageVolume` or `NodePublishVolume` calls - map publish_context = 1; -} -message ControllerUnpublishVolumeRequest { - // The ID of the volume. This field is REQUIRED. - string volume_id = 1; - - // The ID of the node. 
This field is OPTIONAL. The CO SHOULD set this - // field to match the node ID returned by `NodeGetInfo` or leave it - // unset. If the value is set, the SP MUST unpublish the volume from - // the specified node. If the value is unset, the SP MUST unpublish - // the volume from all nodes it is published to. - string node_id = 2; - - // Secrets required by plugin to complete controller unpublish volume - // request. This SHOULD be the same secrets passed to the - // ControllerPublishVolume call for the specified volume. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 3 [(csi_secret) = true]; -} - -message ControllerUnpublishVolumeResponse { - // Intentionally empty. -} -message ValidateVolumeCapabilitiesRequest { - // The ID of the volume to check. This field is REQUIRED. - string volume_id = 1; - - // Volume context as returned by CO in CreateVolumeRequest. This field - // is OPTIONAL and MUST match the volume_context of the volume - // identified by `volume_id`. - map volume_context = 2; - - // The capabilities that the CO wants to check for the volume. This - // call SHALL return "confirmed" only if all the volume capabilities - // specified below are supported. This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 3; - - // See CreateVolumeRequest.parameters. - // This field is OPTIONAL. - map parameters = 4; - - // Secrets required by plugin to complete volume validation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 5 [(csi_secret) = true]; -} - -message ValidateVolumeCapabilitiesResponse { - message Confirmed { - // Volume context validated by the plugin. - // This field is OPTIONAL. - map volume_context = 1; - - // Volume capabilities supported by the plugin. - // This field is REQUIRED. - repeated VolumeCapability volume_capabilities = 2; - - // The volume creation parameters validated by the plugin. - // This field is OPTIONAL. - map parameters = 3; - } - - // Confirmed indicates to the CO the set of capabilities that the - // plugin has validated. This field SHALL only be set to a non-empty - // value for successful validation responses. - // For successful validation responses, the CO SHALL compare the - // fields of this message to the originally requested capabilities in - // order to guard against an older plugin reporting "valid" for newer - // capability fields that it does not yet understand. - // This field is OPTIONAL. - Confirmed confirmed = 1; - - // Message to the CO if `confirmed` above is empty. This field is - // OPTIONAL. - // An empty string is equal to an unspecified field value. - string message = 2; -} -message ListVolumesRequest { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListVolumes` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - int32 max_entries = 1; - - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListVolumes` call to get the - // next page of entries. This field is OPTIONAL. 
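Combined with the next_token field described just below, these paging fields are typically driven in a loop. A hedged sketch, assuming a grpc-java blocking stub generated from the Controller service above (analogous to the IdentityGrpc and NodeGrpc stubs used by the deleted server); the page size of 100 is arbitrary.

  import csi.v1.ControllerGrpc;
  import csi.v1.Csi;

  final class ListVolumesPagingSketch {
    /** Prints every volume id, fetching at most 100 entries per call. */
    static void listAll(ControllerGrpc.ControllerBlockingStub controller) {
      String token = "";                                   // empty string == start from the beginning
      do {
        Csi.ListVolumesResponse page = controller.listVolumes(
            Csi.ListVolumesRequest.newBuilder()
                .setMaxEntries(100)
                .setStartingToken(token)
                .build());
        for (Csi.ListVolumesResponse.Entry entry : page.getEntriesList()) {
          System.out.println(entry.getVolume().getVolumeId());
        }
        token = page.getNextToken();                       // empty when there are no more pages
      } while (!token.isEmpty());
    }
  }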
- // An empty string is equal to an unspecified field value. - string starting_token = 2; -} - -message ListVolumesResponse { - message Entry { - Volume volume = 1; - } - - repeated Entry entries = 1; - - // This token allows you to get the next page of entries for - // `ListVolumes` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListVolumes` request. This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. - string next_token = 2; -} -message GetCapacityRequest { - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes that satisfy ALL of the - // specified `volume_capabilities`. These are the same - // `volume_capabilities` the CO will use in `CreateVolumeRequest`. - // This field is OPTIONAL. - repeated VolumeCapability volume_capabilities = 1; - - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes with the given Plugin - // specific `parameters`. These are the same `parameters` the CO will - // use in `CreateVolumeRequest`. This field is OPTIONAL. - map parameters = 2; - - // If specified, the Plugin SHALL report the capacity of the storage - // that can be used to provision volumes that in the specified - // `accessible_topology`. This is the same as the - // `accessible_topology` the CO returns in a `CreateVolumeResponse`. - // This field is OPTIONAL. This field SHALL NOT be set unless the - // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. - Topology accessible_topology = 3; -} - -message GetCapacityResponse { - // The available capacity, in bytes, of the storage that can be used - // to provision volumes. If `volume_capabilities` or `parameters` is - // specified in the request, the Plugin SHALL take those into - // consideration when calculating the available capacity of the - // storage. This field is REQUIRED. - // The value of this field MUST NOT be negative. - int64 available_capacity = 1; -} -message ControllerGetCapabilitiesRequest { - // Intentionally empty. -} - -message ControllerGetCapabilitiesResponse { - // All the capabilities that the controller service supports. This - // field is OPTIONAL. - repeated ControllerServiceCapability capabilities = 1; -} - -// Specifies a capability of the controller service. -message ControllerServiceCapability { - message RPC { - enum Type { - UNKNOWN = 0; - CREATE_DELETE_VOLUME = 1; - PUBLISH_UNPUBLISH_VOLUME = 2; - LIST_VOLUMES = 3; - GET_CAPACITY = 4; - // Currently the only way to consume a snapshot is to create - // a volume from it. Therefore plugins supporting - // CREATE_DELETE_SNAPSHOT MUST support creating volume from - // snapshot. - CREATE_DELETE_SNAPSHOT = 5; - LIST_SNAPSHOTS = 6; - - // Plugins supporting volume cloning at the storage level MAY - // report this capability. The source volume MUST be managed by - // the same plugin. Not all volume sources and parameters - // combinations MAY work. - CLONE_VOLUME = 7; - - // Indicates the SP supports ControllerPublishVolume.readonly - // field. - PUBLISH_READONLY = 8; - - // See VolumeExpansion for details. - EXPAND_VOLUME = 9; - } - - Type type = 1; - } - - oneof type { - // RPC that the controller supports. - RPC rpc = 1; - } -} -message CreateSnapshotRequest { - // The ID of the source volume to be snapshotted. - // This field is REQUIRED. 
- string source_volume_id = 1; - - // The suggested name for the snapshot. This field is REQUIRED for - // idempotency. - // Any Unicode string that conforms to the length limit is allowed - // except those containing the following banned characters: - // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. - // (These are control characters other than commonly used whitespace.) - string name = 2; - - // Secrets required by plugin to complete snapshot creation request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 3 [(csi_secret) = true]; - - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. - // Use cases for opaque parameters: - // - Specify a policy to automatically clean up the snapshot. - // - Specify an expiration date for the snapshot. - // - Specify whether the snapshot is readonly or read/write. - // - Specify if the snapshot should be replicated to some place. - // - Specify primary or secondary for replication systems that - // support snapshotting only on primary. - map parameters = 4; -} - -message CreateSnapshotResponse { - // Contains all attributes of the newly created snapshot that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the snapshot. This field is REQUIRED. - Snapshot snapshot = 1; -} - -// Information about a specific snapshot. -message Snapshot { - // This is the complete size of the snapshot in bytes. The purpose of - // this field is to give CO guidance on how much space is needed to - // create a volume from this snapshot. The size of the volume MUST NOT - // be less than the size of the source snapshot. This field is - // OPTIONAL. If this field is not set, it indicates that this size is - // unknown. The value of this field MUST NOT be negative and a size of - // zero means it is unspecified. - int64 size_bytes = 1; - - // The identifier for this snapshot, generated by the plugin. - // This field is REQUIRED. - // This field MUST contain enough information to uniquely identify - // this specific snapshot vs all other snapshots supported by this - // plugin. - // This field SHALL be used by the CO in subsequent calls to refer to - // this snapshot. - // The SP is NOT responsible for global uniqueness of snapshot_id - // across multiple SPs. - string snapshot_id = 2; - - // Identity information for the source volume. Note that creating a - // snapshot from a snapshot is not supported here so the source has to - // be a volume. This field is REQUIRED. - string source_volume_id = 3; - - // Timestamp when the point-in-time snapshot is taken on the storage - // system. This field is REQUIRED. - .google.protobuf.Timestamp creation_time = 4; - - // Indicates if a snapshot is ready to use as a - // `volume_content_source` in a `CreateVolumeRequest`. The default - // value is false. This field is REQUIRED. - bool ready_to_use = 5; -} -message DeleteSnapshotRequest { - // The ID of the snapshot to be deleted. - // This field is REQUIRED. - string snapshot_id = 1; - - // Secrets required by plugin to complete snapshot deletion request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. 
- map secrets = 2 [(csi_secret) = true]; -} - -message DeleteSnapshotResponse {} -// List all snapshots on the storage system regardless of how they were -// created. -message ListSnapshotsRequest { - // If specified (non-zero value), the Plugin MUST NOT return more - // entries than this number in the response. If the actual number of - // entries is more than this number, the Plugin MUST set `next_token` - // in the response which can be used to get the next page of entries - // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If - // not specified (zero value), it means there is no restriction on the - // number of entries that can be returned. - // The value of this field MUST NOT be negative. - int32 max_entries = 1; - - // A token to specify where to start paginating. Set this field to - // `next_token` returned by a previous `ListSnapshots` call to get the - // next page of entries. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - string starting_token = 2; - - // Identity information for the source volume. This field is OPTIONAL. - // It can be used to list snapshots by volume. - string source_volume_id = 3; - - // Identity information for a specific snapshot. This field is - // OPTIONAL. It can be used to list only a specific snapshot. - // ListSnapshots will return with current snapshot information - // and will not block if the snapshot is being processed after - // it is cut. - string snapshot_id = 4; -} - -message ListSnapshotsResponse { - message Entry { - Snapshot snapshot = 1; - } - - repeated Entry entries = 1; - - // This token allows you to get the next page of entries for - // `ListSnapshots` request. If the number of entries is larger than - // `max_entries`, use the `next_token` as a value for the - // `starting_token` field in the next `ListSnapshots` request. This - // field is OPTIONAL. - // An empty string is equal to an unspecified field value. - string next_token = 2; -} -message ControllerExpandVolumeRequest { - // The ID of the volume to expand. This field is REQUIRED. - string volume_id = 1; - - // This allows CO to specify the capacity requirements of the volume - // after expansion. This field is REQUIRED. - CapacityRange capacity_range = 2; - - // Secrets required by the plugin for expanding the volume. - // This field is OPTIONAL. - map secrets = 3 [(csi_secret) = true]; -} - -message ControllerExpandVolumeResponse { - // Capacity of volume after expansion. This field is REQUIRED. - int64 capacity_bytes = 1; - - // Whether node expansion is required for the volume. When true - // the CO MUST make NodeExpandVolume RPC call on the node. This field - // is REQUIRED. - bool node_expansion_required = 2; -} -message NodeStageVolumeRequest { - // The ID of the volume to publish. This field is REQUIRED. - string volume_id = 1; - - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - map publish_context = 2; - - // The path to which the volume MAY be staged. It MUST be an - // absolute path in the root filesystem of the process serving this - // request, and MUST be a directory. The CO SHALL ensure that there - // is only one `staging_target_path` per volume. 
The CO SHALL ensure - // that the path is directory and that the process serving the - // request has `read` and `write` permission to that directory. The - // CO SHALL be responsible for creating the directory if it does not - // exist. - // This is a REQUIRED field. - string staging_target_path = 3; - - // Volume capability describing how the CO intends to use this volume. - // SP MUST ensure the CO can use the staged volume as described. - // Otherwise SP MUST return the appropriate gRPC error code. - // This is a REQUIRED field. - VolumeCapability volume_capability = 4; - - // Secrets required by plugin to complete node stage volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. - map secrets = 5 [(csi_secret) = true]; - - // Volume context as returned by CO in CreateVolumeRequest. This field - // is OPTIONAL and MUST match the volume_context of the volume - // identified by `volume_id`. - map volume_context = 6; -} - -message NodeStageVolumeResponse { - // Intentionally empty. -} -message NodeUnstageVolumeRequest { - // The ID of the volume. This field is REQUIRED. - string volume_id = 1; - - // The path at which the volume was staged. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // This is a REQUIRED field. - string staging_target_path = 2; -} - -message NodeUnstageVolumeResponse { - // Intentionally empty. -} -message NodePublishVolumeRequest { - // The ID of the volume to publish. This field is REQUIRED. - string volume_id = 1; - - // The CO SHALL set this field to the value returned by - // `ControllerPublishVolume` if the corresponding Controller Plugin - // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be - // left unset if the corresponding Controller Plugin does not have - // this capability. This is an OPTIONAL field. - map publish_context = 2; - - // The path to which the volume was staged by `NodeStageVolume`. - // It MUST be an absolute path in the root filesystem of the process - // serving this request. - // It MUST be set if the Node Plugin implements the - // `STAGE_UNSTAGE_VOLUME` node capability. - // This is an OPTIONAL field. - string staging_target_path = 3; - - // The path to which the volume will be published. It MUST be an - // absolute path in the root filesystem of the process serving this - // request. The CO SHALL ensure uniqueness of target_path per volume. - // The CO SHALL ensure that the parent directory of this path exists - // and that the process serving the request has `read` and `write` - // permissions to that parent directory. - // For volumes with an access type of block, the SP SHALL place the - // block device at target_path. - // For volumes with an access type of mount, the SP SHALL place the - // mounted directory at target_path. - // Creation of target_path is the responsibility of the SP. - // This is a REQUIRED field. - string target_path = 4; - - // Volume capability describing how the CO intends to use this volume. - // SP MUST ensure the CO can use the published volume as described. - // Otherwise SP MUST return the appropriate gRPC error code. - // This is a REQUIRED field. - VolumeCapability volume_capability = 5; - - // Indicates SP MUST publish the volume in readonly mode. - // This field is REQUIRED. - bool readonly = 6; - - // Secrets required by plugin to complete node publish volume request. - // This field is OPTIONAL. Refer to the `Secrets Requirements` - // section on how to use this field. 
- map secrets = 7 [(csi_secret) = true]; - - // Volume context as returned by CO in CreateVolumeRequest. This field - // is OPTIONAL and MUST match the volume_context of the volume - // identified by `volume_id`. - map volume_context = 8; -} - -message NodePublishVolumeResponse { - // Intentionally empty. -} -message NodeUnpublishVolumeRequest { - // The ID of the volume. This field is REQUIRED. - string volume_id = 1; - - // The path at which the volume was published. It MUST be an absolute - // path in the root filesystem of the process serving this request. - // The SP MUST delete the file or directory it created at this path. - // This is a REQUIRED field. - string target_path = 2; -} - -message NodeUnpublishVolumeResponse { - // Intentionally empty. -} -message NodeGetVolumeStatsRequest { - // The ID of the volume. This field is REQUIRED. - string volume_id = 1; - - // It can be any valid path where volume was previously - // staged or published. - // It MUST be an absolute path in the root filesystem of - // the process serving this request. - // This is a REQUIRED field. - string volume_path = 2; -} - -message NodeGetVolumeStatsResponse { - // This field is OPTIONAL. - repeated VolumeUsage usage = 1; -} - -message VolumeUsage { - enum Unit { - UNKNOWN = 0; - BYTES = 1; - INODES = 2; - } - // The available capacity in specified Unit. This field is OPTIONAL. - // The value of this field MUST NOT be negative. - int64 available = 1; - - // The total capacity in specified Unit. This field is REQUIRED. - // The value of this field MUST NOT be negative. - int64 total = 2; - - // The used capacity in specified Unit. This field is OPTIONAL. - // The value of this field MUST NOT be negative. - int64 used = 3; - - // Units by which values are measured. This field is REQUIRED. - Unit unit = 4; -} -message NodeGetCapabilitiesRequest { - // Intentionally empty. -} - -message NodeGetCapabilitiesResponse { - // All the capabilities that the node service supports. This field - // is OPTIONAL. - repeated NodeServiceCapability capabilities = 1; -} - -// Specifies a capability of the node service. -message NodeServiceCapability { - message RPC { - enum Type { - UNKNOWN = 0; - STAGE_UNSTAGE_VOLUME = 1; - // If Plugin implements GET_VOLUME_STATS capability - // then it MUST implement NodeGetVolumeStats RPC - // call for fetching volume statistics. - GET_VOLUME_STATS = 2; - // See VolumeExpansion for details. - EXPAND_VOLUME = 3; - } - - Type type = 1; - } - - oneof type { - // RPC that the controller supports. - RPC rpc = 1; - } -} -message NodeGetInfoRequest { -} - -message NodeGetInfoResponse { - // The identifier of the node as understood by the SP. - // This field is REQUIRED. - // This field MUST contain enough information to uniquely identify - // this specific node vs all other nodes supported by this plugin. - // This field SHALL be used by the CO in subsequent calls, including - // `ControllerPublishVolume`, to refer to this node. - // The SP is NOT responsible for global uniqueness of node_id across - // multiple SPs. - string node_id = 1; - - // Maximum number of volumes that controller can publish to the node. - // If value is not set or zero CO SHALL decide how many volumes of - // this type can be published by the controller to the node. The - // plugin MUST NOT set negative values here. - // This field is OPTIONAL. - int64 max_volumes_per_node = 2; - - // Specifies where (regions, zones, racks, etc.) the node is - // accessible from. 
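For the volume statistics contract described above, a node plugin could assemble its reply as follows; a minimal sketch with caller-supplied byte counts, assuming the standard generated builders.

  import csi.v1.Csi;

  final class VolumeStatsSketch {
    /** Reports byte-level usage; unit is REQUIRED and counts must not be negative. */
    static Csi.NodeGetVolumeStatsResponse byteUsage(long totalBytes, long usedBytes) {
      return Csi.NodeGetVolumeStatsResponse.newBuilder()
          .addUsage(Csi.VolumeUsage.newBuilder()
              .setUnit(Csi.VolumeUsage.Unit.BYTES)
              .setTotal(totalBytes)
              .setUsed(usedBytes)
              .setAvailable(totalBytes - usedBytes))
          .build();
    }
  }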
- // A plugin that returns this field MUST also set the - // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. - // COs MAY use this information along with the topology information - // returned in CreateVolumeResponse to ensure that a given volume is - // accessible from a given node when scheduling workloads. - // This field is OPTIONAL. If it is not specified, the CO MAY assume - // the node is not subject to any topological constraint, and MAY - // schedule workloads that reference any volume V, such that there are - // no topological constraints declared for V. - // - // Example 1: - // accessible_topology = - // {"region": "R1", "zone": "R2"} - // Indicates the node exists within the "region" "R1" and the "zone" - // "Z2". - Topology accessible_topology = 3; -} -message NodeExpandVolumeRequest { - // The ID of the volume. This field is REQUIRED. - string volume_id = 1; - - // The path on which volume is available. This field is REQUIRED. - string volume_path = 2; - - // This allows CO to specify the capacity requirements of the volume - // after expansion. If capacity_range is omitted then a plugin MAY - // inspect the file system of the volume to determine the maximum - // capacity to which the volume can be expanded. In such cases a - // plugin MAY expand the volume to its maximum capacity. - // This field is OPTIONAL. - CapacityRange capacity_range = 3; -} - -message NodeExpandVolumeResponse { - // The capacity of the volume in bytes. This field is OPTIONAL. - int64 capacity_bytes = 1; -} diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml deleted file mode 100644 index 0f5c3c49ef7..00000000000 --- a/hadoop-ozone/datanode/pom.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-datanode - Apache Hadoop Ozone Datanode - jar - 0.5.0-SNAPSHOT - - - - org.apache.hadoop - hadoop-common - compile - - - com.sun.xml.bind - jaxb-impl - - - - - org.apache.hadoop - hadoop-hdfs - compile - - - org.apache.hadoop - hadoop-hdds-container-service - - - com.sun.xml.bind - jaxb-impl - - - com.sun.xml.bind - jaxb-core - - - javax.xml.bind - jaxb-api - - - javax.activation - activation - - - diff --git a/hadoop-ozone/dev-support/checks/README.md b/hadoop-ozone/dev-support/checks/README.md deleted file mode 100755 index ba7202cab16..00000000000 --- a/hadoop-ozone/dev-support/checks/README.md +++ /dev/null @@ -1,27 +0,0 @@ - - -# Ozone checks - -This directory contains a collection of easy-to-use helper scripts to execute various type of tests on the ozone/hdds codebase. - -The contract of the scripts are very simple: - - 1. Executing the scripts without any parameter will check the hdds/ozone project - 2. Shell exit code represents the result of the check (if failed, exits with non-zero code) - 3. Detailed information may be saved to the $OUTPUT_DIR (if it's not set, root level ./target will be used). - 4. The standard output should contain all the log about the build AND the results. - 5. 
The content of the $OUTPUT_DIR can be: - * `summary.html`/`summary.md`/`summary.txt`: contains a human readable overview about the failed tests (used by reporting) - * `failures`: contains a simple number (used by reporting) diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh deleted file mode 100755 index 81551d1ed97..00000000000 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -REPORT_DIR=${REPORT_DIR:-$PWD} - -_realpath() { - if realpath "$@" > /dev/null; then - realpath "$@" - else - local relative_to - relative_to=$(realpath "${1/--relative-to=/}") || return 1 - realpath "$2" | sed -e "s@${relative_to}/@@" - fi -} - -## generate summary txt file -find "." -name 'TEST*.xml' -print0 \ - | xargs -n1 -0 "grep" -l -E "> "${REPORT_DIR}/summary.txt" - -## Check if Maven was killed -if grep -q 'Killed.* mvn .* test ' "${REPORT_DIR}/output.log"; then - echo 'Maven test run was killed' >> "${REPORT_DIR}/summary.txt" -fi - -#Collect of all of the report failes of FAILED tests -while IFS= read -r -d '' dir; do - while IFS=$'\n' read -r file; do - DIR_OF_TESTFILE=$(dirname "$file") - NAME_OF_TESTFILE=$(basename "$file") - NAME_OF_TEST="${NAME_OF_TESTFILE%.*}" - DESTDIRNAME=$(_realpath --relative-to="$PWD" "$DIR_OF_TESTFILE/../..") || continue - mkdir -p "$REPORT_DIR/$DESTDIRNAME" - #shellcheck disable=SC2086 - cp -r "$DIR_OF_TESTFILE"/*$NAME_OF_TEST* "$REPORT_DIR/$DESTDIRNAME/" - done < <(grep -l -r FAILURE --include="*.txt" "$dir" | grep -v output.txt) -done < <(find "." 
-name surefire-reports -print0) - -## generate summary markdown file -export SUMMARY_FILE="$REPORT_DIR/summary.md" -for TEST_RESULT_FILE in $(find "$REPORT_DIR" -name "*.txt" | grep -v output); do - - FAILURES=$(grep FAILURE "$TEST_RESULT_FILE" | grep "Tests run" | awk '{print $18}' | sort | uniq) - - for FAILURE in $FAILURES; do - TEST_RESULT_LOCATION="$(_realpath --relative-to="$REPORT_DIR" "$TEST_RESULT_FILE")" - TEST_OUTPUT_LOCATION="${TEST_RESULT_LOCATION//.txt/-output.txt}" - printf " * [%s](%s) ([output](%s))\n" "$FAILURE" "$TEST_RESULT_LOCATION" "$TEST_OUTPUT_LOCATION" >> "$SUMMARY_FILE" - done -done - -if [ -s "$SUMMARY_FILE" ]; then - printf "# Failing tests: \n\n" | cat - "$SUMMARY_FILE" > temp && mv temp "$SUMMARY_FILE" -fi - -## generate counter -wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh deleted file mode 100755 index ee03c587d4b..00000000000 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/acceptance"} -mkdir -p "$REPORT_DIR" - -OZONE_VERSION=$(grep "" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'| sed 's/^[ \t]*//') -DIST_DIR="$DIR/../../dist/target/ozone-$OZONE_VERSION" - -if [ ! -d "$DIST_DIR" ]; then - echo "Distribution dir is missing. Doing a full build" - "$DIR/build.sh" -fi - -cd "$DIST_DIR/compose" || exit 1 -./test-all.sh -RES=$? -cp result/* "$REPORT_DIR/" -cp "$REPORT_DIR/log.html" "$REPORT_DIR/summary.html" -exit $RES diff --git a/hadoop-ozone/dev-support/checks/author.sh b/hadoop-ozone/dev-support/checks/author.sh deleted file mode 100755 index 92903f92ea6..00000000000 --- a/hadoop-ozone/dev-support/checks/author.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/author"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - -#hide this string to not confuse yetus -AUTHOR="uthor" -AUTHOR="@a${AUTHOR}" - -grep -r --include="*.java" "$AUTHOR" . | tee "$REPORT_FILE" - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi diff --git a/hadoop-ozone/dev-support/checks/blockade.sh b/hadoop-ozone/dev-support/checks/blockade.sh deleted file mode 100755 index a48d2b592ba..00000000000 --- a/hadoop-ozone/dev-support/checks/blockade.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -OZONE_VERSION=$(grep "" "$DIR/../../pom.xml" | sed 's/<[^>]*>//g'| sed 's/^[ \t]*//') -cd "$DIR/../../dist/target/ozone-$OZONE_VERSION/tests" || exit 1 - -source ${DIR}/../../dist/target/ozone-${OZONE_VERSION}/compose/ozoneblockade/.env -export OZONE_RUNNER_VERSION -export HDDS_VERSION - -python -m pytest -s blockade -exit $? diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh deleted file mode 100755 index 11973301b27..00000000000 --- a/hadoop-ozone/dev-support/checks/build.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -export MAVEN_OPTS="-Xmx4096m" -mvn -B -f pom.ozone.xml -Dmaven.javadoc.skip=true -DskipTests clean install -exit $? 
diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh deleted file mode 100755 index 685bf14629e..00000000000 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -BASE_DIR="$(pwd -P)" -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/checkstyle"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - -mvn -B -fn checkstyle:check -f pom.ozone.xml - -#Print out the exact violations with parsing XML results with sed -find "." -name checkstyle-errors.xml -print0 \ - | xargs -0 sed '$!N; //d' \ - -e '/ "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh deleted file mode 100755 index ccbf2ed678c..00000000000 --- a/hadoop-ozone/dev-support/checks/findbugs.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -if ! type unionBugs >/dev/null 2>&1 || ! type convertXmlToText >/dev/null 2>&1; then - mvn -B -fae compile spotbugs:check -f pom.ozone.xml - exit $? 
-fi - -mvn -B -fae compile spotbugs:spotbugs -f pom.ozone.xml - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - -touch "$REPORT_FILE" - -find hadoop-hdds hadoop-ozone -name spotbugsXml.xml -print0 | xargs -0 unionBugs -output "${REPORT_DIR}"/summary.xml -convertXmlToText "${REPORT_DIR}"/summary.xml | tee -a "${REPORT_FILE}" -convertXmlToText -html:fancy-hist.xsl "${REPORT_DIR}"/summary.xml "${REPORT_DIR}"/summary.html - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi diff --git a/hadoop-ozone/dev-support/checks/integration.sh b/hadoop-ozone/dev-support/checks/integration.sh deleted file mode 100755 index 52e35765952..00000000000 --- a/hadoop-ozone/dev-support/checks/integration.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -export MAVEN_OPTS="-Xmx4096m" -mvn -B install -f pom.ozone.xml -DskipTests -mvn -B -fn test -f pom.ozone.xml -pl :hadoop-ozone-integration-test,:hadoop-ozone-filesystem,:hadoop-ozone-tools \ - -Dtest=\!TestMiniChaosOzoneCluster "$@" - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/integration"} -mkdir -p "$REPORT_DIR" - -# shellcheck source=hadoop-ozone/dev-support/checks/_mvn_unit_report.sh -source "$DIR/_mvn_unit_report.sh" - -if [[ -s "$REPORT_DIR/summary.txt" ]] ; then - exit 1 -fi -exit 0 diff --git a/hadoop-ozone/dev-support/checks/isolation.sh b/hadoop-ozone/dev-support/checks/isolation.sh deleted file mode 100755 index 12802350d29..00000000000 --- a/hadoop-ozone/dev-support/checks/isolation.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." 
|| exit 1 - -hadooplines=$(git diff --name-only HEAD~1..HEAD | grep -v hadoop-ozone | grep -c -v hadoop-hdds ) -if [ "$hadooplines" == "0" ]; then - echo "Only ozone/hdds subprojects are changed" - exit 0 -else - echo "Main hadoop projects are changed in an ozone patch." - echo "Please do it in a HADOOP/HDFS patch and test it with hadoop precommit tests" - exit 1 -fi diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh deleted file mode 100755 index 464d636f938..00000000000 --- a/hadoop-ozone/dev-support/checks/rat.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/rat"} -mkdir -p "$REPORT_DIR" - -REPORT_FILE="$REPORT_DIR/summary.txt" - -cd hadoop-hdds || exit 1 -mvn -B -fn org.apache.rat:apache-rat-plugin:0.13:check -cd ../hadoop-ozone || exit 1 -mvn -B -fn org.apache.rat:apache-rat-plugin:0.13:check - -cd "$DIR/../../.." || exit 1 - -grep -r --include=rat.txt "!????" hadoop-hdds hadoop-ozone | tee "$REPORT_FILE" - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi - diff --git a/hadoop-ozone/dev-support/checks/shellcheck.sh b/hadoop-ozone/dev-support/checks/shellcheck.sh deleted file mode 100755 index 2b67118985a..00000000000 --- a/hadoop-ozone/dev-support/checks/shellcheck.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." 
|| exit 1 - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/shellcheck"} -mkdir -p "$REPORT_DIR" -REPORT_FILE="$REPORT_DIR/summary.txt" - -echo "" > "$OUTPUT_FILE" -if [[ "$(uname -s)" = "Darwin" ]]; then - find hadoop-hdds hadoop-ozone -type f -perm '-500' -else - find hadoop-hdds hadoop-ozone -type f -executable -fi \ - | grep -v -e target/ -e node_modules/ -e '\.\(ico\|py\|yml\)$' \ - | xargs -n1 shellcheck \ - | tee "$REPORT_FILE" - -wc -l "$REPORT_FILE" | awk '{print $1}'> "$REPORT_DIR/failures" - -if [[ -s "${REPORT_FILE}" ]]; then - exit 1 -fi diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh deleted file mode 100755 index 6a124127edb..00000000000 --- a/hadoop-ozone/dev-support/checks/unit.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -cd "$DIR/../../.." || exit 1 - -export MAVEN_OPTS="-Xmx4096m" -mvn -B -fn test -f pom.ozone.xml -pl \!:hadoop-ozone-integration-test,\!:hadoop-ozone-filesystem,\!:hadoop-ozone-tools "$@" - -REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/unit"} -mkdir -p "$REPORT_DIR" - -# shellcheck source=hadoop-ozone/dev-support/checks/_mvn_unit_report.sh -source "$DIR/_mvn_unit_report.sh" - -if [[ -s "$REPORT_DIR/summary.txt" ]] ; then - exit 1 -fi -exit 0 diff --git a/hadoop-ozone/dev-support/docker/Dockerfile b/hadoop-ozone/dev-support/docker/Dockerfile deleted file mode 100644 index 045e1f6c058..00000000000 --- a/hadoop-ozone/dev-support/docker/Dockerfile +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-FROM alpine -RUN apk add --update --no-cache bash alpine-sdk maven grep openjdk8 py-pip rsync procps autoconf automake libtool findutils - -#Install real glibc -RUN apk --no-cache add ca-certificates wget && \ - wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub && \ - wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.28-r0/glibc-2.28-r0.apk && \ - apk add glibc-2.28-r0.apk - -#Install protobuf -RUN mkdir -p /usr/local/src/ && \ - cd /usr/local/src/ && \ - wget https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz && \ - tar xvf protobuf-2.5.0.tar.gz && \ - cd protobuf-2.5.0 && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make && \ - make install && \ - protoc --version - -#Findbug install -RUN mkdir -p /opt && \ - curl -sL https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-3.0.1.tar.gz/download | tar -xz && \ - mv findbugs-* /opt/findbugs - -#Install apache-ant -RUN mkdir -p /opt && \ - curl -sL 'https://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&filename=/ant/binaries/apache-ant-1.10.5-bin.tar.gz' | tar -xz && \ - mv apache-ant* /opt/ant - -#Install docker-compose -RUN pip install docker-compose - -#Install pytest==2.8.7 -RUN pip install pytest==2.8.7 - -ENV PATH=$PATH:/opt/findbugs/bin - -RUN addgroup -g 1000 default && \ - for i in $(seq 1 2000); do adduser jenkins$i -u $i -G default -h /tmp/ -H -D; done - -#This is a very huge local maven cache. Usually the mvn repository is not safe to be -#shared between builds as concurrent installls are not handled very well -#A simple workaround is to provide all the required 3rd party lib in the docker image -#It will be cached by docker, and any additional dependency can be downloaded, artifacts -#can be installed -USER jenkins1000 -RUN cd /tmp && \ - git clone --depth=1 https://gitbox.apache.org/repos/asf/hadoop.git -b trunk && \ - cd /tmp/hadoop && \ - mvn package dependency:go-offline -DskipTests -P hdds -pl :hadoop-ozone-dist -am && \ - rm -rf /tmp/.m2/repository/org/apache/hadoop/*hdds* && \ - rm -rf /tmp/.m2/repository/org/apache/hadoop/*ozone* && \ - find /tmp/.m2/repository -exec chmod o+wx {} \; diff --git a/hadoop-ozone/dev-support/intellij/install-runconfigs.sh b/hadoop-ozone/dev-support/intellij/install-runconfigs.sh deleted file mode 100755 index fc877bdde62..00000000000 --- a/hadoop-ozone/dev-support/intellij/install-runconfigs.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -SRC_DIR="$SCRIPT_DIR/runConfigurations" -DEST_DIR="$SCRIPT_DIR/../../../.idea/runConfigurations/" -mkdir -p "$DEST_DIR" -#shellcheck disable=SC2010 -ls -1 "$SRC_DIR" | grep -v ozone-site.xml | xargs -n1 -I FILE cp "$SRC_DIR/FILE" "$DEST_DIR" diff --git a/hadoop-ozone/dev-support/intellij/log4j.properties b/hadoop-ozone/dev-support/intellij/log4j.properties deleted file mode 100644 index bc62e32219a..00000000000 --- a/hadoop-ozone/dev-support/intellij/log4j.properties +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# log4j configuration used during build and unit tests -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n -log4j.logger.io.jagertraecing=DEBUG diff --git a/hadoop-ozone/dev-support/intellij/ozone-site.xml b/hadoop-ozone/dev-support/intellij/ozone-site.xml deleted file mode 100644 index 2eb79aa5219..00000000000 --- a/hadoop-ozone/dev-support/intellij/ozone-site.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - hdds.profiler.endpoint.enabled - true - - - ozone.scm.block.client.address - localhost - - - ozone.enabled - True - - - ozone.scm.datanode.id - /tmp/datanode.id - - - ozone.scm.client.address - localhost - - - ozone.metadata.dirs - /tmp/metadata - - - ozone.scm.names - localhost - - - ozone.om.address - localhost - - - ozone.enabled - true - - - ozone.scm.container.size - 10MB - - - ozone.scm.block.size - 1MB - - - hdds.datanode.storage.utilization.critical.threshold - 0.99 - - - hdds.prometheus.endpoint.enabled - true - - diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml deleted file mode 100644 index 7f2a3e1cf8e..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - \ No newline at end of file diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml deleted file mode 100644 index 9d964d48b67..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/FreonStandalone.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - \ No newline at end of file diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml deleted file mode 100644 index c2aaf1c829b..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManager.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml deleted file mode 100644 index 70fab5df640..00000000000 --- 
a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneManagerInit.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml deleted file mode 100644 index 2d0bf803edc..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - \ No newline at end of file diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml deleted file mode 100644 index 6c8e0ec9e80..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Recon.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - \ No newline at end of file diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml deleted file mode 100644 index 93f4a9d98db..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/S3Gateway.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - \ No newline at end of file diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml deleted file mode 100644 index 46104d3ec73..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManager.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml deleted file mode 100644 index f3ef26bf8d2..00000000000 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/StorageContainerManagerInit.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - diff --git a/hadoop-ozone/dist/README.md b/hadoop-ozone/dist/README.md deleted file mode 100644 index 88132ec1728..00000000000 --- a/hadoop-ozone/dist/README.md +++ /dev/null @@ -1,85 +0,0 @@ - - -# Ozone Distribution - -This folder contains the project to create the binary ozone distribution and provide all the helper script and docker files to start it locally or in the cluster. - -## Testing with local docker based cluster - -After a full dist build you can find multiple docker-compose based cluster definition in the `target/ozone-*/compose` folder. - -Please check the README files there. - -Usually you can start the cluster with: - -``` -cd compose/ozone -docker-compose up -d -``` - -## Testing on Kubernetes - -You can also test the ozone cluster in kubernetes. If you have no active kubernetes cluster you can start a local one with minikube: - -``` -minikube start -``` - -For testing in kubernetes you need to: - -1. Create a docker image with the new build -2. Upload it to a docker registery -3. Deploy the cluster with apply kubernetes resources - -The easiest way to do all these steps is using the [skaffold](https://github.com/GoogleContainerTools/skaffold) tool. After the [installation of skaffold](https://github.com/GoogleContainerTools/skaffold#installation), you can execute - -``` -skaffold run -``` - -in this (`hadoop-ozone/dist`) folder. - -The default kubernetes resources set (`src/main/k8s/`) contains NodePort based service definitions for the Ozone Manager, Storage Container Manager and the S3 gateway. 
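The NodePort services named here can also be looked up and reached with plain kubectl, independent of minikube; a minimal sketch follows (treating the first node address reported by the API server as routable is an assumption for the example), with the minikube shortcuts shown right after:

```
# Sketch: resolve one of the default NodePort services (s3g-public) without
# `minikube service`; assumes the first listed node address is reachable.
kubectl get svc om-public scm-public s3g-public
NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}')
S3G_PORT=$(kubectl get svc s3g-public -o jsonpath='{.spec.ports[0].nodePort}')
curl "http://${NODE_IP}:${S3G_PORT}/"
```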
- -With minikube you can access the services with: - -``` -minikube service s3g-public -minikube service om-public -minikube service scm-public -``` - -### Monitoring - -Apache Hadoop Ozone supports Prometheus out-of the box. It contains a prometheus compatible exporter servlet. To start the monitoring you need a prometheus deploy in your kubernetes cluster: - -``` -cd src/main/k8s/prometheus -kubectl apply -f . -``` - -The prometheus ui also could be access via a NodePort service: - -``` -minikube service prometheus-public -``` - -### Notes on the Kubernetes setup - -Please not that the provided kubernetes resources are not suitable production: - -1. There are no security setup -2. The datanode is started in StatefulSet instead of DaemonSet (To make it possible to scale it up on one node minikube cluster) -3. All the UI pages are published with NodePort services \ No newline at end of file diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching deleted file mode 100755 index 72f6c3bfe83..00000000000 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# project.build.directory -BASEDIR=$1 - -#hdds.version -HDDS_VERSION=$2 - -## @audience private -## @stability evolving -function run() -{ - declare res - - echo "\$ ${*}" - "${@}" - res=$? - if [[ ${res} != 0 ]]; then - echo - echo "Failed!" - echo - exit "${res}" - fi -} - -## @audience private -## @stability evolving -function findfileindir() -{ - declare file="$1" - declare dir="${2:-./share}" - declare count - - count=$(find "${dir}" -iname "${file}" | wc -l) - - #shellcheck disable=SC2086 - echo ${count} -} - - -# shellcheck disable=SC2164 -ROOT=$(cd "${BASEDIR}"/../../..;pwd) -echo -echo "Current directory $(pwd)" -echo - -run rm -rf "ozone-${HDDS_VERSION}" -run mkdir "ozone-${HDDS_VERSION}" -run cd "ozone-${HDDS_VERSION}" - -run cp -p "${ROOT}/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt" "NOTICE.txt" -run cp -p "${ROOT}/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt" "LICENSE.txt" -run cp -pr "${ROOT}/hadoop-ozone/dist/src/main/license/bin/licenses" "licenses" -run cp -p "${ROOT}/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE" "licenses/LICENSE-ozone-recon.txt" -run cp -p "${ROOT}/README.txt" . 
- -run mkdir -p ./share/hadoop/mapreduce -run mkdir -p ./share/hadoop/ozone -run mkdir -p ./share/hadoop/hdds -run mkdir -p ./share/hadoop/yarn -run mkdir -p ./share/hadoop/hdfs -run mkdir -p ./share/hadoop/common - -touch ./share/hadoop/mapreduce/.keep -touch ./share/hadoop/yarn/.keep -touch ./share/hadoop/hdfs/.keep -touch ./share/hadoop/common/.keep - - -run mkdir -p ./share/ozone/web -run mkdir -p ./bin -run mkdir -p ./sbin -run mkdir -p ./etc -run mkdir -p ./libexec -run mkdir -p ./tests - -run cp -r "${ROOT}/hadoop-hdds/common/src/main/conf/" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop" -run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" "etc/hadoop" -run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-default.xml" "etc/hadoop" -run cp "${ROOT}/hadoop-hdds/common/src/main/resources/network-topology-nodegroup.xml" "etc/hadoop" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/" -run cp -r "${ROOT}/hadoop-ozone/dist/src/main/dockerbin" "bin/docker" - -run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.sh" "libexec/" -run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-config.cmd" "libexec/" -run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-functions.sh" "libexec/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone-config.sh" "libexec/" -run cp -r "${ROOT}/hadoop-ozone/common/src/main/shellprofile.d" "libexec/" - - -run cp "${ROOT}/hadoop-hdds/common/src/main/bin/hadoop-daemons.sh" "sbin/" -run cp "${ROOT}/hadoop-hdds/common/src/main/bin/workers.sh" "sbin/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/start-ozone.sh" "sbin/" -run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/" - -# fault injection tests -run cp -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests - -# Optional documentation, could be missing -cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./ - -#Copy docker compose files -#compose files are preprocessed: properties (eg. project.version) are replaced first by maven. -run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" . -run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . -run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes -run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . - -#workaround for https://issues.apache.org/jira/browse/MRESOURCES-236 -find ./compose -name "*.sh" -exec chmod 755 {} \; diff --git a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching b/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching deleted file mode 100755 index 408233afda1..00000000000 --- a/hadoop-ozone/dist/dev-support/bin/dist-tar-stitching +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# project.version -VERSION=$1 - -# project.build.directory -BASEDIR=$2 - -## @audience private -## @stability evolving -function run() -{ - declare res - - echo "\$ ${*}" - "${@}" - res=$? - if [[ ${res} != 0 ]]; then - echo - echo "Failed!" - echo - exit "${res}" - fi -} - -run tar -c -f "hadoop-ozone-${VERSION}.tar" "ozone-${VERSION}" -run gzip -f "hadoop-ozone-${VERSION}.tar" -echo -echo "Ozone dist tar available at: ${BASEDIR}/hadoop-ozone-${VERSION}.tar.gz" -echo diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml deleted file mode 100644 index 57b9a0cbd08..00000000000 --- a/hadoop-ozone/dist/pom.xml +++ /dev/null @@ -1,428 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-dist - Apache Hadoop Ozone Distribution - pom - 0.5.0-SNAPSHOT - - UTF-8 - true - 20190717-1 - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-classpath-files - prepare-package - - copy - - - target/ozone-${ozone.version}/share/ozone/classpath - - - - org.apache.hadoop - hadoop-hdds-server-scm - ${hdds.version} - classpath - cp - hadoop-hdds-server-scm.classpath - - - org.apache.hadoop - hadoop-hdds-tools - ${hdds.version} - classpath - cp - hadoop-hdds-tools.classpath - - - org.apache.hadoop - hadoop-ozone-s3gateway - ${ozone.version} - classpath - cp - hadoop-ozone-s3gateway.classpath - - - org.apache.hadoop - hadoop-ozone-csi - ${ozone.version} - classpath - cp - hadoop-ozone-csi.classpath - - - org.apache.hadoop - hadoop-ozone-ozone-manager - ${ozone.version} - classpath - cp - hadoop-ozone-ozone-manager.classpath - - - - org.apache.hadoop - hadoop-ozone-tools - ${ozone.version} - classpath - cp - hadoop-ozone-tools.classpath - - - org.apache.hadoop - hadoop-ozone-filesystem - ${ozone.version} - classpath - cp - hadoop-ozone-filesystem.classpath - - - org.apache.hadoop - hadoop-ozone-common - ${ozone.version} - classpath - cp - hadoop-ozone-common.classpath - - - org.apache.hadoop - hadoop-ozone-datanode - ${ozone.version} - classpath - cp - hadoop-ozone-datanode.classpath - - - org.apache.hadoop - hadoop-ozone-recon - ${ozone.version} - classpath - cp - hadoop-ozone-recon.classpath - - - org.apache.hadoop - hadoop-ozone-upgrade - ${ozone.version} - classpath - cp - hadoop-ozone-upgrade.classpath - - - org.apache.hadoop - hadoop-ozone-insight - ${ozone.version} - classpath - cp - hadoop-ozone-insight.classpath - - - - - - copy-jars - prepare-package - - copy-dependencies - - - target/ozone-${ozone.version}/share/ozone/lib - - runtime - - - - copy-omitted-jars - prepare-package - - copy - - - target/ozone-${ozone.version}/share/ozone/lib - - - - com.google.protobuf - protobuf-java - 3.5.1 - - - com.google.guava - guava - 26.0-android - - - - - - - - maven-resources-plugin - 3.1.0 - - - copy-compose-files - compile - - copy-resources - - - ${basedir}/target/compose - - - src/main/compose - true - - - - - - copy-and-filter-dockerfile - compile - - copy-resources - - - ${project.build.directory} - - - src/main/docker - true - - - - - - copy-k8s - compile - - copy-resources - - - ${basedir}/target/k8s - - - src/main/k8s - true 
- - - - - - - - org.codehaus.mojo - exec-maven-plugin - - - dist - prepare-package - - exec - - - ${shell-executable} - ${project.build.directory} - - - ${basedir}/dev-support/bin/dist-layout-stitching - - ${project.build.directory} - ${hdds.version} - - - - - - - - maven-enforcer-plugin - - - depcheck - - - - - - - - - - org.apache.hadoop - hadoop-hdds-tools - - - org.apache.hadoop - hadoop-hdds-server-scm - - - org.apache.hadoop - hadoop-hdds-container-service - - - org.apache.hadoop - hadoop-ozone-s3gateway - - - org.apache.hadoop - hadoop-ozone-csi - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - org.apache.hadoop - hadoop-ozone-tools - - - org.apache.hadoop - hadoop-ozone-filesystem-lib-current - - - org.apache.hadoop - hadoop-ozone-filesystem-lib-legacy - - - org.apache.hadoop - hadoop-ozone-common - - - org.apache.hadoop - hadoop-ozone-datanode - - - org.apache.hadoop - hadoop-ozone-recon - - - org.apache.hadoop - hadoop-hdds-docs - - - org.apache.hadoop - hadoop-ozone-upgrade - - - org.apache.hadoop - hadoop-ozone-insight - - - - - docker-build - - - - io.fabric8 - docker-maven-plugin - 0.29.0 - - - - build - - package - - - - - - ${docker.image} - - - ${project.build.directory}/ozone-${project.version} - - - - - - - - - - - docker-push - - - - io.fabric8 - docker-maven-plugin - 0.29.0 - - - - push - - package - - - - - - - - dist - - - - org.codehaus.mojo - exec-maven-plugin - - - tar-ozone - package - - exec - - - ${shell-executable} - ${project.build.directory} - - - ${basedir}/dev-support/bin/dist-tar-stitching - - ${hdds.version} - ${project.build.directory} - - - - - - - - - - - diff --git a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml b/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml deleted file mode 100644 index 25e35c8a368..00000000000 --- a/hadoop-ozone/dist/src/main/assemblies/ozone-src.xml +++ /dev/null @@ -1,101 +0,0 @@ - - - ozone-src - - tar.gz - - true - - - pom.ozone.xml - / - pom.xml - - - hadoop-ozone/dist/src/main/license/src/LICENSE.txt - / - - - hadoop-ozone/dist/src/main/license/src/NOTICE.txt - / - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt - /licenses - - - hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt - /licenses - - - - - . - - pom.ozone.xml - README.txt - - - - hadoop-hdds - true - - **/.classpath - **/.project - **/.settings - **/*.iml - **/target/** - - - - hadoop-ozone - true - - **/ozone-recon-web/build/** - **/ozone-recon-web/node_modules/** - **/.classpath - **/.project - **/.settings - **/*.iml - **/target/** - - - - hadoop-ozone/dist/src/main/license/src - - **/*.txt - - - - - diff --git a/hadoop-ozone/dist/src/main/compose/README.md b/hadoop-ozone/dist/src/main/compose/README.md deleted file mode 100644 index 8189d2c169a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/README.md +++ /dev/null @@ -1,51 +0,0 @@ - - -# Docker cluster definitions - -This directory contains multiple docker cluster definitions to start local pseudo cluster with different configuration. - -It helps to start local (multi-node like) pseudo cluster with docker and docker-compose and obviously it's not for production. 
- -You may find more information in the specific subdirectories but in generic you can use the following commands: - -## Usage - -To start a cluster go to a subdirectory and start the cluster: - -``` -docker-compose up -d -``` - -You can check the logs of all the components with: - -``` -docker-compose logs -``` - -In case of a problem you can destroy the cluster an delete all the local state with: - -``` -docker-compose down -``` - -(Note: a simple docker-compose stop may not delete all the local data). - -You can scale up and down the components: - -``` -docker-compose scale datanode=5 -``` - -Usually the key webui ports are published on the docker host. diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini b/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini deleted file mode 100644 index a8451460de5..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/conf/grafana.ini +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -[auth.anonymous] -enabled=true -org_role=Editor diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json deleted file mode 100644 index 7644b12f2a2..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json +++ /dev/null @@ -1,1344 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "iteration": 1544553994120, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 22, - "panels": [], - "repeat": null, - "title": "Total Count", - "type": "row" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": null, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 0, - "y": 1 - }, - "hideTimeOverride": false, - "id": 12, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - 
"text": "N/A", - "to": "null" - } - ], - "repeat": "entity", - "repeatDirection": "h", - "scopedVars": { - "entity": { - "selected": false, - "text": "volume", - "value": "volume" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]s", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeShift": null, - "title": "[[entity]] created", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": null, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 1 - }, - "hideTimeOverride": false, - "id": 28, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 12, - "scopedVars": { - "entity": { - "selected": false, - "text": "bucket", - "value": "bucket" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]s", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeShift": null, - "title": "[[entity]] created", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": null, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 1 - }, - "hideTimeOverride": false, - "id": 29, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 12, - "scopedVars": { - "entity": { - "selected": false, - "text": "key", - "value": "key" - } - }, - 
"sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]s", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "timeShift": null, - "title": "[[entity]] created", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 0, - "y": 6 - }, - "hideTimeOverride": false, - "id": 18, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "repeat": "entity", - "repeatDirection": "h", - "scopedVars": { - "entity": { - "selected": false, - "text": "volume", - "value": "volume" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]_ops", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "timeShift": null, - "title": "$entity Ops", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "total" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 6 - }, - "hideTimeOverride": false, - "id": 30, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 18, - "scopedVars": { - "entity": { - "selected": false, - "text": "bucket", - "value": "bucket" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]_ops", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - } - ], 
- "thresholds": "", - "timeShift": null, - "title": "$entity Ops", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "total" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorPostfix": false, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#7eb26d", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 16, - "y": 6 - }, - "hideTimeOverride": false, - "id": 31, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 18, - "scopedVars": { - "entity": { - "selected": false, - "text": "key", - "value": "key" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": true, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "__name__", - "targets": [ - { - "expr": "om_metrics_num_[[entity]]_ops", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - } - ], - "thresholds": "", - "timeShift": null, - "title": "$entity Ops", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "total" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 14, - "panels": [], - "repeat": null, - "title": "Object Creation Rate", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 12 - }, - "id": 24, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "entity", - "repeatDirection": "h", - "scopedVars": { - "entity": { - "selected": false, - "text": "volume", - "value": "volume" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1h])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1h])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "[[entity]] created - Hourly", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 12 - }, - "id": 32, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 24, - "scopedVars": { - "entity": { - "selected": false, - "text": "bucket", - "value": "bucket" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1h])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1h])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "[[entity]] created - Hourly", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 12 - }, - "id": 33, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 24, - "scopedVars": { - "entity": { - "selected": false, - "text": "key", - "value": "key" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1h])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1h])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "[[entity]] created - Hourly", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - 
"buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 19 - }, - "id": 27, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "entity", - "repeatDirection": "h", - "scopedVars": { - "entity": { - "selected": false, - "text": "volume", - "value": "volume" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1d])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1d])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "24h", - "timeRegions": [], - "timeShift": null, - "title": "[[entity]] created - Daily", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 19 - }, - "id": 34, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 27, - "scopedVars": { - "entity": { - "selected": false, - "text": "bucket", - "value": "bucket" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1d])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1d])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "24h", - "timeRegions": [], - "timeShift": null, - "title": "[[entity]] created - Daily", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": 
null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fill": 1, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 19 - }, - "id": 35, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "repeatIteration": 1544553994120, - "repeatPanelId": 27, - "scopedVars": { - "entity": { - "selected": false, - "text": "key", - "value": "key" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(om_metrics_num_[[entity]]_creates[1d])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "rate(om_metrics_num_[[entity]]_commits[1d])", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": "24h", - "timeRegions": [], - "timeShift": null, - "title": "[[entity]] created - Daily", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - } - ], - "refresh": "1m", - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": "all", - "current": { - "tags": [], - "text": "All", - "value": [ - "$__all" - ] - }, - "hide": 0, - "includeAll": true, - "label": null, - "multi": true, - "name": "entity", - "options": [ - { - "selected": true, - "text": "All", - "value": "$__all" - }, - { - "selected": false, - "text": "volume", - "value": "volume" - }, - { - "selected": false, - "text": "bucket", - "value": "bucket" - }, - { - "selected": false, - "text": "key", - "value": "key" - } - ], - "query": "volume,bucket,key", - "skipUrlSync": false, - "type": "custom" - } - ] - }, - "time": { - "from": "now/d", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Ozone - Object Metrics", - "uid": "yakEh0Eik", - "version": 1 -} diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC Metrics.json deleted file mode 100644 index a22e3d793cc..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - RPC 
Metrics.json +++ /dev/null @@ -1,875 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "iteration": 1544554371864, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 69, - "panels": [], - "repeat": "servername", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - } - }, - "title": "RPC Queue Time Summary - [[servername]]", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 47, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "rpc_rpc_queue_time_avg_time{servername=~\"$servername\"}", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Avg Queue Time", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 48, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "rpc_rpc_queue_time_num_ops{servername=~\"$servername\"}", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of Ops", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": 
true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 10 - }, - "id": 39, - "panels": [], - "repeat": null, - "title": "RPC Call Queue Length", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 29, - "legend": { - "avg": false, - "current": true, - "max": false, - "min": false, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "servername", - "repeatDirection": "h", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rpc_call_queue_length{servername=~\"$servername\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "$servername", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 37, - "panels": [], - "repeat": "window", - "scopedVars": { - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "title": "RPC Deferred Processing Time [[window]]s window", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 35, - "legend": { - "alignAsTable": false, - "avg": true, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "servername", - "repeatDirection": "v", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - }, - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": 
"avg(rpc_deferred_rpc_processing_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - }, - { - "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "C" - }, - { - "expr": "avg(rpc_deferred_rpc_processing_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "$servername", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 29 - }, - "id": 34, - "panels": [], - "repeat": "window", - "scopedVars": { - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "title": "RPC Queue Time [[window]]s window", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 30 - }, - "id": 32, - "legend": { - "alignAsTable": false, - "avg": true, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "servername", - "repeatDirection": "v", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - }, - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg(rpc_rpc_queue_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "avg(rpc_rpc_queue_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - }, - { - "expr": "avg(rpc_rpc_queue_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "C" - }, - { - "expr": "avg(rpc_rpc_queue_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "$servername", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - 
"logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 38 - }, - "id": 14, - "panels": [], - "repeat": "window", - "scopedVars": { - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "title": "RPC Processing Time [[window]]s window", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "decimals": null, - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 39 - }, - "id": 24, - "legend": { - "alignAsTable": false, - "avg": true, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": true, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": "servername", - "repeatDirection": "v", - "scopedVars": { - "servername": { - "selected": true, - "text": "OzoneManagerService", - "value": "OzoneManagerService" - }, - "window": { - "selected": true, - "text": "60", - "value": "60" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "avg(rpc_rpc_processing_time[[window]]s50th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "refId": "A" - }, - { - "expr": "avg(rpc_rpc_processing_time[[window]]s75th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - }, - { - "expr": "avg(rpc_rpc_processing_time[[window]]s90th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "C" - }, - { - "expr": "avg(rpc_rpc_processing_time[[window]]s99th_percentile_latency{servername=~\"$servername\"})", - "format": "time_series", - "intervalFactor": 1, - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": "1h", - "timeRegions": [], - "timeShift": "1h", - "title": "$servername", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transparent": true, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": true, - "alignLevel": null - } - } - ], - "refresh": false, - "schemaVersion": 16, - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "allValue": "All", - "current": { - "text": "OzoneManagerService", - "value": [ - "OzoneManagerService" - ] - }, - "datasource": "Prometheus", - "definition": "label_values(servername)", - "hide": 0, - "includeAll": true, - "label": "servername", - "multi": true, - "name": "servername", - "options": [], - "query": "label_values(servername)", - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": "All", - "current": { - "tags": [], - 
"text": "60", - "value": [ - "60" - ] - }, - "hide": 0, - "includeAll": true, - "label": null, - "multi": true, - "name": "window", - "options": [ - { - "selected": false, - "text": "All", - "value": "$__all" - }, - { - "selected": true, - "text": "60", - "value": "60" - }, - { - "selected": false, - "text": "300", - "value": "300" - } - ], - "query": "60,300", - "skipUrlSync": false, - "type": "custom" - } - ] - }, - "time": { - "from": "now/d", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Ozone - RPC Metrics", - "uid": "yDSkL0Pmk", - "version": 1 -} diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml b/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml deleted file mode 100755 index 1485f72e4ea..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/dashboards/dashboards.yml +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -- name: 'default' - org_id: 1 - folder: '' - type: 'file' - options: - folder: '/var/lib/grafana/dashboards' diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml b/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml deleted file mode 100755 index 4d33c2305c9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/provisioning/datasources/datasources.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -datasources: -- name: 'Prometheus' - type: 'prometheus' - access: 'proxy' - org_id: 1 - url: 'http://prometheus:9090' - is_default: true - version: 1 - editable: true diff --git a/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml b/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml deleted file mode 100644 index af567d91291..00000000000 --- a/hadoop-ozone/dist/src/main/compose/common/prometheus/prometheus.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -global: - scrape_interval: 15s # By default, scrape targets every 15 seconds. - -scrape_configs: - - job_name: ozone - metrics_path: /prom - static_configs: - - targets: - - "scm:9876" - - "om:9874" diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env deleted file mode 100644 index df9065c5ff4..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HADOOP_VERSION=3 -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml deleted file mode 100644 index 7d8295d8817..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-compose.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - namenode: - image: apache/hadoop:${HADOOP_VERSION} - ports: - - 9870:9870 - env_file: - - ./docker-config - environment: - ENSURE_NAMENODE_DIR: "/tmp/hadoop-root/dfs/name" - command: ["hdfs", "namenode"] - datanode: - image: apache/hadoop:${HADOOP_VERSION} - ports: - - 9864 - volumes: - - ../..:/opt/ozone - command: ["hdfs","datanode"] - environment: - HADOOP_CLASSPATH: /opt/ozone/share/hadoop/ozoneplugin/*.jar - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config deleted file mode 100644 index 63bbbd89873..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds - -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config deleted file mode 100644 index 79362388331..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/common-config +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 - -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 -HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 - -CORE-SITE.xml_fs.defaultFS=o3fs://bucket1.vol1/ - -MAPRED-SITE.XML_mapreduce.framework.name=yarn -MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.memory.mb=4096 -MAPRED-SITE.XML_mapreduce.reduce.memory.mb=4096 -MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2g - -YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user -YARN_SITE.XML_yarn.timeline-service.enabled=true -#YARN_SITE.XML_yarn.timeline-service.generic.application.history.enabled=true -#YARN_SITE.XML_yarn.timeline-service.hostname=jhs -#YARN_SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/ - -YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false -YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=6000 -YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false -YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle -YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false - -YARN-SITE.XML_yarn.resourcemanager.hostname=rm -YARN_SITE_XML_yarn.resourcemanager.system.metrics.publisher.enabled=true - -#YARN-SITE.XML_yarn.log-aggregation-enable=true -#YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600 - -#YARN-SITE.yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor -#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor -#YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop -YARN-SITE.XML_yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage=99 -YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false - 
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=* -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=* -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings= -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env deleted file mode 100644 index 27fc57662d6..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/.env +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=@hdds.version@ -#TODO: swich to apache/hadoop. Older versions are not supported by apache/hadoop, yet. -# See: HADOOP-16092 for more details. -HADOOP_IMAGE=flokkr/hadoop -HADOOP_VERSION=2.7.7 -OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml deleted file mode 100644 index 17f5ee53552..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-compose.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - docker-config - - ../common-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: om - volumes: - - ../../..:/opt/hadoop - ports: - - 9874:9874 - environment: - WAITFOR: scm:9876 - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","om"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: s3g - volumes: - - ../../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","s3g"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: scm - volumes: - - ../../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - docker-config - - ../common-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: rm - volumes: - - ../../..:/opt/ozone - ports: - - 8088:8088 - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar - command: ["yarn", "resourcemanager"] - nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: nm - volumes: - - ../../..:/opt/ozone - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar - WAIT_FOR: rm:8088 - command: ["yarn","nodemanager"] -# Optional section: comment out this part to get DNS resolution for all the containers. -# dns: -# image: andyshinn/dnsmasq:2.76 -# ports: -# - 53:53/udp -# - 53:53/tcp -# volumes: -# - "/var/run/docker.sock:/var/run/docker.sock" -# command: -# - "-k" -# - "-d" diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config deleted file mode 100644 index fccdace413a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/docker-config +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.BasicOzFs -MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-legacy-@project.version@.jar diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh deleted file mode 100755 index a2ab5d8826a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop27/test.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../../testlib.sh" - -start_docker_env - -execute_robot_test scm createmrenv.robot - - -#rm is the container name (resource manager) and not the rm command -execute_command_in_container rm sudo apk add --update py-pip -execute_command_in_container rm sudo pip install robotframework - -# reinitialize the directories to use -export OZONE_DIR=/opt/ozone -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../../testlib.sh" - -execute_robot_test rm ozonefs/hadoopo3fs.robot - -execute_robot_test rm -v hadoop.version:2.7.7 mapreduce.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env deleted file mode 100644 index 4cb42717f68..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/.env +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=@hdds.version@ -#TODO: swich to apache/hadoop. Older versions are not supported by apache/hadoop, yet. -# See: HADOOP-16092 for more details. 
-HADOOP_IMAGE=flokkr/hadoop -HADOOP_VERSION=3.1.2 -OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml deleted file mode 100644 index e3696fcf70a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-compose.yaml +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - docker-config - - ../common-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: om - volumes: - - ../../..:/opt/hadoop - ports: - - 9874:9874 - environment: - WAITFOR: scm:9876 - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","om"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: s3g - volumes: - - ../../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","s3g"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: scm - volumes: - - ../../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - docker-config - - ../common-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: rm - volumes: - - ../../..:/opt/ozone - ports: - - 8088:8088 - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - command: ["yarn", "resourcemanager"] - nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: nm - volumes: - - ../../..:/opt/ozone - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - WAIT_FOR: rm:8088 - command: ["yarn","nodemanager"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config deleted file mode 100644 index d7ead2172b9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/docker-config +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs -MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh deleted file mode 100755 index 03caea39f75..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop31/test.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../../testlib.sh" - -start_docker_env - -execute_robot_test scm createmrenv.robot - - -#rm is the container name (resource manager) and not the rm command -execute_command_in_container rm sudo apk add --update py-pip -execute_command_in_container rm sudo pip install robotframework - -# reinitialize the directories to use -export OZONE_DIR=/opt/ozone -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../../testlib.sh" - -execute_robot_test rm ozonefs/hadoopo3fs.robot - -execute_robot_test rm -v hadoop.version:3.1.2 mapreduce.robot - - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env deleted file mode 100644 index 70ba4b692d4..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/.env +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=@hdds.version@ -HADOOP_IMAGE=apache/hadoop -HADOOP_VERSION=3 -OZONE_RUNNER_VERSION=@docker.ozone-runner.version@ diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml deleted file mode 100644 index c25d36cb904..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-compose.yaml +++ /dev/null @@ -1,104 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - docker-config - - ../common-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: om - volumes: - - ../../..:/opt/hadoop - ports: - - 9874:9874 - environment: - WAITFOR: scm:9876 - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","om"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: s3g - volumes: - - ../../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - - ../common-config - command: ["/opt/hadoop/bin/ozone","s3g"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: scm - volumes: - - ../../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - docker-config - - ../common-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - rm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: rm - volumes: - - ../../..:/opt/ozone - ports: - - 8088:8088 - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - command: ["yarn", "resourcemanager"] - nm: - image: ${HADOOP_IMAGE}:${HADOOP_VERSION} - hostname: nm - volumes: - - ../../..:/opt/ozone - env_file: - - ./docker-config - - ../common-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - WAIT_FOR: rm:8088 - command: ["yarn","nodemanager"] -# Optional section: comment out this part to get DNS resolution for all the containers. 
-# Add 127.0.0.1 (or the ip of your docker machine) to the resolv.conf to get local DNS resolution -# For all the containers (including resource managers and Node manager UI) -# dns: -# image: andyshinn/dnsmasq:2.76 -# ports: -# - 53:53/udp -# - 53:53/tcp -# volumes: -# - "/var/run/docker.sock:/var/run/docker.sock" -# command: -# - "-k" -# - "-d" diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config deleted file mode 100644 index d7ead2172b9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/docker-config +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -CORE-SITE.xml_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs -MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh deleted file mode 100755 index b1910a57e1f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-mr/hadoop32/test.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm createmrenv.robot
-
-# reinitialize the directories to use
-export OZONE_DIR=/opt/ozone
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../../testlib.sh"
-
-execute_robot_test rm ozonefs/hadoopo3fs.robot
-
-execute_robot_test rm mapreduce.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
deleted file mode 100644
index 96ab163b474..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml
deleted file mode 100644
index 2cd2ce80c16..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-compose.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - om1: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9880:9874 - - 9890:9872 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - om2: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9882:9874 - - 9892:9872 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - om3: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9884:9874 - - 9894:9872 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config deleted file mode 100644 index f3de99a50a7..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-CORE-SITE.XML_fs.defaultFS=o3fs://bucket.volume.id1
-OZONE-SITE.XML_ozone.om.service.ids=id1
-OZONE-SITE.XML_ozone.om.nodes.id1=om1,om2,om3
-OZONE-SITE.XML_ozone.om.address.id1.om1=om1
-OZONE-SITE.XML_ozone.om.address.id1.om2=om2
-OZONE-SITE.XML_ozone.om.address.id1.om3=om3
-OZONE-SITE.XML_ozone.om.ratis.enable=true
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=True
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.handler.type=distributed
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-ASYNC_PROFILER_HOME=/opt/profiler
-
-#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
deleted file mode 100644
index 96ab163b474..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-HDDS_VERSION=${hdds.version}
-OZONE_RUNNER_VERSION=${docker.ozone-runner.version}
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
deleted file mode 100644
index 38e2ef33094..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - recon: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9888:9888 - env_file: - - ./docker-config - environment: - WAITFOR: om:9874 - command: ["/opt/hadoop/bin/ozone","recon"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config deleted file mode 100644 index 61d1378cded..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon -OZONE-SITE.XML_ozone.recon.om.db.dir=/data/metadata/recon -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -ASYNC_PROFILER_HOME=/opt/profiler - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh deleted file mode 100755 index f4bfcc3d57f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-recon/test.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -start_docker_env - -execute_robot_test scm basic/basic.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env b/hadoop-ozone/dist/src/main/compose/ozone-topology/.env deleted file mode 100644 index 249827bc91a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=0.5.0-SNAPSHOT -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml deleted file mode 100644 index a66eff617ed..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode_1: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - networks: - net: - ipv4_address: 10.5.0.4 - datanode_2: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - networks: - net: - ipv4_address: 10.5.0.5 - datanode_3: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - networks: - net: - ipv4_address: 10.5.0.6 - datanode_4: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - networks: - net: - ipv4_address: 10.5.0.7 - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - networks: - net: - ipv4_address: 10.5.0.70 - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - networks: - net: - ipv4_address: 10.5.0.71 -networks: - net: - driver: bridge - ipam: - config: - - subnet: 10.5.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config deleted file mode 100644 index ac6a3679de3..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-config +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true -OZONE-SITE.XML_ozone.scm.container.placement.impl=org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware -OZONE-SITE.XML_net.topology.node.switch.mapping.impl=org.apache.hadoop.net.TableMapping -OZONE-SITE.XML_net.topology.table.file.name=/opt/hadoop/compose/ozone-topology/network-config -OZONE-SITE.XML_dfs.network.topology.aware.read.enable=true -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -ASYNC_PROFILER_HOME=/opt/profiler -HDDS_DN_OPTS=-Dmodule.name=datanode -HDFS_OM_OPTS=-Dmodule.name=om -HDFS_STORAGECONTAINERMANAGER_OPTS=-Dmodule.name=scm -HDFS_OM_SH_OPTS=-Dmodule.name=sh -HDFS_SCM_CLI_OPTS=-Dmodule.name=scmcli - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config b/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config deleted file mode 100644 index 5c6af824a19..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/network-config +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -10.5.0.4 /rack1 -10.5.0.5 /rack1 -10.5.0.6 /rack1 -10.5.0.7 /rack2 -10.5.0.8 /rack2 -10.5.0.9 /rack2 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh deleted file mode 100755 index d7402dfa5ab..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone-topology/test.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -start_docker_env 4 - -#Due to the limitation of the current auditparser test, it should be the -#first test in a clean cluster. - -#Disabling for now, audit parser tool during parse getting exception. -#execute_robot_test om auditparser - -execute_robot_test scm basic/basic.robot - -execute_robot_test scm topology/scmcli.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone/.env b/hadoop-ozone/dist/src/main/compose/ozone/.env deleted file mode 100644 index 96ab163b474..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml deleted file mode 100644 index 145ce3ebb17..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - - 9882 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - privileged: true #required by the profiler - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config deleted file mode 100644 index 380b529cd33..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -ASYNC_PROFILER_HOME=/opt/profiler - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh deleted file mode 100755 index e06f817f3d8..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -start_docker_env - -#Due to the limitation of the current auditparser test, it should be the -#first test in a clean cluster. - -#Disabling for now, audit parser tool during parse getting exception. -#execute_robot_test om auditparser - -execute_robot_test scm basic/basic.robot - -execute_robot_test scm gdpr/gdpr.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env b/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env deleted file mode 100644 index 96ab163b474..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml deleted file mode 100644 index 703329fe144..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-compose.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - ozone_client: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9869 - command: ["tail", "-f","/etc/passwd"] - env_file: - - ./docker-config diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config deleted file mode 100644 index 4d5466c6ab9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.client.max.retries=10 -OZONE-SITE.XML_ozone.scm.stale.node.interval=2m -OZONE-SITE.XML_ozone.scm.dead.node.interval=5m -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s -OZONE-SITE.XML_hdds.heartbeat.interval=2s -OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s -OZONE-SITE.XML_hdds.scm.replication.thread.interval=6s -OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s -OZONE-SITE.XML_dfs.ratis.server.failure.duration=35s -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env b/hadoop-ozone/dist/src/main/compose/ozoneperf/.env deleted file mode 100644 index 96ab163b474..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md b/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md deleted file mode 100644 index 62328e02cb9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/README.md +++ /dev/null @@ -1,56 +0,0 @@ - - -# Compose files for local performance tests - -This directory contains docker-compose definition for an ozone cluster where -all the metrics are saved to a prometheus instance, and profiling and Jaeger -tracing is turned on and set up. - -Prometheus follows a pull based approach where the metrics are published - on a HTTP endpoint. - -Prometheus compatible metrics endpoint can be enabled by setting `hdds.prometheus.endpoint.enabled` property to `true` - -## How to start - -Start the cluster with `docker-compose` - -``` -docker-compose up -d -``` - -Note: The freon test will be started after 30 seconds. - -## How to use - -You can check the ozone web ui: - -OzoneManager: http://localhost:9874 -SCM: http://localhost:9876 - -You can check the ozone metrics from the prometheus web ui. - -http://localhost:9090/graph - -You can view Grafana dashboards at: - -http://localhost:3000 - -Default dashboards available are: -Ozone - Object Metrics -Ozone - RPC Metrics - -You can access the Jaeger UI at: -http://localhost:16686 \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml deleted file mode 100644 index fa205407e87..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-compose.yaml +++ /dev/null @@ -1,89 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] - jaeger: - image: jaegertracing/all-in-one:latest - environment: - COLLECTOR_ZIPKIN_HTTP_PORT: 9411 - ports: - - 16686:16686 - prometheus: - image: prom/prometheus - volumes: - - "../common/prometheus/prometheus.yml:/etc/prometheus.yml" - command: ["--config.file","/etc/prometheus.yml"] - ports: - - 9090:9090 - freon: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - environment: - SLEEP_SECONDS: 30 - env_file: - - ./docker-config - command: ["ozone","freon","rk"] - grafana: - image: grafana/grafana - volumes: - - "../common/grafana/dashboards:/var/lib/grafana/dashboards" - - "../common/grafana/provisioning:/etc/grafana/provisioning" - - "../common/grafana/conf/grafana.ini:/etc/grafana/grafana.ini" - command: ["-config","/etc/grafana/grafana.ini"] - ports: - - 3000:3000 - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config b/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config deleted file mode 100644 index d2d345272a1..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-OZONE-SITE.XML_ozone.om.address=om
-OZONE-SITE.XML_ozone.om.http-address=om:9874
-OZONE-SITE.XML_ozone.scm.names=scm
-OZONE-SITE.XML_ozone.enabled=true
-OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
-OZONE-SITE.XML_ozone.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
-OZONE-SITE.XML_ozone.replication=1
-OZONE-SITE.XML_hdds.prometheus.endpoint.enabled=true
-OZONE-SITE.XML_hdds.profiler.endpoint.enabled=true
-
-ASYNC_PROFILER_HOME=/opt/profiler
-
-HDFS-SITE.XML_rpc.metrics.quantile.enable=true
-HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-
-JAEGER_SAMPLER_PARAM=1
-JAEGER_SAMPLER_TYPE=const
-JAEGER_AGENT_HOST=jaeger
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh b/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh
deleted file mode 100755
index f4bfcc3d57f..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozoneperf/test.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-# shellcheck source=/dev/null
-source "$COMPOSE_DIR/../testlib.sh"
-
-start_docker_env
-
-execute_robot_test scm basic/basic.robot
-
-stop_docker_env
-
-generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
deleted file mode 100644
index 96ab163b474..00000000000
--- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/.env
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml deleted file mode 100644 index 78fd996a70c..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-compose.yaml +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - s3g: - image: haproxy:latest - volumes: - - ../..:/opt/hadoop - - ./haproxy-conf/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg - ports: - - 9878:9878 - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] - s3g1: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9879:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] - s3g2: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9880:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] - s3g3: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9881:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config deleted file mode 100644 index d3efa2e884f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/docker-config +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds - -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg deleted file mode 100644 index 5af09fa400a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/haproxy-conf/haproxy.cfg +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Simple configuration for an HTTP proxy listening on port 9878 on all -# interfaces and forwarding requests to a multiple multiple S3 servers in round -# robin fashion. -global - daemon - maxconn 256 - -defaults - mode http - timeout connect 5000ms - timeout client 50000ms - timeout server 50000ms - -frontend http-in - bind *:9878 - default_backend servers - -backend servers - balance roundrobin - server server1 s3g1:9878 maxconn 32 - server server2 s3g2:9878 maxconn 32 - server server3 s3g3:9878 maxconn 32 diff --git a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh b/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh deleted file mode 100755 index f4bfcc3d57f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3-haproxy/test.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -start_docker_env - -execute_robot_test scm basic/basic.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/.env b/hadoop-ozone/dist/src/main/compose/ozones3/.env deleted file mode 100644 index 96ab163b474..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml deleted file mode 100644 index cc4bfd2268c..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-compose.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["ozone","datanode"] - env_file: - - ./docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["ozone","om"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["ozone","scm"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["ozone","s3g"] diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config deleted file mode 100644 index d3efa2e884f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds - -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/test.sh b/hadoop-ozone/dist/src/main/compose/ozones3/test.sh deleted file mode 100755 index 0160da9e382..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozones3/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -start_docker_env - -execute_robot_test scm basic/basic.robot - -execute_robot_test scm s3 - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env b/hadoop-ozone/dist/src/main/compose/ozonescripts/.env deleted file mode 100644 index 96ab163b474..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.env +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys deleted file mode 100644 index ae390529c7e..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/authorized_keys +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config deleted file mode 100644 index 6506916ded0..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/config +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -Host * - UserKnownHostsFile /dev/null - StrictHostKeyChecking no diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment deleted file mode 100644 index cbde0f2078d..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -JAVA_HOME=/usr/lib/jvm/jre diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa deleted file mode 100644 index 6632ce51c54..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA4BJi6WJuAa1ratShvYYWVwmYBqxE57btHjU6NtVN1SnPZx/f -6LezOpQGsLBXE/bl7uG+fD05Z378B/0wE5QhYwvJ9Ge0jsfhVOi90p/FEYfR2l+C -9LRfLvO6AgA+HQa1BoYQd+norh/XQQVpcukSn32Cb642rW3OxsQTv3uTklDLapPw -0lSpLJgYWcbrayorwjYw4rIfVYhiH/G+ckvzJRBpmlSgKYpNXyeYjpZaDPxoKQet -GFf4W6nwnnG1oHojNYCvdoHTnAP1sdQu34/AUiOYg0hOnmXhfNIDN+NtZb8jw1sl -YRWhm5GuK1wV/32/UEkQQPUU6oUSgBJgCH+PRwIDAQABAoIBAQDI1TH6ZNKchkck -9XgSWsBjOqIcOQN5fCeDT8nho8WjLVpL3/Hcr+ngsxRcAXHK3xyvw33r9SQic1qJ -/pC8u6RBFivo95qJ7vU0GXcp9TG4yLd6tui1U4WMm784U+dYNM7EDh1snSaECt3v -1V3yNJ0QfnoOh2NShn0zAkOA+M4H8Nx2SudMCsjcbK9+fYxzW3hX+sJpMKdjG1HW -DUz+I7cW7t0EGaVrgVSV+eR58LiXu+14YDNMrySiejB4nD2sKrx93XgiCBECCsBN -GLQGJCztaXoAY+5Kf/aJ9EEf2wBF3GecRk+XIAd87PeDmeahLQAVkAJ/rD1vsKFs -8kWi6CrhAoGBAP7leG/dMBhlfvROpBddIfWm2i8B+oZiAlSjdYGz+/ZhUaByXk18 -pupMGiiMQR1ziPnEg0gNgR2ZkH54qrXPn5WcQa4rlSEtUsZEp5v5WblhfX2QwKzY -G/uhA+mB7wXpQkSmXo0LclfPF2teROQrG1OyfWkWbxFH4i3+em7sL95jAoGBAOEK -v+wscqkMLW7Q8ONbWMCCBlmMHr6baB3VDCYZx25lr+GIF5zmJJFTmF2rq2VSAlts -qx1AGmaUSo78kC5FuJvSNTL6a1Us5ucdthQZM3N8pAz+OAE+QEU+BsdA27yAh3tO -yKDsMFNHKtXcgy5LeB5gzENLlNyw2jgkRv2Ef77NAoGAVH8DHqoHEH9Mx3XuRWR1 -JnaqKx0PzE5fEWmiQV3Fr5XxNivTgQJKXq7dmQVtbHLpPErdbhwz6fkHAjXD+UMb -VsAWscL2y6m3n8wQd87/5EkiDWbXyDRXimGE53pQHviFJDa2bzEVNXCMBeaZFb4I -cAViN1zdcrAOlUqfkXewIpsCgYB8wsXl/DpRB+RENGfn0+OfTjaQ/IKq72NIbq1+ -jfondQ6N/TICFQEe5HZrL9okoNOXteYjoD9CsWGoZdLVJGgVUvOVYImSvgMBDFK+ -T75bfzU/0sxfvBBLkviVDJsFpUf3D5VgybB86s6Po+HCD6r3RHjZshRESXOhflMx -B3z+3QKBgE2Lwo0DuwUGkm6k8psyn3x8EiXNsiNw12cojicFTyKUYLHxMBeVbCLW -3j3pxSggJgRuBLLzixUHbHp91r2ARTy28naK7R/la8yKVqK6ojcikN2mQsCHYtwB -nuFwXr42ytn6G+9Wn4xT64tGjRCqyZn0/v0XsPjVCyrZ6G7EtNHP ------END RSA PRIVATE KEY----- diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub b/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub deleted file mode 100644 index ae390529c7e..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/id_rsa.pub +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgEmLpYm4BrWtq1KG9hhZXCZgGrETntu0eNTo21U3VKc9nH9/ot7M6lAawsFcT9uXu4b58PTlnfvwH/TATlCFjC8n0Z7SOx+FU6L3Sn8URh9HaX4L0tF8u87oCAD4dBrUGhhB36eiuH9dBBWly6RKffYJvrjatbc7GxBO/e5OSUMtqk/DSVKksmBhZxutrKivCNjDish9ViGIf8b5yS/MlEGmaVKApik1fJ5iOlloM/GgpB60YV/hbqfCecbWgeiM1gK92gdOcA/Wx1C7fj8BSI5iDSE6eZeF80gM3421lvyPDWyVhFaGbka4rXBX/fb9QSRBA9RTqhRKAEmAIf49H hadoop@cdae967fa87a diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile deleted file mode 100644 index b07800053b2..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -FROM apache/ozone-runner -RUN sudo yum install -y openssh-clients openssh-server - -RUN sudo ssh-keygen -A -RUN sudo mkdir -p /run/sshd -RUN sudo sed -i "s/.*UsePrivilegeSeparation.*/UsePrivilegeSeparation no/g" /etc/ssh/sshd_config -RUN sudo sed -i "s/.*PermitUserEnvironment.*/PermitUserEnvironment yes/g" /etc/ssh/sshd_config -RUN sudo sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd - -#/opt/hadoop is mounted, we can't use it as a home -RUN sudo usermod -d /opt hadoop -ADD .ssh /opt/.ssh -RUN sudo chown -R hadoop /opt/.ssh -RUN sudo chown hadoop /opt -RUN sudo chmod 600 /opt/.ssh/* -RUN sudo chmod 700 /opt/.ssh - -RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/jre/" >> /etc/profile' -CMD ["sudo","/usr/sbin/sshd","-D"] diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md b/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md deleted file mode 100644 index 2531fa43660..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/README.md +++ /dev/null @@ -1,38 +0,0 @@ - - -# start-ozone environment - -This is an example environment to use/test the `./sbin/start-ozone.sh` and `./sbin/stop-ozone.sh` scripts. - -There are ssh connections between the containers, and the start/stop scripts can handle the start/stop process -similar to a real cluster. - -To use it, first start the cluster: - -``` -docker-compose up -d -``` - -After a successful startup (which starts only the ssh daemons) you can start ozone: - -``` -./start.sh -``` - -Check if the java processes are started: - -``` -./ps.sh -``` \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml deleted file mode 100644 index 62f116368f4..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-compose.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3" -services: - datanode: - build: . - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - env_file: - - ./docker-config - om: - build: . - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - env_file: - - ./docker-config - scm: - build: . - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - ./docker-config diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config b/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config deleted file mode 100644 index fe713e0dde2..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/docker-config +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -CORE-SITE.XML_fs.defaultFS=hdfs://namenode:9000 -OZONE-SITE.XML_ozone.ksm.address=ksm -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=true -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService -HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 -HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh deleted file mode 100755 index d5e2c386752..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/ps.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -docker-compose ps -q | xargs -n1 -I CONTAINER docker exec CONTAINER ps xa \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh deleted file mode 100755 index 49fc506ca29..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/start.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -x -docker-compose ps | grep datanode | awk '{print $1}' | xargs -n1 docker inspect --format '{{ .Config.Hostname }}' > ../../etc/hadoop/workers -docker-compose ps | grep ozonescripts | awk '{print $1}' | xargs -I CONTAINER -n1 docker exec CONTAINER cp /opt/hadoop/etc/hadoop/workers /etc/hadoop/workers -docker-compose exec scm /opt/hadoop/bin/ozone scm --init -docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh -#We need a running SCM for om objectstore creation -#TODO create a utility to wait for the startup -sleep 10 -docker-compose exec om /opt/hadoop/bin/ozone om --init -docker-compose exec scm /opt/hadoop/sbin/start-ozone.sh diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh b/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh deleted file mode 100755 index a3ce08af573..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/stop.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-docker-compose exec scm /opt/hadoop/sbin/stop-ozone.sh diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env deleted file mode 100644 index 37227ac42bf..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/.env +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -HADOOP_VERSION=3 -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md deleted file mode 100644 index 14262701da5..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/README.md +++ /dev/null @@ -1,73 +0,0 @@ - -# Secure Docker-compose with KMS, Yarn RM and NM -This docker compose allows you to test sample MapReduce jobs with OzoneFileSystem. -It is a superset of the ozonesecure docker-compose, which adds Yarn NM/RM in addition -to Ozone OM/SCM/NM/DN and the Kerberos KDC. - -## Basic setup - -``` -cd $(git rev-parse --show-toplevel)/hadoop-ozone/dist/target/ozone-@project.version@/compose/ozonesecure-mr - -docker-compose up -d -``` - -## Ozone Manager Setup - -``` -docker-compose exec om bash - -kinit -kt /etc/security/keytabs/testuser.keytab testuser/om@EXAMPLE.COM - -ozone sh volume create /vol1 - -ozone sh bucket create /vol1/bucket1 - -ozone sh key put /vol1/bucket1/key1 LICENSE.txt - -ozone fs -ls o3fs://bucket1.vol1/ -``` - -## Yarn Resource Manager Setup -``` -docker-compose exec rm bash - -kinit -kt /etc/security/keytabs/hadoop.keytab hadoop/rm@EXAMPLE.COM -export HADOOP_MAPRED_HOME=/opt/hadoop/share/hadoop/mapreduce - -export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/opt/hadoop/share/hadoop/mapreduce/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - -hadoop fs -mkdir /user -hadoop fs -mkdir /user/hadoop -``` - -## Run Examples - -### WordCount -``` -yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1.count - -hadoop fs -cat /key1.count/part-r-00000 -``` - -### Pi -``` -yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar pi 10 100 -``` - -### RandomWrite -``` -yarn jar $HADOOP_MAPRED_HOME/hadoop-mapreduce-examples-*.jar randomwriter -Dtest.randomwrite.total_bytes=10000000 o3fs://bucket1.vol1/randomwrite.out -``` diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml deleted file mode 100644 index 53e0142b2b6..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-compose.yaml +++ /dev/null @@ -1,135 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor 
license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -version: "3.5" -services: - kdc: - build: - context: docker-image/docker-krb5 - dockerfile: Dockerfile-krb5 - args: - buildno: 1 - hostname: kdc - networks: - - ozone - volumes: - - ../..:/opt/hadoop - kms: - image: apache/hadoop:${HADOOP_VERSION} - networks: - - ozone - ports: - - 9600:9600 - env_file: - - ./docker-config - command: ["hadoop", "kms"] - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - networks: - - ozone - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: om - networks: - - ozone - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - docker-config - command: ["/opt/hadoop/bin/ozone","om"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: s3g - networks: - - ozone - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","s3g"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: scm - networks: - - ozone - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - rm: - image: apache/hadoop:${HADOOP_VERSION} - hostname: rm - networks: - - ozone - volumes: - - ../..:/opt/ozone - ports: - - 8088:8088 - env_file: - - ./docker-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - command: ["yarn", "resourcemanager"] - nm: - image: apache/hadoop:${HADOOP_VERSION} - hostname: nm - networks: - - ozone - volumes: - - ../..:/opt/ozone - env_file: - - ./docker-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - WAIT_FOR: rm:8088 - command: ["yarn","nodemanager"] - jhs: - image: apache/hadoop:${HADOOP_VERSION} - container_name: jhs - hostname: jhs - networks: - - ozone - volumes: - - ../..:/opt/ozone - ports: - - 8188:8188 - env_file: - - ./docker-config - environment: - HADOOP_CLASSPATH: /opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - WAIT_FOR: rm:8088 - command: ["yarn","timelineserver"] -networks: - ozone: - name: ozone diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config deleted file mode 100644 index 646fd021ce7..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ /dev/null @@ -1,133 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor 
license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.block.token.enabled=true -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab -OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab -OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM -OZONE-SITE.XML_ozone.administrators=* - -OZONE-SITE.XML_ozone.security.enabled=true -OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_ozone.om.http.kerberos.principal=HTTP/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM -HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab -HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM -HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 -HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 -CORE-SITE.XML_dfs.data.transfer.protection=authentication -CORE-SITE.XML_hadoop.security.authentication=kerberos -CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*@EXAMPLE.COM)s/@.*///L -CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms - -#temporary disable authorization as org.apache.hadoop.yarn.server.api.ResourceTrackerPB is not properly annotated to support it -CORE-SITE.XML_hadoop.security.authorization=false -HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* -HADOOP-POLICY.XML_org.apache.hadoop.yarn.server.api.ResourceTracker.acl=* - -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -CORE-SITE.XML_fs.AbstractFileSystem.o3fs.impl=org.apache.hadoop.fs.ozone.OzFs 
-CORE-SITE.XML_fs.defaultFS=o3fs://bucket1.vol1/ - -MAPRED-SITE.XML_mapreduce.framework.name=yarn -MAPRED-SITE.XML_yarn.app.mapreduce.am.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.reduce.env=HADOOP_MAPRED_HOME=$HADOOP_HOME -MAPRED-SITE.XML_mapreduce.map.memory.mb=2048 -MAPRED-SITE.XML_mapreduce.reduce.memory.mb=2048 -#MAPRED-SITE.XML_mapred.child.java.opts=-Xmx2048 -MAPRED-SITE.XML_mapreduce.application.classpath=/opt/hadoop/share/hadoop/mapreduce/*:/opt/hadoop/share/hadoop/mapreduce/lib/*:/opt/ozone/share/ozone/lib/hadoop-ozone-filesystem-lib-current-@project.version@.jar - -YARN-SITE.XML_yarn.app.mapreduce.am.staging-dir=/user -YARN-SITE.XML_yarn.timeline-service.enabled=true -YARN-SITE.XML_yarn.timeline-service.generic.application.history.enabled=true -YARN-SITE.XML_yarn.timeline-service.hostname=jhs -YARN-SITE.XML_yarn.timeline-service.principal=jhs/jhs@EXAMPLE.COM -YARN-SITE.XML_yarn.timeline-service.keytab=/etc/security/keytabs/jhs.keytab -YARN-SITE.XML_yarn.log.server.url=http://jhs:8188/applicationhistory/logs/ - -YARN-SITE.XML_yarn.nodemanager.principal=nm/_HOST@EXAMPLE.COM -YARN-SITE.XML_yarn.nodemanager.keytab=/etc/security/keytabs/nm.keytab -YARN-SITE.XML_yarn.nodemanager.pmem-check-enabled=false -YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600 -YARN-SITE.XML_yarn.nodemanager.vmem-check-enabled=false -YARN-SITE.XML_yarn.nodemanager.aux-services=mapreduce_shuffle -YARN-SITE.XML_yarn.nodemanager.disk-health-checker.enable=false - -YARN-SITE.XML_yarn.resourcemanager.hostname=rm -YARN-SITE.XML_yarn.resourcemanager.keytab=/etc/security/keytabs/rm.keytab -YARN-SITE.XML_yarn.resourcemanager.principal=rm/rm@EXAMPLE.COM -YARN-SITE.XML_yarn.resourcemanager.system.metrics.publisher.enabled=true - -YARN-SITE.XML_yarn.log-aggregation-enable=true -YARN-SITE.XML_yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds=3600 -YARN-SITE.XML_yarn.nodemanager.delete.debug-delay-sec=600 - -# Yarn LinuxContainer requires the /opt/hadoop/etc/hadoop to be owned by root and not modifiable by other users, -# which prevents start.sh from changing the configurations based on docker-config -# YARN-SITE.XML_yarn.nodemanager.container-executor.class=org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor -# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.path=/opt/hadoop/bin/container-executor -# YARN-SITE.XML_yarn.nodemanager.linux-container-executor.group=hadoop - -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-applications=10000 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.maximum-am-resource-percent=0.1 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.resource-calculator=org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.queues=default -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.capacity=100 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.user-limit-factor=1 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.maximum-capacity=100 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.state=RUNNING -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_submit_applications=* -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.root.default.acl_administer_queue=* -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.node-locality-delay=40 -CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings= 
-CAPACITY-SCHEDULER.XML_yarn.scheduler.capacity.queue-mappings-override.enable=false - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -OZONE_DATANODE_SECURE_USER=root -KEYTAB_DIR=/etc/security/keytabs -KERBEROS_KEYTABS=dn om scm HTTP testuser s3g rm nm yarn jhs hadoop spark -KERBEROS_KEYSTORES=hadoop -KERBEROS_SERVER=kdc -JAVA_HOME=/usr/lib/jvm/jre -JSVC_HOME=/usr/bin -SLEEP_SECONDS=5 -KERBEROS_ENABLED=true diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5 b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5 deleted file mode 100644 index 6c6c816f871..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/Dockerfile-krb5 +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -FROM openjdk:8u191-jdk-alpine3.9 -# hadolint ignore=DL3018 -RUN apk add --no-cache bash ca-certificates openssl krb5-server krb5 && rm -rf /var/cache/apk/* && update-ca-certificates -RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64 -RUN chmod +x /usr/local/bin/dumb-init -RUN wget -O /root/issuer https://github.com/ajayydv/docker/raw/kdc/issuer -RUN chmod +x /root/issuer -WORKDIR /opt -COPY krb5.conf /etc/ -COPY kadm5.acl /var/lib/krb5kdc/kadm5.acl -RUN kdb5_util create -s -P Welcome1 -RUN kadmin.local -q "addprinc -randkey admin/admin@EXAMPLE.COM" -RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/admin@EXAMPLE.COM" -COPY launcher.sh . -RUN chmod +x /opt/launcher.sh -RUN mkdir -p /data -ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"] - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md deleted file mode 100644 index 60b675c8db5..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/README.md +++ /dev/null @@ -1,34 +0,0 @@ - - -# Experimental UNSECURE krb5 Kerberos container. - -Only for development. Not for production. - -The docker image contains a rest service which provides keystore and keytab files without any authentication! 
- -Master password: Welcome1 - -Principal: admin/admin@EXAMPLE.COM Password: Welcome1 - -Test: - -``` -docker run --net=host krb5 - -docker run --net=host -it --entrypoint=bash krb5 -kinit admin/admin -#pwd: Welcome1 -klist -``` diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl deleted file mode 100644 index f0cd66016fa..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/kadm5.acl +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -*/admin@EXAMPLE.COM x diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf deleted file mode 100644 index 0c274d36bb5..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/krb5.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -[logging] -default = FILE:/var/log/krb5libs.log -kdc = FILE:/var/log/krb5kdc.log -admin_server = FILE:/var/log/kadmind.log - -[libdefaults] - dns_canonicalize_hostname = false - dns_lookup_realm = false - ticket_lifetime = 24h - renew_lifetime = 7d - forwardable = true - rdns = false - default_realm = EXAMPLE.COM - -[realms] - EXAMPLE.COM = { - kdc = localhost - admin_server = localhost - max_renewable_life = 7d - } - -[domain_realm] - .example.com = EXAMPLE.COM - example.com = EXAMPLE.COM - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh deleted file mode 100644 index 0824f7b7ae6..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-image/docker-krb5/launcher.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -/root/issuer & -krb5kdc -n & -sleep 4 -kadmind -nofork & -sleep 2 -tail -f /var/log/krb5kdc.log & -tail -f /var/log/kadmind.log - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh deleted file mode 100755 index cc6ebf02242..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -export SECURITY_ENABLED=true - -start_docker_env - -execute_robot_test om kinit.robot - -execute_robot_test om createmrenv.robot - -# reinitialize the directories to use -export OZONE_DIR=/opt/ozone - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -execute_robot_test rm kinit-hadoop.robot - -execute_robot_test rm mapreduce.robot - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env b/hadoop-ozone/dist/src/main/compose/ozonesecure/.env deleted file mode 100644 index 37227ac42bf..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/.env +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -HADOOP_VERSION=3 -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md deleted file mode 100644 index 0ce9a0a8926..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/README.md +++ /dev/null @@ -1,22 +0,0 @@ - -# Experimental UNSECURE krb5 Kerberos container. - -Only for development. Not for production. - -#### Dockerfile for KDC: -* ./docker-image/docker-krb5/Dockerfile-krb5 - -#### Dockerfile for SCM,OM and DataNode: -* ./docker-image/runner/Dockerfile \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml deleted file mode 100644 index de60a411116..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-compose.yaml +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - kdc: - build: - context: docker-image/docker-krb5 - dockerfile: Dockerfile-krb5 - args: - buildno: 1 - hostname: kdc - volumes: - - ../..:/opt/hadoop - - kms: - image: apache/hadoop:${HADOOP_VERSION} - ports: - - 9600:9600 - env_file: - - ./docker-config - command: ["hadoop", "kms"] - - datanode: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - volumes: - - ../..:/opt/hadoop - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - docker-config - om: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: om - volumes: - - ../..:/opt/hadoop - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - docker-config - command: ["/opt/hadoop/bin/ozone","om"] - s3g: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: s3g - volumes: - - ../..:/opt/hadoop - ports: - - 9878:9878 - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","s3g"] - recon: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: recon - volumes: - - ../..:/opt/hadoop - ports: - - 9888:9888 - env_file: - - ./docker-config - environment: - WAITFOR: om:9874 - command: ["/opt/hadoop/bin/ozone","recon"] - scm: - image: apache/ozone-runner:${OZONE_RUNNER_VERSION} - hostname: scm - volumes: - - ../..:/opt/hadoop - ports: - - 9876:9876 - env_file: - - docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config deleted file mode 100644 index 44af35ee85d..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -CORE-SITE.XML_fs.o3fs.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_hdds.block.token.enabled=true -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab -OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab -OZONE-SITE.XML_ozone.s3g.keytab.file=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_ozone.s3g.authentication.kerberos.principal=HTTP/s3g@EXAMPLE.COM -OZONE-SITE.XML_ozone.recon.authentication.kerberos.principal=HTTP/recon@EXAMPLE.COM -OZONE-SITE.XML_ozone.recon.keytab.file=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon -OZONE-SITE.XML_recon.om.snapshot.task.initial.delay=20s - -OZONE-SITE.XML_ozone.security.enabled=true -OZONE-SITE.XML_ozone.acl.enabled=true -OZONE-SITE.XML_ozone.acl.authorizer.class=org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer -OZONE-SITE.XML_ozone.administrators=* -OZONE-SITE.XML_hdds.scm.http.kerberos.principal=HTTP/scm@EXAMPLE.COM -OZONE-SITE.XML_hdds.scm.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_ozone.om.http.kerberos.principal=HTTP/om@EXAMPLE.COM -OZONE-SITE.XML_ozone.om.http.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM -HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab -HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM -HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019 -HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012 -CORE-SITE.XML_dfs.data.transfer.protection=authentication -CORE-SITE.XML_hadoop.security.authentication=kerberos -CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/ -CORE-SITE.XML_hadoop.security.key.provider.path=kms://http@kms:9600/kms - -CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false -CORE-SITE.XML_hadoop.http.authentication.signature.secret.file=/etc/security/http_secret -CORE-SITE.XML_hadoop.http.authentication.type=kerberos -CORE-SITE.XML_hadoop.http.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM -CORE-SITE.XML_hadoop.http.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab -CORE-SITE.XML_hadoop.http.filter.initializers=org.apache.hadoop.security.AuthenticationFilterInitializer - -CORE-SITE.XML_hadoop.security.authorization=true -HADOOP-POLICY.XML_ozone.om.security.client.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* -HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* - -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 - -#Enable this variable to print out all hadoop rpc 
traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. -#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -OZONE_DATANODE_SECURE_USER=root -SECURITY_ENABLED=true -KEYTAB_DIR=/etc/security/keytabs -KERBEROS_KEYTABS=dn om scm HTTP testuser testuser2 s3g -KERBEROS_KEYSTORES=hadoop -KERBEROS_SERVER=kdc -JAVA_HOME=/usr/lib/jvm/jre -JSVC_HOME=/usr/bin -SLEEP_SECONDS=5 -KERBEROS_ENABLED=true diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5 b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5 deleted file mode 100644 index 1a6097eaa8c..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/Dockerfile-krb5 +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -FROM openjdk:8u191-jdk-alpine3.9 -RUN apk add --update bash ca-certificates openssl krb5-server krb5 && rm -rf /var/cache/apk/* && update-ca-certificates -RUN wget -O /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64 -RUN chmod +x /usr/local/bin/dumb-init -RUN wget -O /root/issuer https://github.com/ajayydv/docker/raw/kdc/issuer -RUN chmod +x /root/issuer -WORKDIR /opt -ADD krb5.conf /etc/ -ADD kadm5.acl /var/lib/krb5kdc/kadm5.acl -RUN kdb5_util create -s -P Welcome1 -RUN kadmin.local -q "addprinc -randkey admin/admin@EXAMPLE.COM" -RUN kadmin.local -q "ktadd -k /tmp/admin.keytab admin/admin@EXAMPLE.COM" -ADD launcher.sh . -RUN chmod +x /opt/launcher.sh -RUN mkdir -p /data -ENTRYPOINT ["/usr/local/bin/dumb-init", "--", "/opt/launcher.sh"] - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md deleted file mode 100644 index b864a5fa5d9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/README.md +++ /dev/null @@ -1,34 +0,0 @@ - - -# Experimental UNSECURE krb5 Kerberos container. - -Only for development. Not for production. - -The docker image contains a rest service which provides keystore and keytab files without any authentication! 
- -Master password: Welcome1 - -Principal: admin/admin@EXAMPLE.COM Password: Welcome1 - -Test: - -``` -docker run --net=host krb5 - -docker run --net=host -it --entrypoint=bash krb5 -kinit admin/admin -#pwd: Welcome1 -klist -``` diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl deleted file mode 100644 index f0cd66016fa..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/kadm5.acl +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -*/admin@EXAMPLE.COM x diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf deleted file mode 100644 index 0c274d36bb5..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/krb5.conf +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -[logging] -default = FILE:/var/log/krb5libs.log -kdc = FILE:/var/log/krb5kdc.log -admin_server = FILE:/var/log/kadmind.log - -[libdefaults] - dns_canonicalize_hostname = false - dns_lookup_realm = false - ticket_lifetime = 24h - renew_lifetime = 7d - forwardable = true - rdns = false - default_realm = EXAMPLE.COM - -[realms] - EXAMPLE.COM = { - kdc = localhost - admin_server = localhost - max_renewable_life = 7d - } - -[domain_realm] - .example.com = EXAMPLE.COM - example.com = EXAMPLE.COM - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh deleted file mode 100644 index 0824f7b7ae6..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-image/docker-krb5/launcher.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e -/root/issuer & -krb5kdc -n & -sleep 4 -kadmind -nofork & -sleep 2 -tail -f /var/log/krb5kdc.log & -tail -f /var/log/kadmind.log - diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh deleted file mode 100755 index f32846386a9..00000000000 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -export COMPOSE_DIR - -# shellcheck source=/dev/null -source "$COMPOSE_DIR/../testlib.sh" - -export SECURITY_ENABLED=true - -start_docker_env - -execute_robot_test scm kinit.robot - -execute_robot_test scm basic - -execute_robot_test scm security - -execute_robot_test scm ozonefs/ozonefs.robot - -execute_robot_test s3g s3 - -execute_robot_test scm scmcli - -stop_docker_env - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh deleted file mode 100755 index afa5d56c6f1..00000000000 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# -# Test executor to test all the compose/*/test.sh test scripts. 
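A minimal usage sketch may help here; the `hadoop-ozone/dist/target/ozone-*/compose` path is an assumption based on the default distribution layout and can differ per build:

```
# Sketch: run every compose/*/test.sh suite and aggregate the results.
cd hadoop-ozone/dist/target/ozone-*/compose     # assumed location of the compose dir
./test-all.sh; rc=$?
echo "overall exit code: $rc"                   # non-zero if any environment failed

# The combined Robot Framework report produced by rebot, plus the per-environment
# robot-*.xml and docker-*.log files, end up under ./result/.
```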
-# - -SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) -ALL_RESULT_DIR="$SCRIPT_DIR/result" - -mkdir -p "$ALL_RESULT_DIR" -rm "$ALL_RESULT_DIR/*" - -RESULT=0 -IFS=$'\n' -# shellcheck disable=SC2044 -for test in $(find "$SCRIPT_DIR" -name test.sh | sort); do - echo "Executing test in $(dirname "$test")" - - #required to read the .env file from the right location - cd "$(dirname "$test")" || continue - ./test.sh - ret=$? - if [[ $ret -ne 0 ]]; then - RESULT=1 - echo "ERROR: Test execution of $(dirname "$test") is FAILED!!!!" - fi - RESULT_DIR="$(dirname "$test")/result" - cp "$RESULT_DIR"/robot-*.xml "$RESULT_DIR"/docker-*.log "$ALL_RESULT_DIR"/ -done - -rebot -N "smoketests" -d "$SCRIPT_DIR/result" "$SCRIPT_DIR/result/robot-*.xml" -exit $RESULT diff --git a/hadoop-ozone/dist/src/main/compose/test-single.sh b/hadoop-ozone/dist/src/main/compose/test-single.sh deleted file mode 100755 index 629a9bc07a6..00000000000 --- a/hadoop-ozone/dist/src/main/compose/test-single.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# -# Single test executor, can start a single robot test in any running container. -# - - -COMPOSE_DIR="$PWD" -export COMPOSE_DIR - -if [[ ! -f "$COMPOSE_DIR/docker-compose.yaml" ]]; then - echo "docker-compose.yaml is missing from the current dir. Please run this command from a docker-compose environment." - exit 1 -fi -if (( $# != 2 )); then -cat << EOF - Single test executor - - Usage: - - ../test-single.sh - - container: Name of the running docker-compose container (docker-compose.yaml is required in the current directory) - - robot_test: name of the robot test or directory relative to the smoketest dir. - - - -EOF - -fi - -# shellcheck source=testlib.sh -source "$COMPOSE_DIR/../testlib.sh" - -create_results_dir - -execute_robot_test "$1" "$2" - -generate_report diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh deleted file mode 100755 index b20dca894ae..00000000000 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -set -e - -COMPOSE_ENV_NAME=$(basename "$COMPOSE_DIR") -COMPOSE_FILE=$COMPOSE_DIR/docker-compose.yaml -RESULT_DIR=${RESULT_DIR:-"$COMPOSE_DIR/result"} -RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result" -SMOKETEST_DIR_INSIDE="${OZONE_DIR:-/opt/hadoop}/smoketest" - -## @description create results directory, purging any prior data -create_results_dir() { - #delete previous results - rm -rf "$RESULT_DIR" - mkdir -p "$RESULT_DIR" - #Should be writeable from the docker containers where user is different. - chmod ogu+w "$RESULT_DIR" -} - -## @description print the number of datanodes up -## @param the docker-compose file -count_datanodes() { - local compose_file=$1 - - local jmx_url='http://scm:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' - if [[ "${SECURITY_ENABLED}" == 'true' ]]; then - docker-compose -f "${compose_file}" exec -T scm bash -c "kinit -k HTTP/scm@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab && curl --negotiate -u : -s '${jmx_url}'" - else - docker-compose -f "${compose_file}" exec -T scm curl -s "${jmx_url}" - fi \ - | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value' || true -} - -## @description wait until datanodes are up (or 30 seconds) -## @param the docker-compose file -## @param number of datanodes to wait for (default: 3) -wait_for_datanodes(){ - local compose_file=$1 - local -i datanode_count=${2:-3} - - #Reset the timer - SECONDS=0 - - #Don't give it up until 30 seconds - while [[ $SECONDS -lt 90 ]]; do - - #This line checks the number of HEALTHY datanodes registered in scm over the - # jmx HTTP servlet - datanodes=$(count_datanodes "${compose_file}") - if [[ "$datanodes" ]]; then - if [[ ${datanodes} -ge ${datanode_count} ]]; then - - #It's up and running. Let's return from the function. - echo "$datanodes datanodes are up and registered to the scm" - return - else - - #Print it only if a number. Could be not a number if scm is not yet started - echo "$datanodes datanode is up and healthy (until now)" - fi - fi - - sleep 2 - done - echo "WARNING! Datanodes are not started successfully. Please check the docker-compose files" - return 1 -} - -## @description Starts a docker-compose based test environment -## @param number of datanodes to start and wait for (default: 3) -start_docker_env(){ - local -i datanode_count=${1:-3} - - create_results_dir - - docker-compose -f "$COMPOSE_FILE" --no-ansi down - docker-compose -f "$COMPOSE_FILE" --no-ansi up -d --scale datanode="${datanode_count}" \ - && wait_for_datanodes "$COMPOSE_FILE" "${datanode_count}" \ - && sleep 10 - - if [[ $? -gt 0 ]]; then - OUTPUT_NAME="$COMPOSE_ENV_NAME" - stop_docker_env - return 1 - fi -} - -## @description Execute robot tests in a specific container. 
-## @param Name of the container in the docker-compose file -## @param robot test file or directory relative to the smoketest dir -execute_robot_test(){ - CONTAINER="$1" - shift 1 #Remove first argument which was the container name - # shellcheck disable=SC2206 - ARGUMENTS=($@) - TEST="${ARGUMENTS[${#ARGUMENTS[@]}-1]}" #Use last element as the test name - unset 'ARGUMENTS[${#ARGUMENTS[@]}-1]' #Remove the last element, remainings are the custom parameters - TEST_NAME=$(basename "$TEST") - TEST_NAME="$(basename "$COMPOSE_DIR")-${TEST_NAME%.*}" - set +e - OUTPUT_NAME="$COMPOSE_ENV_NAME-$TEST_NAME-$CONTAINER" - OUTPUT_PATH="$RESULT_DIR_INSIDE/robot-$OUTPUT_NAME.xml" - docker-compose -f "$COMPOSE_FILE" exec -T "$CONTAINER" mkdir -p "$RESULT_DIR_INSIDE" - # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T -e SECURITY_ENABLED="${SECURITY_ENABLED}" "$CONTAINER" python -m robot ${ARGUMENTS[@]} --log NONE -N "$TEST_NAME" --report NONE "${OZONE_ROBOT_OPTS[@]}" --output "$OUTPUT_PATH" "$SMOKETEST_DIR_INSIDE/$TEST" - - FULL_CONTAINER_NAME=$(docker-compose -f "$COMPOSE_FILE" ps | grep "_${CONTAINER}_" | head -n 1 | awk '{print $1}') - docker cp "$FULL_CONTAINER_NAME:$OUTPUT_PATH" "$RESULT_DIR/" - set -e - -} - - -## @description Execute specific command in docker container -## @param container name -## @param specific command to execute -execute_command_in_container(){ - set -e - # shellcheck disable=SC2068 - docker-compose -f "$COMPOSE_FILE" exec -T $@ - set +e -} - - -## @description Stops a docker-compose based test environment (with saving the logs) -stop_docker_env(){ - docker-compose -f "$COMPOSE_FILE" --no-ansi logs > "$RESULT_DIR/docker-$OUTPUT_NAME.log" - if [ "${KEEP_RUNNING:-false}" = false ]; then - docker-compose -f "$COMPOSE_FILE" --no-ansi down - fi -} - -## @description Generate robot framework reports based on the saved results. -generate_report(){ - - if command -v rebot > /dev/null 2>&1; then - #Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output) - rebot -d "$RESULT_DIR" "$RESULT_DIR/robot-*.xml" - else - echo "Robot framework is not installed, the reports can be generated (sudo pip install robotframework)." - exit 1 - fi -} diff --git a/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties deleted file mode 100644 index 3c4d0457592..00000000000 --- a/hadoop-ozone/dist/src/main/conf/dn-audit-log4j2.properties +++ /dev/null @@ -1,90 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#
-# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=30 - -filter=read,write -# filter.read.onMatch=DENY avoids logging all READ events -# filter.read.onMatch=ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch=NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type=MarkerFilter -filter.read.marker=READ -filter.read.onMatch=DENY -filter.read.onMismatch=NEUTRAL - -# filter.write.onMatch=DENY avoids logging all WRITE events -# filter.write.onMatch=ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type=MarkerFilter -filter.write.marker=WRITE -filter.write.onMatch=NEUTRAL -filter.write.onMismatch=NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -# Uncomment following section to enable logging to console appender also -#appenders=console, rolling -#appender.console.type=Console -#appender.console.name=STDOUT -#appender.console.layout.type=PatternLayout -#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -# Comment this line when using both console and rolling appenders -appenders=rolling - -#Rolling File Appender with size & time thresholds. -#Rolling is triggered when either threshold is breached. -#The rolled over file is compressed by default -#Time interval is specified in seconds 86400s=1 day -appender.rolling.type=RollingFile -appender.rolling.name=RollingFile -appender.rolling.fileName =${sys:hadoop.log.dir}/dn-audit-${hostName}.log -appender.rolling.filePattern=${sys:hadoop.log.dir}/dn-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -appender.rolling.layout.type=PatternLayout -appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -appender.rolling.policies.type=Policies -appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval=86400 -appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -appender.rolling.policies.size.size=64MB - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=DNAudit -logger.audit.level=INFO -logger.audit.appenderRefs=rolling -logger.audit.appenderRef.file.ref=RollingFile - -rootLogger.level=INFO -#rootLogger.appenderRefs=stdout -#rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/conf/log4j.properties b/hadoop-ozone/dist/src/main/conf/log4j.properties deleted file mode 100644 index ae42c611197..00000000000 --- a/hadoop-ozone/dist/src/main/conf/log4j.properties +++ /dev/null @@ -1,158 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Define some default values that can be overridden by system properties -hadoop.root.logger=INFO,console -hadoop.log.dir=. -hadoop.log.file=hadoop.log - -# Define the root logger to the system property "hadoop.root.logger". -log4j.rootLogger=${hadoop.root.logger}, EventCounter - -# Logging Threshold -log4j.threshold=ALL - -# Null Appender -log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender - -# -# Rolling File Appender - cap space usage at 5gb. -# -hadoop.log.maxfilesize=256MB -hadoop.log.maxbackupindex=20 -log4j.appender.RFA=org.apache.log4j.RollingFileAppender -log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file} - -log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize} -log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex} - -log4j.appender.RFA.layout=org.apache.log4j.PatternLayout - -# Pattern format: Date LogLevel LoggerName LogMessage -log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -# Debugging Pattern format -#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - -# -# Daily Rolling File Appender -# - -log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file} - -# Rollover at midnight -log4j.appender.DRFA.DatePattern=.yyyy-MM-dd - -log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout - -# Pattern format: Date LogLevel LoggerName LogMessage -log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -# Debugging Pattern format -#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n - - -# -# console -# Add "console" to rootlogger above if you want to use this -# - -log4j.appender.console=org.apache.log4j.ConsoleAppender -log4j.appender.console.target=System.err -log4j.appender.console.layout=org.apache.log4j.PatternLayout -log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n - -# -# TaskLog Appender -# -log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender - -log4j.appender.TLA.layout=org.apache.log4j.PatternLayout -log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n - -# -# HDFS block state change log from block manager -# -# Uncomment the following to log normal block state change -# messages from BlockManager in NameNode. 
-#log4j.logger.BlockStateChange=DEBUG - -# -#Security appender -# -hadoop.security.logger=INFO,NullAppender -hadoop.security.log.maxfilesize=256MB -hadoop.security.log.maxbackupindex=20 -log4j.category.SecurityLogger=${hadoop.security.logger} -hadoop.security.log.file=SecurityAuth-${user.name}.audit -log4j.appender.RFAS=org.apache.log4j.RollingFileAppender -log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} -log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout -log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize} -log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex} - -# -# Daily Rolling Security appender -# -log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender -log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file} -log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout -log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n -log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd - - -# Custom Logging levels -# AWS SDK & S3A FileSystem -#log4j.logger.com.amazonaws=ERROR -log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR -#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN - -# -# Event Counter Appender -# Sends counts of logging messages at different severity levels to Hadoop Metrics. -# -log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter - - -log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE - -# Do not log into datanode logs. Remove this line to have single log. -log4j.additivity.org.apache.hadoop.ozone=false - -# For development purposes, log both to console and log file. -log4j.appender.OZONE=org.apache.log4j.ConsoleAppender -log4j.appender.OZONE.Threshold=info -log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout -log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \ - %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n - -# Real ozone logger that writes to ozone.log -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log -log4j.appender.FILE.Threshold=debug -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \ -(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \ -%m%n - -# Log levels of third-party libraries -log4j.logger.org.apache.commons.beanutils=WARN - -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties deleted file mode 100644 index 57577e162d5..00000000000 --- a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties +++ /dev/null @@ -1,90 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#
-# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=30 - -filter=read,write -# filter.read.onMatch=DENY avoids logging all READ events -# filter.read.onMatch=ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch=NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type=MarkerFilter -filter.read.marker=READ -filter.read.onMatch=DENY -filter.read.onMismatch=NEUTRAL - -# filter.write.onMatch=DENY avoids logging all WRITE events -# filter.write.onMatch=ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type=MarkerFilter -filter.write.marker=WRITE -filter.write.onMatch=NEUTRAL -filter.write.onMismatch=NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -# Uncomment following section to enable logging to console appender also -#appenders=console, rolling -#appender.console.type=Console -#appender.console.name=STDOUT -#appender.console.layout.type=PatternLayout -#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -# Comment this line when using both console and rolling appenders -appenders=rolling - -#Rolling File Appender with size & time thresholds. -#Rolling is triggered when either threshold is breached. 
-#The rolled over file is compressed by default -#Time interval is specified in seconds 86400s=1 day -appender.rolling.type=RollingFile -appender.rolling.name=RollingFile -appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log -appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -appender.rolling.layout.type=PatternLayout -appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -appender.rolling.policies.type=Policies -appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval=86400 -appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -appender.rolling.policies.size.size=64MB - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=OMAudit -logger.audit.level=INFO -logger.audit.appenderRefs=rolling -logger.audit.appenderRef.file.ref=RollingFile - -rootLogger.level=INFO -#rootLogger.appenderRefs=stdout -#rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties b/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties deleted file mode 100644 index e8f5f2db9d0..00000000000 --- a/hadoop-ozone/dist/src/main/conf/ozone-shell-log4j.properties +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Define some default values that can be overridden by system properties -hadoop.log.dir=. -hadoop.log.file=ozone-shell.log - -log4j.rootLogger=INFO,FILE - -log4j.threshold=ALL - -log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender -log4j.appender.FILE.file=${hadoop.log.dir}/${hadoop.log.file} -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout -log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{1}:%L - %m%n - -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN diff --git a/hadoop-ozone/dist/src/main/conf/ozone-site.xml b/hadoop-ozone/dist/src/main/conf/ozone-site.xml deleted file mode 100644 index 77dd7ef9940..00000000000 --- a/hadoop-ozone/dist/src/main/conf/ozone-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties deleted file mode 100644 index 3f81561cc49..00000000000 --- a/hadoop-ozone/dist/src/main/conf/scm-audit-log4j2.properties +++ /dev/null @@ -1,90 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#
-# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=30 - -filter=read,write -# filter.read.onMatch=DENY avoids logging all READ events -# filter.read.onMatch=ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch=NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type=MarkerFilter -filter.read.marker=READ -filter.read.onMatch=DENY -filter.read.onMismatch=NEUTRAL - -# filter.write.onMatch=DENY avoids logging all WRITE events -# filter.write.onMatch=ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type=MarkerFilter -filter.write.marker=WRITE -filter.write.onMatch=NEUTRAL -filter.write.onMismatch=NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -# Uncomment following section to enable logging to console appender also -#appenders=console, rolling -#appender.console.type=Console -#appender.console.name=STDOUT -#appender.console.layout.type=PatternLayout -#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -# Comment this line when using both console and rolling appenders -appenders=rolling - -#Rolling File Appender with size & time thresholds. -#Rolling is triggered when either threshold is breached. -#The rolled over file is compressed by default -#Time interval is specified in seconds 86400s=1 day -appender.rolling.type=RollingFile -appender.rolling.name=RollingFile -appender.rolling.fileName =${sys:hadoop.log.dir}/scm-audit-${hostName}.log -appender.rolling.filePattern=${sys:hadoop.log.dir}/scm-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -appender.rolling.layout.type=PatternLayout -appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -appender.rolling.policies.type=Policies -appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval=86400 -appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -appender.rolling.policies.size.size=64MB - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=SCMAudit -logger.audit.level=INFO -logger.audit.appenderRefs=rolling -logger.audit.appenderRef.file.ref=RollingFile - -rootLogger.level=INFO -#rootLogger.appenderRefs=stdout -#rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/dist/src/main/docker/Dockerfile b/hadoop-ozone/dist/src/main/docker/Dockerfile deleted file mode 100644 index 3b0e8fe040f..00000000000 --- a/hadoop-ozone/dist/src/main/docker/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM apache/ozone-runner:@docker.ozone-runner.version@ - -ADD --chown=hadoop . /opt/hadoop - -WORKDIR /opt/hadoop diff --git a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh deleted file mode 100755 index cb5f016c310..00000000000 --- a/hadoop-ozone/dist/src/main/dockerbin/entrypoint.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/usr/bin/env bash -## -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -## -set -e - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -if [ -n "$SLEEP_SECONDS" ]; then - echo "Sleeping for $SLEEP_SECONDS seconds" - sleep "$SLEEP_SECONDS" -fi - -# -# You can wait for an other TCP port with these settings. -# -# Example: -# -# export WAITFOR=localhost:9878 -# -# With an optional parameter, you can also set the maximum -# time of waiting with (in seconds) with WAITFOR_TIMEOUT. -# (The default is 300 seconds / 5 minutes.) -if [ -n "$WAITFOR" ]; then - echo "Waiting for the service $WAITFOR" - WAITFOR_HOST=$(printf "%s\n" "$WAITFOR"| cut -d : -f 1) - WAITFOR_PORT=$(printf "%s\n" "$WAITFOR"| cut -d : -f 2) - for i in $(seq "${WAITFOR_TIMEOUT:-300}" -1 0) ; do - set +e - nc -z "$WAITFOR_HOST" "$WAITFOR_PORT" > /dev/null 2>&1 - result=$? - set -e - if [ $result -eq 0 ] ; then - break - fi - sleep 1 - done - if [ "$i" -eq 0 ]; then - echo "Waiting for service $WAITFOR is timed out." >&2 - exit 1 - f - fi -fi - -if [ -n "$KERBEROS_ENABLED" ]; then - echo "Setting up kerberos!!" - KERBEROS_SERVER=${KERBEROS_SERVER:-krb5} - ISSUER_SERVER=${ISSUER_SERVER:-$KERBEROS_SERVER\:8081} - echo "KDC ISSUER_SERVER => $ISSUER_SERVER" - - if [ -n "$SLEEP_SECONDS" ]; then - echo "Sleeping for ${SLEEP_SECONDS} seconds" - sleep "$SLEEP_SECONDS" - fi - - if [ -z "$KEYTAB_DIR" ]; then - KEYTAB_DIR='/etc/security/keytabs' - fi - while true - do - set +e - STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://"$ISSUER_SERVER"/keytab/test/test) - set -e - if [ "$STATUS" -eq 200 ]; then - echo "Got 200, KDC service ready!!" - break - else - echo "Got $STATUS :( KDC service not ready yet..." 
- fi - sleep 5 - done - - HOST_NAME=$(hostname -f) - export HOST_NAME - for NAME in ${KERBEROS_KEYTABS}; do - echo "Download $NAME/$HOSTNAME@EXAMPLE.COM keytab file to $KEYTAB_DIR/$NAME.keytab" - wget "http://$ISSUER_SERVER/keytab/$HOST_NAME/$NAME" -O "$KEYTAB_DIR/$NAME.keytab" - klist -kt "$KEYTAB_DIR/$NAME.keytab" - KERBEROS_ENABLED=true - done - - #Optional: let's try to adjust the krb5.conf - sudo sed -i "s/krb5/$KERBEROS_SERVER/g" "/etc/krb5.conf" || true -fi - -CONF_DESTINATION_DIR="${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}" - -#Try to copy the defaults -set +e -if [[ -d "/opt/ozone/etc/hadoop" ]]; then - cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1 -elif [[ -d "/opt/hadoop/etc/hadoop" ]]; then - cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1 -fi -set -e - -"$DIR"/envtoconf.py --destination "$CONF_DESTINATION_DIR" - -if [ -n "$ENSURE_SCM_INITIALIZED" ]; then - if [ ! -f "$ENSURE_SCM_INITIALIZED" ]; then - # Improve om and scm start up options - /opt/hadoop/bin/ozone scm --init || /opt/hadoop/bin/ozone scm -init - fi -fi - -if [ -n "$ENSURE_OM_INITIALIZED" ]; then - if [ ! -f "$ENSURE_OM_INITIALIZED" ]; then - # Improve om and scm start up options - /opt/hadoop/bin/ozone om --init || /opt/hadoop/bin/ozone om -createObjectStore - fi -fi - -# Supports byteman script to instrument hadoop process with byteman script -# -# -if [ -n "$BYTEMAN_SCRIPT" ] || [ -n "$BYTEMAN_SCRIPT_URL" ]; then - - export PATH=$PATH:$BYTEMAN_DIR/bin - - if [ -n "$BYTEMAN_SCRIPT_URL" ]; then - wget "$BYTEMAN_SCRIPT_URL" -O /tmp/byteman.btm - export BYTEMAN_SCRIPT=/tmp/byteman.btm - fi - - if [ ! -f "$BYTEMAN_SCRIPT" ]; then - echo "ERROR: The defined $BYTEMAN_SCRIPT does not exist!!!" - exit 255 - fi - - AGENT_STRING="-javaagent:/opt/byteman.jar=script:$BYTEMAN_SCRIPT" - export HADOOP_OPTS="$AGENT_STRING $HADOOP_OPTS" - echo "Process is instrumented with adding $AGENT_STRING to HADOOP_OPTS" -fi - -exec "$@" diff --git a/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py b/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py deleted file mode 100755 index 0e2c3686276..00000000000 --- a/hadoop-ozone/dist/src/main/dockerbin/envtoconf.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/python -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -"""convert environment variables to config""" - -import os -import re - -import argparse - -import sys -import transformation - -class Simple(object): - """Simple conversion""" - def __init__(self, args): - parser = argparse.ArgumentParser() - parser.add_argument("--destination", help="Destination directory", required=True) - self.args = parser.parse_args(args=args) - # copy the default files to file.raw in destination directory - - self.known_formats = ['xml', 'properties', 'yaml', 'yml', 'env', "sh", "cfg", 'conf'] - self.output_dir = self.args.destination - self.excluded_envs = ['HADOOP_CONF_DIR'] - self.configurables = {} - - def destination_file_path(self, name, extension): - """destination file path""" - return os.path.join(self.output_dir, "{}.{}".format(name, extension)) - - def write_env_var(self, name, extension, key, value): - """Write environment variables""" - with open(self.destination_file_path(name, extension) + ".raw", "a") as myfile: - myfile.write("{}: {}\n".format(key, value)) - - def process_envs(self): - """Process environment variables""" - for key in os.environ.keys(): - if key in self.excluded_envs: - continue - pattern = re.compile("[_\\.]") - parts = pattern.split(key) - extension = None - name = parts[0].lower() - if len(parts) > 1: - extension = parts[1].lower() - config_key = key[len(name) + len(extension) + 2:].strip() - if extension and "!" in extension: - splitted = extension.split("!") - extension = splitted[0] - fmt = splitted[1] - config_key = key[len(name) + len(extension) + len(fmt) + 3:].strip() - else: - fmt = extension - - if extension and extension in self.known_formats: - if name not in self.configurables.keys(): - with open(self.destination_file_path(name, extension) + ".raw", "w") as myfile: - myfile.write("") - self.configurables[name] = (extension, fmt) - self.write_env_var(name, extension, config_key, os.environ[key]) - else: - for configurable_name in self.configurables: - if key.lower().startswith(configurable_name.lower()): - self.write_env_var(configurable_name, - self.configurables[configurable_name], - key[len(configurable_name) + 1:], - os.environ[key]) - - def transform(self): - """transform""" - for configurable_name in self.configurables: - name = configurable_name - extension, fmt = self.configurables[name] - - destination_path = self.destination_file_path(name, extension) - - with open(destination_path + ".raw", "r") as myfile: - content = myfile.read() - transformer_func = getattr(transformation, "to_" + fmt) - content = transformer_func(content) - with open(destination_path, "w") as myfile: - myfile.write(content) - - def main(self): - """main""" - - # add the - self.process_envs() - - # copy file.ext.raw to file.ext in the destination directory, and - # transform to the right format (eg. key: value ===> XML) - self.transform() - - -def main(): - """main""" - Simple(sys.argv[1:]).main() - - -if __name__ == '__main__': - Simple(sys.argv[1:]).main() diff --git a/hadoop-ozone/dist/src/main/dockerbin/transformation.py b/hadoop-ozone/dist/src/main/dockerbin/transformation.py deleted file mode 100755 index 5e708ce2b65..00000000000 --- a/hadoop-ozone/dist/src/main/dockerbin/transformation.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/python -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -"""This module transform properties into different format""" -def render_yaml(yaml_root, prefix=""): - """render yaml""" - result = "" - if isinstance(yaml_root, dict): - if prefix: - result += "\n" - for key in yaml_root: - result += "{}{}: {}".format(prefix, key, render_yaml( - yaml_root[key], prefix + " ")) - elif isinstance(yaml_root, list): - result += "\n" - for item in yaml_root: - result += prefix + " - " + render_yaml(item, prefix + " ") - else: - result += "{}\n".format(yaml_root) - return result - - -def to_yaml(content): - """transform to yaml""" - props = process_properties(content) - - keys = props.keys() - yaml_props = {} - for key in keys: - parts = key.split(".") - node = yaml_props - prev_part = None - parent_node = {} - for part in parts[:-1]: - if part.isdigit(): - if isinstance(node, dict): - parent_node[prev_part] = [] - node = parent_node[prev_part] - while len(node) <= int(part): - node.append({}) - parent_node = node - node = node[int(node)] - else: - if part not in node: - node[part] = {} - parent_node = node - node = node[part] - prev_part = part - if parts[-1].isdigit(): - if isinstance(node, dict): - parent_node[prev_part] = [] - node = parent_node[prev_part] - node.append(props[key]) - else: - node[parts[-1]] = props[key] - - return render_yaml(yaml_props) - - -def to_yml(content): - """transform to yml""" - return to_yaml(content) - - -def to_properties(content): - """transform to properties""" - result = "" - props = process_properties(content) - for key, val in props.items(): - result += "{}: {}\n".format(key, val) - return result - - -def to_env(content): - """transform to environment variables""" - result = "" - props = process_properties(content) - for key, val in props: - result += "{}={}\n".format(key, val) - return result - - -def to_sh(content): - """transform to shell""" - result = "" - props = process_properties(content) - for key, val in props: - result += "export {}=\"{}\"\n".format(key, val) - return result - - -def to_cfg(content): - """transform to config""" - result = "" - props = process_properties(content) - for key, val in props: - result += "{}={}\n".format(key, val) - return result - - -def to_conf(content): - """transform to configuration""" - result = "" - props = process_properties(content) - for key, val in props: - result += "export {}={}\n".format(key, val) - return result - - -def to_xml(content): - """transform to xml""" - result = "\n" - props = process_properties(content) - for key in props: - result += "{0}{1}\n". \ - format(key, props[key]) - result += "" - return result - - -def process_properties(content, sep=': ', comment_char='#'): - """ - Read the file passed as parameter as a properties file. 
- """ - props = {} - for line in content.split("\n"): - sline = line.strip() - if sline and not sline.startswith(comment_char): - key_value = sline.split(sep) - key = key_value[0].strip() - value = sep.join(key_value[1:]).strip().strip('"') - props[key] = value - - return props diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml deleted file mode 100644 index 8fdc1556688..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/flekszible.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: Jaeger tracing server diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml deleted file mode 100644 index 4796092657c..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/jaeger/jaeger.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: jaeger -spec: - clusterIP: None - selector: - app: jaeger - component: jaeger - ports: - - name: ui - port: 16686 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: jaeger -spec: - selector: - matchLabels: - app: jaeger - component: jaeger - replicas: 1 - serviceName: jaeger - template: - metadata: - labels: - app: jaeger - component: jaeger - spec: - containers: - - name: jaeger - image: jaegertracing/all-in-one:latest - ports: - - containerPort: 16686 - name: web - env: - - name: COLLECTOR_ZIPKIN_HTTP_PORT - value: "9411" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml deleted file mode 100644 index e7c22226010..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-controller.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-provisioner -spec: - replicas: 1 - selector: - matchLabels: - app: csi-provisioner - template: - metadata: - labels: - app: csi-provisioner - spec: - serviceAccount: csi-ozone - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.0.1 - args: - - "--csi-address=/var/lib/csi/csi.sock" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - - name: ozone-csi - image: "@docker.image@" - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - imagePullPolicy: Always - envFrom: - - configMapRef: - name: config - args: - - ozone - - csi - volumes: - - name: socket-dir - emptyDir: diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml deleted file mode 100644 index f0ca37c30aa..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-crd.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: org.apache.hadoop.ozone -spec: - attachRequired: false diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml deleted file mode 100644 index 6c3a1ac36b7..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-node.yaml +++ /dev/null @@ -1,95 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -kind: DaemonSet -apiVersion: apps/v1beta2 -metadata: - name: csi-node -spec: - selector: - matchLabels: - app: csi-node - template: - metadata: - labels: - app: csi-node - spec: - serviceAccount: csi-ozone - containers: - - name: driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 - args: - - "--v=4" - - "--csi-address=/var/lib/csi/csi.sock" - - "--kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock" - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: registration-dir - mountPath: /registration/ - - name: csi-node - image: "@docker.image@" - securityContext: - runAsUser: 0 - privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true - args: - - ozone - - csi - envFrom: - - configMapRef: - name: config - imagePullPolicy: "Always" - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet/pods - mountPropagation: "Bidirectional" - - name: fuse-device - mountPath: /dev/fuse - - name: dbus - mountPath: /var/run/dbus - - name: systemd - mountPath: /run/systemd - volumes: - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet/pods - type: Directory - - name: fuse-device - hostPath: - path: /dev/fuse - - name: dbus - hostPath: - path: /var/run/dbus - - name: systemd - hostPath: - path: /run/systemd diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml deleted file mode 100644 index d83ffb3e1f1..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-rbac.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: default - name: csi-ozone ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone -rules: - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "update","watch"] - - apiGroups: [""] - resources: ["namespaces"] - verbs: ["get", "list"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "update", "create"] - - apiGroups: ["storage.k8s.io"] - resources: ["volumeattachments"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone -subjects: - - kind: ServiceAccount - name: csi-ozone - namespace: default -roleRef: - kind: ClusterRole - name: csi-ozone - apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml deleted file mode 100644 index 97801605509..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/csi-storageclass.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ozone -provisioner: org.apache.hadoop.ozone diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml deleted file mode 100644 index 14c2ea30aff..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone-csi/definitions/csi.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/csi -description: Configuration for CSI interface ---- -- type: Add - trigger: - metadata: - name: config - path: - - data - value: - OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878 - OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock - OZONE-SITE.XML_ozone.csi.owner: hadoop diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml deleted file mode 100644 index 5f5e70b5cdf..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/config.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - - OZONE-SITE.XML_hdds.datanode.dir: "/data/storage" - OZONE-SITE.XML_ozone.scm.datanode.id.dir: "/data" - OZONE-SITE.XML_ozone.metadata.dirs: "/data/metadata" - OZONE-SITE.XML_ozone.scm.block.client.address: "scm-0.scm" - OZONE-SITE.XML_ozone.om.address: "om-0.om" - OZONE-SITE.XML_ozone.scm.client.address: "scm-0.scm" - OZONE-SITE.XML_ozone.scm.names: "scm-0.scm" - OZONE-SITE.XML_ozone.enabled: "true" - LOG4J.PROPERTIES_log4j.rootLogger: "INFO, stdout" - LOG4J.PROPERTIES_log4j.appender.stdout: "org.apache.log4j.ConsoleAppender" - LOG4J.PROPERTIES_log4j.appender.stdout.layout: "org.apache.log4j.PatternLayout" - LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: "%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml deleted file mode 100644 index 7c221d9c44b..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: datanode -spec: - ports: - - port: 9870 - name: rpc - clusterIP: None - selector: - app: ozone - component: datanode diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml deleted file mode 100644 index 88a4308adbf..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/datanode-ss.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: datanode - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: datanode - serviceName: datanode - replicas: 3 - template: - metadata: - labels: - app: ozone - component: datanode - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9882" - prometheus.io/path: "/prom" - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: "kubernetes.io/hostname" - securityContext: - fsGroup: 1000 - containers: - - name: datanode - image: "@docker.image@" - args: ["ozone","datanode"] diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml deleted file mode 100644 index 7a717bfb363..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/emptydir.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-name: ozone/emptydir -description: Add empty dir based ephemeral persistence ---- -- type: Add - trigger: - metadata: - labels: - app.kubernetes.io/component: ozone - path: - - spec - - template - - spec - - (initContainers|containers) - - "*" - - volumeMounts - value: - - name: data - mountPath: /data -- type: Add - trigger: - metadata: - labels: - app.kubernetes.io/component: ozone - path: - - spec - - template - - spec - - volumes - value: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml deleted file mode 100644 index 33a818d5cae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/persistence.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/persistence -description: Add real PVC based persistence ---- -- type: Add - path: - - spec - trigger: - kind: StatefulSet - value: - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 2Gi -- type: Add - trigger: - metadata: - name: datanode - path: - - spec - - template - - spec - value: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname -- type: Add - trigger: - metadata: - labels: - app.kubernetes.io/component: ozone - path: - - spec - - template - - spec - - (initContainers|containers) - - "*" - - volumeMounts - value: - - name: data - mountPath: /data \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml deleted file mode 100644 index d76931af45a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/profiler.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/profiler -description: Enable profiler endpoint. ---- -- type: Add - trigger: - metadata: - name: config - path: - - data - value: - OZONE-SITE.XML_hdds.profiler.endpoint.enabled: "true" - ASYNC_PROFILER_HOME: /opt/profiler \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml deleted file mode 100644 index 604df1fe5ac..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/prometheus.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/prometheus -description: Enable prometheus monitoring in Ozone ---- -- type: Add - trigger: - metadata: - name: config - path: - - data - value: - OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml deleted file mode 100644 index 007b8d12517..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/tracing.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/tracing -description: Enable jaeger tracing ---- -- type: Add - path: - - spec - - template - - spec - - containers - - .* - - env - value: - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml deleted file mode 100644 index 2707d302d15..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/flekszible.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: Apache Hadoop Ozone diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml deleted file mode 100644 index c6e29f3d182..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/flekszible.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: Load test tool for Apache Hadoop Ozone diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml deleted file mode 100644 index 40ebc98a425..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/freon/freon.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: freon - labels: - app.kubernetes.io/component: ozone -spec: - replicas: 1 - selector: - matchLabels: - app: ozone - component: freon - template: - metadata: - labels: - app: ozone - component: freon - spec: - containers: - - name: freon - image: "@docker.image@" - args: ["ozone","freon", "rk", "--factor=THREE", "--replicationType=RATIS"] - envFrom: - - configMapRef: - name: config diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml deleted file mode 100644 index a6462fe687a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: om -spec: - ports: - - port: 9874 - name: ui - clusterIP: None - selector: - app: ozone - component: om diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml deleted file mode 100644 index befc21ecf16..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/om-ss.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: om - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: om - serviceName: om - replicas: 1 - template: - metadata: - labels: - app: ozone - component: om - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9874" - prometheus.io/path: "/prom" - spec: - securityContext: - fsGroup: 1000 - containers: - - name: om - image: "@docker.image@" - args: ["ozone","om"] - env: - - name: WAITFOR - value: scm-0.scm:9876 - - name: ENSURE_OM_INITIALIZED - value: /data/metadata/om/current/VERSION - livenessProbe: - tcpSocket: - port: 9862 - initialDelaySeconds: 30 - volumes: [] diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml deleted file mode 100644 index c99bbd217ae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: s3g -spec: - ports: - - port: 9878 - name: rest - clusterIP: None - selector: - app: ozone - component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml deleted file mode 100644 index fc8ff9a8e9f..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/s3g-ss.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: s3g - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: s3g - serviceName: s3g - replicas: 1 - template: - metadata: - labels: - app: ozone - component: s3g - spec: - containers: - - name: s3g - image: "@docker.image@" - args: ["ozone","s3g"] - livenessProbe: - httpGet: - path: / - port: 9878 - initialDelaySeconds: 30 diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml deleted file mode 100644 index f8a05abcba9..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss-service.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: scm -spec: - ports: - - port: 9876 - name: ui - clusterIP: None - selector: - app: ozone - component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml deleted file mode 100644 index d386afc7643..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/scm-ss.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: scm - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: scm - serviceName: scm - replicas: 1 - template: - metadata: - labels: - app: ozone - component: scm - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9876" - prometheus.io/path: "/prom" - spec: - securityContext: - fsGroup: 1000 - initContainers: - - name: init - image: "@docker.image@" - args: ["ozone","scm", "--init"] - containers: - - name: scm - image: "@docker.image@" - args: ["ozone","scm"] - livenessProbe: - tcpSocket: - port: 9861 - initialDelaySeconds: 30 diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml deleted file mode 100644 index c8ae632cfae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/transformations/config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -- type: Add - path: - - spec - - template - - spec - - ".*" - - ".*" - - envFrom - value: - - configMapRef: - name: config diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml deleted file mode 100644 index f8809875cb3..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/configmap.yaml +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: v1 -kind: ConfigMap -metadata: - name: prometheusconf -data: - prometheus.yaml: |- - global: - scrape_interval: 15s - scrape_configs: - - job_name: jmxexporter - kubernetes_sd_configs: - - role: pod - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml deleted file mode 100644 index 6825e91da3a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/definitions/enable.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: ozone/prometheus -description: Enable prometheus monitoring in Ozone ---- -- type: Add - trigger: - metadata: - name: config - path: - - data - value: - OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true" diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml deleted file mode 100644 index 636840398ee..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: prometheus - labels: - app: prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: prometheus - template: - metadata: - labels: - app: prometheus - spec: - serviceAccountName: prometheus-operator - containers: - - name: prometheus - image: prom/prometheus - args: ["--config.file=/conf/prometheus.yaml"] - ports: - - containerPort: 9090 - volumeMounts: - - name: config - mountPath: "/conf" - readOnly: true - volumes: - - name: config - configMap: - name: prometheusconf diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml deleted file mode 100644 index 20809e57edb..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/flekszible.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: Prometheus monitoring diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml deleted file mode 100644 index 194e9f4a2be..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/role.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: prometheus -rules: -- apiGroups: [""] - resources: - - nodes - - services - - endpoints - - pods - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: - - configmaps - verbs: ["get"] -- nonResourceURLs: ["/metrics"] - verbs: ["get"] diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml deleted file mode 100644 index ef5105da4e1..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/rolebinding.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: prometheus-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: prometheus -subjects: -- kind: ServiceAccount - name: prometheus-operator - namespace: default diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml deleted file mode 100644 index d5ba196c747..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service-account.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: prometheus-operator diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml deleted file mode 100644 index e07aafc1869..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/prometheus/service.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-kind: Service -apiVersion: v1 -metadata: - name: prometheus -spec: - selector: - app: prometheus - ports: - - protocol: TCP - port: 9090 diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml deleted file mode 100644 index 54203bdb664..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/flekszible.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: Simple python based webserver with persistent volume claim. diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml deleted file mode 100644 index d8e75782371..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-deployment.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ozone-csi-test-webserver - labels: - app: ozone-csi-test-webserver - annotations: {} -spec: - replicas: 1 - selector: - matchLabels: - app: ozone-csi-test-webserver - template: - metadata: - labels: - app: ozone-csi-test-webserver - spec: - containers: - - name: web - image: python:3.7.3-alpine3.8 - args: - - python - - -m - - http.server - - --directory - - /www - volumeMounts: - - mountPath: /www - name: webroot - volumes: - - name: webroot - persistentVolumeClaim: - claimName: ozone-csi-test-webserver - readOnly: false diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml deleted file mode 100644 index 6a53a4397f0..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-service.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - type: NodePort - ports: - - port: 8000 - name: web - selector: - app: ozone-csi-test-webserver diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml deleted file mode 100644 index 4b1e44b206a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/definitions/pv-test/webserver-volume.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible deleted file mode 100644 index e00d9ce8182..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/Flekszible +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-source: - - path: ../../definitions -import: - - path: ozone - transformations: - - type: Image - image: "@docker.image@" - - type: PublishStatefulSet - - type: ozone/emptydir - - path: ozone/freon - destination: freon - transformations: - - type: Image - image: "@docker.image@" -header: |- - # Licensed to the Apache Software Foundation (ASF) under one - # or more contributor license agreements. See the NOTICE file - # distributed with this work for additional information - # regarding copyright ownership. The ASF licenses this file - # to you under the Apache License, Version 2.0 (the - # "License"); you may not use this file except in compliance - # with the License. You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header deleted file mode 100644 index 635f0d9e60e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/LICENSE.header +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml deleted file mode 100644 index 94d16d2f74d..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/config-configmap.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data - OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm - OZONE-SITE.XML_ozone.om.address: om-0.om - OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm - OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_ozone.enabled: "true" - LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout - LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender - LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout - LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd - HH:mm:ss} %-5p %c{1}:%L - %m%n' diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml deleted file mode 100644 index 89b59140a25..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode-public -spec: - ports: - - port: 9870 - name: rpc - selector: - app: ozone - component: datanode - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml deleted file mode 100644 index 929e7a25950..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: datanode -spec: - ports: - - port: 9870 - name: rpc - clusterIP: None - selector: - app: ozone - component: datanode diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml deleted file mode 100644 index c393eada79d..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/datanode-statefulset.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: datanode - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: datanode - serviceName: datanode - replicas: 3 - template: - metadata: - labels: - app: ozone - component: datanode - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9882" - prometheus.io/path: /prom - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname - securityContext: - fsGroup: 1000 - containers: - - name: datanode - image: '@docker.image@' - args: - - ozone - - datanode - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml deleted file mode 100644 index 1662c4e3f24..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/freon/freon-deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: freon - labels: - app.kubernetes.io/component: ozone -spec: - replicas: 1 - selector: - matchLabels: - app: ozone - component: freon - template: - metadata: - labels: - app: ozone - component: freon - spec: - containers: - - name: freon - image: '@docker.image@' - args: - - ozone - - freon - - rk - - --factor=THREE - - --replicationType=RATIS - envFrom: - - configMapRef: - name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml deleted file mode 100644 index deb2c333c38..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om-public -spec: - ports: - - port: 9874 - name: ui - selector: - app: ozone - component: om - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml deleted file mode 100644 index 617277d9b85..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om -spec: - ports: - - port: 9874 - name: ui - clusterIP: None - selector: - app: ozone - component: om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml deleted file mode 100644 index 5de01f5feae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/om-statefulset.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: om - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: om - serviceName: om - replicas: 1 - template: - metadata: - labels: - app: ozone - component: om - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9874" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - containers: - - name: om - image: '@docker.image@' - args: - - ozone - - om - env: - - name: WAITFOR - value: scm-0.scm:9876 - - name: ENSURE_OM_INITIALIZED - value: /data/metadata/om/current/VERSION - livenessProbe: - tcpSocket: - port: 9862 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml deleted file mode 100644 index d2b2420576c..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g-public -spec: - ports: - - port: 9878 - name: rest - selector: - app: ozone - component: s3g - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml deleted file mode 100644 index dd1ca834799..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g -spec: - ports: - - port: 9878 - name: rest - clusterIP: None - selector: - app: ozone - component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml deleted file mode 100644 index 240958303f6..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/s3g-statefulset.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: s3g - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: s3g - serviceName: s3g - replicas: 1 - template: - metadata: - labels: - app: ozone - component: s3g - spec: - containers: - - name: s3g - image: '@docker.image@' - args: - - ozone - - s3g - livenessProbe: - httpGet: - path: / - port: 9878 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml deleted file mode 100644 index e3246fc5e35..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: scm-public -spec: - ports: - - port: 9876 - name: ui - selector: - app: ozone - component: scm - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml deleted file mode 100644 index 0df15d64531..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: scm -spec: - ports: - - port: 9876 - name: ui - clusterIP: None - selector: - app: ozone - component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml deleted file mode 100644 index 0f8173c48ba..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/getting-started/scm-statefulset.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: scm - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: scm - serviceName: scm - replicas: 1 - template: - metadata: - labels: - app: ozone - component: scm - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9876" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - initContainers: - - name: init - image: '@docker.image@' - args: - - ozone - - scm - - --init - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - containers: - - name: scm - image: '@docker.image@' - args: - - ozone - - scm - livenessProbe: - tcpSocket: - port: 9861 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible deleted file mode 100644 index 3390db03d4a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/Flekszible +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -source: - - path: ../../definitions -import: - - path: ozone - transformations: - - type: Image - image: "@docker.image@" - - type: PublishStatefulSet - - type: ozone/emptydir - - type: Remove - trigger: - metadata: - name: datanode - path: - - spec - - template - - spec - - affinity - - path: ozone/freon - destination: freon - transformations: - - type: Image - image: "@docker.image@" -header: |- - # Licensed to the Apache Software Foundation (ASF) under one - # or more contributor license agreements. See the NOTICE file - # distributed with this work for additional information - # regarding copyright ownership. The ASF licenses this file - # to you under the Apache License, Version 2.0 (the - # "License"); you may not use this file except in compliance - # with the License. You may obtain a copy of the License at - # - # http://www.apache.org/licenses/LICENSE-2.0 - # - # Unless required by applicable law or agreed to in writing, software - # distributed under the License is distributed on an "AS IS" BASIS, - # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - # See the License for the specific language governing permissions and - # limitations under the License. 
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header deleted file mode 100644 index 635f0d9e60e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/LICENSE.header +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml deleted file mode 100644 index 94d16d2f74d..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/config-configmap.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data - OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm - OZONE-SITE.XML_ozone.om.address: om-0.om - OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm - OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_ozone.enabled: "true" - LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout - LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender - LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout - LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd - HH:mm:ss} %-5p %c{1}:%L - %m%n' diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml deleted file mode 100644 index 89b59140a25..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode-public -spec: - ports: - - port: 9870 - name: rpc - selector: - app: ozone - component: datanode - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml deleted file mode 100644 index 929e7a25950..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode -spec: - ports: - - port: 9870 - name: rpc - clusterIP: None - selector: - app: ozone - component: datanode diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml deleted file mode 100644 index db91864bdaf..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/datanode-statefulset.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: datanode - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: datanode - serviceName: datanode - replicas: 3 - template: - metadata: - labels: - app: ozone - component: datanode - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9882" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - containers: - - name: datanode - image: '@docker.image@' - args: - - ozone - - datanode - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml deleted file mode 100644 index 1662c4e3f24..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/freon/freon-deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: freon - labels: - app.kubernetes.io/component: ozone -spec: - replicas: 1 - selector: - matchLabels: - app: ozone - component: freon - template: - metadata: - labels: - app: ozone - component: freon - spec: - containers: - - name: freon - image: '@docker.image@' - args: - - ozone - - freon - - rk - - --factor=THREE - - --replicationType=RATIS - envFrom: - - configMapRef: - name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml deleted file mode 100644 index deb2c333c38..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: om-public -spec: - ports: - - port: 9874 - name: ui - selector: - app: ozone - component: om - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml deleted file mode 100644 index 617277d9b85..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om -spec: - ports: - - port: 9874 - name: ui - clusterIP: None - selector: - app: ozone - component: om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml deleted file mode 100644 index 5de01f5feae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/om-statefulset.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: om - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: om - serviceName: om - replicas: 1 - template: - metadata: - labels: - app: ozone - component: om - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9874" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - containers: - - name: om - image: '@docker.image@' - args: - - ozone - - om - env: - - name: WAITFOR - value: scm-0.scm:9876 - - name: ENSURE_OM_INITIALIZED - value: /data/metadata/om/current/VERSION - livenessProbe: - tcpSocket: - port: 9862 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml deleted file mode 100644 index d2b2420576c..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g-public -spec: - ports: - - port: 9878 - name: rest - selector: - app: ozone - component: s3g - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml deleted file mode 100644 index dd1ca834799..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: s3g -spec: - ports: - - port: 9878 - name: rest - clusterIP: None - selector: - app: ozone - component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml deleted file mode 100644 index 240958303f6..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/s3g-statefulset.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: s3g - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: s3g - serviceName: s3g - replicas: 1 - template: - metadata: - labels: - app: ozone - component: s3g - spec: - containers: - - name: s3g - image: '@docker.image@' - args: - - ozone - - s3g - livenessProbe: - httpGet: - path: / - port: 9878 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml deleted file mode 100644 index e3246fc5e35..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: scm-public -spec: - ports: - - port: 9876 - name: ui - selector: - app: ozone - component: scm - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml deleted file mode 100644 index 0df15d64531..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: scm -spec: - ports: - - port: 9876 - name: ui - clusterIP: None - selector: - app: ozone - component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml deleted file mode 100644 index 0f8173c48ba..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/minikube/scm-statefulset.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: scm - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: scm - serviceName: scm - replicas: 1 - template: - metadata: - labels: - app: ozone - component: scm - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9876" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - initContainers: - - name: init - image: '@docker.image@' - args: - - ozone - - scm - - --init - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - containers: - - name: scm - image: '@docker.image@' - args: - - ozone - - scm - livenessProbe: - tcpSocket: - port: 9861 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible deleted file mode 100644 index 96e8c6254bd..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/Flekszible +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -source: - - path: ../../definitions -import: - - path: ozone - transformations: - - type: Image - image: "@docker.image@" - - type: ozone/prometheus - - type: PublishStatefulSet - - type: ozone/tracing - - type: ozone/profiler - - type: ozone/emptydir - - type: ozone/csi - - path: prometheus - - path: jaeger - transformations: - - type: PublishService - - path: ozone/freon - destination: freon - transformations: - - type: Image - image: "@docker.image@" - - type: ozone/tracing - - path: pv-test - destination: pv-test - - path: ozone-csi - destination: csi - - path: pv-test - destination: pv-test -transformations: - - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header deleted file mode 100644 index 635f0d9e60e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/LICENSE.header +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml deleted file mode 100644 index 43d11a48c9a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/config-configmap.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data - OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm - OZONE-SITE.XML_ozone.om.address: om-0.om - OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm - OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_ozone.enabled: "true" - LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout - LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender - LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout - LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd - HH:mm:ss} %-5p %c{1}:%L - %m%n' - OZONE-SITE.XML_hdds.prometheus.endpoint.enabled: "true" - OZONE-SITE.XML_hdds.profiler.endpoint.enabled: "true" - ASYNC_PROFILER_HOME: /opt/profiler - OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878 - OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock - OZONE-SITE.XML_ozone.csi.owner: hadoop diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml deleted file mode 100644 index fe4453232d2..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-node-daemonset.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: DaemonSet -apiVersion: apps/v1beta2 -metadata: - name: csi-node -spec: - selector: - matchLabels: - app: csi-node - template: - metadata: - labels: - app: csi-node - spec: - serviceAccount: csi-ozone - containers: - - name: driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 - args: - - --v=4 - - --csi-address=/var/lib/csi/csi.sock - - --kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: registration-dir - mountPath: /registration/ - - name: csi-node - image: '@docker.image@' - securityContext: - runAsUser: 0 - privileged: true - capabilities: - add: - - SYS_ADMIN - allowPrivilegeEscalation: true - args: - - ozone - - csi - envFrom: - - configMapRef: - name: config - imagePullPolicy: Always - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet/pods - mountPropagation: Bidirectional - - name: fuse-device - mountPath: /dev/fuse - - name: dbus - mountPath: /var/run/dbus - - name: systemd - mountPath: /run/systemd - volumes: - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet/pods - type: Directory - - name: fuse-device - hostPath: - path: /dev/fuse - - name: dbus - hostPath: - path: /var/run/dbus - - name: systemd - hostPath: - path: /run/systemd diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml deleted file mode 100644 index 927ba6ff7b7..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrole.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone-default -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list -- apiGroups: - - "" - resources: - - events - verbs: - - list - - watch - - create - - update - - patch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - watch - - update - - create -- apiGroups: - - storage.k8s.io - resources: - - volumeattachments - verbs: - - get - - list - - watch - - update -- apiGroups: - - storage.k8s.io - resources: - - csinodes - verbs: - - get - - list - - watch diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml deleted file mode 100644 index 948e759fbe3..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-clusterrolebinding.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone-default -subjects: -- kind: ServiceAccount - name: csi-ozone - namespace: default -roleRef: - kind: ClusterRole - name: csi-ozone-default - apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml deleted file mode 100644 index 628d2a1c595..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-ozone-serviceaccount.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: default - name: csi-ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml deleted file mode 100644 index 03478ffeee2..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/csi-provisioner-deployment.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-provisioner -spec: - replicas: 1 - selector: - matchLabels: - app: csi-provisioner - template: - metadata: - labels: - app: csi-provisioner - spec: - serviceAccount: csi-ozone - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.0.1 - args: - - --csi-address=/var/lib/csi/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - - name: ozone-csi - image: '@docker.image@' - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - imagePullPolicy: Always - envFrom: - - configMapRef: - name: config - args: - - ozone - - csi - volumes: - - name: socket-dir - emptyDir: null diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml deleted file mode 100644 index e657c50f758..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/org.apache.hadoop.ozone-csidriver.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: org.apache.hadoop.ozone -spec: - attachRequired: false diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml deleted file mode 100644 index c6c1c6c9d1e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/csi/ozone-storageclass.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ozone -provisioner: org.apache.hadoop.ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml deleted file mode 100644 index 89b59140a25..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode-public -spec: - ports: - - port: 9870 - name: rpc - selector: - app: ozone - component: datanode - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml deleted file mode 100644 index 929e7a25950..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode -spec: - ports: - - port: 9870 - name: rpc - clusterIP: None - selector: - app: ozone - component: datanode diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml deleted file mode 100644 index 475ce690b64..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/datanode-statefulset.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: datanode - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: datanode - serviceName: datanode - replicas: 3 - template: - metadata: - labels: - app: ozone - component: datanode - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9882" - prometheus.io/path: /prom - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname - securityContext: - fsGroup: 1000 - containers: - - name: datanode - image: '@docker.image@' - args: - - ozone - - datanode - envFrom: - - configMapRef: - name: config - env: - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml deleted file mode 100644 index 88c9045cb5c..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/freon/freon-deployment.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: freon - labels: - app.kubernetes.io/component: ozone -spec: - replicas: 1 - selector: - matchLabels: - app: ozone - component: freon - template: - metadata: - labels: - app: ozone - component: freon - spec: - containers: - - name: freon - image: '@docker.image@' - args: - - ozone - - freon - - rk - - --factor=THREE - - --replicationType=RATIS - envFrom: - - configMapRef: - name: config - env: - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml deleted file mode 100644 index fb06569b397..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: jaeger-public -spec: - selector: - app: jaeger - component: jaeger - ports: - - name: ui - port: 16686 - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml deleted file mode 100644 index 6e6125ade56..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: jaeger -spec: - clusterIP: None - selector: - app: jaeger - component: jaeger - ports: - - name: ui - port: 16686 diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml deleted file mode 100644 index 51410148f77..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/jaeger-statefulset.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: jaeger -spec: - selector: - matchLabels: - app: jaeger - component: jaeger - replicas: 1 - serviceName: jaeger - template: - metadata: - labels: - app: jaeger - component: jaeger - spec: - containers: - - name: jaeger - image: jaegertracing/all-in-one:latest - ports: - - containerPort: 16686 - name: web - env: - - name: COLLECTOR_ZIPKIN_HTTP_PORT - value: "9411" diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml deleted file mode 100644 index deb2c333c38..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om-public -spec: - ports: - - port: 9874 - name: ui - selector: - app: ozone - component: om - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml deleted file mode 100644 index 617277d9b85..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om -spec: - ports: - - port: 9874 - name: ui - clusterIP: None - selector: - app: ozone - component: om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml deleted file mode 100644 index 36df22c81c9..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/om-statefulset.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: om - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: om - serviceName: om - replicas: 1 - template: - metadata: - labels: - app: ozone - component: om - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9874" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - containers: - - name: om - image: '@docker.image@' - args: - - ozone - - om - env: - - name: WAITFOR - value: scm-0.scm:9876 - - name: ENSURE_OM_INITIALIZED - value: /data/metadata/om/current/VERSION - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger - livenessProbe: - tcpSocket: - port: 9862 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml deleted file mode 100644 index bf62be615b7..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-clusterrole.yaml +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: prometheus-default -rules: -- apiGroups: - - "" - resources: - - nodes - - services - - endpoints - - pods - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get -- nonResourceURLs: - - /metrics - verbs: - - get diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml deleted file mode 100644 index 86a188af099..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-deployment.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: prometheus - labels: - app: prometheus -spec: - replicas: 1 - selector: - matchLabels: - app: prometheus - template: - metadata: - labels: - app: prometheus - spec: - serviceAccountName: prometheus-operator - containers: - - name: prometheus - image: prom/prometheus - args: - - --config.file=/conf/prometheus.yaml - ports: - - containerPort: 9090 - volumeMounts: - - name: config - mountPath: /conf - readOnly: true - volumes: - - name: config - configMap: - name: prometheusconf diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml deleted file mode 100644 index 13ac066aba9..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-clusterrolebinding.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: prometheus-operator-default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: prometheus-default -subjects: -- kind: ServiceAccount - name: prometheus-operator - namespace: default diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml deleted file mode 100644 index f816888b48d..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-operator-serviceaccount.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: prometheus-operator diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml deleted file mode 100644 index 312cf586d33..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheus-service.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: Service -apiVersion: v1 -metadata: - name: prometheus -spec: - selector: - app: prometheus - ports: - - protocol: TCP - port: 9090 diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml deleted file mode 100644 index 6d5b12362c5..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/prometheusconf-configmap.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: ConfigMap -metadata: - name: prometheusconf -data: - prometheus.yaml: |- - global: - scrape_interval: 15s - scrape_configs: - - job_name: jmxexporter - kubernetes_sd_configs: - - role: pod - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml deleted file mode 100644 index 04edcec9814..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-deployment.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ozone-csi-test-webserver - labels: - app: ozone-csi-test-webserver - annotations: {} -spec: - replicas: 1 - selector: - matchLabels: - app: ozone-csi-test-webserver - template: - metadata: - labels: - app: ozone-csi-test-webserver - spec: - containers: - - name: web - image: python:3.7.3-alpine3.8 - args: - - python - - -m - - http.server - - --directory - - /www - volumeMounts: - - mountPath: /www - name: webroot - volumes: - - name: webroot - persistentVolumeClaim: - claimName: ozone-csi-test-webserver - readOnly: false diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml deleted file mode 100644 index 4b1e44b206a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml deleted file mode 100644 index 6a53a4397f0..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/pv-test/ozone-csi-test-webserver-service.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - type: NodePort - ports: - - port: 8000 - name: web - selector: - app: ozone-csi-test-webserver diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml deleted file mode 100644 index d2b2420576c..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g-public -spec: - ports: - - port: 9878 - name: rest - selector: - app: ozone - component: s3g - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml deleted file mode 100644 index dd1ca834799..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g -spec: - ports: - - port: 9878 - name: rest - clusterIP: None - selector: - app: ozone - component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml deleted file mode 100644 index 0feb3686ab8..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/s3g-statefulset.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: s3g - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: s3g - serviceName: s3g - replicas: 1 - template: - metadata: - labels: - app: ozone - component: s3g - spec: - containers: - - name: s3g - image: '@docker.image@' - args: - - ozone - - s3g - livenessProbe: - httpGet: - path: / - port: 9878 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - env: - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml deleted file mode 100644 index e3246fc5e35..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-public-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: scm-public -spec: - ports: - - port: 9876 - name: ui - selector: - app: ozone - component: scm - type: NodePort diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml deleted file mode 100644 index 0df15d64531..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: Service -metadata: - name: scm -spec: - ports: - - port: 9876 - name: ui - clusterIP: None - selector: - app: ozone - component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml deleted file mode 100644 index 246f8c411e0..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-dev/scm-statefulset.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: scm - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: scm - serviceName: scm - replicas: 1 - template: - metadata: - labels: - app: ozone - component: scm - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9876" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - initContainers: - - name: init - image: '@docker.image@' - args: - - ozone - - scm - - --init - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - containers: - - name: scm - image: '@docker.image@' - args: - - ozone - - scm - livenessProbe: - tcpSocket: - port: 9861 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - env: - - name: JAEGER_SAMPLER_TYPE - value: probabilistic - - name: JAEGER_SAMPLER_PARAM - value: "0.01" - - name: JAEGER_AGENT_HOST - value: jaeger-0.jaeger - volumeMounts: - - name: data - mountPath: /data - volumes: - - name: data - emptyDir: {} diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible b/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible deleted file mode 100644 index 2fb527c0a45..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/Flekszible +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-source: - - path: ../../definitions -import: - - path: ozone - transformations: - - type: Image - image: "@docker.image@" - - type: ozone/persistence - - type: ozone/csi - - path: ozone/freon - destination: freon - transformations: - - type: Image - image: "@docker.image@" - - path: pv-test - destination: pv-test - - path: ozone-csi - destination: csi - - path: pv-test - destination: pv-test -transformations: - - type: Namespace diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header b/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header deleted file mode 100644 index 635f0d9e60e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/LICENSE.header +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml deleted file mode 100644 index e554145bac6..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/config-configmap.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ConfigMap -metadata: - name: config -data: - OZONE-SITE.XML_hdds.datanode.dir: /data/storage - OZONE-SITE.XML_ozone.scm.datanode.id.dir: /data - OZONE-SITE.XML_ozone.metadata.dirs: /data/metadata - OZONE-SITE.XML_ozone.scm.block.client.address: scm-0.scm - OZONE-SITE.XML_ozone.om.address: om-0.om - OZONE-SITE.XML_ozone.scm.client.address: scm-0.scm - OZONE-SITE.XML_ozone.scm.names: scm-0.scm - OZONE-SITE.XML_ozone.enabled: "true" - LOG4J.PROPERTIES_log4j.rootLogger: INFO, stdout - LOG4J.PROPERTIES_log4j.appender.stdout: org.apache.log4j.ConsoleAppender - LOG4J.PROPERTIES_log4j.appender.stdout.layout: org.apache.log4j.PatternLayout - LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern: '%d{yyyy-MM-dd - HH:mm:ss} %-5p %c{1}:%L - %m%n' - OZONE-SITE.XML_ozone.csi.s3g.address: http://s3g-0.s3g:9878 - OZONE-SITE.XML_ozone.csi.socket: /var/lib/csi/csi.sock - OZONE-SITE.XML_ozone.csi.owner: hadoop diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml deleted file mode 100644 index fe4453232d2..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-node-daemonset.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: DaemonSet -apiVersion: apps/v1beta2 -metadata: - name: csi-node -spec: - selector: - matchLabels: - app: csi-node - template: - metadata: - labels: - app: csi-node - spec: - serviceAccount: csi-ozone - containers: - - name: driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.0.2 - args: - - --v=4 - - --csi-address=/var/lib/csi/csi.sock - - --kubelet-registration-path=/var/lib/kubelet/plugins/org.apache.hadoop.ozone/csi.sock - env: - - name: KUBE_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: registration-dir - mountPath: /registration/ - - name: csi-node - image: '@docker.image@' - securityContext: - runAsUser: 0 - privileged: true - capabilities: - add: - - SYS_ADMIN - allowPrivilegeEscalation: true - args: - - ozone - - csi - envFrom: - - configMapRef: - name: config - imagePullPolicy: Always - volumeMounts: - - name: plugin-dir - mountPath: /var/lib/csi - - name: pods-mount-dir - mountPath: /var/lib/kubelet/pods - mountPropagation: Bidirectional - - name: fuse-device - mountPath: /dev/fuse - - name: dbus - mountPath: /var/run/dbus - - name: systemd - mountPath: /run/systemd - volumes: - - name: plugin-dir - hostPath: - path: /var/lib/kubelet/plugins/org.apache.hadoop.ozone - type: DirectoryOrCreate - - name: registration-dir - hostPath: - path: /var/lib/kubelet/plugins_registry/ - type: DirectoryOrCreate - - name: pods-mount-dir - hostPath: - path: /var/lib/kubelet/pods - type: Directory - - name: fuse-device - hostPath: - path: /dev/fuse - - name: dbus - hostPath: - path: /var/run/dbus - - name: systemd - hostPath: - path: /run/systemd diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml deleted file mode 100644 index 927ba6ff7b7..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrole.yaml +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone-default -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list -- apiGroups: - - "" - resources: - - events - verbs: - - list - - watch - - create - - update - - patch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list -- apiGroups: - - storage.k8s.io - resources: - - storageclasses - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - watch - - update - - create -- apiGroups: - - storage.k8s.io - resources: - - volumeattachments - verbs: - - get - - list - - watch - - update -- apiGroups: - - storage.k8s.io - resources: - - csinodes - verbs: - - get - - list - - watch diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml deleted file mode 100644 index 948e759fbe3..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-clusterrolebinding.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: csi-ozone-default -subjects: -- kind: ServiceAccount - name: csi-ozone - namespace: default -roleRef: - kind: ClusterRole - name: csi-ozone-default - apiGroup: rbac.authorization.k8s.io diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml deleted file mode 100644 index 628d2a1c595..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-ozone-serviceaccount.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: default - name: csi-ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml deleted file mode 100644 index 03478ffeee2..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/csi-provisioner-deployment.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: Deployment -apiVersion: apps/v1 -metadata: - name: csi-provisioner -spec: - replicas: 1 - selector: - matchLabels: - app: csi-provisioner - template: - metadata: - labels: - app: csi-provisioner - spec: - serviceAccount: csi-ozone - containers: - - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.0.1 - args: - - --csi-address=/var/lib/csi/csi.sock - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - - name: ozone-csi - image: '@docker.image@' - volumeMounts: - - name: socket-dir - mountPath: /var/lib/csi/ - imagePullPolicy: Always - envFrom: - - configMapRef: - name: config - args: - - ozone - - csi - volumes: - - name: socket-dir - emptyDir: null diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml deleted file mode 100644 index e657c50f758..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/org.apache.hadoop.ozone-csidriver.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: storage.k8s.io/v1beta1 -kind: CSIDriver -metadata: - name: org.apache.hadoop.ozone -spec: - attachRequired: false diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml deleted file mode 100644 index c6c1c6c9d1e..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/csi/ozone-storageclass.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: ozone -provisioner: org.apache.hadoop.ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml deleted file mode 100644 index 929e7a25950..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: datanode -spec: - ports: - - port: 9870 - name: rpc - clusterIP: None - selector: - app: ozone - component: datanode diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml deleted file mode 100644 index a3aa528dff0..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/datanode-statefulset.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: datanode - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: datanode - serviceName: datanode - replicas: 3 - template: - metadata: - labels: - app: ozone - component: datanode - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9882" - prometheus.io/path: /prom - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: component - operator: In - values: - - datanode - topologyKey: kubernetes.io/hostname - securityContext: - fsGroup: 1000 - containers: - - name: datanode - image: '@docker.image@' - args: - - ozone - - datanode - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml deleted file mode 100644 index 1662c4e3f24..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/freon/freon-deployment.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: freon - labels: - app.kubernetes.io/component: ozone -spec: - replicas: 1 - selector: - matchLabels: - app: ozone - component: freon - template: - metadata: - labels: - app: ozone - component: freon - spec: - containers: - - name: freon - image: '@docker.image@' - args: - - ozone - - freon - - rk - - --factor=THREE - - --replicationType=RATIS - envFrom: - - configMapRef: - name: config diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml deleted file mode 100644 index 617277d9b85..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: om -spec: - ports: - - port: 9874 - name: ui - clusterIP: None - selector: - app: ozone - component: om diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml deleted file mode 100644 index ad0b16eacae..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/om-statefulset.yaml +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: om - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: om - serviceName: om - replicas: 1 - template: - metadata: - labels: - app: ozone - component: om - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9874" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - containers: - - name: om - image: '@docker.image@' - args: - - ozone - - om - env: - - name: WAITFOR - value: scm-0.scm:9876 - - name: ENSURE_OM_INITIALIZED - value: /data/metadata/om/current/VERSION - livenessProbe: - tcpSocket: - port: 9862 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumes: [] - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml deleted file mode 100644 index 04edcec9814..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-deployment.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ozone-csi-test-webserver - labels: - app: ozone-csi-test-webserver - annotations: {} -spec: - replicas: 1 - selector: - matchLabels: - app: ozone-csi-test-webserver - template: - metadata: - labels: - app: ozone-csi-test-webserver - spec: - containers: - - name: web - image: python:3.7.3-alpine3.8 - args: - - python - - -m - - http.server - - --directory - - /www - volumeMounts: - - mountPath: /www - name: webroot - volumes: - - name: webroot - persistentVolumeClaim: - claimName: ozone-csi-test-webserver - readOnly: false diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml deleted file mode 100644 index 4b1e44b206a..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-persistentvolumeclaim.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: ozone diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml deleted file mode 100644 index 6a53a4397f0..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/pv-test/ozone-csi-test-webserver-service.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: ozone-csi-test-webserver - labels: {} - annotations: {} -spec: - type: NodePort - ports: - - port: 8000 - name: web - selector: - app: ozone-csi-test-webserver diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml deleted file mode 100644 index dd1ca834799..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: s3g -spec: - ports: - - port: 9878 - name: rest - clusterIP: None - selector: - app: ozone - component: s3g diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml deleted file mode 100644 index 6e96fb7dbcf..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/s3g-statefulset.yaml +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: s3g - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: s3g - serviceName: s3g - replicas: 1 - template: - metadata: - labels: - app: ozone - component: s3g - spec: - containers: - - name: s3g - image: '@docker.image@' - args: - - ozone - - s3g - livenessProbe: - httpGet: - path: / - port: 9878 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml deleted file mode 100644 index 0df15d64531..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-service.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -kind: Service -metadata: - name: scm -spec: - ports: - - port: 9876 - name: ui - clusterIP: None - selector: - app: ozone - component: scm diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml deleted file mode 100644 index d4d651349f7..00000000000 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone/scm-statefulset.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: scm - labels: - app.kubernetes.io/component: ozone -spec: - selector: - matchLabels: - app: ozone - component: scm - serviceName: scm - replicas: 1 - template: - metadata: - labels: - app: ozone - component: scm - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9876" - prometheus.io/path: /prom - spec: - securityContext: - fsGroup: 1000 - initContainers: - - name: init - image: '@docker.image@' - args: - - ozone - - scm - - --init - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - containers: - - name: scm - image: '@docker.image@' - args: - - ozone - - scm - livenessProbe: - tcpSocket: - port: 9861 - initialDelaySeconds: 30 - envFrom: - - configMapRef: - name: config - volumeMounts: - - name: data - mountPath: /data - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt deleted file mode 100644 index 6e661afe6a0..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ /dev/null @@ -1,443 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- -This product bundles various third-party components under other open source -licenses. This section summarizes those components and their licenses. -See licenses/ for text of these licenses. 
- -EPL -===================== - - org.eclipse.jetty:jetty-http - org.eclipse.jetty:jetty-io - org.eclipse.jetty:jetty-security - org.eclipse.jetty:jetty-server - org.eclipse.jetty:jetty-servlet - org.eclipse.jetty:jetty-util - org.eclipse.jetty:jetty-util-ajax - org.eclipse.jetty:jetty-webapp - org.eclipse.jetty:jetty-xml - - -BSD -===================== - - org.codehaus.woodstox:stax2-api - - -GPL with classpath exception -===================== - - org.openjdk.jmh:jmh-core - org.openjdk.jmh:jmh-generator-annprocess - - -CDDL -===================== - - com.sun.jersey:jersey-core - com.sun.jersey:jersey-json - com.sun.jersey:jersey-server - com.sun.jersey:jersey-servlet - com.sun.xml.bind:jaxb-core - com.sun.xml.bind:jaxb-impl - javax.activation:activation - javax.servlet.jsp:jsp-api - javax.ws.rs:javax.ws.rs-api - javax.ws.rs:jsr311-api - javax.xml.bind:jaxb-api - org.glassfish.hk2.external:aopalliance-repackaged - org.glassfish.hk2.external:jakarta.inject - org.glassfish.hk2.external:javax.inject - org.glassfish.hk2:guice-bridge - org.glassfish.hk2:hk2-api - org.glassfish.hk2:hk2-locator - org.glassfish.hk2:hk2-utils - org.glassfish.hk2:osgi-resource-locator - org.glassfish.jersey.containers:jersey-container-servlet - org.glassfish.jersey.containers:jersey-container-servlet-core - org.glassfish.jersey.core:jersey-client - org.glassfish.jersey.core:jersey-common - org.glassfish.jersey.core:jersey-server - org.glassfish.jersey.ext.cdi:jersey-cdi1x - org.glassfish.jersey.ext:jersey-entity-filtering - org.glassfish.jersey.inject:jersey-hk2 - org.glassfish.jersey.media:jersey-media-jaxb - org.glassfish.jersey.media:jersey-media-json-jackson - - -Apache License -===================== - - com.fasterxml.jackson.core:jackson-annotations - com.fasterxml.jackson.core:jackson-core - com.fasterxml.jackson.core:jackson-databind - com.fasterxml.jackson.dataformat:jackson-dataformat-xml - com.fasterxml.jackson.module:jackson-module-jaxb-annotations - com.fasterxml.woodstox:woodstox-core - com.github.stephenc.jcip:jcip-annotations - com.google.api.grpc:proto-google-common-protos - com.google.code.gson:gson - com.google.errorprone:error_prone_annotations - com.google.guava:guava - com.google.inject.extensions:guice-assistedinject - com.google.inject.extensions:guice-multibindings - com.google.inject.extensions:guice-servlet - com.google.inject:guice - com.jolbox:bonecp - com.lmax:disruptor - com.nimbusds:nimbus-jose-jwt - com.squareup.okhttp3:okhttp - com.squareup.okio:okio - info.picocli:picocli - io.dropwizard.metrics:metrics-core - io.grpc:grpc-context - io.grpc:grpc-core - io.grpc:grpc-netty - io.grpc:grpc-protobuf - io.grpc:grpc-protobuf-lite - io.grpc:grpc-stub - io.jaegertracing:jaeger-client - io.jaegertracing:jaeger-core - io.jaegertracing:jaeger-thrift - io.jaegertracing:jaeger-tracerresolver - io.netty:netty - io.netty:netty-all - io.netty:netty-buffer - io.netty:netty-codec - io.netty:netty-codec-http - io.netty:netty-codec-http2 - io.netty:netty-codec-socks - io.netty:netty-common - io.netty:netty-handler - io.netty:netty-handler-proxy - io.netty:netty-resolver - io.netty:netty-transport - io.netty:netty-transport-native-epoll - io.netty:netty-transport-native-unix-common - io.opencensus:opencensus-api - io.opencensus:opencensus-contrib-grpc-metrics - io.opentracing.contrib:opentracing-tracerresolver - io.opentracing:opentracing-api - io.opentracing:opentracing-noop - io.opentracing:opentracing-util - javax.enterprise:cdi-api - javax.inject:javax.inject - javax.validation:validation-api - 
log4j:log4j - net.minidev:accessors-smart - net.minidev:json-smart - org.bouncycastle:bcpkix-jdk15on - org.bouncycastle:bcprov-jdk15on - org.codehaus.jackson:jackson-core-asl - org.codehaus.jackson:jackson-jaxrs - org.codehaus.jackson:jackson-mapper-asl - org.codehaus.jackson:jackson-xc - org.codehaus.jettison:jettison - org.hamcrest:hamcrest-all - org.javassist:javassist - org.jboss.weld.servlet:weld-servlet - org.jooq:jooq - org.jooq:jooq-codegen - org.jooq:jooq-meta - org.rocksdb:rocksdbjni - org.springframework:spring-beans - org.springframework:spring-core - org.springframework:spring-jcl - org.springframework:spring-jdbc - org.springframework:spring-tx - org.xerial.snappy:snappy-java - org.xerial:sqlite-jdbc - org.yaml:snakeyaml - - -MIT -===================== - - net.sf.jopt-simple:jopt-simple - org.codehaus.mojo:animal-sniffer-annotations - org.slf4j:slf4j-api - org.slf4j:slf4j-log4j12 - - -EPL 2.0 -===================== - - jakarta.annotation:jakarta.annotation-api - - -CDDL + GPLv2 with classpath exception -===================== - - javax.annotation:javax.annotation-api - javax.el:javax.el-api - javax.interceptor:javax.interceptor-api - javax.servlet:javax.servlet-api - - -Public Domain -===================== - - aopalliance:aopalliance - org.tukaani:xz - - -BSD 3-Clause -===================== - - com.google.code.findbugs:jsr305 - com.google.protobuf:protobuf-java - com.google.protobuf:protobuf-java-util - com.google.re2j:re2j - com.jcraft:jsch - com.thoughtworks.paranamer:paranamer - org.fusesource.leveldbjni:leveldbjni-all - org.ow2.asm:asm - - -BSD 2-Clause -===================== - - dnsjava:dnsjava - - --------------------------------------------------------------------------------- -hadoop-hdds-server-scm, hadoop-ozone-ozone-manager, hadoop-ozone-s3gateway and hadoop-hdds-server-framework -contains the source of the following javascript/css components (See licenses/ for text of these licenses): - -Apache Software Foundation License 2.0 -===================== - -nvd3-1.8.5.min.js.map -nvd3-1.8.5.min.css.map -nvd3-1.8.5.min.js -AbstractFuture.java -TimeoutFuture.java - - -BSD 3-Clause -===================== - -d3-3.5.17.min.js -d3-3.5.17.min.js -glyphicons-* - -MIT License -===================== - -bootstrap-3.4.1 -css/bootstrap-* -bootstrap.min.js -angular-route-1.6.4.min.js -angular-nvd3-1.0.9.min.js -angular-1.6.4.min.js -jquery-3.4.1.min.js - --------------------------------------------------------------------------------- -recon server uses a huge number of javascript and css dependencies. See the -licenses/LICENSE-ozone-recon.txt for the detailed list of the dependencies and licenses. - --------------------------------------------------------------------------------- -ratis-thirdparty-misc is a shaded dependency which includes additional 3rd party dependencies in shaded form. -For the detailed list of the dependencies and the associated licenses see licenses/LICENSE-ratis-thirdparty-misc.txt. diff --git a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt b/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt deleted file mode 100644 index 674b74df18e..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/NOTICE.txt +++ /dev/null @@ -1,520 +0,0 @@ -Apache Hadoop -Copyright 2006 and onwards The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Export Control Notice ---------------------- - -This distribution includes cryptographic software. 
The country in -which you currently reside may have restrictions on the import, -possession, use, and/or re-export to another country, of -encryption software. BEFORE using any encryption software, please -check your country's laws, regulations and policies concerning the -import, possession, or use, and re-export of encryption software, to -see if this is permitted. See for more -information. - -The U.S. Government Department of Commerce, Bureau of Industry and -Security (BIS), has classified this software as Export Commodity -Control Number (ECCN) 5D002.C.1, which includes information security -software using or performing cryptographic functions with asymmetric -algorithms. The form and manner of this Apache Software Foundation -distribution makes it eligible for export under the License Exception -ENC Technology Software Unrestricted (TSU) exception (see the BIS -Export Administration Regulations, Section 740.13) for both object -code and source code. - -This software uses the SSL libraries from the Jetty project written -by mortbay.org abd BouncyCastle Java cryptography APIs written by the - Legion of the Bouncy Castle Inc. - -********************** -THIRD PARTY COMPONENTS -********************** -This software includes third party software subject to the following copyrights: - -io.netty:netty-all -==================== - - - The Netty Project - ================= - -Please visit the Netty web site for more information: - - * https://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE..txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - -------------------------------------------------------------------------------- -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * http://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * http://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product optionally depends on 'lzma-java', a LZMA Java compression -and decompression library, which can be obtained at: - - * LICENSE: - * license/LICENSE.lzma-java.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jponge/lzma-java - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * http://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jboss-remoting/jboss-marshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * http://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * http://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * http://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: - - * LICENSE: - * license/LICENSE.hyper-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/python-hyper/hpack/ - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: - - * LICENSE: - * license/LICENSE.nghttp2-hpack.txt (MIT License) - * HOMEPAGE: - * https://github.com/nghttp2/nghttp2/ - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. 
- - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/takari/maven-wrapper - - -io.grpc:grpc-core -==================== - -Copyright 2014 The gRPC Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ------------------------------------------------------------------------ - -This product contains a modified portion of 'OkHttp', an open source -HTTP & SPDY client for Android and Java applications, which can be obtained -at: - - * LICENSE: - * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) - * HOMEPAGE: - * https://github.com/square/okhttp - * LOCATION_IN_GRPC: - * okhttp/third_party/okhttp - -This product contains a modified portion of 'Envoy', an open source -cloud-native high-performance edge/middle/service proxy, which can be -obtained at: - - * LICENSE: - * xds/third_party/envoy/LICENSE (Apache License 2.0) - * NOTICE: - * xds/third_party/envoy/NOTICE - * HOMEPAGE: - * https://www.envoyproxy.io - * LOCATION_IN_GRPC: - * xds/third_party/envoy - -This product contains a modified portion of 'gogoprotobuf', -an open source Protocol Buffers support for Go with Gadgets, -which can be obtained at: - - * LICENSE: - * xds/third_party/gogoproto/LICENSE - * HOMEPAGE: - * https://github.com/gogo/protobuf - * LOCATION_IN_GRPC: - * xds/third_party/gogoproto - -This product contains a modified portion of 'protoc-gen-validate (PGV)', -an open source protoc plugin to generate polyglot message validators, -which can be obtained at: - - * LICENSE: - * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) - * HOMEPAGE: - * https://github.com/lyft/protoc-gen-validate - * LOCATION_IN_GRPC: - * xds/third_party/protoc-gen-validate - -This product contains a modified portion of 'udpa', -an open source universal data plane API, which can be obtained at: - - * LICENSE: - * xds/third_party/udpa/LICENSE (Apache License 2.0) - * HOMEPAGE: - * https://github.com/cncf/udpa - * LOCATION_IN_GRPC: - * xds/third_party/udpa - - -com.fasterxml.jackson.dataformat:jackson-dataformat-xml -==================== - -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. 
- - -com.jolbox:bonecp -==================== - - ========================================================================= - == NOTICE file corresponding to the section 4 d of == - == the Apache License, Version 2.0, == - == in this case for the BoneCP (Java connection pool). == - ========================================================================= - - BoneCP - Copyright 2010 Wallace Wadge - - This product includes software developed by - Wallace Wadge (http://jolbox.com/). - -org.codehaus.jackson:jackson-mapper-asl -==================== - -This product currently only contains code developed by authors -of specific components, as identified by the source code files; -if such notes are missing files have been created by -Tatu Saloranta. - -For additional credits (generally to people who reported problems) -see CREDITS file. - - -com.google.inject:guice -==================== - - -Google Guice - Core Library -Copyright 2006-2015 Google, Inc. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - - - -com.google.inject.extensions:guice-assistedinject -==================== - - -Google Guice - Extensions - AssistedInject -Copyright 2006-2015 Google, Inc. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - - - -org.codehaus.jackson:jackson-xc -==================== - -This product currently only contains code developed by authors -of specific components, as identified by the source code files; -if such notes are missing files have been created by -Tatu Saloranta. - -For additional credits (generally to people who reported problems) -see CREDITS file. - - -org.codehaus.jackson:jackson-jaxrs -==================== - -This product currently only contains code developed by authors -of specific components, as identified by the source code files; -if such notes are missing files have been created by -Tatu Saloranta. - -For additional credits (generally to people who reported problems) -see CREDITS file. - - -com.google.inject.extensions:guice-servlet -==================== - - -Google Guice - Extensions - Servlet -Copyright 2006-2015 Google, Inc. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - - - -org.codehaus.jackson:jackson-core-asl -==================== - -This product currently only contains code developed by authors -of specific components, as identified by the source code files; -if such notes are missing files have been created by -Tatu Saloranta. - -For additional credits (generally to people who reported problems) -see CREDITS file. - - -org.bouncycastle:bcprov-jdk15on -==================== - -Copyright (c) 2000 - 2019 The Legion of the Bouncy Castle Inc. (https://www.bouncycastle.org) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -Source: https://bouncycastle.org/license.html - -log4j:log4j -==================== - -Apache log4j -Copyright 2007 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -com.google.inject.extensions:guice-multibindings -==================== - - -Google Guice - Extensions - MultiBindings -Copyright 2006-2015 Google, Inc. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - - -==================== -ratis-thirdparty-misc is a shaded dependency which includes additional 3rd party dependencies in shaded form. -For the detailed list of the dependencies and the associated NOTICE file see licenses/NOTICE-ratis-thirdparty-misc.txt. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt deleted file mode 100644 index d96c6fc85f9..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular-nvd3.txt +++ /dev/null @@ -1,16 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2014 Konstantin Skipor - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software -and associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT -LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt deleted file mode 100644 index 6f3880f4c29..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-angular.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2010-2017 Google, Inc. 
http://angularjs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt deleted file mode 100644 index 84247609255..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt +++ /dev/null @@ -1,8 +0,0 @@ -The JSR-305 reference implementation (lib/jsr305.jar) is -distributed under the terms of the New BSD license: - - http://www.opensource.org/licenses/bsd-license.php - -See the JSR-305 home page for more information: - - http://code.google.com/p/jsr-305/ diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt deleted file mode 100644 index b620ae68fe3..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.re2j-re2j.txt +++ /dev/null @@ -1,32 +0,0 @@ -This is a work derived from Russ Cox's RE2 in Go, whose license -http://golang.org/LICENSE is as follows: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - * Neither the name of Google Inc. nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt deleted file mode 100644 index edd491dfbfb..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.jcraft-jsch.txt +++ /dev/null @@ -1,30 +0,0 @@ -JSch 0.0.* was released under the GNU LGPL license. Later, we have switched -over to a BSD-style license. - ------------------------------------------------------------------------------- -Copyright (c) 2002-2015 Atsuhiko Yamanaka, JCraft,Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the distribution. - - 3. The names of the authors may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JCRAFT, -INC. OR ANY CONTRIBUTORS TO THIS SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, -OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt deleted file mode 100644 index c1eec74bf46..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.jersey.txt +++ /dev/null @@ -1,274 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than Source Code. - - 1.5. 
"Initial Developer" means the individual or entity that first makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. 
You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. 
SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 
1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - ----------- -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) -The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. 
This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt deleted file mode 100644 index b1c74f95ede..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.sun.xml.bind.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. 
"Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. 
- However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. 
- - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) 
Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. 
An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt deleted file mode 100644 index 9eab8791863..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.thoughtworks.paranamer-paranamer.txt +++ /dev/null @@ -1,29 +0,0 @@ -[ ParaNamer used to be 'Pubic Domain', but since it includes a small piece of ASM it is now the same license as that: BSD ] - - Portions copyright (c) 2006-2018 Paul Hammant & ThoughtWorks Inc - Portions copyright (c) 2000-2007 INRIA, France Telecom - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt deleted file mode 100644 index c71e3f254c0..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-d3.txt +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2010-2015, Michael Bostock -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* The name Michael Bostock may not be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt deleted file mode 100644 index 70bae6b99d7..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-dnsjava-dnsjava.txt +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 1998-2011, Brian Wellington. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt deleted file mode 100644 index e55f34467e2..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jakarta.annotation-jakarta.annotation-api.txt +++ /dev/null @@ -1,277 +0,0 @@ -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. 
A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. - Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. - -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. - -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. 
- Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of this section 3. - -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. 
While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. 
GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. - - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. 
\ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt deleted file mode 100644 index 9be507838d7..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.activation-activation.txt +++ /dev/null @@ -1,134 +0,0 @@ - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - -1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. - -1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - -1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - -1.4. "Executable" means the Covered Software in any form other than Source Code. - -1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. - -1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - -1.7. "License" means this document. - -1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - -1.9. "Modifications" means the Source Code and Executable form of any of the following: - -A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - -B. Any new file that contains any part of the Original Software or previous Modification; or - -C. Any new file that is contributed or otherwise made available under the terms of this License. - -1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. - -1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - -1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - -1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - -2.1. The Initial Developer Grant. 
- -Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - -(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - -(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - -(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - -2.2. Contributor Grant. - -Conditioned upon Your compliance with Section 3.1 below and -subject to third party intellectual property claims, each -Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - -(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - -(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. -(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - -3.1. Availability of Source Code. - -Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. 
You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - -3.2. Modifications. - -The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - -3.3. Required Notices. - -You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - -3.4. Application of Additional Terms. - -You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - -3.5. Distribution of Executable Versions. - -You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - -3.6. Larger Works. - -You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - -4.1. New Versions. - -Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - -4.2. Effect of New Versions. - -You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. 
If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - -4.3. Modified Versions. - -When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - -COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - -6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - -6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - -6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. 
- -UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - -The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - -This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - -As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt deleted file mode 100644 index a0ccc93564c..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.annotation-javax.annotation-api.txt +++ /dev/null @@ -1,263 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - - 1.1. Contributor. means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. Contributor Version. means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. Covered Software. means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. Executable. means the Covered Software in any form other than Source Code. - - 1.5. Initial Developer. means the individual or entity that first makes Original Software available under this License. - - 1.6. Larger Work. means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. License. means this document. - - 1.8. Licensable. means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. Modifications. means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. Original Software. means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. Patent Claims. means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. Source Code. means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. You. (or .Your.) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, .You. includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, .control. means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. 
You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software.
If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY.
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.
- - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) - - The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. 
The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. 
- -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. 
You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
- -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words - -"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt deleted file mode 100644 index a0ccc93564c..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.el-javax.el-api.txt +++ /dev/null @@ -1,263 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable"
means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant.
- - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor.
You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU.
SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S.
Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) - - The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
- -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Sun Microsystems, Inc. 
are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words - -"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module.? An independent module is a module which is not derived from or based on this library.? If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so.? If you do not wish to do so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt deleted file mode 100644 index a0ccc93564c..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.interceptor-javax.interceptor-api.txt +++ /dev/null @@ -1,263 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - - 1.1. Contributor. means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. Contributor Version. means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. Covered Software. means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. Executable. means the Covered Software in any form other than Source Code. - - 1.5. Initial Developer. means the individual or entity that first makes Original Software available under this License. - - 1.6. Larger Work. means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. License. means this document. - - 1.8. Licensable. means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. Modifications. means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. Original Software. means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. Patent Claims. 
means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. Source Code. means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. You. (or .Your.) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, .You. includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, .control. means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. 
- - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients. rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient.s rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. 
You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN .AS IS. BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as .Participant.) 
alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY.S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a .commercial item,. as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of .commercial computer software. (as that term is defined at 48 C.F.R. ? 252.227-7014(a)(1)) and .commercial computer software documentation. as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction.s conflict-of-law provisions. 
Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys. fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) - - The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
- -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. 
If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. 
- - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words - -"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module.? An independent module is a module which is not derived from or based on this library.? If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so.? If you do not wish to do so, delete this exception statement from your version. 
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt deleted file mode 100644 index a0ccc93564c..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet-javax.servlet-api.txt +++ /dev/null @@ -1,263 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - - 1.1. Contributor. means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. Contributor Version. means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. Covered Software. means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. Executable. means the Covered Software in any form other than Source Code. - - 1.5. Initial Developer. means the individual or entity that first makes Original Software available under this License. - - 1.6. Larger Work. means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. License. means this document. - - 1.8. Licensable. means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. Modifications. means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. Original Software. means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. Patent Claims. means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. Source Code. means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. You. (or .Your.) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, .You. includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, .control. means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. 
You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients. rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient.s rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. 
If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. 
- - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. 
- - NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) - - The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. 
The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. 
- -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. 
You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. 
- -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Sun Microsystems, Inc. are subject to the following clarification and special exception to the GPL Version 2, but only where Sun has expressly included in the particular source file's header the words - -"Sun designates this particular file as subject to the "Classpath" exception as provided by Sun in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt deleted file mode 100644 index b1c74f95ede..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.servlet.jsp-jsp-api.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. 
"Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. 
- However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. 
- - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) 
Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. 
An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt deleted file mode 100644 index b1c74f95ede..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-javax.ws.rs-api.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. 
You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. 
- - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. 
If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. 
Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. - - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. 
- -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. 
- -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. 
- - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. 
However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt deleted file mode 100644 index b1c74f95ede..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.ws.rs-jsr311-api.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. 
"Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. 
- However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. 
- - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) 
Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. 
An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt deleted file mode 100644 index 833a843cfee..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-javax.xml.bind-jaxb-api.txt +++ /dev/null @@ -1,274 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than Source Code. - - 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of any of the following: - - A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - - B. Any new file that contains any part of the Original Software or previous Modification; or - - C. Any new file that is contributed or otherwise made available under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. 
You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. 
If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license. - - 6.4. 
In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. 
- - As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - ----------- -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) -The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - - - -The GNU General Public License (GPL) Version 2, June 1991 - - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. 
To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification follow. - - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. - - c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. 
If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 
This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL VERSION 2 - -Certain source files distributed by Oracle are subject to the following clarification and special exception to the GPL Version 2, but only where Oracle has expressly included in the particular source file's header the words "Oracle designates this particular file as subject to the "Classpath" exception as provided by Oracle in the License file that accompanied this code." - -Linking this library statically or dynamically with other modules is making a combined work based on this library. Thus, the terms and conditions of the GNU General Public License Version 2 cover the whole combination. - -As a special exception, the copyright holders of this library give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you may extend this exception to your version of the library, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt deleted file mode 100644 index 80babca1e16..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jersey.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. 
"Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. 
- - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. - - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. 
- However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. - - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. 
- - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. 
THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. - - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California.
- - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. -Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) 
Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. 
An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt deleted file mode 100644 index 6acfaf43962..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jetty.txt +++ /dev/null @@ -1,415 +0,0 @@ -This program and the accompanying materials are made available under the -terms of the Eclipse Public License 1.0 which is available at -https://www.eclipse.org/org/documents/epl-1.0/EPL-1.0.txt -or the Apache Software License 2.0 which is available at -https://www.apache.org/licenses/LICENSE-2.0 - - - -Eclipse Public License - v 1.0 - -THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC -LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM -CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - -a) in the case of the initial Contributor, the initial code and documentation - distributed under this Agreement, and -b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - - where such changes and/or additions to the Program originate from and are - distributed by that particular Contributor. A Contribution 'originates' - from a Contributor if it was added to the Program by such Contributor - itself or anyone acting on such Contributor's behalf. Contributions do not - include additions to the Program which: (i) are separate modules of - software distributed in conjunction with the Program under their own - license agreement, and (ii) are not derivative works of the Program. - -"Contributor" means any person or entity that distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which are -necessarily infringed by the use or sale of its Contribution alone or when -combined with the Program. - -"Program" means the Contributions distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement, -including all Contributors. - -2. GRANT OF RIGHTS - a) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free copyright license to - reproduce, prepare derivative works of, publicly display, publicly - perform, distribute and sublicense the Contribution of such Contributor, - if any, and such derivative works, in source code and object code form. - b) Subject to the terms of this Agreement, each Contributor hereby grants - Recipient a non-exclusive, worldwide, royalty-free patent license under - Licensed Patents to make, use, sell, offer to sell, import and otherwise - transfer the Contribution of such Contributor, if any, in source code and - object code form. This patent license shall apply to the combination of - the Contribution and the Program if, at the time the Contribution is - added by the Contributor, such addition of the Contribution causes such - combination to be covered by the Licensed Patents. The patent license - shall not apply to any other combinations which include the Contribution. - No hardware per se is licensed hereunder. 
- c) Recipient understands that although each Contributor grants the licenses - to its Contributions set forth herein, no assurances are provided by any - Contributor that the Program does not infringe the patent or other - intellectual property rights of any other entity. Each Contributor - disclaims any liability to Recipient for claims brought by any other - entity based on infringement of intellectual property rights or - otherwise. As a condition to exercising the rights and licenses granted - hereunder, each Recipient hereby assumes sole responsibility to secure - any other intellectual property rights needed, if any. For example, if a - third party patent license is required to allow Recipient to distribute - the Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - d) Each Contributor represents that to its knowledge it has sufficient - copyright rights in its Contribution, if any, to grant the copyright - license set forth in this Agreement. - -3. REQUIREMENTS - -A Contributor may choose to distribute the Program in object code form under -its own license agreement, provided that: - - a) it complies with the terms and conditions of this Agreement; and - b) its license agreement: - i) effectively disclaims on behalf of all Contributors all warranties - and conditions, express and implied, including warranties or - conditions of title and non-infringement, and implied warranties or - conditions of merchantability and fitness for a particular purpose; - ii) effectively excludes on behalf of all Contributors all liability for - damages, including direct, indirect, special, incidental and - consequential damages, such as lost profits; - iii) states that any provisions which differ from this Agreement are - offered by that Contributor alone and not by any other party; and - iv) states that source code for the Program is available from such - Contributor, and informs licensees how to obtain it in a reasonable - manner on or through a medium customarily used for software exchange. - -When the Program is made available in source code form: - - a) it must be made available under this Agreement; and - b) a copy of this Agreement must be included with each copy of the Program. - Contributors may not remove or alter any copyright notices contained - within the Program. - -Each Contributor must identify itself as the originator of its Contribution, -if -any, in a manner that reasonably allows subsequent Recipients to identify the -originator of the Contribution. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities with -respect to end users, business partners and the like. While this license is -intended to facilitate the commercial use of the Program, the Contributor who -includes the Program in a commercial product offering should do so in a manner -which does not create potential liability for other Contributors. Therefore, -if a Contributor includes the Program in a commercial product offering, such -Contributor ("Commercial Contributor") hereby agrees to defend and indemnify -every other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits and -other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such Commercial -Contributor in connection with its distribution of the Program in a commercial -product offering. 
The obligations in this section do not apply to any claims -or Losses relating to any actual or alleged intellectual property -infringement. In order to qualify, an Indemnified Contributor must: -a) promptly notify the Commercial Contributor in writing of such claim, and -b) allow the Commercial Contributor to control, and cooperate with the -Commercial Contributor in, the defense and any related settlement -negotiations. The Indemnified Contributor may participate in any such claim at -its own expense. - -For example, a Contributor might include the Program in a commercial product -offering, Product X. That Contributor is then a Commercial Contributor. If -that Commercial Contributor then makes performance claims, or offers -warranties related to Product X, those performance claims and warranties are -such Commercial Contributor's responsibility alone. Under this section, the -Commercial Contributor would have to defend claims against the other -Contributors related to those performance claims and warranties, and if a -court requires any other Contributor to pay any damages as a result, the -Commercial Contributor must pay those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, -NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each -Recipient is solely responsible for determining the appropriateness of using -and distributing the Program and assumes all risks associated with its -exercise of rights under this Agreement , including but not limited to the -risks and costs of program errors, compliance with applicable laws, damage to -or loss of data, programs or equipment, and unavailability or interruption of -operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY -CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION -LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY -OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of the -remainder of the terms of this Agreement, and without further action by the -parties hereto, such provision shall be reformed to the minimum extent -necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Program itself -(excluding combinations of the Program with other software or hardware) -infringes such Recipient's patent(s), then such Recipient's rights granted -under Section 2(b) shall terminate as of the date such litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it fails to -comply with any of the material terms or conditions of this Agreement and does -not cure such failure in a reasonable period of time after becoming aware of -such noncompliance. 
If all Recipient's rights under this Agreement terminate, -Recipient agrees to cease use and distribution of the Program as soon as -reasonably practicable. However, Recipient's obligations under this Agreement -and any licenses granted by Recipient relating to the Program shall continue -and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, but in -order to avoid inconsistency the Agreement is copyrighted and may only be -modified in the following manner. The Agreement Steward reserves the right to -publish new versions (including revisions) of this Agreement from time to -time. No one other than the Agreement Steward has the right to modify this -Agreement. The Eclipse Foundation is the initial Agreement Steward. The -Eclipse Foundation may assign the responsibility to serve as the Agreement -Steward to a suitable separate entity. Each new version of the Agreement will -be given a distinguishing version number. The Program (including -Contributions) may always be distributed subject to the version of the -Agreement under which it was received. In addition, after a new version of the -Agreement is published, Contributor may elect to distribute the Program -(including its Contributions) under the new version. Except as expressly -stated in Sections 2(a) and 2(b) above, Recipient receives no rights or -licenses to the intellectual property of any Contributor under this Agreement, -whether expressly, by implication, estoppel or otherwise. All rights in the -Program not expressly granted under this Agreement are reserved. - -This Agreement is governed by the laws of the State of New York and the -intellectual property laws of the United States of America. No party to this -Agreement will bring a legal action under this Agreement more than one year -after the cause of action arose. Each party waives its rights to a jury trial in -any resulting litigation. - - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt deleted file mode 100644 index 45930542204..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-jquery.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright JS Foundation and other contributors, https://js.foundation/ - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt deleted file mode 100644 index 54b27325bb6..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-net.sf.jopt-simple-jopt-simple.txt +++ /dev/null @@ -1,24 +0,0 @@ -/* - The MIT License - - Copyright (c) 2004-2016 Paul R. Holser, Jr. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt deleted file mode 100644 index 0955544cdf3..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-nvd3.txt +++ /dev/null @@ -1,10 +0,0 @@ -Copyright (c) 2011-2014 Novus Partners, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this -file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software distributed under the - License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt deleted file mode 100644 index f88186cc45b..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.mojo-animal-sniffer-annotations.txt +++ /dev/null @@ -1,21 +0,0 @@ - The MIT License - - Copyright (c) 2009 codehaus.org. 
- - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt deleted file mode 100644 index 9b533931871..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.codehaus.woodstox-stax2-api.txt +++ /dev/null @@ -1,13 +0,0 @@ -This copy of Stax2 API is licensed under the -Simplified BSD License (also known as "2-clause BSD", or "FreeBSD License") -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.opensource.org/licenses/bsd-license.php - -with details of: - - = FasterXML.com - = 2010- diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt deleted file mode 100644 index 8edd375909b..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.fusesource.leveldbjni-leveldbjni-all.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 FuseSource Corp. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of FuseSource Corp. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt deleted file mode 100644 index b1c74f95ede..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.glassfish.hk2.txt +++ /dev/null @@ -1,759 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 - -1. Definitions. - - 1.1. "Contributor" means each individual or entity that creates or - contributes to the creation of Modifications. - - 1.2. "Contributor Version" means the combination of the Original - Software, prior Modifications used by a Contributor (if any), and - the Modifications made by that particular Contributor. - - 1.3. "Covered Software" means (a) the Original Software, or (b) - Modifications, or (c) the combination of files containing Original - Software with files containing Modifications, in each case including - portions thereof. - - 1.4. "Executable" means the Covered Software in any form other than - Source Code. - - 1.5. "Initial Developer" means the individual or entity that first - makes Original Software available under this License. - - 1.6. "Larger Work" means a work which combines Covered Software or - portions thereof with code not governed by the terms of this License. - - 1.7. "License" means this document. - - 1.8. "Licensable" means having the right to grant, to the maximum - extent possible, whether at the time of the initial grant or - subsequently acquired, any and all of the rights conveyed herein. - - 1.9. "Modifications" means the Source Code and Executable form of - any of the following: - - A. Any file that results from an addition to, deletion from or - modification of the contents of a file containing Original Software - or previous Modifications; - - B. Any new file that contains any part of the Original Software or - previous Modification; or - - C. Any new file that is contributed or otherwise made available - under the terms of this License. - - 1.10. "Original Software" means the Source Code and Executable form - of computer software code that is originally released under this - License. - - 1.11. "Patent Claims" means any patent claim(s), now owned or - hereafter acquired, including without limitation, method, process, - and apparatus claims, in any patent Licensable by grantor. - - 1.12. "Source Code" means (a) the common form of computer software - code in which modifications are made and (b) associated - documentation included in or with such code. - - 1.13. "You" (or "Your") means an individual or a legal entity - exercising rights under, and complying with all of the terms of, - this License. For legal entities, "You" includes any entity which - controls, is controlled by, or is under common control with You. 
For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants. - - 2.1. The Initial Developer Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, the Initial Developer - hereby grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Initial Developer, to use, reproduce, - modify, display, perform, sublicense and distribute the Original - Software (or portions thereof), with or without Modifications, - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using or selling of - Original Software, to make, have made, use, practice, sell, and - offer for sale, and/or otherwise dispose of the Original Software - (or portions thereof). - - (c) The licenses granted in Sections 2.1(a) and (b) are effective on - the date Initial Developer first distributes or otherwise makes the - Original Software available to a third party under the terms of this - License. - - (d) Notwithstanding Section 2.1(b) above, no patent license is - granted: (1) for code that You delete from the Original Software, or - (2) for infringements caused by: (i) the modification of the - Original Software, or (ii) the combination of the Original Software - with other software or devices. - - 2.2. Contributor Grant. - - Conditioned upon Your compliance with Section 3.1 below and subject - to third party intellectual property claims, each Contributor hereby - grants You a world-wide, royalty-free, non-exclusive license: - - (a) under intellectual property rights (other than patent or - trademark) Licensable by Contributor to use, reproduce, modify, - display, perform, sublicense and distribute the Modifications - created by such Contributor (or portions thereof), either on an - unmodified basis, with other Modifications, as Covered Software - and/or as part of a Larger Work; and - - (b) under Patent Claims infringed by the making, using, or selling - of Modifications made by that Contributor either alone and/or in - combination with its Contributor Version (or portions of such - combination), to make, use, sell, offer for sale, have made, and/or - otherwise dispose of: (1) Modifications made by that Contributor (or - portions thereof); and (2) the combination of Modifications made by - that Contributor with its Contributor Version (or portions of such - combination). - - (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective - on the date Contributor first distributes or otherwise makes the - Modifications available to a third party. - - (d) Notwithstanding Section 2.2(b) above, no patent license is - granted: (1) for any code that Contributor has deleted from the - Contributor Version; (2) for infringements caused by: (i) third - party modifications of Contributor Version, or (ii) the combination - of Modifications made by that Contributor with other software - (except as part of the Contributor Version) or other devices; or (3) - under Patent Claims infringed by Covered Software in the absence of - Modifications made by that Contributor. - -3. Distribution Obligations. - - 3.1. Availability of Source Code. 
- - Any Covered Software that You distribute or otherwise make available - in Executable form must also be made available in Source Code form - and that Source Code form must be distributed only under the terms - of this License. You must include a copy of this License with every - copy of the Source Code form of the Covered Software You distribute - or otherwise make available. You must inform recipients of any such - Covered Software in Executable form as to how they can obtain such - Covered Software in Source Code form in a reasonable manner on or - through a medium customarily used for software exchange. - - 3.2. Modifications. - - The Modifications that You create or to which You contribute are - governed by the terms of this License. You represent that You - believe Your Modifications are Your original creation(s) and/or You - have sufficient rights to grant the rights conveyed by this License. - - 3.3. Required Notices. - - You must include a notice in each of Your Modifications that - identifies You as the Contributor of the Modification. You may not - remove or alter any copyright, patent or trademark notices contained - within the Covered Software, or any notices of licensing or any - descriptive text giving attribution to any Contributor or the - Initial Developer. - - 3.4. Application of Additional Terms. - - You may not offer or impose any terms on any Covered Software in - Source Code form that alters or restricts the applicable version of - this License or the recipients' rights hereunder. You may choose to - offer, and to charge a fee for, warranty, support, indemnity or - liability obligations to one or more recipients of Covered Software. - However, you may do so only on Your own behalf, and not on behalf of - the Initial Developer or any Contributor. You must make it - absolutely clear that any such warranty, support, indemnity or - liability obligation is offered by You alone, and You hereby agree - to indemnify the Initial Developer and every Contributor for any - liability incurred by the Initial Developer or such Contributor as a - result of warranty, support, indemnity or liability terms You offer. - - 3.5. Distribution of Executable Versions. - - You may distribute the Executable form of the Covered Software under - the terms of this License or under the terms of a license of Your - choice, which may contain terms different from this License, - provided that You are in compliance with the terms of this License - and that the license for the Executable form does not attempt to - limit or alter the recipient's rights in the Source Code form from - the rights set forth in this License. If You distribute the Covered - Software in Executable form under a different license, You must make - it absolutely clear that any terms which differ from this License - are offered by You alone, not by the Initial Developer or - Contributor. You hereby agree to indemnify the Initial Developer and - every Contributor for any liability incurred by the Initial - Developer or such Contributor as a result of any such terms You offer. - - 3.6. Larger Works. - - You may create a Larger Work by combining Covered Software with - other code not governed by the terms of this License and distribute - the Larger Work as a single product. In such a case, You must make - sure the requirements of this License are fulfilled for the Covered - Software. - -4. Versions of the License. - - 4.1. New Versions. 
- - Oracle is the initial license steward and may publish revised and/or - new versions of this License from time to time. Each version will be - given a distinguishing version number. Except as provided in Section - 4.3, no one other than the license steward has the right to modify - this License. - - 4.2. Effect of New Versions. - - You may always continue to use, distribute or otherwise make the - Covered Software available under the terms of the version of the - License under which You originally received the Covered Software. If - the Initial Developer includes a notice in the Original Software - prohibiting it from being distributed or otherwise made available - under any subsequent version of the License, You must distribute and - make the Covered Software available under the terms of the version - of the License under which You originally received the Covered - Software. Otherwise, You may also choose to use, distribute or - otherwise make the Covered Software available under the terms of any - subsequent version of the License published by the license steward. - - 4.3. Modified Versions. - - When You are an Initial Developer and You want to create a new - license for Your Original Software, You may create and use a - modified version of this License if You: (a) rename the license and - remove any references to the name of the license steward (except to - note that the license differs from this License); and (b) otherwise - make it clear that the license contains terms which differ from this - License. - -5. DISCLAIMER OF WARRANTY. - - COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, - WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, - INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE - IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR - NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF - THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE - DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY - OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, - REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN - ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS - AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - - 6.1. This License and the rights granted hereunder will terminate - automatically if You fail to comply with terms herein and fail to - cure such breach within 30 days of becoming aware of the breach. - Provisions which, by their nature, must remain in effect beyond the - termination of this License shall survive. - - 6.2. 
If You assert a patent infringement claim (excluding - declaratory judgment actions) against Initial Developer or a - Contributor (the Initial Developer or Contributor against whom You - assert such claim is referred to as "Participant") alleging that the - Participant Software (meaning the Contributor Version where the - Participant is a Contributor or the Original Software where the - Participant is the Initial Developer) directly or indirectly - infringes any patent, then any and all rights granted directly or - indirectly to You by such Participant, the Initial Developer (if the - Initial Developer is not the Participant) and all Contributors under - Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice - from Participant terminate prospectively and automatically at the - expiration of such 60 day notice period, unless if within such 60 - day period You withdraw Your claim with respect to the Participant - Software against such Participant either unilaterally or pursuant to - a written agreement with Participant. - - 6.3. If You assert a patent infringement claim against Participant - alleging that the Participant Software directly or indirectly - infringes any patent where such claim is resolved (such as by - license or settlement) prior to the initiation of patent - infringement litigation, then the reasonable value of the licenses - granted by such Participant under Sections 2.1 or 2.2 shall be taken - into account in determining the amount or value of any payment or - license. - - 6.4. In the event of termination under Sections 6.1 or 6.2 above, - all end user licenses that have been validly granted by You or any - distributor hereunder prior to termination (excluding licenses - granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - - UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT - (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE - INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF - COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE - TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR - CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT - LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER - FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR - LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE - POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT - APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH - PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH - LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR - LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION - AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - - The Covered Software is a "commercial item," as that term is defined - in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer - software" (as that term is defined at 48 C.F.R. § - 252.227-7014(a)(1)) and "commercial computer software documentation" - as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent - with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 - (June 1995), all U.S. Government End Users acquire Covered Software - with only those rights set forth herein. This U.S. Government Rights - clause is in lieu of, and supersedes, any other FAR, DFAR, or other - clause or provision that addresses Government rights in computer - software under this License. - -9. MISCELLANEOUS. 
- - This License represents the complete agreement concerning subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. This License shall be governed by - the law of the jurisdiction specified in a notice contained within - the Original Software (except to the extent applicable law, if any, - provides otherwise), excluding such jurisdiction's conflict-of-law - provisions. Any litigation relating to this License shall be subject - to the jurisdiction of the courts located in the jurisdiction and - venue specified in a notice contained within the Original Software, - with the losing party responsible for costs, including, without - limitation, court costs and reasonable attorneys' fees and expenses. - The application of the United Nations Convention on Contracts for - the International Sale of Goods is expressly excluded. Any law or - regulation which provides that the language of a contract shall be - construed against the drafter shall not apply to this License. You - agree that You alone are responsible for compliance with the United - States export administration regulations (and the export control - laws and regulation of any other countries) when You use, distribute - or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - - As between Initial Developer and the Contributors, each party is - responsible for claims and damages arising, directly or indirectly, - out of its utilization of rights under this License and You agree to - work with Initial Developer and Contributors to distribute such - responsibility on an equitable basis. Nothing herein is intended or - shall be deemed to constitute any admission of liability. - ------------------------------------------------------------------------- - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION -LICENSE (CDDL) - -The code released under the CDDL shall be governed by the laws of the -State of California (excluding conflict-of-law provisions). Any -litigation relating to this License shall be subject to the jurisdiction -of the Federal Courts of the Northern District of California and the -state courts of the State of California, with venue lying in Santa Clara -County, California. - - - - The GNU General Public License (GPL) Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor -Boston, MA 02110-1335 -USA - -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to -share and change it. By contrast, the GNU General Public License is -intended to guarantee your freedom to share and change free software--to -make sure the software is free for all its users. This General Public -License applies to most of the Free Software Foundation's software and -to any other program whose authors commit to using it. (Some other Free -Software Foundation software is covered by the GNU Library General -Public License instead.) You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. 
-Our General Public Licenses are designed to make sure that you have the -freedom to distribute copies of free software (and charge for this -service if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs; and that you know you can do these things. - -To protect your rights, we need to make restrictions that forbid anyone -to deny you these rights or to ask you to surrender the rights. These -restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis -or for a fee, you must give the recipients all the rights that you have. -You must make sure that they, too, receive or can get the source code. -And you must show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - -Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - -Finally, any free program is threatened constantly by software patents. -We wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program -proprietary. To prevent this, we have made it clear that any patent must -be licensed for everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and -modification follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a -notice placed by the copyright holder saying it may be distributed under -the terms of this General Public License. The "Program", below, refers -to any such program or work, and a "work based on the Program" means -either the Program or any derivative work under copyright law: that is -to say, a work containing the Program or a portion of it, either -verbatim or with modifications and/or translated into another language. -(Hereinafter, translation is included without limitation in the term -"modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of running -the Program is not restricted, and the output from the Program is -covered only if its contents constitute a work based on the Program -(independent of having been made by running the Program). Whether that -is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source -code as you receive it, in any medium, provided that you conspicuously -and appropriately publish on each copy an appropriate copyright notice -and disclaimer of warranty; keep intact all the notices that refer to -this License and to the absence of any warranty; and give any other -recipients of the Program a copy of this License along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of -it, thus forming a work based on the Program, and copy and distribute -such modifications or work under the terms of Section 1 above, provided -that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any part - thereof, to be licensed as a whole at no charge to all third parties - under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a notice - that there is no warranty (or else, saying that you provide a - warranty) and that users may redistribute the program under these - conditions, and telling the user how to view a copy of this License. - (Exception: if the Program itself is interactive but does not - normally print such an announcement, your work based on the Program - is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, and -can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based on -the Program, the distribution of the whole must be on the terms of this -License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of a -storage or distribution medium does not bring the other work under the -scope of this License. - -3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your cost - of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. 
(This alternative is allowed - only for noncommercial distribution and only if you received the - program in object code or executable form with such an offer, in - accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source code -means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to control -compilation and installation of the executable. However, as a special -exception, the source code distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies the -executable. - -If distribution of executable or object code is made by offering access -to copy from a designated place, then offering equivalent access to copy -the source code from the same place counts as distribution of the source -code, even though third parties are not compelled to copy the source -along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt otherwise -to copy, modify, sublicense or distribute the Program is void, and will -automatically terminate your rights under this License. However, parties -who have received copies, or rights, from you under this License will -not have their licenses terminated so long as such parties remain in -full compliance. - -5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and all -its terms and conditions for copying, distributing or modifying the -Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further restrictions -on the recipients' exercise of the rights granted herein. You are not -responsible for enforcing compliance by third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot distribute -so as to satisfy simultaneously your obligations under this License and -any other pertinent obligations, then as a consequence you may not -distribute the Program at all. For example, if a patent license would -not permit royalty-free redistribution of the Program by all those who -receive copies directly or indirectly through you, then the only way you -could satisfy both it and this License would be to refrain entirely from -distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is implemented -by public license practices. Many people have made generous -contributions to the wide range of software distributed through that -system in reliance on consistent application of that system; it is up to -the author/donor to decide if he or she is willing to distribute -software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be -a consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License may -add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among countries -not thus excluded. In such case, this License incorporates the -limitation as if written in the body of this License. - -9. The Free Software Foundation may publish revised and/or new -versions of the General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Program does not specify a version -number of this License, you may choose any version ever published by the -Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the -author to ask for permission. For software which is copyrighted by the -Free Software Foundation, write to the Free Software Foundation; we -sometimes make exceptions for this. Our decision will be guided by the -two goals of preserving the free status of all derivatives of our free -software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, -EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE -ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH -YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL -NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN -WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY -AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR -DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL -DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM -(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED -INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF -THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR -OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to -attach them to the start of each source file to most effectively convey -the exclusion of warranty; and each file should have at least the -"copyright" line and a pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type - `show w'. This is free software, and you are welcome to redistribute - it under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the -appropriate parts of the General Public License. Of course, the commands -you use may be called something other than `show w' and `show c'; they -could even be mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the - program `Gnomovision' (which makes passes at compilers) written by - James Hacker. - - signature of Ty Coon, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications -with the library. If this is what you want to do, use the GNU Library -General Public License instead of this License. - -# - -Certain source files distributed by Oracle America, Inc. 
and/or its -affiliates are subject to the following clarification and special -exception to the GPLv2, based on the GNU Project exception for its -Classpath libraries, known as the GNU Classpath Exception, but only -where Oracle has expressly included in the particular source file's -header the words "Oracle designates this particular file as subject to -the "Classpath" exception as provided by Oracle in the LICENSE file -that accompanied this code." - -You should also note that Oracle includes multiple, independent -programs in this software package. Some of those programs are provided -under licenses deemed incompatible with the GPLv2 by the Free Software -Foundation and others. For example, the package includes programs -licensed under the Apache License, Version 2.0. Such programs are -licensed to you under their original licenses. - -Oracle facilitates your further distribution of this package by adding -the Classpath Exception to the necessary parts of its GPLv2 code, which -permits you to use that code in combination with other independent -modules not licensed under the GPLv2. However, note that this would -not permit you to commingle code under an incompatible license with -Oracle's GPLv2 licensed code by, for example, cutting and pasting such -code into a file also containing Oracle's GPLv2 licensed code and then -distributing the result. Additionally, if you were to remove the -Classpath Exception from any of the files to which it applies and -distribute the result, you would likely be required to license some or -all of the other code in that distribution under the GPLv2 as well, and -since the GPLv2 is incompatible with the license terms of some items -included in the distribution by Oracle, removing the Classpath -Exception could therefore effectively compromise your ability to -further distribute the package. - -Proceed with caution and we recommend that you obtain the advice of a -lawyer skilled in open source matters before removing the Classpath -Exception or making modifications to this package which may -subsequently be redistributed and/or involve the use of third party -software. - -CLASSPATH EXCEPTION -Linking this library statically or dynamically with other modules is -making a combined work based on this library. Thus, the terms and -conditions of the GNU General Public License version 2 cover the whole -combination. - -As a special exception, the copyright holders of this library give you -permission to link this library with independent modules to produce an -executable, regardless of the license terms of these independent -modules, and to copy and distribute the resulting executable under -terms of your choice, provided that you also meet, for each linked -independent module, the terms and conditions of the license of that -module. An independent module is a module which is not derived from or -based on this library. If you modify this library, you may extend this -exception to your version of the library, but you are not obligated to -do so. If you do not wish to do so, delete this exception statement -from your version. 
diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt deleted file mode 100644 index b40a0f457d7..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-core.txt +++ /dev/null @@ -1,347 +0,0 @@ -The GNU General Public License (GPL) - -Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license -document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share -and change it. By contrast, the GNU General Public License is intended to -guarantee your freedom to share and change free software--to make sure the -software is free for all its users. This General Public License applies to -most of the Free Software Foundation's software and to any other program whose -authors commit to using it. (Some other Free Software Foundation software is -covered by the GNU Library General Public License instead.) You can apply it to -your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our -General Public Licenses are designed to make sure that you have the freedom to -distribute copies of free software (and charge for this service if you wish), -that you receive source code or can get it if you want it, that you can change -the software or use pieces of it in new free programs; and that you know you -can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny -you these rights or to ask you to surrender the rights. These restrictions -translate to certain responsibilities for you if you distribute copies of the -software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for -a fee, you must give the recipients all the rights that you have. You must -make sure that they, too, receive or can get the source code. And you must -show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) -offer you this license which gives you legal permission to copy, distribute -and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that -everyone understands that there is no warranty for this free software. If the -software is modified by someone else and passed on, we want its recipients to -know that what they have is not the original, so that any problems introduced -by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We -wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program proprietary. -To prevent this, we have made it clear that any patent must be licensed for -everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification -follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice -placed by the copyright holder saying it may be distributed under the terms of -this General Public License. 
The "Program", below, refers to any such program -or work, and a "work based on the Program" means either the Program or any -derivative work under copyright law: that is to say, a work containing the -Program or a portion of it, either verbatim or with modifications and/or -translated into another language. (Hereinafter, translation is included -without limitation in the term "modification".) Each licensee is addressed as -"you". - -Activities other than copying, distribution and modification are not covered by -this License; they are outside its scope. The act of running the Program is -not restricted, and the output from the Program is covered only if its contents -constitute a work based on the Program (independent of having been made by -running the Program). Whether that is true depends on what the Program does. - -1. You may copy and distribute verbatim copies of the Program's source code as -you receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice and -disclaimer of warranty; keep intact all the notices that refer to this License -and to the absence of any warranty; and give any other recipients of the -Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may -at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus -forming a work based on the Program, and copy and distribute such modifications -or work under the terms of Section 1 above, provided that you also meet all of -these conditions: - - a) You must cause the modified files to carry prominent notices stating - that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or - in part contains or is derived from the Program or any part thereof, to be - licensed as a whole at no charge to all third parties under the terms of - this License. - - c) If the modified program normally reads commands interactively when run, - you must cause it, when started running for such interactive use in the - most ordinary way, to print or display an announcement including an - appropriate copyright notice and a notice that there is no warranty (or - else, saying that you provide a warranty) and that users may redistribute - the program under these conditions, and telling the user how to view a copy - of this License. (Exception: if the Program itself is interactive but does - not normally print such an announcement, your work based on the Program is - not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable -sections of that work are not derived from the Program, and can be reasonably -considered independent and separate works in themselves, then this License, and -its terms, do not apply to those sections when you distribute them as separate -works. But when you distribute the same sections as part of a whole which is a -work based on the Program, the distribution of the whole must be on the terms -of this License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest your -rights to work written entirely by you; rather, the intent is to exercise the -right to control the distribution of derivative or collective works based on -the Program. - -In addition, mere aggregation of another work not based on the Program with the -Program (or with a work based on the Program) on a volume of a storage or -distribution medium does not bring the other work under the scope of this -License. - -3. You may copy and distribute the Program (or a work based on it, under -Section 2) in object code or executable form under the terms of Sections 1 and -2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source - code, which must be distributed under the terms of Sections 1 and 2 above - on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to - give any third party, for a charge no more than your cost of physically - performing source distribution, a complete machine-readable copy of the - corresponding source code, to be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed only - for noncommercial distribution and only if you received the program in - object code or executable form with such an offer, in accord with - Subsection b above.) - -The source code for a work means the preferred form of the work for making -modifications to it. For an executable work, complete source code means all -the source code for all modules it contains, plus any associated interface -definition files, plus the scripts used to control compilation and installation -of the executable. However, as a special exception, the source code -distributed need not include anything that is normally distributed (in either -source or binary form) with the major components (compiler, kernel, and so on) -of the operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the source -code from the same place counts as distribution of the source code, even though -third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as -expressly provided under this License. Any attempt otherwise to copy, modify, -sublicense or distribute the Program is void, and will automatically terminate -your rights under this License. However, parties who have received copies, or -rights, from you under this License will not have their licenses terminated so -long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. -However, nothing else grants you permission to modify or distribute the Program -or its derivative works. These actions are prohibited by law if you do not -accept this License. Therefore, by modifying or distributing the Program (or -any work based on the Program), you indicate your acceptance of this License to -do so, and all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - -6. 
Each time you redistribute the Program (or any work based on the Program), -the recipient automatically receives a license from the original licensor to -copy, distribute or modify the Program subject to these terms and conditions. -You may not impose any further restrictions on the recipients' exercise of the -rights granted herein. You are not responsible for enforcing compliance by -third parties to this License. - -7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), conditions -are imposed on you (whether by court order, agreement or otherwise) that -contradict the conditions of this License, they do not excuse you from the -conditions of this License. If you cannot distribute so as to satisfy -simultaneously your obligations under this License and any other pertinent -obligations, then as a consequence you may not distribute the Program at all. -For example, if a patent license would not permit royalty-free redistribution -of the Program by all those who receive copies directly or indirectly through -you, then the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply and -the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or -other property right claims or to contest validity of any such claims; this -section has the sole purpose of protecting the integrity of the free software -distribution system, which is implemented by public license practices. Many -people have made generous contributions to the wide range of software -distributed through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing to -distribute software through any other system and a licensee cannot impose that -choice. - -This section is intended to make thoroughly clear what is believed to be a -consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain -countries either by patents or by copyrighted interfaces, the original -copyright holder who places the Program under this License may add an explicit -geographical distribution limitation excluding those countries, so that -distribution is permitted only in or among countries not thus excluded. In -such case, this License incorporates the limitation as if written in the body -of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the -General Public License from time to time. Such new versions will be similar in -spirit to the present version, but may differ in detail to address new problems -or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any later -version", you have the option of following the terms and conditions either of -that version or of any later version published by the Free Software Foundation. -If the Program does not specify a version number of this License, you may -choose any version ever published by the Free Software Foundation. - -10. 
If you wish to incorporate parts of the Program into other free programs -whose distribution conditions are different, write to the author to ask for -permission. For software which is copyrighted by the Free Software Foundation, -write to the Free Software Foundation; we sometimes make exceptions for this. -Our decision will be guided by the two goals of preserving the free status of -all derivatives of our free software and of promoting the sharing and reuse of -software generally. - -NO WARRANTY - -11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR -THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE -STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE -PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND -PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, -YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL -ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE -PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR -INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA -BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER -OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible -use to the public, the best way to achieve this is to make it free software -which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach -them to the start of each source file to most effectively convey the exclusion -of warranty; and each file should have at least the "copyright" line and a -pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the Free - Software Foundation; either version 2 of the License, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., 59 - Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it -starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author Gnomovision comes - with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free - software, and you are welcome to redistribute it under certain conditions; - type 'show c' for details. 
- -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may be -called something other than 'show w' and 'show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, -if any, to sign a "copyright disclaimer" for the program, if necessary. Here -is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - 'Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General Public -License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL - -Certain source files distributed by Oracle America and/or its affiliates are -subject to the following clarification and special exception to the GPL, but -only where Oracle has expressly included in the particular source file's header -the words "Oracle designates this particular file as subject to the "Classpath" -exception as provided by Oracle in the LICENSE file that accompanied this code." - - Linking this library statically or dynamically with other modules is making - a combined work based on this library. Thus, the terms and conditions of - the GNU General Public License cover the whole combination. - - As a special exception, the copyright holders of this library give you - permission to link this library with independent modules to produce an - executable, regardless of the license terms of these independent modules, - and to copy and distribute the resulting executable under terms of your - choice, provided that you also meet, for each linked independent module, - the terms and conditions of the license of that module. An independent - module is a module which is not derived from or based on this library. If - you modify this library, you may extend this exception to your version of - the library, but you are not obligated to do so. If you do not wish to do - so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt deleted file mode 100644 index b40a0f457d7..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.openjdk.jmh-jmh-generator-annprocess.txt +++ /dev/null @@ -1,347 +0,0 @@ -The GNU General Public License (GPL) - -Version 2, June 1991 - -Copyright (C) 1989, 1991 Free Software Foundation, Inc. -59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Everyone is permitted to copy and distribute verbatim copies of this license -document, but changing it is not allowed. - -Preamble - -The licenses for most software are designed to take away your freedom to share -and change it. By contrast, the GNU General Public License is intended to -guarantee your freedom to share and change free software--to make sure the -software is free for all its users. 
This General Public License applies to -most of the Free Software Foundation's software and to any other program whose -authors commit to using it. (Some other Free Software Foundation software is -covered by the GNU Library General Public License instead.) You can apply it to -your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our -General Public Licenses are designed to make sure that you have the freedom to -distribute copies of free software (and charge for this service if you wish), -that you receive source code or can get it if you want it, that you can change -the software or use pieces of it in new free programs; and that you know you -can do these things. - -To protect your rights, we need to make restrictions that forbid anyone to deny -you these rights or to ask you to surrender the rights. These restrictions -translate to certain responsibilities for you if you distribute copies of the -software, or if you modify it. - -For example, if you distribute copies of such a program, whether gratis or for -a fee, you must give the recipients all the rights that you have. You must -make sure that they, too, receive or can get the source code. And you must -show them these terms so they know their rights. - -We protect your rights with two steps: (1) copyright the software, and (2) -offer you this license which gives you legal permission to copy, distribute -and/or modify the software. - -Also, for each author's protection and ours, we want to make certain that -everyone understands that there is no warranty for this free software. If the -software is modified by someone else and passed on, we want its recipients to -know that what they have is not the original, so that any problems introduced -by others will not reflect on the original authors' reputations. - -Finally, any free program is threatened constantly by software patents. We -wish to avoid the danger that redistributors of a free program will -individually obtain patent licenses, in effect making the program proprietary. -To prevent this, we have made it clear that any patent must be licensed for -everyone's free use or not licensed at all. - -The precise terms and conditions for copying, distribution and modification -follow. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - -0. This License applies to any program or other work which contains a notice -placed by the copyright holder saying it may be distributed under the terms of -this General Public License. The "Program", below, refers to any such program -or work, and a "work based on the Program" means either the Program or any -derivative work under copyright law: that is to say, a work containing the -Program or a portion of it, either verbatim or with modifications and/or -translated into another language. (Hereinafter, translation is included -without limitation in the term "modification".) Each licensee is addressed as -"you". - -Activities other than copying, distribution and modification are not covered by -this License; they are outside its scope. The act of running the Program is -not restricted, and the output from the Program is covered only if its contents -constitute a work based on the Program (independent of having been made by -running the Program). Whether that is true depends on what the Program does. - -1. 
You may copy and distribute verbatim copies of the Program's source code as -you receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice and -disclaimer of warranty; keep intact all the notices that refer to this License -and to the absence of any warranty; and give any other recipients of the -Program a copy of this License along with the Program. - -You may charge a fee for the physical act of transferring a copy, and you may -at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Program or any portion of it, thus -forming a work based on the Program, and copy and distribute such modifications -or work under the terms of Section 1 above, provided that you also meet all of -these conditions: - - a) You must cause the modified files to carry prominent notices stating - that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in whole or - in part contains or is derived from the Program or any part thereof, to be - licensed as a whole at no charge to all third parties under the terms of - this License. - - c) If the modified program normally reads commands interactively when run, - you must cause it, when started running for such interactive use in the - most ordinary way, to print or display an announcement including an - appropriate copyright notice and a notice that there is no warranty (or - else, saying that you provide a warranty) and that users may redistribute - the program under these conditions, and telling the user how to view a copy - of this License. (Exception: if the Program itself is interactive but does - not normally print such an announcement, your work based on the Program is - not required to print an announcement.) - -These requirements apply to the modified work as a whole. If identifiable -sections of that work are not derived from the Program, and can be reasonably -considered independent and separate works in themselves, then this License, and -its terms, do not apply to those sections when you distribute them as separate -works. But when you distribute the same sections as part of a whole which is a -work based on the Program, the distribution of the whole must be on the terms -of this License, whose permissions for other licensees extend to the entire -whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your -rights to work written entirely by you; rather, the intent is to exercise the -right to control the distribution of derivative or collective works based on -the Program. - -In addition, mere aggregation of another work not based on the Program with the -Program (or with a work based on the Program) on a volume of a storage or -distribution medium does not bring the other work under the scope of this -License. - -3. 
You may copy and distribute the Program (or a work based on it, under -Section 2) in object code or executable form under the terms of Sections 1 and -2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable source - code, which must be distributed under the terms of Sections 1 and 2 above - on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three years, to - give any third party, for a charge no more than your cost of physically - performing source distribution, a complete machine-readable copy of the - corresponding source code, to be distributed under the terms of Sections 1 - and 2 above on a medium customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer to - distribute corresponding source code. (This alternative is allowed only - for noncommercial distribution and only if you received the program in - object code or executable form with such an offer, in accord with - Subsection b above.) - -The source code for a work means the preferred form of the work for making -modifications to it. For an executable work, complete source code means all -the source code for all modules it contains, plus any associated interface -definition files, plus the scripts used to control compilation and installation -of the executable. However, as a special exception, the source code -distributed need not include anything that is normally distributed (in either -source or binary form) with the major components (compiler, kernel, and so on) -of the operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the source -code from the same place counts as distribution of the source code, even though -third parties are not compelled to copy the source along with the object code. - -4. You may not copy, modify, sublicense, or distribute the Program except as -expressly provided under this License. Any attempt otherwise to copy, modify, -sublicense or distribute the Program is void, and will automatically terminate -your rights under this License. However, parties who have received copies, or -rights, from you under this License will not have their licenses terminated so -long as such parties remain in full compliance. - -5. You are not required to accept this License, since you have not signed it. -However, nothing else grants you permission to modify or distribute the Program -or its derivative works. These actions are prohibited by law if you do not -accept this License. Therefore, by modifying or distributing the Program (or -any work based on the Program), you indicate your acceptance of this License to -do so, and all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - -6. Each time you redistribute the Program (or any work based on the Program), -the recipient automatically receives a license from the original licensor to -copy, distribute or modify the Program subject to these terms and conditions. -You may not impose any further restrictions on the recipients' exercise of the -rights granted herein. You are not responsible for enforcing compliance by -third parties to this License. - -7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), conditions -are imposed on you (whether by court order, agreement or otherwise) that -contradict the conditions of this License, they do not excuse you from the -conditions of this License. If you cannot distribute so as to satisfy -simultaneously your obligations under this License and any other pertinent -obligations, then as a consequence you may not distribute the Program at all. -For example, if a patent license would not permit royalty-free redistribution -of the Program by all those who receive copies directly or indirectly through -you, then the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under any -particular circumstance, the balance of the section is intended to apply and -the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or -other property right claims or to contest validity of any such claims; this -section has the sole purpose of protecting the integrity of the free software -distribution system, which is implemented by public license practices. Many -people have made generous contributions to the wide range of software -distributed through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing to -distribute software through any other system and a licensee cannot impose that -choice. - -This section is intended to make thoroughly clear what is believed to be a -consequence of the rest of this License. - -8. If the distribution and/or use of the Program is restricted in certain -countries either by patents or by copyrighted interfaces, the original -copyright holder who places the Program under this License may add an explicit -geographical distribution limitation excluding those countries, so that -distribution is permitted only in or among countries not thus excluded. In -such case, this License incorporates the limitation as if written in the body -of this License. - -9. The Free Software Foundation may publish revised and/or new versions of the -General Public License from time to time. Such new versions will be similar in -spirit to the present version, but may differ in detail to address new problems -or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any later -version", you have the option of following the terms and conditions either of -that version or of any later version published by the Free Software Foundation. -If the Program does not specify a version number of this License, you may -choose any version ever published by the Free Software Foundation. - -10. If you wish to incorporate parts of the Program into other free programs -whose distribution conditions are different, write to the author to ask for -permission. For software which is copyrighted by the Free Software Foundation, -write to the Free Software Foundation; we sometimes make exceptions for this. -Our decision will be guided by the two goals of preserving the free status of -all derivatives of our free software and of promoting the sharing and reuse of -software generally. - -NO WARRANTY - -11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR -THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE -STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE -PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, -INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND -PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, -YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL -ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE -PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR -INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA -BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A -FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER -OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest possible -use to the public, the best way to achieve this is to make it free software -which everyone can redistribute and change under these terms. - -To do so, attach the following notices to the program. It is safest to attach -them to the start of each source file to most effectively convey the exclusion -of warranty; and each file should have at least the "copyright" line and a -pointer to where the full notice is found. - - One line to give the program's name and a brief idea of what it does. - - Copyright (C) - - This program is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by the Free - Software Foundation; either version 2 of the License, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., 59 - Temple Place, Suite 330, Boston, MA 02111-1307 USA - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this when it -starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author Gnomovision comes - with ABSOLUTELY NO WARRANTY; for details type 'show w'. This is free - software, and you are welcome to redistribute it under certain conditions; - type 'show c' for details. - -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may be -called something other than 'show w' and 'show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your school, -if any, to sign a "copyright disclaimer" for the program, if necessary. 
Here -is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - 'Gnomovision' (which makes passes at compilers) written by James Hacker. - - signature of Ty Coon, 1 April 1989 - - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General Public -License instead of this License. - - -"CLASSPATH" EXCEPTION TO THE GPL - -Certain source files distributed by Oracle America and/or its affiliates are -subject to the following clarification and special exception to the GPL, but -only where Oracle has expressly included in the particular source file's header -the words "Oracle designates this particular file as subject to the "Classpath" -exception as provided by Oracle in the LICENSE file that accompanied this code." - - Linking this library statically or dynamically with other modules is making - a combined work based on this library. Thus, the terms and conditions of - the GNU General Public License cover the whole combination. - - As a special exception, the copyright holders of this library give you - permission to link this library with independent modules to produce an - executable, regardless of the license terms of these independent modules, - and to copy and distribute the resulting executable under terms of your - choice, provided that you also meet, for each linked independent module, - the terms and conditions of the license of that module. An independent - module is a module which is not derived from or based on this library. If - you modify this library, you may extend this exception to your version of - the library, but you are not obligated to do so. If you do not wish to do - so, delete this exception statement from your version. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt deleted file mode 100644 index 4d191851af4..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.ow2.asm-asm.txt +++ /dev/null @@ -1,28 +0,0 @@ - - ASM: a very small and fast Java bytecode manipulation framework - Copyright (c) 2000-2011 INRIA, France Telecom - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - 3. Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt deleted file mode 100644 index 744377c4372..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-org.slf4j.txt +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2004-2017 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt deleted file mode 100644 index 19b305b0006..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-protobuf.txt +++ /dev/null @@ -1,32 +0,0 @@ -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt deleted file mode 100644 index 31b531800e9..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-ratis-thirdparty-misc.txt +++ /dev/null @@ -1,353 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================================================ - -Apache Ratis subcomponents - -Apache Ratis includes a number of subcomponents with separate copyright notices and license terms. -Your use of the source code for these subcomponents is subject to the terms and conditions of the -following licenses. - ------------------------------------------------------------------------------------------------- -This product bundles SLF4J artifacts which are available under the folling licence: - - Copyright (c) 2004-2017 QOS.ch - All rights reserved. 
- - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------------------------------------------------------------------------------------------------- -This product bundles Google Protobuf which is available under the following licence: - -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. - ------------------------------------------------------------------------------------------------- -This product bundles artifacts from the following projects -which are available under the Apache License 2.0. 
- -Google Guava -Netty -Opencensus -Grpc -JCTools -JCommander -Javapoet -J2objc (annotations) -Google Error prone annotations -Google auto value annotations - ------------------------------------------------------------------------------------------------- -The annotations from typetools/checker-framework and animal-sniffer/animal-sniffer-annotations -are licensed under the MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. ------------------------------------------------------------------------------------------------- -The product bundles Crc32 implementation in ratis-common, which is licensed under -Apache License 2.0 with the following comment: - -Some portions of this file Copyright (c) 2004-2006 Intel Corportation -and licensed under the BSD license. - -BSD license: - -Redistribution and use in source and binary forms, with or without modification, are permitted provided -that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
------------------------------------------------------------------------------------------------- -The product bundles annotations from checkerframework, which is licensed under MIT: - -"The annotations are licensed under the MIT License" - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt deleted file mode 100644 index 7e3cbd6fba9..00000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/NOTICE-ratis-thirtparty-misc.txt +++ /dev/null @@ -1,340 +0,0 @@ -Apache Ratis -Copyright 2017-2019 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -The binary distribution of this project bundles binaries of - --------------------------------------------------------------------------------- -Netty artifacts (io.nett:netty-*) - -Licensed under Apache License 2.0 with the following notice: - -The Netty Project - -Please visit the Netty web site for more information: - - * http://netty.io/ - -Copyright 2014 The Netty Project - -The Netty Project licenses this file to you under the Apache License, -version 2.0 (the "License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at: - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -License for the specific language governing permissions and limitations -under the License. - -Also, please refer to each LICENSE..txt file, which is located in -the 'license' directory of the distribution file, for the license terms of the -components that this product depends on. - -This product contains the extensions to Java Collections Framework which has -been derived from the works by JSR-166 EG, Doug Lea, and Jason T. 
Greene: - - * LICENSE: - * license/LICENSE.jsr166y.txt (Public Domain) - * HOMEPAGE: - * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ - * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ - -This product contains a modified version of Robert Harder's Public Domain -Base64 Encoder and Decoder, which can be obtained at: - - * LICENSE: - * license/LICENSE.base64.txt (Public Domain) - * HOMEPAGE: - * http://iharder.sourceforge.net/current/java/base64/ - -This product contains a modified portion of 'Webbit', an event based -WebSocket and HTTP server, which can be obtained at: - - * LICENSE: - * license/LICENSE.webbit.txt (BSD License) - * HOMEPAGE: - * https://github.com/joewalnes/webbit - -This product contains a modified portion of 'SLF4J', a simple logging -facade for Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.slf4j.txt (MIT License) - * HOMEPAGE: - * http://www.slf4j.org/ - -This product contains a modified portion of 'Apache Harmony', an open source -Java SE, which can be obtained at: - - * NOTICE: - * license/NOTICE.harmony.txt - * LICENSE: - * license/LICENSE.harmony.txt (Apache License 2.0) - * HOMEPAGE: - * http://archive.apache.org/dist/harmony/ - -This product contains a modified portion of 'jbzip2', a Java bzip2 compression -and decompression library written by Matthew J. Francis. It can be obtained at: - - * LICENSE: - * license/LICENSE.jbzip2.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jbzip2/ - -This product contains a modified portion of 'libdivsufsort', a C API library to construct -the suffix array and the Burrows-Wheeler transformed string for any input string of -a constant-size alphabet written by Yuta Mori. It can be obtained at: - - * LICENSE: - * license/LICENSE.libdivsufsort.txt (MIT License) - * HOMEPAGE: - * https://github.com/y-256/libdivsufsort - -This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM, - which can be obtained at: - - * LICENSE: - * license/LICENSE.jctools.txt (ASL2 License) - * HOMEPAGE: - * https://github.com/JCTools/JCTools - -This product optionally depends on 'JZlib', a re-implementation of zlib in -pure Java, which can be obtained at: - - * LICENSE: - * license/LICENSE.jzlib.txt (BSD style License) - * HOMEPAGE: - * http://www.jcraft.com/jzlib/ - -This product optionally depends on 'Compress-LZF', a Java library for encoding and -decoding data in LZF format, written by Tatu Saloranta. It can be obtained at: - - * LICENSE: - * license/LICENSE.compress-lzf.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/ning/compress - -This product optionally depends on 'lz4', a LZ4 Java compression -and decompression library written by Adrien Grand. It can be obtained at: - - * LICENSE: - * license/LICENSE.lz4.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/jpountz/lz4-java - -This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression -and decompression library written by William Kinney. 
It can be obtained at: - - * LICENSE: - * license/LICENSE.jfastlz.txt (MIT License) - * HOMEPAGE: - * https://code.google.com/p/jfastlz/ - -This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data -interchange format, which can be obtained at: - - * LICENSE: - * license/LICENSE.protobuf.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/protobuf - -This product optionally depends on 'Bouncy Castle Crypto APIs' to generate -a temporary self-signed X.509 certificate when the JVM does not provide the -equivalent functionality. It can be obtained at: - - * LICENSE: - * license/LICENSE.bouncycastle.txt (MIT License) - * HOMEPAGE: - * http://www.bouncycastle.org/ - -This product optionally depends on 'Snappy', a compression library produced -by Google Inc, which can be obtained at: - - * LICENSE: - * license/LICENSE.snappy.txt (New BSD License) - * HOMEPAGE: - * https://github.com/google/snappy - -This product optionally depends on 'JBoss Marshalling', an alternative Java -serialization API, which can be obtained at: - - * LICENSE: - * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1) - * HOMEPAGE: - * http://www.jboss.org/jbossmarshalling - -This product optionally depends on 'Caliper', Google's micro- -benchmarking framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.caliper.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/google/caliper - -This product optionally depends on 'Apache Commons Logging', a logging -framework, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-logging.txt (Apache License 2.0) - * HOMEPAGE: - * http://commons.apache.org/logging/ - -This product optionally depends on 'Apache Log4J', a logging framework, which -can be obtained at: - - * LICENSE: - * license/LICENSE.log4j.txt (Apache License 2.0) - * HOMEPAGE: - * http://logging.apache.org/log4j/ - -This product optionally depends on 'Aalto XML', an ultra-high performance -non-blocking XML processor, which can be obtained at: - - * LICENSE: - * license/LICENSE.aalto-xml.txt (Apache License 2.0) - * HOMEPAGE: - * http://wiki.fasterxml.com/AaltoHome - -This product contains a modified version of 'HPACK', a Java implementation of -the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: - - * LICENSE: - * license/LICENSE.hpack.txt (Apache License 2.0) - * HOMEPAGE: - * https://github.com/twitter/hpack - -This product contains a modified portion of 'Apache Commons Lang', a Java library -provides utilities for the java.lang API, which can be obtained at: - - * LICENSE: - * license/LICENSE.commons-lang.txt (Apache License 2.0) - * HOMEPAGE: - * https://commons.apache.org/proper/commons-lang/ - - -This product contains the Maven wrapper scripts from 'Maven Wrapper', that provides an easy way to ensure a user has everything necessary to run the Maven build. - - * LICENSE: - * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) - * HOMEPAGE: -* https://github.com/takari/maven-wrapper - ------------------------------------------------------------------------ -Code hale / Dropwizard metrics (3.x) - -Licensed under Apache License 2.0 with the following notice: - -Metrics -Copyright 2010-2013 Coda Hale and Yammer, Inc. - -This product includes software developed by Coda Hale and Yammer, Inc. 
- -This product includes code derived from the JSR-166 project (ThreadLocalRandom, Striped64, -LongAdder), which was released with the following comments: - - Written by Doug Lea with assistance from members of JCP JSR-166 - Expert Group and released to the public domain, as explained at -http://creativecommons.org/publicdomain/zero/1.0/ - ------------------------------------------------------------------------ -JCommander - -Licensed under Apache License 2.0 with the following notice: - -JCommander Copyright Notices -============================ - -Copyright 2010 Cedric Beust - - ------------------------------------------------------------------------ -GRPC-java - -Licensed under Apache License 2.0 with the following notice: - -Copyright 2014 The gRPC Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - - -This product contains a modified portion of 'OkHttp', an open source -HTTP & SPDY client for Android and Java applications, which can be obtained -at: - - * LICENSE: - * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) - * HOMEPAGE: - * https://github.com/square/okhttp - * LOCATION_IN_GRPC: - * okhttp/third_party/okhttp - -This product contains a modified portion of 'Netty', an open source -networking library, which can be obtained at: - - * LICENSE: - * netty/third_party/netty/LICENSE.txt (Apache License 2.0) - * HOMEPAGE: - * https://netty.io - * LOCATION_IN_GRPC: -* netty/third_party/netty ------------------------------------------------------------------------ -The JSR-305 reference implementation (jsr305.jar) is distributed under the terms of the New BSD: - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation and/or -other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------------------------------------------------------------ -This product uses the dropwizard-hadoop-metrics2. 
- -Copyright 2016 Josh Elser - -Licensed under the Apache License v2.0 - ------------------------------------------------------------------------ -This product uses https://github.com/mbocek/docker-ganglia/ - -Contributed by Michal Bocek - -Licensed under the Apache License v2.0 -https://github.com/mbocek/docker-ganglia/blob/master/LICENSE - ------------------------------------------------------------------------ -This product uses https://github.com/graphite-project/docker-graphite-statsd - -Copyright (c) 2013-2016 Nathan Hopkins - -Licensed under the MIT License - --- diff --git a/hadoop-ozone/dist/src/main/license/src/LICENSE.txt b/hadoop-ozone/dist/src/main/license/src/LICENSE.txt deleted file mode 100644 index 4b1b8c05b24..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/LICENSE.txt +++ /dev/null @@ -1,239 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------------------------------------- -This product bundles various third-party components under other open source -licenses. This section summarizes those components and their licenses. -See licenses/ for text of these licenses. - - -Apache Software Foundation License 2.0 --------------------------------------- - -hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map -hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map -hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java - - -BSD 3-Clause ------------- - -hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js -hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/d3-3.5.17.min.js -hadoop-hdds/docs/themes/ozonedoc/static/fonts/glyphicons-* -hadoop-hdds/docs/themes/ozonedoc/static/js/bootstrap.min.js - -MIT License ------------ - -hadoop-hdds/framework/src/main/resources/webapps/static/bootstrap-3.4.1 -hadoop-hdds/docs/themes/ozonedoc/static/css/bootstrap-* - -hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js -hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js -hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js - -hadoop-hdds/framework/src/main/resources/webapps/static/jquery-3.4.1.min.js -hadoop-hdds/docs/themes/ozonedoc/static/js/jquery-3.4.1.min.js diff --git a/hadoop-ozone/dist/src/main/license/src/NOTICE.txt b/hadoop-ozone/dist/src/main/license/src/NOTICE.txt deleted file mode 100644 index 2803728c004..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/NOTICE.txt +++ /dev/null @@ -1,33 +0,0 @@ -Apache Hadoop -Copyright 2006 and onwards The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Export Control Notice ---------------------- - -This distribution includes cryptographic software. The country in -which you currently reside may have restrictions on the import, -possession, use, and/or re-export to another country, of -encryption software. 
BEFORE using any encryption software, please -check your country's laws, regulations and policies concerning the -import, possession, or use, and re-export of encryption software, to -see if this is permitted. See for more -information. - -The U.S. Government Department of Commerce, Bureau of Industry and -Security (BIS), has classified this software as Export Commodity -Control Number (ECCN) 5D002.C.1, which includes information security -software using or performing cryptographic functions with asymmetric -algorithms. The form and manner of this Apache Software Foundation -distribution makes it eligible for export under the License Exception -ENC Technology Software Unrestricted (TSU) exception (see the BIS -Export Administration Regulations, Section 740.13) for both object -code and source code. - -The following provides more details on the included cryptographic software: - -This software uses the SSL libraries from the Jetty project written -by mortbay.org abd BouncyCastle Java cryptography APIs written by the - Legion of the Bouncy Castle Inc. diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md b/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md deleted file mode 100644 index 2581412d320..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/IMPORTANT.md +++ /dev/null @@ -1,21 +0,0 @@ - - -# Important - -The files from this directory are not copied by automatically to the source distribution package. - -If you add any of the files to here, - * please also adjust `hadoop-ozone/dist/src/main/assemblies/ozone-src.xml` file. - * and copy the dependency to ../../bin/licenses (if it's included in the bin tar) \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt deleted file mode 100644 index d96c6fc85f9..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular-nvd3.txt +++ /dev/null @@ -1,16 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2014 Konstantin Skipor - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software -and associated documentation files (the "Software"), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT -LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt deleted file mode 100644 index 6f3880f4c29..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-angular.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License - -Copyright (c) 2010-2017 Google, Inc. 
http://angularjs.org - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt deleted file mode 100644 index c71e3f254c0..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-d3.txt +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2010-2015, Michael Bostock -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* The name Michael Bostock may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt deleted file mode 100644 index 45930542204..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-jquery.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright JS Foundation and other contributors, https://js.foundation/ - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt b/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt deleted file mode 100644 index 0955544cdf3..00000000000 --- a/hadoop-ozone/dist/src/main/license/src/licenses/LICENSE-nvd3.txt +++ /dev/null @@ -1,10 +0,0 @@ -Copyright (c) 2011-2014 Novus Partners, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this -file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software distributed under the - License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/ozone/README.txt b/hadoop-ozone/dist/src/main/ozone/README.txt deleted file mode 100644 index 6bbd83ffd04..00000000000 --- a/hadoop-ozone/dist/src/main/ozone/README.txt +++ /dev/null @@ -1,51 +0,0 @@ - - -This is the distribution of Apache Hadoop Ozone. - -Ozone is a submodule of Hadoop with separated release cycle. For more information, check - - http://ozone.hadoop.apache.org - - and - - https://cwiki.apache.org/confluence/display/HADOOP/Ozone+Contributor+Guide - -For more information about Hadoop, check: - - http://hadoop.apache.org - -This distribution includes cryptographic software. The country in -which you currently reside may have restrictions on the import, -possession, use, and/or re-export to another country, of -encryption software. BEFORE using any encryption software, please -check your country's laws, regulations and policies concerning the -import, possession, or use, and re-export of encryption software, to -see if this is permitted. See for more -information. - -The U.S. 
Government Department of Commerce, Bureau of Industry and -Security (BIS), has classified this software as Export Commodity -Control Number (ECCN) 5D002.C.1, which includes information security -software using or performing cryptographic functions with asymmetric -algorithms. The form and manner of this Apache Software Foundation -distribution makes it eligible for export under the License Exception -ENC Technology Software Unrestricted (TSU) exception (see the BIS -Export Administration Regulations, Section 740.13) for both object -code and source code. - -The following provides more details on the included cryptographic -software: - Hadoop Core uses the SSL libraries from the Jetty project written -by mortbay.org. diff --git a/hadoop-ozone/dist/src/main/smoketest/.env b/hadoop-ozone/dist/src/main/smoketest/.env deleted file mode 100644 index 47a25e1ce8a..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/.env +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HADOOP_VERSION=3 \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/README.md b/hadoop-ozone/dist/src/main/smoketest/README.md deleted file mode 100644 index d181b8a3da6..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/README.md +++ /dev/null @@ -1,63 +0,0 @@ - - -## Ozone Acceptance Tests - -This directory contains a [robotframework](http://robotframework.org/) based test suite for Ozone to make it easier to check the current state of the package. - -You can run it in any environment after [installing](https://github.com/robotframework/robotframework/blob/master/INSTALL.rst) - -``` -cd $DIRECTORY_OF_OZONE -robot smoketest/basic -``` - -The argument of the `robot` command can be any robot file or directory. - -The current configuration in the robot files (hostnames, ports) is adjusted for the docker-based setup, but you can easily modify it for any environment. - -# Run tests in docker environment - -In the ./compose folder there are additional test scripts to make it easy to run all tests or run a specific test in a docker environment. - -## Test one environment - -Go to the compose directory and execute test.sh directly from there: - -``` -cd compose/ozone -./test.sh -``` - -The results will be saved to the `compose/ozone/results` folder. - -## Run all the tests - -``` -cd compose -./test-all.sh -``` - -The results will be combined into the `compose/results` folder. - -## Run one specific test case - -Start the compose environment and execute the test: - -``` -cd compose/ozone -docker-compose up -d -#wait....
-../test-single.sh scm basic/basic.robot -``` \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/__init__.robot b/hadoop-ozone/dist/src/main/smoketest/__init__.robot deleted file mode 100644 index f8835df0721..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/__init__.robot +++ /dev/null @@ -1,18 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -*** Settings *** -Documentation Smoketest ozone secure cluster -Resource commonlib.robot -Suite Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot b/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot deleted file mode 100644 index 1caae755694..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/auditparser/auditparser.robot +++ /dev/null @@ -1,53 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem -Library BuiltIn -Resource ../commonlib.robot - -*** Variables *** -${user} hadoop -${count} 4 -${auditworkdir} /tmp/ - -*** Keywords *** -Set username - ${hostname} = Execute hostname - Set Suite Variable ${user} testuser/${hostname}@EXAMPLE.COM - [return] ${user} - -*** Test Cases *** -Initiating freon to generate data - ${result} = Execute ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 1 - Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 - Should Not Contain ${result} ERROR - -Testing audit parser - ${logdir} = Get Environment Variable HADOOP_LOG_DIR /var/log/hadoop - ${logfile} = Execute ls -t "${logdir}" | grep om-audit | head -1 - Execute ozone auditparser "${auditworkdir}/audit.db" load "${logdir}/${logfile}" - ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" template top5cmds - Should Contain ${result} ALLOCATE_KEY - ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" template top5users - Run Keyword If '${SECURITY_ENABLED}' == 'true' Set username - Should Contain ${result} ${user} - ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_VOLUME' and RESULT='SUCCESS'" - ${result} = Convert To Number ${result} - Should be true ${result}>${count} - ${result} = Execute ozone auditparser "${auditworkdir}/audit.db" query "select count(*) from audit where op='CREATE_BUCKET' and RESULT='SUCCESS'" - ${result} = Convert To Number ${result} - Should be true ${result}>${count} diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot b/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot deleted file mode 100644 index edaee5e7267..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/basic/basic.robot +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem -Resource ../commonlib.robot - -*** Variables *** -${DATANODE_HOST} datanode - - -*** Test Cases *** - -Check webui static resources - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -s -I http://scm:9876/static/bootstrap-3.4.1/js/bootstrap.min.js - Should contain ${result} 200 - -Start freon testing - ${result} = Execute ozone freon randomkeys --numOfVolumes 5 --numOfBuckets 5 --numOfKeys 5 --numOfThreads 1 - Wait Until Keyword Succeeds 3min 10sec Should contain ${result} Number of Keys added: 125 - Should Not Contain ${result} ERROR diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot deleted file mode 100644 index 689e4af3e03..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot +++ /dev/null @@ -1,138 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test ozone shell CLI usage -Library OperatingSystem -Resource ../commonlib.robot -Test Setup Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab -Test Timeout 2 minute -Suite Setup Generate prefix - -*** Variables *** -${prefix} generated - -*** Keywords *** -Generate prefix - ${random} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${prefix} ${random} - -*** Test Cases *** -RpcClient with port - Test ozone shell o3:// om:9862 ${prefix}-rpcwoport - -RpcClient volume acls - Test Volume Acls o3:// om:9862 ${prefix}-rpcwoport2 - -RpcClient bucket acls - Test Bucket Acls o3:// om:9862 ${prefix}-rpcwoport2 - -RpcClient key acls - Test Key Acls o3:// om:9862 ${prefix}-rpcwoport2 - -RpcClient without host - Test ozone shell o3:// ${EMPTY} ${prefix}-rpcwport - -RpcClient without scheme - Test ozone shell ${EMPTY} ${EMPTY} ${prefix}-rpcwoscheme - - -*** Keywords *** -Test ozone shell - [arguments] ${protocol} ${server} ${volume} - ${result} = Execute ozone sh volume create ${protocol}${server}/${volume} --quota 100TB - Should not contain ${result} Failed - ${result} = Execute ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}")' - Should contain ${result} creationTime - ${result} = Execute ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '. | select(.name=="${volume}")' - Should contain ${result} creationTime -# TODO: Disable updating the owner, acls should be used to give access to other user. 
- Execute ozone sh volume update ${protocol}${server}/${volume} --quota 10TB -# ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' -# Should Be Equal ${result} bill - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}") | .quota' - Should Be Equal ${result} 10995116277760 - Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .storageType' - Should Be Equal ${result} DISK - ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .volumeName' - Should Be Equal ${result} ${volume} - Run Keyword Test key handling ${protocol} ${server} ${volume} - Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 - Execute ozone sh volume delete ${protocol}${server}/${volume} - -Test Volume Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh volume create ${protocol}${server}/${volume} - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh volume addacl ${protocol}${server}/${volume} -a user:superuser1:rwxy[DEFAULT] - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - ${result} = Execute ozone sh volume removeacl ${protocol}${server}/${volume} -a user:superuser1:xy - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - ${result} = Execute ozone sh volume setacl ${protocol}${server}/${volume} -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh volume getacl ${protocol}${server}/${volume} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" . - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . - -Test Bucket Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh bucket addacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:rwxy - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh bucket removeacl ${protocol}${server}/${volume}/bb1 -a user:superuser1:xy - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" - ${result} = Execute ozone sh bucket setacl ${protocol}${server}/${volume}/bb1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh bucket getacl ${protocol}${server}/${volume}/bb1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" . - - -Test key handling - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key1 /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - Execute ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1 - Execute ls -l NOTICE.txt.1 - ${result} = Execute ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1")' - Should contain ${result} creationTime - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1") | .name' - Should Be Equal ${result} key1 - Execute ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2 - ${result} = Execute ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.name' - Should Be Equal ${result} key2 - Execute ozone sh key delete ${protocol}${server}/${volume}/bb1/key2 - -Test key Acls - [arguments] ${protocol} ${server} ${volume} - Execute ozone sh key put ${protocol}${server}/${volume}/bb1/key2 /opt/hadoop/NOTICE.txt - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh key addacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:rwxy - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh key removeacl ${protocol}${server}/${volume}/bb1/key2 -a user:superuser1:xy - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" - ${result} = Execute ozone sh key setacl ${protocol}${server}/${volume}/bb1/key2 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc - ${result} = Execute ozone sh key getacl ${protocol}${server}/${volume}/bb1/key2 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . diff --git a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot b/hadoop-ozone/dist/src/main/smoketest/commonlib.robot deleted file mode 100644 index 88f6c4a9e8e..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/commonlib.robot +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Library OperatingSystem -Library String -Library BuiltIn - -*** Variables *** -${SECURITY_ENABLED} %{SECURITY_ENABLED} - -*** Keywords *** -Execute - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} - -Execute And Ignore Error - [arguments] ${command} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - [return] ${output} - -Execute and checkrc - [arguments] ${command} ${expected_error_code} - ${rc} ${output} = Run And Return Rc And Output ${command} - Log ${output} - Should Be Equal As Integers ${rc} ${expected_error_code} - [return] ${output} - -Compare files - [arguments] ${file1} ${file2} - ${checksumbefore} = Execute md5sum ${file1} | awk '{print $1}' - ${checksumafter} = Execute md5sum ${file2} | awk '{print $1}' - Should Be Equal ${checksumbefore} ${checksumafter} - -Install aws cli - ${rc} ${output} = Run And Return Rc And Output which apt-get - Run Keyword if '${rc}' == '0' Install aws cli s3 debian - ${rc} ${output} = Run And Return Rc And Output yum --help - Run Keyword if '${rc}' == '0' Install aws cli s3 centos - -Kinit HTTP user - ${hostname} = Execute hostname - Wait Until Keyword Succeeds 2min 10sec Execute kinit -k HTTP/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/HTTP.keytab - -Kinit test user - [arguments] ${user} ${keytab} - ${hostname} = Execute hostname - Set Suite Variable ${TEST_USER} ${user}/${hostname}@EXAMPLE.COM - Wait Until Keyword Succeeds 2min 10sec Execute kinit -k ${user}/${hostname}@EXAMPLE.COM -t /etc/security/keytabs/${keytab} diff --git a/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot b/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot deleted file mode 100644 index da97001725b..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/createbucketenv.robot +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Create bucket and volume for any other testings -Library OperatingSystem -Resource commonlib.robot -Test Timeout 2 minute - - -*** Variables *** -${volume} vol1 -${bucket} bucket1 - - -*** Keywords *** -Create volume - ${result} = Execute ozone sh volume create /${volume} --user hadoop --quota 100TB - Should not contain ${result} Failed -Create bucket - Execute ozone sh bucket create /${volume}/${bucket} - -*** Test Cases *** -Test ozone shell - ${result} = Execute And Ignore Error ozone sh bucket info /${volume}/${bucket} - Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create volume - Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create bucket - Run Keyword if "BUCKET_NOT_FOUND" in """${result}""" Create bucket - ${result} = Execute ozone sh bucket info /${volume}/${bucket} - Should not contain ${result} NOT_FOUND diff --git a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot deleted file mode 100644 index 2f93e6c99e9..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation Create directories required for MR test -Library OperatingSystem -Resource commonlib.robot -Test Timeout 2 minute - - -*** Variables *** -${volume} vol1 -${bucket} bucket1 - - -*** Keywords *** -Create volume - ${result} = Execute ozone sh volume create /${volume} --user hadoop --quota 100TB - Should not contain ${result} Failed -Create bucket - Execute ozone sh bucket create /${volume}/${bucket} - -*** Test Cases *** -Create test volume, bucket and key - ${result} = Execute And Ignore Error ozone sh bucket info /${volume}/${bucket} - Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create volume - Run Keyword if "VOLUME_NOT_FOUND" in """${result}""" Create bucket - Run Keyword if "BUCKET_NOT_FOUND" in """${result}""" Create bucket - ${result} = Execute ozone sh bucket info /${volume}/${bucket} - Should not contain ${result} NOT_FOUND - Execute ozone sh key put /vol1/bucket1/key1 LICENSE.txt - -Create user dir for hadoop - Execute ozone fs -mkdir /user - Execute ozone fs -mkdir /user/hadoop diff --git a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot b/hadoop-ozone/dist/src/main/smoketest/env-compose.robot deleted file mode 100644 index d529d7f02f2..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/env-compose.robot +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation High level utilities to execute commands and tests in docker-compose based environments. -Resource commonlib.robot - - -*** Keywords *** - -Run tests on host - [arguments] ${host} ${robotfile} - ${result} = Execute docker-compose exec ${host} robot smoketest/${robotfile} - -Execute on host - [arguments] ${host} ${command} - ${rc} ${output} = Run And Return Rc And Output docker-compose exec ${host} ${command} - Log ${output} - Should Be Equal As Integers ${rc} 0 - [return] ${output} diff --git a/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot b/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot deleted file mode 100644 index f4705eb0a3d..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/gdpr/gdpr.robot +++ /dev/null @@ -1,89 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
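Both createbucketenv.robot and createmrenv.robot above use the same idempotent setup pattern: probe the bucket with `ozone sh bucket info` and only create the volume and bucket when the probe output mentions `VOLUME_NOT_FOUND` or `BUCKET_NOT_FOUND`. A hedged shell rendering of that pattern, reusing the tests' default names `vol1`/`bucket1`:

```
out=$(ozone sh bucket info /vol1/bucket1 2>&1 || true)
case "$out" in
  *VOLUME_NOT_FOUND*)
    ozone sh volume create /vol1 --user hadoop --quota 100TB
    ozone sh bucket create /vol1/bucket1 ;;
  *BUCKET_NOT_FOUND*)
    ozone sh bucket create /vol1/bucket1 ;;
esac
# the probe should now succeed without any NOT_FOUND error
ozone sh bucket info /vol1/bucket1
```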
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoketest Ozone GDPR Feature -Library OperatingSystem -Library BuiltIn -Library String -Resource ../commonlib.robot -Suite Setup Generate volume - -*** Variables *** -${volume} generated - -*** Keywords *** -Generate volume - ${random} = Generate Random String 5 [LOWER] - Set Suite Variable ${volume} ${random} - -*** Test Cases *** -Test GDPR disabled - Test GDPR(disabled) without explicit options ${volume} - -Test GDPR --enforcegdpr=true - Test GDPR with --enforcegdpr=true ${volume} - -Test GDPR -g=true - Test GDPR with -g=true ${volume} - -Test GDPR -g=false - Test GDPR with -g=false ${volume} - -*** Keywords *** -Test GDPR(disabled) without explicit options - [arguments] ${volume} - Execute ozone sh volume create /${volume} --quota 100TB - Execute ozone sh bucket create /${volume}/mybucket1 - ${result} = Execute ozone sh bucket info /${volume}/mybucket1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket1") | .metadata | .gdprEnabled' - Should Be Equal ${result} null - Execute ozone sh key put /${volume}/mybucket1/mykey /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - ${result} = Execute ozone sh key info /${volume}/mybucket1/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled' - Should Be Equal ${result} null - Execute ozone sh key delete /${volume}/mybucket1/mykey - -Test GDPR with --enforcegdpr=true - [arguments] ${volume} - Execute ozone sh bucket create --enforcegdpr=true /${volume}/mybucket2 - ${result} = Execute ozone sh bucket info /${volume}/mybucket2 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket2") | .metadata | .gdprEnabled' - Should Be Equal ${result} true - Execute ozone sh key put /${volume}/mybucket2/mykey /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - ${result} = Execute ozone sh key info /${volume}/mybucket2/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled' - Should Be Equal ${result} true - Execute ozone sh key delete /${volume}/mybucket2/mykey - -Test GDPR with -g=true - [arguments] ${volume} - Execute ozone sh bucket create -g=true /${volume}/mybucket3 - ${result} = Execute ozone sh bucket info /${volume}/mybucket3 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mybucket3") | .metadata | .gdprEnabled' - Should Be Equal ${result} true - Execute ozone sh key put /${volume}/mybucket3/mykey /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - ${result} = Execute ozone sh key info /${volume}/mybucket3/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled' - Should Be Equal ${result} true - Execute ozone sh key delete /${volume}/mybucket3/mykey - -Test GDPR with -g=false - [arguments] ${volume} - Execute ozone sh bucket create /${volume}/mybucket4 - ${result} = Execute ozone sh bucket info /${volume}/mybucket4 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. 
| select(.name=="mybucket4") | .metadata | .gdprEnabled' - Should Be Equal ${result} null - Execute ozone sh key put /${volume}/mybucket4/mykey /opt/hadoop/NOTICE.txt - Execute rm -f NOTICE.txt.1 - ${result} = Execute ozone sh key info /${volume}/mybucket4/mykey | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="mykey") | .metadata | .gdprEnabled' - Should Be Equal ${result} null - Execute ozone sh key delete /${volume}/mybucket4/mykey diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot b/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot deleted file mode 100644 index 5d855556da1..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/kinit-hadoop.robot +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Kinit test user -Library OperatingSystem -Resource commonlib.robot -Test Timeout 2 minute - - -*** Test Cases *** -Kinit - Kinit test user hadoop hadoop.keytab diff --git a/hadoop-ozone/dist/src/main/smoketest/kinit.robot b/hadoop-ozone/dist/src/main/smoketest/kinit.robot deleted file mode 100644 index c9c1b754178..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/kinit.robot +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Kinit test user -Library OperatingSystem -Resource commonlib.robot -Test Timeout 2 minute - - -*** Variables *** -${testuser} testuser - -*** Test Cases *** -Kinit - Kinit test user ${testuser} ${testuser}.keytab diff --git a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot b/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot deleted file mode 100644 index 789ec4f7fc1..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/mapreduce.robot +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
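The GDPR checks above all follow one pipeline: dump the bucket or key as JSON with `ozone sh ... info`, strip log noise with `grep -Ev`, and extract `.metadata.gdprEnabled` with `jq`. A minimal sketch of that check (volume and bucket names are placeholders; the real test generates a random volume name):

```
ozone sh bucket info /vol1/mybucket1 \
  | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' \
  | jq -r '. | select(.name=="mybucket1") | .metadata | .gdprEnabled'
# prints "true" for a bucket created with --enforcegdpr=true or -g=true, "null" otherwise
```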
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Execute MR jobs -Library OperatingSystem -Resource commonlib.robot -Test Timeout 4 minute - - -*** Variables *** -${volume} vol1 -${bucket} bucket1 -${hadoop.version} 3.2.0 - - -*** Test cases *** -Execute PI calculation - ${output} = Execute yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar pi 3 3 - Should Contain ${output} completed successfully - -Execute WordCount - ${random} Generate Random String 2 [NUMBERS] - ${output} = Execute yarn jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-${hadoop.version}.jar wordcount o3fs://bucket1.vol1/key1 o3fs://bucket1.vol1/key1-${random}.count - Should Contain ${output} completed successfully diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot deleted file mode 100644 index 8d12a526ea4..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/hadoopo3fs.robot +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Test ozone fs with hadoopfs -Library OperatingSystem -Library String -Resource ../commonlib.robot - -*** Variables *** -${DATANODE_HOST} datanode -${PREFIX} ozone - -*** Test cases *** - -Test hadoop dfs - ${random} = Generate Random String 5 [NUMBERS] - ${result} = Execute hdfs dfs -put /opt/hadoop/NOTICE.txt o3fs://bucket1.vol1/${PREFIX}-${random} - ${result} = Execute hdfs dfs -ls o3fs://bucket1.vol1/ - Should contain ${result} ${PREFIX}-${random} diff --git a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot b/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot deleted file mode 100644 index f728691b5f7..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot +++ /dev/null @@ -1,112 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Ozonefs test -Library OperatingSystem -Resource ../commonlib.robot - -*** Variables *** - - -*** Test Cases *** -Create volume and bucket - Execute ozone sh volume create o3://om/fstest --quota 100TB - Execute ozone sh volume create o3://om/fstest2 --quota 100TB - Execute ozone sh bucket create o3://om/fstest/bucket1 - Execute ozone sh bucket create o3://om/fstest/bucket2 - Execute ozone sh bucket create o3://om/fstest2/bucket3 - -Check volume from ozonefs - ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/ - -Run ozoneFS tests - Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} testdir/deep - Execute ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/testdir/deep/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} NOTICE.txt - - Execute ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} PUTFILE.txt - - ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/ - Should contain ${result} NOTICE.txt - Should contain ${result} PUTFILE.txt - - Execute ozone fs -mv o3fs://bucket1.fstest/testdir/deep/NOTICE.txt o3fs://bucket1.fstest/testdir/deep/MOVED.TXT - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} MOVED.TXT - Should not contain ${result} NOTICE.txt - - Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep/subdir1 - Execute ozone fs -cp o3fs://bucket1.fstest/testdir/deep/MOVED.TXT o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} subdir1/NOTICE.txt - - ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/testdir/deep/subdir1/ - Should contain ${result} NOTICE.txt - - Execute ozone fs -cat o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt - Should not contain ${result} Failed - - Execute ozone fs -rm o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should not contain ${result} NOTICE.txt - - ${result} = Execute ozone fs -rmdir o3fs://bucket1.fstest/testdir/deep/subdir1/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should not contain ${result} subdir1 - - Execute ozone fs -touch o3fs://bucket1.fstest/testdir/TOUCHFILE.txt - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should contain ${result} TOUCHFILE.txt - - Execute ozone fs -rm -r o3fs://bucket1.fstest/testdir/ - ${result} = Execute ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name' - Should not contain ${result} testdir - - Execute rm -Rf localdir1 - Execute mkdir localdir1 - Execute cp NOTICE.txt 
localdir1/LOCAL.txt - Execute ozone fs -mkdir -p o3fs://bucket1.fstest/testdir1 - Execute ozone fs -copyFromLocal localdir1 o3fs://bucket1.fstest/testdir1/ - Execute ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir1/NOTICE.txt - - ${result} = Execute ozone fs -ls -R o3fs://bucket1.fstest/testdir1/ - Should contain ${result} localdir1/LOCAL.txt - Should contain ${result} testdir1/NOTICE.txt - - Execute ozone fs -mkdir -p o3fs://bucket2.fstest/testdir2 - Execute ozone fs -mkdir -p o3fs://bucket3.fstest2/testdir3 - - Execute ozone fs -cp o3fs://bucket1.fstest/testdir1/localdir1 o3fs://bucket2.fstest/testdir2/ - - Execute ozone fs -cp o3fs://bucket1.fstest/testdir1/localdir1 o3fs://bucket3.fstest2/testdir3/ - - Execute ozone sh key put o3://om/fstest/bucket1/KEY.txt NOTICE.txt - ${result} = Execute ozone fs -ls o3fs://bucket1.fstest/KEY.txt - Should contain ${result} KEY.txt - ${rc} ${result} = Run And Return Rc And Output ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/KEY.txt - Should Be Equal As Integers ${rc} 1 - Should contain ${result} File exists - Execute rm -Rf GET.txt - Execute ozone fs -get o3fs://bucket1.fstest/KEY.txt GET.txt - Execute ls -l GET.txt - ${rc} ${result} = Run And Return Rc And Output ozone fs -ls o3fs://abcde.pqrs/ - Should Be Equal As Integers ${rc} 1 - Should Match Regexp ${result} (Check access operation failed)|(Volume pqrs is not found) diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot deleted file mode 100644 index 40e7df13fbf..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ /dev/null @@ -1,274 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
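The ozonefs tests above exercise the `o3fs://` scheme, where paths take the form `o3fs://<bucket>.<volume>/<key>` and data written through `ozone fs` is also visible via `ozone sh key list`. A short illustrative sequence in the spirit of those tests (bucket/volume names are the tests' own `bucket1`/`fstest`):

```
ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep
ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt
# the key written through o3fs shows up in the object listing as well
ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
```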
- -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Keywords *** -Create Random file - [arguments] ${size_in_megabytes} - Execute dd if=/dev/urandom of=/tmp/part1 bs=1048576 count=${size_in_megabytes} - - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -Test Multipart Upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId -# initiate again - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey - ${nextUploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - Should Not Be Equal ${uploadID} ${nextUploadID} - -# upload part -# each part should be minimum 5mb, other wise during complete multipart -# upload we get error entity too small. So, considering further complete -# multipart upload, uploading each part as 5MB file, exception is for last part - - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag -# override part - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/part1 --upload-id ${nextUploadID} - Should contain ${result} ETag - - -Test Multipart Upload Complete - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey1 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - -#upload parts - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey1 - Should contain ${result} ETag - -#read file and check the key - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key multipartKey1 /tmp/multipartKey1.result - Execute cat /tmp/part1 /tmp/part2 >> /tmp/multipartKey1 - Compare files /tmp/multipartKey1 /tmp/multipartKey1.result - -Test Multipart Upload Complete Entity too small - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey2 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain 
${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - -#upload parts - Execute echo "Part1" > /tmp/part1 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey2 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey2 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey2 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' 255 - Should contain ${result} EntityTooSmall - - -Test Multipart Upload Complete Invalid part - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey3 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - -#upload parts - Execute echo "Part1" > /tmp/part1 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey3 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey3 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#complete multipart upload - ${result} = Execute AWSS3APICli and checkrc complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]' 255 - Should contain ${result} InvalidPart - -Test abort Multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey4 --storage-class REDUCED_REDUNDANCY - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key multipartKey4 --upload-id ${uploadID} 0 - -Test abort Multipart upload with invalid uploadId - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key multipartKey5 --upload-id "random" 255 - -Upload part with Incorrect uploadID - Execute echo "Multipart upload" > /tmp/testfile - ${result} = Execute AWSS3APICli and checkrc upload-part --bucket ${BUCKET} --key multipartKey --part-number 1 --body /tmp/testfile --upload-id "random" 255 - Should contain ${result} NoSuchUpload - -Test list parts -#initiate multipart upload - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key multipartKey5 - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} multipartKey - Should contain ${result} UploadId - -#upload parts - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli 
upload-part --bucket ${BUCKET} --key multipartKey5 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key multipartKey5 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - -#list parts - ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} - ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0 - ${part2} = Execute and checkrc echo '${result}' | jq -r '.Parts[1].ETag' 0 - Should Be equal ${part1} ${eTag1} - Should contain ${part2} ${eTag2} - Should contain ${result} STANDARD - -#list parts with max-items and next token - ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} --max-items 1 - ${part1} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0 - ${token} = Execute and checkrc echo '${result}' | jq -r '.NextToken' 0 - Should Be equal ${part1} ${eTag1} - Should contain ${result} STANDARD - - ${result} = Execute AWSS3APICli list-parts --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} --max-items 1 --starting-token ${token} - ${part2} = Execute and checkrc echo '${result}' | jq -r '.Parts[0].ETag' 0 - Should Be equal ${part2} ${eTag2} - Should contain ${result} STANDARD - -#finally abort it - ${result} = Execute AWSS3APICli and checkrc abort-multipart-upload --bucket ${BUCKET} --key multipartKey5 --upload-id ${uploadID} 0 - -Test Multipart Upload with the simplified aws s3 cp API - Create Random file 22 - Execute AWSS3Cli cp /tmp/part1 s3://${BUCKET}/mpyawscli - Execute AWSS3Cli cp s3://${BUCKET}/mpyawscli /tmp/part1.result - Execute AWSS3Cli rm s3://${BUCKET}/mpyawscli - Compare files /tmp/part1 /tmp/part1.result - -Test Multipart Upload Put With Copy - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key copytest/source --body /tmp/part1 - - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key copytest/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copytest/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copytest/source - Should contain ${result} ${BUCKET} - Should contain ${result} ETag - Should contain ${result} LastModified - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copytest/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1}]' - Execute AWSS3APICli get-object --bucket ${BUCKET} --key copytest/destination /tmp/part-result - - Compare files /tmp/part1 /tmp/part-result - -Test Multipart Upload Put With Copy and range - Run Keyword Create Random file 10 - ${result} = Execute AWSS3APICli put-object --bucket ${BUCKET} --key copyrange/source --body /tmp/part1 - - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key copyrange/destination - - ${uploadID} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} 
${BUCKET} - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 1 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=0-10485758 - Should contain ${result} ${BUCKET} - Should contain ${result} ETag - Should contain ${result} LastModified - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - ${result} = Execute AWSS3APICli upload-part-copy --bucket ${BUCKET} --key copyrange/destination --upload-id ${uploadID} --part-number 2 --copy-source ${BUCKET}/copyrange/source --copy-source-range bytes=10485758-10485760 - Should contain ${result} ${BUCKET} - Should contain ${result} ETag - Should contain ${result} LastModified - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.CopyPartResult.ETag' 0 - - - Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key copyrange/destination --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Execute AWSS3APICli get-object --bucket ${BUCKET} --key copyrange/destination /tmp/part-result - - Compare files /tmp/part1 /tmp/part-result - -Test Multipart Upload list - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key listtest/key1 - ${uploadID1} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} listtest/key1 - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli create-multipart-upload --bucket ${BUCKET} --key listtest/key2 - ${uploadID2} = Execute and checkrc echo '${result}' | jq -r '.UploadId' 0 - Should contain ${result} ${BUCKET} - Should contain ${result} listtest/key2 - Should contain ${result} UploadId - - ${result} = Execute AWSS3APICli list-multipart-uploads --bucket ${BUCKET} --prefix listtest - Should contain ${result} ${uploadID1} - Should contain ${result} ${uploadID2} - - ${count} = Execute and checkrc echo '${result}' | jq -r '.Uploads | length' 0 - Should Be Equal ${count} 2 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/README.md b/hadoop-ozone/dist/src/main/smoketest/s3/README.md deleted file mode 100644 index 70ccda7c35e..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/README.md +++ /dev/null @@ -1,27 +0,0 @@ - - -## Ozone S3 Gatway Acceptance Tests - -Note: the aws cli based acceptance tests can be cross-checked with the original AWS s3 endpoint. - -You need to - - 1. Create a bucket - 2. Configure your local aws cli - 3. Set bucket/endpointurl during the robot test execution - -``` -robot -v bucket:ozonetest -v OZONE_TEST:false -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazonaws.com smoketest/s3 -``` diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot b/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot deleted file mode 100644 index f1bbea933bf..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/__init__.robot +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
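As the comment in the multipart tests above notes, every part except the last must be at least 5 MB, otherwise `complete-multipart-upload` fails with EntityTooSmall. A condensed sketch of the happy path those tests exercise with the aws CLI (bucket name and endpoint are placeholders; the suite normally generates the bucket):

```
# initiate, upload two parts, then complete with the returned ETags
uploadId=$(aws s3api --endpoint-url http://s3g:9878 create-multipart-upload \
    --bucket bucket1 --key multipartKey | jq -r '.UploadId')
dd if=/dev/urandom of=/tmp/part1 bs=1048576 count=5          # every non-final part >= 5 MB
etag1=$(aws s3api --endpoint-url http://s3g:9878 upload-part --bucket bucket1 \
    --key multipartKey --part-number 1 --body /tmp/part1 --upload-id "$uploadId" | jq -r '.ETag')
echo "last part" > /tmp/part2                                 # the last part may be small
etag2=$(aws s3api --endpoint-url http://s3g:9878 upload-part --bucket bucket1 \
    --key multipartKey --part-number 2 --body /tmp/part2 --upload-id "$uploadId" | jq -r '.ETag')
aws s3api --endpoint-url http://s3g:9878 complete-multipart-upload --upload-id "$uploadId" \
    --bucket bucket1 --key multipartKey \
    --multipart-upload "Parts=[{ETag=$etag1,PartNumber=1},{ETag=$etag2,PartNumber=2}]"
```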
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource ./commonawslib.robot -Test Setup Setup s3 tests \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot deleted file mode 100644 index 8762d5dac6f..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource ./commonawslib.robot -Suite Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -File upload and directory list - Execute date > /tmp/testfile - ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET} - Should contain ${result} upload - ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file - Should contain ${result} upload - ${result} = Execute AWSS3Cli ls s3://${BUCKET} - Should contain ${result} testfile - Should contain ${result} dir1 - Should not contain ${result} dir2 - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/ - Should not contain ${result} testfile - Should not contain ${result} dir1 - Should contain ${result} dir2 - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/dir1/dir2/file - Should not contain ${result} testfile - Should not contain ${result} dir1 - Should contain ${result} file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot deleted file mode 100644 index 4d859927754..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -Create bucket which already exists -# Bucket already is created in Test Setup. - ${result} = Execute AWSS3APICli create-bucket --bucket ${BUCKET} - Should contain ${result} ${BUCKET} - Should contain ${result} Location diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot deleted file mode 100644 index 2ce5002a108..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/buckethead.robot +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -Head Bucket not existent - ${result} = Execute AWSS3APICli head-bucket --bucket ${BUCKET} - ${result} = Execute AWSS3APICli and checkrc head-bucket --bucket ozonenosuchbucketqqweqwe 255 - Should contain ${result} Bad Request - Should contain ${result} 400 diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot deleted file mode 100644 index 4fe9b6507da..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketlist.robot +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -List buckets - ${result} = Execute AWSS3APICli list-buckets | jq -r '.Buckets[].Name' - Should contain ${result} ${BUCKET} \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot deleted file mode 100644 index 13356354ea4..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Resource ../commonlib.robot -Resource ../commonlib.robot - -*** Variables *** -${OZONE_S3_HEADER_VERSION} v4 -${OZONE_S3_SET_CREDENTIALS} true -${BUCKET} bucket-999 - -*** Keywords *** -Execute AWSS3APICli - [Arguments] ${command} - ${output} = Execute aws s3api --endpoint-url ${ENDPOINT_URL} ${command} - [return] ${output} - -Execute AWSS3APICli and checkrc - [Arguments] ${command} ${expected_error_code} - ${output} = Execute and checkrc aws s3api --endpoint-url ${ENDPOINT_URL} ${command} ${expected_error_code} - [return] ${output} - -Execute AWSS3Cli - [Arguments] ${command} - ${output} = Execute aws s3 --endpoint-url ${ENDPOINT_URL} ${command} - [return] ${output} - -Install aws cli s3 centos - Execute sudo yum install -y awscli - -Install aws cli s3 debian - Execute sudo apt-get install -y awscli - -Setup v2 headers - Set Environment Variable AWS_ACCESS_KEY_ID ANYID - Set Environment Variable AWS_SECRET_ACCESS_KEY ANYKEY - -Setup v4 headers - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab - ${result} = Execute ozone s3 getsecret - ${accessKey} = Get Regexp Matches ${result} (?<=awsAccessKey=).* - ${accessKey} = Get Variable Value ${accessKey} sdsdasaasdasd - ${secret} = Get Regexp Matches ${result} (?<=awsSecret=).* - - ${len}= Get Length ${accessKey} - ${accessKey}= Set Variable If ${len} > 0 ${accessKey[0]} kljdfslff - ${len}= Get Length ${secret} - ${secret}= Set Variable If ${len} > 0 ${secret[0]} dhafldhlf - Execute aws configure set default.s3.signature_version s3v4 - Execute aws configure set aws_access_key_id ${accessKey} - Execute aws configure set aws_secret_access_key ${secret} - Execute aws configure set region us-west-1 - -Setup incorrect credentials for S3 - Execute aws configure set default.s3.signature_version s3v4 - Execute aws configure set aws_access_key_id dlfknslnfslf - Execute aws configure set 
aws_secret_access_key dlfknslnfslf - Execute aws configure set region us-west-1 - -Create bucket - ${postfix} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${BUCKET} bucket-${postfix} - Execute AWSS3APICli create-bucket --bucket ${BUCKET} - -Setup s3 tests - Run Keyword Install aws cli - Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers - Run Keyword if '${BUCKET}' == 'generated' Create bucket diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot deleted file mode 100644 index c6b568c95ab..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated -${DESTBUCKET} generated1 - - -*** Keywords *** -Create Dest Bucket - - ${postfix} = Generate Random String 5 [NUMBERS] - Set Suite Variable ${DESTBUCKET} destbucket-${postfix} - Execute AWSS3APICli create-bucket --bucket ${DESTBUCKET} - - -*** Test Cases *** -Copy Object Happy Scenario - Run Keyword if '${DESTBUCKET}' == 'generated1' Create Dest Bucket - Execute date > /tmp/copyfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix copyobject/ - Should contain ${result} f1 - - ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1 - ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix copyobject/ - Should contain ${result} f1 - #copying again will not throw error - ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1 - ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix copyobject/ - Should contain ${result} f1 - -Copy Object Where Bucket is not available - ${result} = Execute AWSS3APICli and checkrc copy-object --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1 255 - Should contain ${result} NoSuchBucket - ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1 255 - Should contain ${result} NoSuchBucket - -Copy Object Where both source and dest are same with change to storageclass - ${result} = Execute AWSS3APICli copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 
--copy-source ${DESTBUCKET}/copyobject/f1 - Should contain ${result} ETag - -Copy Object Where Key not available - ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey 255 - Should contain ${result} NoSuchKey diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot deleted file mode 100644 index 9e57d5003ac..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** -Delete file with s3api - Execute date > /tmp/testfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix deletetestapi/ - Should contain ${result} f1 - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapi/f1 - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix deletetestapi/ - Should not contain ${result} f1 -#In case of HTTP 500, the error code is printed out to the console. 
- Should not contain ${result} 500 - -Delete file with s3api, file doesn't exist - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/ - Should not contain ${result} thereisnosuchfile - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key thereisnosuchfile - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/ - Should not contain ${result} thereisnosuchfile - -Delete dir with s3api - Execute date > /tmp/testfile - ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapidir/f1 - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapidir/ - Should contain ${result} f1 - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapidir/ - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapidir/ - Should contain ${result} f1 - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapidir/f1 - - -Delete file with s3api, file doesn't exist, prefix of a real file - Execute date > /tmp/testfile - ${result} = Execute AWSS3Cli cp /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapiprefix/ - Should contain ${result} filefile - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapiprefix/file - ${result} = Execute AWSS3Cli ls s3://${BUCKET}/deletetestapiprefix/ - Should contain ${result} filefile - ${result} = Execute AWSS3APICli delete-object --bucket ${BUCKET} --key deletetestapiprefix/filefile - - - -Delete file with s3api, bucket doesn't exist - ${result} = Execute AWSS3APICli and checkrc delete-object --bucket ${BUCKET}-nosuchbucket --key f1 255 - Should contain ${result} NoSuchBucket diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot deleted file mode 100644 index 542ef99c7f0..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
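For the secure setups exercised by these S3 tests, commonawslib.robot (above) obtains S3 credentials from Ozone itself with `ozone s3 getsecret` and feeds them to the aws CLI. A hedged sketch of that flow, assuming a kinit'd user and GNU grep's `-P` for the lookbehind regex used by the Robot keyword:

```
secret=$(ozone s3 getsecret)
accessKey=$(echo "$secret" | grep -oP '(?<=awsAccessKey=).*')
secretKey=$(echo "$secret" | grep -oP '(?<=awsSecret=).*')
aws configure set default.s3.signature_version s3v4
aws configure set aws_access_key_id "$accessKey"
aws configure set aws_secret_access_key "$secretKey"
aws configure set region us-west-1
```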
- -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -Delete file with multi delete - Execute date > /tmp/testfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix multidelete/ - Should contain ${result} multidelete/f1 - Should contain ${result} multidelete/f2 - Should contain ${result} multidelete/f3 - Should contain ${result} STANDARD - Should not contain ${result} REDUCED_REDUNDANCY - ${result} = Execute AWSS3APICli delete-objects --bucket ${BUCKET} --delete 'Objects=[{Key=multidelete/f1},{Key=multidelete/f2},{Key=multidelete/f4}]' - Should not contain ${result} Error - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix multidelete/ - Should not contain ${result} multidelete/f1 - Should not contain ${result} multidelete/f2 - Should contain ${result} multidelete/f3 - Should contain ${result} STANDARD - Should not contain ${result} REDUCED_REDUNDANCY diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot deleted file mode 100644 index 1b2a5048efe..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ /dev/null @@ -1,154 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway test with aws cli -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource commonawslib.robot -Test Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${OZONE_TEST} true -${BUCKET} generated - -*** Test Cases *** - -Put object to s3 - Execute echo "Randomtext" > /tmp/testfile - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix putobject/ - Should contain ${result} f1 - - Execute touch -f /tmp/zerobyte - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key putobject/zerobyte --body /tmp/zerobyte - ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix putobject/ - Should contain ${result} zerobyte - -#This test depends on the previous test case. 
Can't be executes alone -Get object from s3 - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 /tmp/testfile.result - Compare files /tmp/testfile /tmp/testfile.result - -Get Partial object from s3 with both start and endoffset - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-4 /tmp/testfile1.result - Should contain ${result} ContentRange - Should contain ${result} bytes 0-4/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=0 bs=1 count=5 2>/dev/null - ${actualData} = Execute cat /tmp/testfile1.result - Should Be Equal ${expectedData} ${actualData} - - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=2-4 /tmp/testfile1.result1 - Should contain ${result} ContentRange - Should contain ${result} bytes 2-4/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=2 bs=1 count=3 2>/dev/null - ${actualData} = Execute cat /tmp/testfile1.result1 - Should Be Equal ${expectedData} ${actualData} - -# end offset greater than file size and start with in file length - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=2-1000 /tmp/testfile1.result2 - Should contain ${result} ContentRange - Should contain ${result} bytes 2-10/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=2 bs=1 count=9 2>/dev/null - ${actualData} = Execute cat /tmp/testfile1.result2 - Should Be Equal ${expectedData} ${actualData} - -Get Partial object from s3 with both start and endoffset(start offset and endoffset is greater than file size) - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=10000-10000 /tmp/testfile2.result 255 - Should contain ${result} InvalidRange - - -Get Partial object from s3 with both start and endoffset(end offset is greater than file size) - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-10000 /tmp/testfile2.result - Should contain ${result} ContentRange - Should contain ${result} bytes 0-10/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute cat /tmp/testfile - ${actualData} = Execute cat /tmp/testfile2.result - Should Be Equal ${expectedData} ${actualData} - -Get Partial object from s3 with only start offset - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0- /tmp/testfile3.result - Should contain ${result} ContentRange - Should contain ${result} bytes 0-10/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute cat /tmp/testfile - ${actualData} = Execute cat /tmp/testfile3.result - Should Be Equal ${expectedData} ${actualData} - -Get Partial object from s3 with both start and endoffset which are equal - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=0-0 /tmp/testfile4.result - Should contain ${result} ContentRange - Should contain ${result} bytes 0-0/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=0 bs=1 count=1 2>/dev/null - ${actualData} = Execute cat /tmp/testfile4.result - Should Be Equal ${expectedData} ${actualData} - - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=4-4 /tmp/testfile5.result - Should contain ${result} ContentRange - Should contain ${result} bytes 4-4/11 - 
Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=4 bs=1 count=1 2>/dev/null - ${actualData} = Execute cat /tmp/testfile5.result - Should Be Equal ${expectedData} ${actualData} - -Get Partial object from s3 to get last n bytes - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-4 /tmp/testfile6.result - Should contain ${result} ContentRange - Should contain ${result} bytes 7-10/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute dd if=/tmp/testfile skip=7 bs=1 count=4 2>/dev/null - ${actualData} = Execute cat /tmp/testfile6.result - Should Be Equal ${expectedData} ${actualData} - -# if end is greater than file length, returns whole file - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-10000 /tmp/testfile7.result - Should contain ${result} ContentRange - Should contain ${result} bytes 0-10/11 - Should contain ${result} AcceptRanges - ${expectedData} = Execute cat /tmp/testfile - ${actualData} = Execute cat /tmp/testfile7.result - Should Be Equal ${expectedData} ${actualData} - -Incorrect values for end and start offset - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=-11-10000 /tmp/testfile8.result - Should not contain ${result} ContentRange - Should contain ${result} AcceptRanges - ${expectedData} = Execute cat /tmp/testfile - ${actualData} = Execute cat /tmp/testfile8.result - Should Be Equal ${expectedData} ${actualData} - - ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key putobject/f1 --range bytes=11-8 /tmp/testfile9.result - Should not contain ${result} ContentRange - Should contain ${result} AcceptRanges - ${expectedData} = Execute cat /tmp/testfile - ${actualData} = Execute cat /tmp/testfile8.result - Should Be Equal ${expectedData} ${actualData} - -Zero byte file - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-0 /tmp/testfile2.result 255 - Should contain ${result} InvalidRange - - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-1 /tmp/testfile2.result 255 - Should contain ${result} InvalidRange - - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key putobject/zerobyte --range bytes=0-10000 /tmp/testfile2.result 255 - Should contain ${result} InvalidRange \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot deleted file mode 100644 index 74ba4e7869a..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation S3 gateway web ui test -Library OperatingSystem -Library String -Resource ../commonlib.robot -Resource ./commonawslib.robot -Suite Setup Setup s3 tests - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated - -*** Test Cases *** - -S3 Gateway Web UI - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL} - Should contain ${result} HTTP/1.1 307 Temporary Redirect - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL}/static/index.html - Should contain ${result} Apache Hadoop Ozone S3 diff --git a/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot deleted file mode 100644 index 6a6f0b0eb78..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/scmcli/pipeline.robot +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem -Library BuiltIn -Resource ../commonlib.robot - -*** Variables *** - - -*** Test Cases *** -Run list pipeline - ${output} = Execute ozone scmcli pipeline list - Should contain ${output} Type:RATIS, Factor:ONE, State:OPEN \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot deleted file mode 100644 index ee4688c0ae4..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-fs.robot +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. 
-Library OperatingSystem -Library String -Library BuiltIn -Resource ../commonlib.robot - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 - -*** Keywords *** -Setup volume names - ${random} Generate Random String 2 [NUMBERS] - Set Suite Variable ${volume1} fstest${random} - Set Suite Variable ${volume2} fstest2${random} - Set Suite Variable ${volume3} fstest3${random} - -*** Test Cases *** -Create volume bucket with wrong credentials - Execute kdestroy - ${rc} ${output} = Run And Return Rc And Output ozone sh volume create o3://om/fstest - Should contain ${output} Client cannot authenticate via - -Create volume bucket with credentials - # Authenticate testuser - Run Keyword Kinit test user testuser testuser.keytab - Run Keyword Setup volume names - Execute ozone sh volume create o3://om/${volume1} - Execute ozone sh volume create o3://om/${volume2} - Execute ozone sh bucket create o3://om/${volume1}/bucket1 - Execute ozone sh bucket create o3://om/${volume1}/bucket2 - Execute ozone sh bucket create o3://om/${volume2}/bucket3 - -Check volume from ozonefs - ${result} = Execute ozone fs -ls o3fs://bucket1.${volume1}/ - -Test Volume Acls - ${result} = Execute ozone sh volume create ${volume3} - Should not contain ${result} Failed - ${result} = Execute ozone sh volume getacl ${volume3} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh volume addacl ${volume3} -a user:superuser1:rwxy[DEFAULT] - ${result} = Execute ozone sh volume getacl ${volume3} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh volume removeacl ${volume3} -a user:superuser1:xy - ${result} = Execute ozone sh volume getacl ${volume3} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"READ\", \"WRITE\" - ${result} = Execute ozone sh volume setacl ${volume3} -al user:superuser1:rwxy,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh volume getacl ${volume3} - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"DEFAULT\",\n.*\"aclList\" : . \"ALL\" - -Test Bucket Acls - ${result} = Execute ozone sh bucket create ${volume3}/bk1 - Should not contain ${result} Failed - ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh bucket addacl ${volume3}/bk1 -a user:superuser1:rwxy - ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh bucket removeacl ${volume3}/bk1 -a user:superuser1:xy - ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"READ\", \"WRITE\" - ${result} = Execute ozone sh bucket setacl ${volume3}/bk1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc,group:superuser1:a[DEFAULT] - ${result} = Execute ozone sh bucket getacl ${volume3}/bk1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" - -Test key Acls - Execute ozone sh key put ${volume3}/bk1/key1 /opt/hadoop/NOTICE.txt - ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \".*\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"ALL\" . - ${result} = Execute ozone sh key addacl ${volume3}/bk1/key1 -a user:superuser1:rwxy - ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - ${result} = Execute ozone sh key removeacl ${volume3}/bk1/key1 -a user:superuser1:xy - ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\" - ${result} = Execute ozone sh key setacl ${volume3}/bk1/key1 -al user:superuser1:rwxy,group:superuser1:a,user:testuser/scm@EXAMPLE.COM:rwxyc - ${result} = Execute ozone sh key getacl ${volume3}/bk1/key1 - Should Match Regexp ${result} \"type\" : \"USER\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . \"READ\", \"WRITE\", \"READ_ACL\", \"WRITE_ACL\" - Should Match Regexp ${result} \"type\" : \"GROUP\",\n.*\"name\" : \"superuser1\",\n.*\"aclScope\" : \"ACCESS\",\n.*\"aclList\" : . 
\"ALL\" - -Test native authorizer - Execute ozone sh volume removeacl ${volume3} -a group:root:a - Execute kdestroy - Run Keyword Kinit test user testuser2 testuser2.keytab - ${result} = Execute And Ignore Error ozone sh bucket list /${volume3}/ - Should contain ${result} PERMISSION_DENIED - ${result} = Execute And Ignore Error ozone sh key list /${volume3}/bk1 - Should contain ${result} PERMISSION_DENIED - ${result} = Execute And Ignore Error ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xy - Should contain ${result} PERMISSION_DENIED User testuser2/scm@EXAMPLE.COM doesn't have WRITE_ACL permission to access volume - Execute kdestroy - Run Keyword Kinit test user testuser testuser.keytab - Execute ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:xyrw - Execute kdestroy - Run Keyword Kinit test user testuser2 testuser2.keytab - ${result} = Execute And Ignore Error ozone sh bucket list /${volume3}/ - Should contain ${result} PERMISSION_DENIED org.apache.hadoop.ozone.om.exceptions.OMException: User testuser2/scm@EXAMPLE.COM doesn't have LIST permission to access volume - Execute ozone sh volume addacl ${volume3} -a user:testuser2/scm@EXAMPLE.COM:l - Execute ozone sh bucket list /${volume3}/ - Execute ozone sh volume getacl /${volume3}/ - - ${result} = Execute And Ignore Error ozone sh key list /${volume3}/bk1 - Should contain ${result} PERMISSION_DENIED - Execute kdestroy - Run Keyword Kinit test user testuser testuser.keytab - Execute ozone sh bucket addacl ${volume3}/bk1 -a user:testuser2/scm@EXAMPLE.COM:a - Execute ozone sh bucket getacl /${volume3}/bk1 - Execute kdestroy - Run Keyword Kinit test user testuser2 testuser2.keytab - Execute ozone sh bucket getacl /${volume3}/bk1 - Execute ozone sh key list /${volume3}/bk1 - Execute kdestroy - Run Keyword Kinit test user testuser testuser.keytab diff --git a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot b/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot deleted file mode 100644 index 90166fe46d5..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/security/ozone-secure-s3.robot +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -*** Settings *** -Documentation Smoke test to start cluster with docker-compose environments. 
-Library OperatingSystem -Library String -Library BuiltIn -Resource ../commonlib.robot -Resource ../s3/commonawslib.robot - -*** Variables *** -${ENDPOINT_URL} http://s3g:9878 - -*** Keywords *** -Setup volume names - ${random} Generate Random String 2 [NUMBERS] - Set Suite Variable ${volume1} fstest${random} - Set Suite Variable ${volume2} fstest2${random} - -*** Test Cases *** -Secure S3 test Success - Run Keyword Setup s3 tests - ${output} = Execute aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123 - ${output} = Execute aws s3api --endpoint-url ${ENDPOINT_URL} list-buckets - Should contain ${output} bucket-test123 - -Secure S3 test Failure - Run Keyword Setup incorrect credentials for S3 - ${rc} ${result} = Run And Return Rc And Output aws s3api --endpoint-url ${ENDPOINT_URL} create-bucket --bucket bucket-test123 - Should Be True ${rc} > 0 - diff --git a/hadoop-ozone/dist/src/main/smoketest/test.sh b/hadoop-ozone/dist/src/main/smoketest/test.sh deleted file mode 100755 index e0a26b07930..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/test.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" -RESULT_DIR=result -#delete previous results -rm -rf "${DIR:?}/$RESULT_DIR" - -REPLACEMENT="$DIR/../compose/test-all.sh" -echo "THIS SCRIPT IS DEPRECATED. Please use $REPLACEMENT instead." - -${REPLACEMENT} -RESULT=$? -cp -r "$DIR/../compose/result" "$DIR" -exit $RESULT diff --git a/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot b/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot deleted file mode 100644 index 823981d15f9..00000000000 --- a/hadoop-ozone/dist/src/main/smoketest/topology/scmcli.robot +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -*** Settings *** -Documentation Smoketest ozone cluster startup -Library OperatingSystem -Library BuiltIn -Resource ../commonlib.robot - -*** Variables *** - - -*** Test Cases *** -Run printTopology - ${output} = Execute ozone scmcli printTopology - Should contain ${output} 10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net) /rack2 -Run printTopology -o - ${output} = Execute ozone scmcli printTopology -o - Should contain ${output} Location: /rack2 - Should contain ${output} 10.5.0.7(ozone-topology_datanode_4_1.ozone-topology_net) diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml deleted file mode 100644 index 3b29480c006..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ /dev/null @@ -1,103 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone-fault-injection-test - 0.5.0-SNAPSHOT - - hadoop-ozone-network-tests - Apache Hadoop Ozone Network Tests - Apache Hadoop Ozone Network Tests - jar - - - - - maven-resources-plugin - 3.1.0 - - - copy-resources - process-resources - - copy-resources - - - ${project.build.directory} - - - src/test/compose - true - - docker-compose.yaml - docker-config - - - - - - - - - - - - - it - - ${basedir}../../dist/target/ozone-${project.version} - - - - - org.codehaus.mojo - exec-maven-plugin - - - integration-test - - exec - - - python - - -m - pytest - -s - ${basedir}/src/test/blockade/ - - - - ${ozone.home} - - - ${project.build.directory} - - - - - - - - - - - - diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md deleted file mode 100644 index 7fb62b338e4..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/README.md +++ /dev/null @@ -1,42 +0,0 @@ - - -## Blockade Tests -Following python packages need to be installed before running the tests : - -1. blockade -2. pytest==3.2.0 - -Running test as part of the maven build: - -``` -mvn clean verify -Pit -``` - -Running test as part of the released binary: - -You can execute all blockade tests with following command: - -``` -cd $OZONE_HOME -python -m pytest tests/blockade -``` - -You can also execute specific blockade tests with following command: - -``` -cd $OZONE_HOME -python -m pytest tests/blockade/< PATH TO PYTHON FILE > -e.g: python -m pytest tests/blockade/test_blockade_datanode_isolation.py -``` \ No newline at end of file diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py deleted file mode 100644 index 582c4cc9405..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/conftest.py +++ /dev/null @@ -1,113 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import os -import time -import subprocess -import pytest - -EPOCH_TIME = int(time.time()) - - -def pytest_addoption(parser): - parser.addoption("--output-dir", - action="store", - default="/tmp/BlockadeTests", - help="location of output directory where output log " - "and plot files will be created") - parser.addoption("--log-format", - action="store", - default="%(asctime)s|%(levelname)s|%(threadName)s|" - "%(filename)s:%(lineno)s -" - " %(funcName)s()|%(message)s", - help="specify log format") - parser.addoption("--log-level", - action="store", - default="info", - help="specify log level") - parser.addoption("--containerStatusSleep", - action="store", - default="900", - help="sleep time before checking container status") - parser.addoption("--runSecondPhase", - action="store", - default="false", - help="run second phase of the tests") - - -@pytest.fixture -def run_second_phase(request): - """ - :param request: - This function returns if the user has opted for running second phase - of the tests. - """ - return request.config.getoption("--runSecondPhase") - - -def pytest_configure(config): - global OUTPUT_DIR - os.environ["CONTAINER_STATUS_SLEEP"] = config.option.containerStatusSleep - OUTPUT_DIR = "%s/%s" % (config.option.output_dir, EPOCH_TIME) - try: - os.makedirs(OUTPUT_DIR) - except OSError, e: - raise Exception(e.strerror + ": " + e.filename) - log_file = os.path.join(OUTPUT_DIR, "output.log") - - if config.option.log_level == "trace": - loglevel = eval("logging.DEBUG") - else: - loglevel = eval("logging." + config.option.log_level.upper()) - logformatter = logging.Formatter(config.option.log_format) - logging.basicConfig(filename=log_file, - filemode='w', - level=loglevel, - format=config.option.log_format) - console = logging.StreamHandler() - console.setLevel(loglevel) - console.setFormatter(logformatter) - logging.getLogger('').addHandler(console) - - -def pytest_report_teststatus(report): - logger = logging.getLogger('main') - loc, line, name = report.location - if report.outcome == 'skipped': - pass - elif report.when == 'setup': - logger.info("RUNNING TEST \"%s\" at location \"%s\" at line number" - " \"%s\"" % (name, loc, str(line))) - elif report.when == 'call': - logger.info("TEST \"%s\" %s in %3.2f seconds" % - (name, report.outcome.upper(), report.duration)) - log_file_path = "%s/%s_all_docker.log" % \ - (OUTPUT_DIR, name) - gather_docker_logs(log_file_path) - - -def pytest_sessionfinish(session): - logger = logging.getLogger('main') - logger.info("ALL TESTS FINISHED") - logger.info("ALL logs present in following directory: %s", OUTPUT_DIR) - - -def gather_docker_logs(log_file_path): - docker_compose_file = os.environ["DOCKER_COMPOSE_FILE"] - output = subprocess.check_output(["docker-compose", "-f", - docker_compose_file, "logs"]) - with open(log_file_path, "w") as text_file: - text_file.write(output) diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py deleted file mode 100644 index 13878a13a7f..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. \ No newline at end of file diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py deleted file mode 100644 index 7e32f09494d..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/blockade.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
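# --- Illustrative only: the options registered in conftest.py (shown earlier in
# --- this patch) are ordinary pytest command-line flags, so a manual run of the
# --- blockade suite could override them roughly as sketched below. The values
# --- are placeholders, not project defaults (apart from --output-dir).
import subprocess

def run_blockade_suite():
    return subprocess.call([
        "python", "-m", "pytest", "-s", "tests/blockade",
        "--output-dir", "/tmp/BlockadeTests",   # where output.log and per-test docker logs land
        "--containerStatusSleep", "300",        # seconds to wait before checking container state
        "--runSecondPhase", "true",             # also run the second phase of the tests
    ])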
- -"""This module has apis to create and remove a blockade cluster""" - -from subprocess import call -import logging -import util - -logger = logging.getLogger(__name__) - - -class Blockade(object): - - @classmethod - def blockade_destroy(cls): - logger.info("Running blockade destroy") - call(["blockade", "destroy"]) - - @classmethod - def blockade_up(cls): - logger.info("Running blockade up") - call(["blockade", "up"]) - - @classmethod - def blockade_status(cls): - logger.info("Running blockade status") - return call(["blockade", "status"]) - - @classmethod - def make_flaky(cls, flaky_node): - logger.info("flaky node: %s", flaky_node) - output = call(["blockade", "flaky", flaky_node]) - assert output == 0, "flaky command failed with exit code=[%s]" % output - - @classmethod - def blockade_fast_all(cls): - output = call(["blockade", "fast", "--all"]) - assert output == 0, "fast command failed with exit code=[%s]" % output - - @classmethod - def blockade_create_partition(cls, *args): - nodes = "" - for node_list in args: - nodes = nodes + ','.join(node_list) + " " - exit_code, output = \ - util.run_command("blockade partition %s" % nodes) - assert exit_code == 0, \ - "blockade partition command failed with exit code=[%s]" % output - - @classmethod - def blockade_join(cls): - exit_code = call(["blockade", "join"]) - assert exit_code == 0, "blockade join command failed with exit code=[%s]" \ - % exit_code - - @classmethod - def blockade_stop(cls, node, all_nodes=False): - if all_nodes: - output = call(["blockade", "stop", "--all"]) - else: - output = call(["blockade", "stop", node]) - assert output == 0, "blockade stop command failed with exit code=[%s]" \ - % output - - @classmethod - def blockade_start(cls, node, all_nodes=False): - if all_nodes: - output = call(["blockade", "start", "--all"]) - else: - output = call(["blockade", "start", node]) - assert output == 0, "blockade start command failed with " \ - "exit code=[%s]" % output - - @classmethod - def blockade_add(cls, node): - output = call(["blockade", "add", node]) - assert output == 0, "blockade add command failed" diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py deleted file mode 100644 index 9d40cf42dac..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/client.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
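# --- Illustrative only: a hedged sketch of how the Blockade wrapper defined in
# --- ozone/blockade.py above is typically driven from a test: bring the blockade
# --- network up, partition a subset of containers, then rejoin and tear down.
# --- The container names below are placeholders, not names taken from this patch.
from ozone.blockade import Blockade

def partition_one_datanode():
    Blockade.blockade_up()                                      # start blockade for the compose cluster
    Blockade.blockade_create_partition(                         # split the network into two groups
        ["ozoneblockade_om_1", "ozoneblockade_scm_1"],
        ["ozoneblockade_datanode_3"])
    Blockade.blockade_status()                                  # inspect the resulting partitions
    Blockade.blockade_join()                                    # heal the partition
    Blockade.blockade_destroy()                                 # tear the blockade down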
- -import logging - -from ozone import util -from ozone.cluster import Command - - -class OzoneClient: - - __logger__ = logging.getLogger(__name__) - - def __init__(self, cluster): - self.cluster = cluster - pass - - def create_volume(self, volume_name): - OzoneClient.__logger__.info("Creating Volume %s" % volume_name) - command = [Command.ozone, "sh volume create /%s --user root" % volume_name] - util.run_docker_command(command, self.cluster.client) - - def create_bucket(self, volume_name, bucket_name): - OzoneClient.__logger__.info("Creating Bucket %s in Volume %s" % (bucket_name, volume_name)) - command = [Command.ozone, "sh bucket create /%s/%s" % (volume_name, bucket_name)] - util.run_docker_command(command, self.cluster.client) - - def put_key(self, source_file, volume_name, bucket_name, key_name, replication_factor=None): - OzoneClient.__logger__.info("Creating Key %s in %s/%s" % (key_name, volume_name, bucket_name)) - exit_code, output = util.run_docker_command( - "ls %s" % source_file, self.cluster.client) - assert exit_code == 0, "%s does not exist" % source_file - command = [Command.ozone, "sh key put /%s/%s/%s %s" % - (volume_name, bucket_name, key_name, source_file)] - if replication_factor: - command.append("--replication=%s" % replication_factor) - - exit_code, output = util.run_docker_command(command, self.cluster.client) - assert exit_code == 0, "Ozone put Key failed with output=[%s]" % output - - def get_key(self, volume_name, bucket_name, key_name, file_path='.'): - OzoneClient.__logger__.info("Reading key %s from %s/%s" % (key_name, volume_name, bucket_name)) - command = [Command.ozone, "sh key get /%s/%s/%s %s" % - (volume_name, bucket_name, key_name, file_path)] - exit_code, output = util.run_docker_command(command, self.cluster.client) - assert exit_code == 0, "Ozone get Key failed with output=[%s]" % output - - def run_freon(self, num_volumes, num_buckets, num_keys, key_size, - replication_type="RATIS", replication_factor="THREE"): - """ - Runs freon on the cluster. - """ - command = [Command.freon, - " rk", - " --numOfVolumes " + str(num_volumes), - " --numOfBuckets " + str(num_buckets), - " --numOfKeys " + str(num_keys), - " --keySize " + str(key_size), - " --replicationType " + replication_type, - " --factor " + replication_factor] - return util.run_docker_command(command, self.cluster.client) diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py deleted file mode 100644 index 1616083377b..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/cluster.py +++ /dev/null @@ -1,316 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
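# --- Illustrative only: a minimal, hedged example of the OzoneClient wrapper
# --- defined in ozone/client.py above. It assumes a started OzoneCluster
# --- instance is passed in; the volume, bucket and key names are placeholders.
from ozone.client import OzoneClient

def write_and_read_back(cluster):
    client = OzoneClient(cluster)
    client.create_volume("vol1")                               # ozone sh volume create /vol1
    client.create_bucket("vol1", "bucket1")                    # ozone sh bucket create /vol1/bucket1
    client.put_key("/etc/passwd", "vol1", "bucket1", "key1",   # ozone sh key put ...
                   replication_factor="THREE")
    client.get_key("vol1", "bucket1", "key1", "/tmp/")         # fetch the key back to /tmp/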
- -import logging -import os -import re -import subprocess -import sys -import yaml -import time - - -from os import environ -from subprocess import call -from ozone import util -from ozone.constants import Command -from ozone.blockade import Blockade -from ozone.client import OzoneClient -from ozone.container import Container -from ozone.exceptions import ContainerNotFoundError - - -class Configuration: - """ - Configurations to be used while starting Ozone Cluster. - Here @property decorators is used to achieve getters, setters and delete - behaviour for 'datanode_count' attribute. - @datanode_count.setter will set the value for 'datanode_count' attribute. - @datanode_count.deleter will delete the current value of 'datanode_count' - attribute. - """ - - def __init__(self): - if "MAVEN_TEST" in os.environ: - compose_dir = environ.get("MAVEN_TEST") - self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml") - elif "OZONE_HOME" in os.environ: - compose_dir = os.path.join(environ.get("OZONE_HOME"), "compose", "ozoneblockade") - self.docker_compose_file = os.path.join(compose_dir, "docker-compose.yaml") - else: - __parent_dir__ = os.path.dirname(os.path.dirname(os.path.dirname( - os.path.dirname(os.path.realpath(__file__))))) - self.docker_compose_file = os.path.join(__parent_dir__, - "compose", "ozoneblockade", - "docker-compose.yaml") - self._datanode_count = 3 - os.environ["DOCKER_COMPOSE_FILE"] = self.docker_compose_file - - @property - def datanode_count(self): - return self._datanode_count - - @datanode_count.setter - def datanode_count(self, datanode_count): - self._datanode_count = datanode_count - - @datanode_count.deleter - def datanode_count(self): - del self._datanode_count - - -class OzoneCluster(object): - """ - This represents Ozone Cluster. - Here @property decorators is used to achieve getters, setters and delete - behaviour for 'om', 'scm', 'datanodes' and 'client' attributes. - """ - - __logger__ = logging.getLogger(__name__) - - def __init__(self, conf): - self.conf = conf - self.docker_compose_file = conf.docker_compose_file - self._om = None - self._scm = None - self._datanodes = None - self._client = None - self.scm_uuid = None - self.datanode_dir = None - - @property - def om(self): - return self._om - - @om.setter - def om(self, om): - self._om = om - - @om.deleter - def om(self): - del self._om - - @property - def scm(self): - return self._scm - - @scm.setter - def scm(self, scm): - self._scm = scm - - @scm.deleter - def scm(self): - del self._scm - - @property - def datanodes(self): - return self._datanodes - - @datanodes.setter - def datanodes(self, datanodes): - self._datanodes = datanodes - - @datanodes.deleter - def datanodes(self): - del self._datanodes - - @property - def client(self): - return self._client - - @client.setter - def client(self, client): - self._client = client - - @client.deleter - def client(self): - del self._client - - @classmethod - def create(cls, config=Configuration()): - return OzoneCluster(config) - - def start(self): - """ - Start Ozone Cluster in docker containers. - """ - - # check if docker is up. 
- - if "OZONE_RUNNER_VERSION" not in os.environ: - self.__logger__.error("OZONE_RUNNER_VERSION is not set.") - sys.exit(1) - - if "HDDS_VERSION" not in os.environ: - self.__logger__.error("HDDS_VERSION is not set.") - sys.exit(1) - - self.__logger__.info("Starting Ozone Cluster") - if Blockade.blockade_status() == 0: - Blockade.blockade_destroy() - - Blockade.blockade_up() - - call([Command.docker_compose, "-f", self.docker_compose_file, - "up", "-d", "--scale", - "datanode=" + str(self.conf.datanode_count)]) - self.__logger__.info("Waiting 10s for cluster start up...") - # Remove the sleep and wait only till the cluster is out of safemode - time.sleep(10) - output = subprocess.check_output([Command.docker_compose, "-f", - self.docker_compose_file, "ps"]) - node_list = [] - for out in output.split("\n")[2:-1]: - node = out.split(" ")[0] - node_list.append(node) - Blockade.blockade_add(node) - - self.om = filter(lambda x: 'om' in x, node_list)[0] - self.scm = filter(lambda x: 'scm' in x, node_list)[0] - self.datanodes = sorted(list(filter(lambda x: 'datanode' in x, node_list))) - self.client = filter(lambda x: 'ozone_client' in x, node_list)[0] - self.scm_uuid = self.__get_scm_uuid__() - self.datanode_dir = self.get_conf_value("hdds.datanode.dir") - - assert node_list, "no node found in the cluster!" - self.__logger__.info("blockade created with nodes %s", ' '.join(node_list)) - - def get_conf_value(self, key): - """ - Returns the value of given configuration key. - """ - command = [Command.ozone, "getconf -confKey " + key] - exit_code, output = util.run_docker_command(command, self.om) - return str(output).strip() - - def scale_datanode(self, datanode_count): - """ - Commission new datanodes to the running cluster. - """ - call([Command.docker_compose, "-f", self.docker_compose_file, - "up", "-d", "--scale", "datanode=" + datanode_count]) - - def partition_network(self, *args): - """ - Partition the network which is used by the cluster. - """ - Blockade.blockade_create_partition(*args) - - def restore_network(self): - """ - Restores the network partition. - """ - Blockade.blockade_join() - - def __get_scm_uuid__(self): - """ - Returns SCM's UUID. - """ - ozone_metadata_dir = self.get_conf_value("ozone.metadata.dirs") - command = "cat %s/scm/current/VERSION" % ozone_metadata_dir - exit_code, output = util.run_docker_command(command, self.scm) - output_list = output.split("\n") - key_value = [x for x in output_list if re.search(r"\w+=\w+", x)] - uuid = [token for token in key_value if 'scmUuid' in token] - return uuid.pop().split("=")[1].strip() - - def get_client(self): - return OzoneClient(self) - - def get_container(self, container_id): - command = [Command.ozone, "scmcli list -c=1 -s=%s | grep containerID", container_id - 1] - exit_code, output = util.run_docker_command(command, self.om) - if exit_code != 0: - raise ContainerNotFoundError(container_id) - return Container(container_id, self) - - def is_container_replica_exist(self, container_id, datanode): - container_parent_path = "%s/hdds/%s/current/containerDir0" % \ - (self.datanode_dir, self.scm_uuid) - command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id) - exit_code, output = util.run_docker_command(command, datanode) - container_path = output.strip() - if not container_path: - return False - return True - - def get_containers_on_datanode(self, datanode): - """ - Returns all the container on given datanode. 
- """ - container_parent_path = "%s/hdds/%s/current/containerDir0" % \ - (self.datanode_dir, self.scm_uuid) - command = "find %s -type f -name '*.container'" % container_parent_path - exit_code, output = util.run_docker_command(command, datanode) - containers = [] - - container_list = map(str.strip, output.split("\n")) - for container_path in container_list: - # Reading the container file. - exit_code, output = util.run_docker_command( - "cat " + container_path, datanode) - if exit_code is not 0: - continue - data = output.split("\n") - # Reading key value pairs from container file. - key_value = [x for x in data if re.search(r"\w+:\s\w+", x)] - content = "\n".join(key_value) - content_yaml = yaml.load(content) - if content_yaml is None: - continue - containers.append(Container(content_yaml.get('containerID'), self)) - return containers - - def get_container_state(self, container_id, datanode): - container_parent_path = "%s/hdds/%s/current/containerDir0" % \ - (self.datanode_dir, self.scm_uuid) - command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id) - exit_code, output = util.run_docker_command(command, datanode) - container_path = output.strip() - if not container_path: - raise ContainerNotFoundError("Container not found!") - - # Reading the container file. - exit_code, output = util.run_docker_command("cat " + container_path, datanode) - if exit_code != 0: - raise ContainerNotFoundError("Container not found!") - data = output.split("\n") - # Reading key value pairs from container file. - key_value = [x for x in data if re.search(r"\w+:\s\w+", x)] - content = "\n".join(key_value) - content_yaml = yaml.load(content) - return str(content_yaml.get('state')).lstrip() - - def get_container_datanodes(self, container_id): - result = [] - for datanode in self.datanodes: - container_parent_path = "%s/hdds/%s/current/containerDir0" % \ - (self.datanode_dir, self.scm_uuid) - command = "find %s -type f -name '%s.container'" % (container_parent_path, container_id) - exit_code, output = util.run_docker_command(command, datanode) - if output.strip(): - result.append(datanode) - return result - - def stop(self): - """ - Stops the Ozone Cluster. - """ - self.__logger__.info("Stopping Ozone Cluster") - call([Command.docker_compose, "-f", self.docker_compose_file, "down"]) - Blockade.blockade_destroy() - diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py deleted file mode 100644 index a79d6b1be0f..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
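# --- Illustrative only: a hedged sketch of how the container-inspection helpers
# --- from ozone/cluster.py above are combined in the blockade tests; `cluster`
# --- and `datanode` are assumed to come from a started OzoneCluster.
def assert_replicas_closed_on(cluster, datanode):
    for container in cluster.get_containers_on_datanode(datanode):
        # get_container_state reads the replica's .container file on that datanode
        state = cluster.get_container_state(container.container_id, datanode)
        assert state == 'CLOSED', \
            "container %s is %s on %s" % (container.container_id, state, datanode)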
- - -class Command(object): - docker = "docker" - docker_compose = "docker-compose" - ozone = "/opt/hadoop/bin/ozone" - freon = "/opt/hadoop/bin/ozone freon" diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py deleted file mode 100644 index 65c6b2fa92f..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/container.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import util -from ozone.exceptions import ContainerNotFoundError - - -class Container: - - def __init__(self, container_id, cluster): - self.container_id = container_id - self.cluster = cluster - - def is_on(self, datanode): - return self.cluster.is_container_replica_exist(self.container_id, datanode) - - def get_datanode_states(self): - dns = self.cluster.get_container_datanodes(self.container_id) - states = [] - for dn in dns: - states.append(self.get_state(dn)) - return states - - def get_state(self, datanode): - return self.cluster.get_container_state(self.container_id, datanode) - - def wait_until_replica_is_quasi_closed(self, datanode): - def predicate(): - try: - if self.cluster.get_container_state(self.container_id, datanode) == 'QUASI_CLOSED': - return True - else: - return False - except ContainerNotFoundError: - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("Replica is not quasi closed!") - - def wait_until_one_replica_is_quasi_closed(self): - def predicate(): - dns = self.cluster.get_container_datanodes(self.container_id) - for dn in dns: - if self.cluster.get_container_state(self.container_id, dn) == 'QUASI_CLOSED': - return True - else: - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("None of the container replica is quasi closed!") - - def wait_until_replica_is_closed(self, datanode): - def predicate(): - try: - if self.cluster.get_container_state(self.container_id, datanode) == 'CLOSED': - return True - else: - return False - except ContainerNotFoundError: - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("Replica is not closed!") - - def wait_until_one_replica_is_closed(self): - def predicate(): - dns = self.cluster.get_container_datanodes(self.container_id) - for dn in dns: - if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED': - return True - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("None of the 
container replica is closed!") - - def wait_until_two_replicas_are_closed(self): - def predicate(): - dns = self.cluster.get_container_datanodes(self.container_id) - closed_count = 0 - for dn in dns: - if self.cluster.get_container_state(self.container_id, dn) == 'CLOSED': - closed_count = closed_count + 1 - if closed_count > 1: - return True - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("None of the container replica is closed!") - - def wait_until_all_replicas_are_closed(self): - def predicate(): - try: - dns = self.cluster.get_container_datanodes(self.container_id) - for dn in dns: - if self.cluster.get_container_state(self.container_id, dn) != 'CLOSED': - return False - return True - except ContainerNotFoundError: - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("Not all the replicas are closed!") - - def wait_until_replica_is_not_open_anymore(self, datanode): - def predicate(): - try: - if self.cluster.get_container_state(self.container_id, datanode) != 'OPEN' and \ - self.cluster.get_container_state(self.container_id, datanode) != 'CLOSING': - return True - else: - return False - except ContainerNotFoundError: - return False - - util.wait_until(predicate, int(os.environ["CONTAINER_STATUS_SLEEP"]), 10) - if not predicate(): - raise Exception("Replica is not closed!") diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py deleted file mode 100644 index 9917eaad37e..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/exceptions.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class ContainerNotFoundError(RuntimeError): - """ ContainerNotFoundError run-time error. """ - def __init__(self, *args, **kwargs): - pass diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py deleted file mode 100644 index 066b16f67f4..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/ozone/util.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import time -import re -import subprocess - -from ozone.constants import Command - -logger = logging.getLogger(__name__) - - -def wait_until(predicate, timeout, check_frequency=1): - deadline = time.time() + timeout - while time.time() < deadline: - if predicate(): - return - time.sleep(check_frequency) - - -def run_docker_command(command, run_on): - if isinstance(command, list): - command = ' '.join(command) - command = [Command.docker, - "exec " + run_on, - command] - return run_command(command) - - -def run_command(cmd): - command = cmd - if isinstance(cmd, list): - command = ' '.join(cmd) - logger.info("RUNNING: %s", command) - all_output = "" - my_process = subprocess.Popen(command, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, shell=True) - while my_process.poll() is None: - op = my_process.stdout.readline() - if op: - all_output += op - logger.info(op) - other_output = my_process.communicate() - other_output = other_output[0].strip() - if other_output != "": - all_output += other_output - reg = re.compile(r"(\r\n|\n)$") - logger.debug("Output: %s", all_output) - all_output = reg.sub("", all_output, 1) - return my_process.returncode, all_output - - -def get_checksum(file_path, run_on): - command = "md5sum %s" % file_path - exit_code, output = run_docker_command(command, run_on) - assert exit_code == 0, "Cant find checksum" - output_split = output.split("\n") - result = "" - for line in output_split: - if line.find("Warning") >= 0 or line.find("is not a tty") >= 0: - logger.info("skip this line: %s", line) - else: - result = result + line - checksum = result.split(" ") - return checksum[0] diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py deleted file mode 100644 index 642056408d2..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_client_failure.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
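# --- Illustrative only: a hedged example combining the wait_until and
# --- get_checksum utilities defined in ozone/util.py above. The container name,
# --- file path and timeout are placeholders.
from ozone import util

def wait_for_file_and_checksum(container, path, timeout=60):
    def file_exists():
        exit_code, _ = util.run_docker_command("ls %s" % path, container)
        return exit_code == 0
    util.wait_until(file_exists, timeout, check_frequency=5)   # poll until the file shows up
    return util.get_checksum(path, container)                  # md5 of the file inside the container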
- -import re -import time -import logging -import ozone.util - -from ozone.cluster import OzoneCluster - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_client_failure_isolate_two_datanodes(): - """ - In this test, all DNs are isolated from each other. - two of the DNs cannot communicate with any other node in the cluster. - Expectation : - Write should fail. - Keys written before partition created should be read. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - epoch_time = int(time.time()) - volume_name = "%s-%s" % ("volume", epoch_time) - bucket_name = "%s-%s" % ("bucket", epoch_time) - key_name = "key-1" - - oz_client.create_volume(volume_name) - oz_client.create_bucket(volume_name, bucket_name) - oz_client.put_key("/etc/passwd", volume_name, bucket_name, key_name, "THREE") - - first_set = [om, scm, dns[0], client] - second_set = [dns[1]] - third_set = [dns[2]] - - logger.info("Partitioning the network") - cluster.partition_network(first_set, second_set, third_set) - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code != 0, "freon run should have failed." - - oz_client.get_key(volume_name, bucket_name, key_name, "/tmp/") - - file_checksum = ozone.util.get_checksum("/etc/passwd", client) - key_checksum = ozone.util.get_checksum("/tmp/%s" % key_name, client) - - assert file_checksum == key_checksum - - -def test_client_failure_isolate_one_datanode(): - """ - In this test, one of the DNs is isolated from all other nodes. - Expectation : - Write should pass. - Keys written before partition created can be read. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - epoch_time = int(time.time()) - volume_name = "%s-%s" % ("volume", epoch_time) - bucket_name = "%s-%s" % ("bucket", epoch_time) - key_name = "key-1" - - oz_client.create_volume(volume_name) - oz_client.create_bucket(volume_name, bucket_name) - oz_client.put_key("/etc/passwd", volume_name, bucket_name, key_name, "THREE") - - first_set = [om, scm, dns[0], dns[1], client] - second_set = [dns[2]] - - logger.info("Partitioning the network") - cluster.partition_network(first_set, second_set) - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert re.search("3 way commit failed", output) is not None - assert exit_code == 0, "freon run failed with output=[%s]" % output - - oz_client.get_key(volume_name, bucket_name, key_name, "/tmp/") - - file_checksum = ozone.util.get_checksum("/etc/passwd", client) - key_checksum = ozone.util.get_checksum("/tmp/%s" % key_name, cluster.client) - - assert file_checksum == key_checksum - diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py deleted file mode 100644 index 7f1d34efdee..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_datanode_isolation.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ozone.cluster import OzoneCluster -from ozone.exceptions import ContainerNotFoundError - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_isolate_single_datanode(): - """ - In this test case we will create a network partition in such a way that - one of the DN will not be able to communicate with other datanodes - but it will be able to communicate with SCM. - - Once the network partition happens, SCM detects it and closes the pipeline, - which in-turn closes the containers. - - The container on the first two DN will get CLOSED as they have quorum. - The container replica on the third node will be QUASI_CLOSED as it is not - able to connect with the other DNs and it doesn't have latest BCSID. - - Once we restore the network, the stale replica on the third DN will be - deleted and a latest replica will be copied from any one of the other - DNs. - - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - # Partition the network - first_set = [om, scm, dns[0], dns[1], client] - second_set = [om, scm, dns[2], client] - logger.info("Partitioning the network") - cluster.partition_network(first_set, second_set) - - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[0]) - - # The same set of containers should also be in datanode[2] - - for container in containers: - assert container.is_on(dns[2]) - - logger.info("Waiting for container to be CLOSED") - for container in containers: - container.wait_until_one_replica_is_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - try: - assert container.get_state(dns[2]) == 'CLOSING' or \ - container.get_state(dns[2]) == 'QUASI_CLOSED' - except ContainerNotFoundError: - assert True - - # Since the replica in datanode[2] doesn't have the latest BCSID, - # ReplicationManager will delete it and copy a closed replica. - # We will now restore the network and datanode[2] should get a - # closed replica of the container - logger.info("Restoring the network") - cluster.restore_network() - - logger.info("Waiting for the replica to be CLOSED") - for container in containers: - container.wait_until_replica_is_closed(dns[2]) - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_datanode_isolation_all(): - """ - In this test case we will create a network partition in such a way that - all DNs cannot communicate with each other. 
- All DNs will be able to communicate with SCM. - - Once the network partition happens, SCM detects it and closes the pipeline, - which in-turn tries to close the containers. - At least one of the replica should be in closed state - - Once we restore the network, there will be three closed replicas. - - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - logger.info("Partitioning the network") - first_set = [om, scm, dns[0], client] - second_set = [om, scm, dns[1], client] - third_set = [om, scm, dns[2], client] - cluster.partition_network(first_set, second_set, third_set) - - containers = cluster.get_containers_on_datanode(dns[0]) - container = containers.pop() - - logger.info("Waiting for a replica to be CLOSED") - container.wait_until_one_replica_is_closed() - - # At least one of the replica should be in closed state - assert 'CLOSED' in container.get_datanode_states() - - logger.info("Restoring the network") - cluster.restore_network() - - logger.info("Waiting for the container to be replicated") - container.wait_until_all_replicas_are_closed() - # After restoring the network all the replicas should be in CLOSED state - for state in container.get_datanode_states(): - assert state == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py deleted file mode 100644 index 6b68d61872f..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_flaky.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -import random -import pytest - -from ozone.blockade import Blockade -from ozone.cluster import OzoneCluster - - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -@pytest.mark.parametrize("flaky_node", ["datanode"]) -def test_flaky(flaky_node): - """ - In these tests, we make the network of the nodes as flaky using blockade. - One of the DNs selected randomly and network of the DN is made flaky. - - Once HA is in place, we can go ahead and make OM and SCM network flaky. 
- - """ - flaky_container_name = { - "scm": cluster.scm, - "om": cluster.om, - "datanode": random.choice(cluster.datanodes), - "all": "--all" - }[flaky_node] - - Blockade.make_flaky(flaky_container_name) - exit_code, output = cluster.get_client().run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py deleted file mode 100644 index 10220b9a712..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ozone.cluster import OzoneCluster - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_one_dn_isolate_scm_other_dn(): - """ - In this test, one of the DNs cannot communicate with SCM and other DNs. - Other DNs can communicate with each other and SCM . - Expectation : The container should eventually have two closed replicas. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - # Partition the network - first_set = [dns[0], client] - second_set = [scm, om, dns[1], dns[2], client] - cluster.partition_network(first_set, second_set) - oz_client.run_freon(1, 1, 1, 10240) - containers = cluster.get_containers_on_datanode(dns[1]) - for container in containers: - container.wait_until_one_replica_is_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'OPEN' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - cluster.restore_network() - for container in containers: - container.wait_until_all_replicas_are_closed() - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_one_dn_isolate_other_dn(): - """ - In this test, one of the DNs (first DN) cannot communicate - other DNs but can communicate with SCM. - One of the other two DNs (second DN) cannot communicate with SCM. - Expectation : - The container replica state in first DN can be either closed or - quasi-closed. - The container replica state in second DN can be either closed or open. 
- The container should eventually have at least one closed replica. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - # Partition the network - first_set = [om, scm, dns[0], client] - second_set = [om, dns[1], dns[2], client] - third_set = [scm, dns[2], client] - cluster.partition_network(first_set, second_set, third_set) - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[0]) - for container in containers: - container.wait_until_replica_is_quasi_closed(dns[0]) - - for container in containers: - assert container.get_state(dns[0]) == 'QUASI_CLOSED' - assert container.get_state(dns[1]) == 'OPEN' or \ - container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'QUASI_CLOSED' or \ - container.get_state(dns[2]) == 'CLOSED' - - cluster.restore_network() - for container in containers: - container.wait_until_all_replicas_are_closed() - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py deleted file mode 100644 index 6f01c84adc7..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_three_nodes_isolate.py +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time -import logging - -from ozone.cluster import OzoneCluster - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_three_dns_isolate_one_scm_failure(): - """ - In this test, all DNs are isolated from each other. - One of the DNs (third DN) cannot communicate with SCM. - Expectation : - The container replica state in first DN should be closed. - The container replica state in second DN should be closed. - The container replica state in third DN should be open.
- """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, scm, dns[0], client] - second_set = [om, scm, dns[1], client] - third_set = [om, dns[2], client] - - cluster.partition_network(first_set, second_set, third_set) - containers = cluster.get_containers_on_datanode(dns[0]) - for container in containers: - container.wait_until_replica_is_closed(dns[0]) - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'OPEN' - - cluster.restore_network() - for container in containers: - container.wait_until_all_replicas_are_closed() - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_three_dns_isolate_two_scm_failure(): - """ - In this test, all DNs are isolated from each other. - two DNs cannot communicate with SCM (second DN and third DN) - Expectation : - The container replica state in first DN should be quasi-closed. - The container replica state in second DN should be open. - The container replica state in third DN should be open. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, scm, dns[0], client] - second_set = [om, dns[1], client] - third_set = [om, dns[2], client] - - cluster.partition_network(first_set, second_set, third_set) - containers = cluster.get_containers_on_datanode(dns[0]) - for container in containers: - container.wait_until_replica_is_quasi_closed(dns[0]) - - for container in containers: - assert container.get_state(dns[0]) == 'QUASI_CLOSED' - assert container.get_state(dns[1]) == 'OPEN' - assert container.get_state(dns[2]) == 'OPEN' - - cluster.restore_network() - for container in containers: - container.wait_until_all_replicas_are_closed() - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_three_dns_isolate_three_scm_failure(): - """ - In this test, all DNs are isolated from each other and also cannot - communicate with SCM. - Expectation : - The container replica state in first DN should be open. - The container replica state in second DN should be open. - The container replica state in third DN should be open. 
- """ - om = cluster.om - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, dns[0], client] - second_set = [om, dns[1], client] - third_set = [om, dns[2], client] - - cluster.partition_network(first_set, second_set, third_set) - - # Wait till the datanodes are marked as stale by SCM - time.sleep(150) - - containers = cluster.get_containers_on_datanode(dns[0]) - for container in containers: - assert container.get_state(dns[0]) == 'OPEN' - assert container.get_state(dns[1]) == 'OPEN' - assert container.get_state(dns[2]) == 'OPEN' - - cluster.restore_network() - - for container in containers: - container.wait_until_all_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py deleted file mode 100644 index 20b0cc3d9d8..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_mixed_failure_two_nodes.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ozone.cluster import OzoneCluster - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_two_dns_isolate_scm_same_partition(): - """ - In this test, there are three DNs, - DN1 is on a network partition and - DN2, DN3 are on a different network partition. - DN2 and DN3 cannot communicate with SCM. - Expectation : - The container replica state in DN1 should be quasi-closed. - The container replica state in DN2 should be open. - The container replica state in DN3 should be open. 
- """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, dns[1], dns[2], client] - second_set = [om, scm, dns[0], client] - cluster.partition_network(first_set, second_set) - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[0]) - - for container in containers: - container.wait_until_one_replica_is_quasi_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'QUASI_CLOSED' - assert container.get_state(dns[1]) == 'OPEN' - assert container.get_state(dns[2]) == 'OPEN' - - cluster.restore_network() - - for container in containers: - container.wait_until_all_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_two_dns_isolate_scm_different_partition(): - """ - In this test, there are three DNs, - DN1 is on a network partition and - DN2, DN3 are on a different network partition. - DN1 and DN2 cannot communicate with SCM. - Expectation : - The container replica state in DN1 should be open. - The container replica states can be either 'closed' - in DN2 and DN3 or 'open' in DN2 and 'quasi-closed' in DN3. - """ - - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, dns[0], client] - second_set = [om, dns[1], dns[2], client] - third_set = [scm, dns[2], client] - cluster.partition_network(first_set, second_set, third_set) - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[2]) - - for container in containers: - container.wait_until_replica_is_not_open_anymore(dns[2]) - - for container in containers: - assert container.get_state(dns[0]) == 'OPEN' - assert (container.get_state(dns[1]) == 'CLOSED' and - container.get_state(dns[2]) == 'CLOSED') or \ - (container.get_state(dns[1]) == 'OPEN' and - container.get_state(dns[2]) == 'QUASI_CLOSED') - - cluster.restore_network() - - for container in containers: - container.wait_until_all_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py deleted file mode 100644 index f48ddf369b7..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/test_blockade_scm_isolation.py +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/python - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from ozone.cluster import OzoneCluster - -logger = logging.getLogger(__name__) - - -def setup_function(): - global cluster - cluster = OzoneCluster.create() - cluster.start() - - -def teardown_function(): - cluster.stop() - - -def test_scm_isolation_one_node(): - """ - In this test, one of the DNs cannot communicate with SCM. - Other DNs can communicate with SCM. - Expectation : The container should eventually have at least two closed - replicas. - """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, dns[0], dns[1], dns[2], client] - second_set = [om, scm, dns[1], dns[2], client] - cluster.partition_network(first_set, second_set) - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[1]) - - for container in containers: - container.wait_until_two_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - assert container.get_state(dns[0]) == 'OPEN' or \ - container.get_state(dns[0]) == 'CLOSED' - - cluster.restore_network() - - for container in containers: - container.wait_until_all_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output - - -def test_scm_isolation_two_node(): - """ - In this test, two DNs cannot communicate with SCM. - Expectation : The container should eventually have three closed replicas - or, two open replicas and one quasi-closed replica.
- """ - om = cluster.om - scm = cluster.scm - dns = cluster.datanodes - client = cluster.client - oz_client = cluster.get_client() - - oz_client.run_freon(1, 1, 1, 10240) - - first_set = [om, dns[0], dns[1], dns[2], client] - second_set = [om, scm, dns[1], client] - cluster.partition_network(first_set, second_set) - oz_client.run_freon(1, 1, 1, 10240) - - containers = cluster.get_containers_on_datanode(dns[1]) - - for container in containers: - container.wait_until_replica_is_not_open_anymore(dns[1]) - - for container in containers: - state = container.get_state(dns[1]) - assert state == 'QUASI_CLOSED' or state == 'CLOSED' - - if state == 'QUASI_CLOSED': - assert container.get_state(dns[0]) == 'OPEN' - assert container.get_state(dns[2]) == 'OPEN' - else: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - cluster.restore_network() - - for container in containers: - container.wait_until_all_replicas_are_closed() - - for container in containers: - assert container.get_state(dns[0]) == 'CLOSED' - assert container.get_state(dns[1]) == 'CLOSED' - assert container.get_state(dns[2]) == 'CLOSED' - - exit_code, output = oz_client.run_freon(1, 1, 1, 10240) - assert exit_code == 0, "freon run failed with output=[%s]" % output diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml deleted file mode 100644 index 7175eb885e2..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-compose.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -version: "3" -services: - datanode: - image: ${docker.image} - ports: - - 9864 - command: ["/opt/hadoop/bin/ozone","datanode"] - env_file: - - ./docker-config - om: - image: ${docker.image} - ports: - - 9874:9874 - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - env_file: - - ./docker-config - command: ["/opt/hadoop/bin/ozone","om"] - scm: - image: ${docker.image} - ports: - - 9876:9876 - env_file: - - ./docker-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - command: ["/opt/hadoop/bin/ozone","scm"] - ozone_client: - image: ${docker.image} - ports: - - 9869 - command: ["tail", "-f","/etc/passwd"] - env_file: - - ./docker-config diff --git a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config b/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config deleted file mode 100644 index 1db1a798d35..00000000000 --- a/hadoop-ozone/fault-injection-test/network-tests/src/test/compose/docker-config +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.enabled=True -OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata -OZONE-SITE.XML_ozone.handler.type=distributed -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.scm.dead.node.interval=5m -OZONE-SITE.XML_ozone.replication=1 -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -HDFS-SITE.XML_rpc.metrics.quantile.enable=true -HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout -LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender -LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN -LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR - -#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation. 
-#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm - -#LOG4J2.PROPERTIES_* are for Ozone Audit Logging -LOG4J2.PROPERTIES_monitorInterval=30 -LOG4J2.PROPERTIES_filter=read,write -LOG4J2.PROPERTIES_filter.read.type=MarkerFilter -LOG4J2.PROPERTIES_filter.read.marker=READ -LOG4J2.PROPERTIES_filter.read.onMatch=DENY -LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.type=MarkerFilter -LOG4J2.PROPERTIES_filter.write.marker=WRITE -LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL -LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL -LOG4J2.PROPERTIES_appenders=console, rolling -LOG4J2.PROPERTIES_appender.console.type=Console -LOG4J2.PROPERTIES_appender.console.name=STDOUT -LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.type=RollingFile -LOG4J2.PROPERTIES_appender.rolling.name=RollingFile -LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log -LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz -LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout -LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n -LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies -LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400 -LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy -LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB -LOG4J2.PROPERTIES_loggers=audit -LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger -LOG4J2.PROPERTIES_logger.audit.name=OMAudit -LOG4J2.PROPERTIES_logger.audit.level=INFO -LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling -LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile -LOG4J2.PROPERTIES_rootLogger.level=INFO -LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout -LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml deleted file mode 100644 index 395c5340bf1..00000000000 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-fault-injection-test - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Fault Injection Tests - Apache Hadoop Ozone Fault Injection Tests - pom - - - network-tests - - - diff --git a/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 55abc263017..00000000000 --- a/hadoop-ozone/insight/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml deleted file mode 100644 index 8287334f6a2..00000000000 --- a/hadoop-ozone/insight/pom.xml +++ /dev/null @@ -1,131 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-insight - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Insight Tool - Apache Hadoop Ozone Insight Tool - jar - - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - org.apache.hadoop - hadoop-ozone-common - - - - org.apache.hadoop - hadoop-hdds-server-scm - - - org.apache.hadoop - hadoop-ozone-client - - 
- org.apache.hadoop - hadoop-ozone-filesystem - - - org.apache.hadoop - hadoop-hdds-server-framework - - - org.apache.hadoop - hadoop-common - compile - - - org.apache.hadoop - hadoop-hdfs - compile - - - com.sun.xml.bind - jaxb-core - - - javax.xml.bind - jaxb-api - - - javax.activation - activation - - - io.dropwizard.metrics - metrics-core - 3.2.4 - - - org.openjdk.jmh - jmh-core - 1.19 - - - org.openjdk.jmh - jmh-generator-annprocess - 1.19 - - - com.github.spotbugs - spotbugs - provided - - - junit - junit - test - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-ozone-integration-test - test - test-jar - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - true - 2048 - - - - - diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java deleted file mode 100644 index a23b876b53d..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightPoint.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.insight; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.server.PrometheusMetricsSink; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.insight.LoggerSource.Level; -import org.apache.hadoop.security.UserGroupInformation; - -import com.google.protobuf.ProtocolMessageEnum; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; - -/** - * Default implementation of Insight point logic. - */ -public abstract class BaseInsightPoint implements InsightPoint { - - /** - * List the related metrics. - */ - @Override - public List getMetrics() { - return new ArrayList<>(); - } - - /** - * List the related configuration. - */ - @Override - public List getConfigurationClasses() { - return new ArrayList<>(); - } - - /** - * List the related loggers. - * - * @param verbose true if verbose logging is requested. - */ - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - return loggers; - } - - /** - * Create scm client. 
- */ - public ScmClient createScmClient(OzoneConfiguration ozoneConf) - throws IOException { - - if (!HddsUtils.getHostNameFromConfigKeys(ozoneConf, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY).isPresent()) { - - throw new IllegalArgumentException( - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY - + " should be set in ozone-site.xml"); - } - - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddress = - getScmAddressForClients(ozoneConf); - int containerSizeGB = (int) ozoneConf.getStorageSize( - OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, - StorageUnit.GB); - ContainerOperationClient - .setContainerSizeB(containerSizeGB * OzoneConsts.GB); - - RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - StorageContainerLocationProtocol client = - TracingUtil.createProxy( - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - scmAddress, UserGroupInformation.getCurrentUser(), - ozoneConf, - NetUtils.getDefaultSocketFactory(ozoneConf), - Client.getRpcTimeout(ozoneConf))), - StorageContainerLocationProtocol.class, ozoneConf); - return new ContainerOperationClient( - client, new XceiverClientManager(ozoneConf)); - } - - /** - * Convenient method to define default log levels. - */ - public Level defaultLevel(boolean verbose) { - return verbose ? Level.TRACE : Level.DEBUG; - } - - /** - * Default metrics for any message type based RPC ServerSide translators. - */ - public void addProtocolMessageMetrics(List metrics, - String prefix, - Component.Type component, - ProtocolMessageEnum[] types) { - - MetricGroupDisplay messageTypeCounters = - new MetricGroupDisplay(component, "Message type counters"); - for (ProtocolMessageEnum type : types) { - String typeName = type.toString(); - MetricDisplay metricDisplay = new MetricDisplay("Number of " + typeName, - prefix + "_" + PrometheusMetricsSink - .normalizeName(typeName)); - messageTypeCounters.addMetrics(metricDisplay); - } - metrics.add(messageTypeCounters); - } - - /** - * Rpc metrics for any hadoop rpc endpoint. 
- */ - public void addRpcMetrics(List metrics, - Component.Type component, - Map filter) { - MetricGroupDisplay connection = - new MetricGroupDisplay(component, "RPC connections"); - connection.addMetrics(new MetricDisplay("Open connections", - "rpc_num_open_connections", filter)); - connection.addMetrics( - new MetricDisplay("Dropped connections", "rpc_num_dropped_connections", - filter)); - connection.addMetrics( - new MetricDisplay("Received bytes", "rpc_received_bytes", - filter)); - connection.addMetrics( - new MetricDisplay("Sent bytes", "rpc_sent_bytes", - filter)); - metrics.add(connection); - - MetricGroupDisplay queue = new MetricGroupDisplay(component, "RPC queue"); - queue.addMetrics(new MetricDisplay("RPC average queue time", - "rpc_rpc_queue_time_avg_time", filter)); - queue.addMetrics( - new MetricDisplay("RPC call queue length", "rpc_call_queue_length", - filter)); - metrics.add(queue); - - MetricGroupDisplay performance = - new MetricGroupDisplay(component, "RPC performance"); - performance.addMetrics(new MetricDisplay("RPC processing time average", - "rpc_rpc_processing_time_avg_time", filter)); - performance.addMetrics( - new MetricDisplay("Number of slow calls", "rpc_rpc_slow_calls", - filter)); - metrics.add(performance); - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java deleted file mode 100644 index 4c3875c3ac0..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/BaseInsightSubCommand.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Optional; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.om.KeyManagerInsight; -import org.apache.hadoop.ozone.insight.om.OmProtocolInsight; -import org.apache.hadoop.ozone.insight.scm.EventQueueInsight; -import org.apache.hadoop.ozone.insight.scm.NodeManagerInsight; -import org.apache.hadoop.ozone.insight.scm.ReplicaManagerInsight; -import org.apache.hadoop.ozone.insight.scm.ScmProtocolBlockLocationInsight; -import org.apache.hadoop.ozone.insight.scm.ScmProtocolContainerLocationInsight; -import org.apache.hadoop.ozone.insight.scm.ScmProtocolSecurityInsight; -import org.apache.hadoop.ozone.om.OMConfigKeys; - -import picocli.CommandLine; - -/** - * Parent class for all the insight subcommands. 
- */ -public class BaseInsightSubCommand { - - @CommandLine.ParentCommand - private Insight insightCommand; - - public InsightPoint getInsight(OzoneConfiguration configuration, - String selection) { - Map insights = createInsightPoints(configuration); - - if (!insights.containsKey(selection)) { - throw new RuntimeException(String - .format("No such component; %s. Available components: %s", selection, - insights.keySet())); - } - return insights.get(selection); - } - - /** - * Utility to get the host base on a component. - */ - public String getHost(OzoneConfiguration conf, Component component) { - if (component.getHostname() != null) { - return "http://" + component.getHostname() + ":" + component.getPort(); - } else if (component.getName() == Type.SCM) { - Optional scmHost = - HddsUtils.getHostNameFromConfigKeys(conf, - ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, - ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - - return "http://" + scmHost.get() + ":9876"; - } else if (component.getName() == Type.OM) { - Optional omHost = - HddsUtils.getHostNameFromConfigKeys(conf, - OMConfigKeys.OZONE_OM_ADDRESS_KEY); - return "http://" + omHost.get() + ":9874"; - } else { - throw new IllegalArgumentException( - "Component type is not supported: " + component.getName()); - } - - } - - public Map createInsightPoints( - OzoneConfiguration configuration) { - Map insights = new LinkedHashMap<>(); - insights.put("scm.node-manager", new NodeManagerInsight()); - insights.put("scm.replica-manager", new ReplicaManagerInsight()); - insights.put("scm.event-queue", new EventQueueInsight()); - insights.put("scm.protocol.block-location", - new ScmProtocolBlockLocationInsight()); - insights.put("scm.protocol.container-location", - new ScmProtocolContainerLocationInsight()); - insights.put("scm.protocol.security", - new ScmProtocolSecurityInsight()); - insights.put("om.key-manager", new KeyManagerInsight()); - insights.put("om.protocol.client", new OmProtocolInsight()); - - return insights; - } - - public Insight getInsightCommand() { - return insightCommand; - } -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java deleted file mode 100644 index 261ae49fb8b..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Component.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.util.Objects; - -/** - * Identifier an ozone component. - */ -public class Component { - - /** - * The type of the component (eg. scm, s3g...) - */ - private Type name; - - /** - * Unique identifier of the instance (uuid or index). 
Can be null for - * non-HA server component. - */ - private String id; - - /** - * Hostname of the component. Optional, may help to find the right host - * name. - */ - private String hostname; - - /** - * HTTP service port. Optional. - */ - private int port; - - public Component(Type name) { - this.name = name; - } - - public Component(Type name, String id) { - this.name = name; - this.id = id; - } - - public Component(Type name, String id, String hostname) { - this.name = name; - this.id = id; - this.hostname = hostname; - } - - public Component(Type name, String id, String hostname, int port) { - this.name = name; - this.id = id; - this.hostname = hostname; - this.port = port; - } - - public Type getName() { - return name; - } - - public String getId() { - return id; - } - - public String getHostname() { - return hostname; - } - - public int getPort() { - return port; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - Component that = (Component) o; - return Objects.equals(name, that.name) && - Objects.equals(id, that.id); - } - - public String prefix() { - return name + (id != null && id.length() > 0 ? "-" + id : ""); - } - - @Override - public int hashCode() { - return Objects.hash(name, id); - } - - /** - * Ozone component types. - */ - public enum Type { - SCM, OM, DATANODE, S3G, RECON; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java deleted file mode 100644 index e32ecd7cde9..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.Config; -import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.insight.Component.Type; - -import picocli.CommandLine; - -import java.lang.reflect.Method; -import java.util.concurrent.Callable; - -/** - * Subcommand to show configuration values/documentation. 
- */ -@CommandLine.Command( - name = "config", - description = "Show configuration for a specific subcomponents", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ConfigurationSubCommand extends BaseInsightSubCommand - implements Callable { - - @CommandLine.Parameters(description = "Name of the insight point (use list " - + "to check the available options)") - private String insightName; - - @Override - public Void call() throws Exception { - InsightPoint insight = - getInsight(getInsightCommand().createOzoneConfiguration(), insightName); - System.out.println( - "Configuration for `" + insightName + "` (" + insight.getDescription() - + ")"); - System.out.println(); - for (Class clazz : insight.getConfigurationClasses()) { - showConfig(clazz); - - } - return null; - } - - private void showConfig(Class clazz) { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.addResource(getHost(conf, new Component(Type.SCM)) + "/conf"); - ConfigGroup configGroup = - (ConfigGroup) clazz.getAnnotation(ConfigGroup.class); - if (configGroup == null) { - return; - } - - String prefix = configGroup.prefix(); - - for (Method method : clazz.getMethods()) { - if (method.isAnnotationPresent(Config.class)) { - Config config = method.getAnnotation(Config.class); - String key = prefix + "." + config.key(); - System.out.println(">>> " + key); - System.out.println(" default: " + config.defaultValue()); - System.out.println(" current: " + conf.get(key)); - System.out.println(); - System.out.println(config.description()); - System.out.println(); - System.out.println(); - - } - } - - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java deleted file mode 100644 index 690783ee411..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/Insight.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; - -import picocli.CommandLine; - -/** - * Command line utility to check logs/metrics of internal ozone components. 
- */ -@CommandLine.Command(name = "ozone insight", - hidden = true, description = "Show debug information about a selected " - + "Ozone component", - versionProvider = HddsVersionProvider.class, - subcommands = {ListSubCommand.class, LogSubcommand.class, - MetricsSubCommand.class, ConfigurationSubCommand.class}, - mixinStandardHelpOptions = true) -public class Insight extends GenericCli { - - public static void main(String[] args) throws Exception { - new Insight().run(args); - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java deleted file mode 100644 index 1284cfa9584..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/InsightPoint.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.util.List; - -/** - * Definition of a specific insight points. - */ -public interface InsightPoint { - - /** - * Human readdable description. - */ - String getDescription(); - - /** - * List of the related loggers. - */ - List getRelatedLoggers(boolean verbose); - - /** - * List of the related metrics. - */ - List getMetrics(); - - /** - * List of the configuration classes. - */ - List getConfigurationClasses(); - - - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java deleted file mode 100644 index 8f913983647..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ListSubCommand.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.insight; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import picocli.CommandLine; - -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.Callable; - -/** - * Subcommand to list the available insight points. - */ -@CommandLine.Command( - name = "list", - description = "Show available insight points.", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class ListSubCommand extends BaseInsightSubCommand - implements Callable { - - @CommandLine.Parameters(defaultValue = "") - private String insightPrefix; - - @Override - public Void call() throws Exception { - - System.out.println("Available insight points:\n\n"); - - Map insightPoints = - createInsightPoints(new OzoneConfiguration()); - for (Entry entry : insightPoints.entrySet()) { - if (insightPrefix == null || entry.getKey().startsWith(insightPrefix)) { - System.out.println(String.format(" %-33s %s", entry.getKey(), - entry.getValue().getDescription())); - } - } - return null; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java deleted file mode 100644 index 2e8787f2b26..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.insight.LoggerSource.Level; - -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.HttpClientBuilder; -import picocli.CommandLine; - -/** - * Subcommand to display log. 
- */ -@CommandLine.Command( - name = "log", - aliases = "logs", - description = "Show log4j events related to the insight point", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class LogSubcommand extends BaseInsightSubCommand - implements Callable { - - @CommandLine.Parameters(description = "Name of the insight point (use list " - + "to check the available options)") - private String insightName; - - @CommandLine.Option(names = "-v", description = "Enable verbose mode to " - + "show more information / detailed message") - private boolean verbose; - - @Override - public Void call() throws Exception { - OzoneConfiguration conf = - getInsightCommand().createOzoneConfiguration(); - InsightPoint insight = - getInsight(conf, insightName); - - List loggers = insight.getRelatedLoggers(verbose); - - for (LoggerSource logger : loggers) { - setLogLevel(conf, logger.getLoggerName(), logger.getComponent(), - logger.getLevel()); - } - - Set sources = loggers.stream().map(LoggerSource::getComponent) - .collect(Collectors.toSet()); - try { - streamLog(conf, sources, loggers); - } finally { - for (LoggerSource logger : loggers) { - setLogLevel(conf, logger.getLoggerName(), logger.getComponent(), - Level.INFO); - } - } - return null; - } - - private void streamLog(OzoneConfiguration conf, Set sources, - List relatedLoggers) { - List loggers = new ArrayList<>(); - for (Component sourceComponent : sources) { - loggers.add(new Thread( - () -> streamLog(conf, sourceComponent, relatedLoggers))); - } - for (Thread thread : loggers) { - thread.start(); - } - for (Thread thread : loggers) { - try { - thread.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - } - - private void streamLog(OzoneConfiguration conf, Component logComponent, - List loggers) { - HttpClient client = HttpClientBuilder.create().build(); - - HttpGet get = new HttpGet(getHost(conf, logComponent) + "/logstream"); - try { - HttpResponse execute = client.execute(get); - try (BufferedReader bufferedReader = new BufferedReader( - new InputStreamReader(execute.getEntity().getContent(), - StandardCharsets.UTF_8))) { - bufferedReader.lines() - .filter(line -> { - for (LoggerSource logger : loggers) { - if (line.contains(logger.getLoggerName())) { - return true; - } - } - return false; - }) - .map(this::processLogLine) - .map(l -> "[" + logComponent.prefix() + "] " + l) - .forEach(System.out::println); - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - public String processLogLine(String line) { - Pattern p = Pattern.compile("(.*)"); - Matcher m = p.matcher(line); - StringBuffer sb = new StringBuffer(); - while (m.find()) { - m.appendReplacement(sb, "\n" + m.group(1).replaceAll("\\\\n", "\n")); - } - m.appendTail(sb); - return sb.toString(); - } - - private void setLogLevel(OzoneConfiguration conf, String name, - Component component, LoggerSource.Level level) { - HttpClient client = HttpClientBuilder.create().build(); - - String request = String - .format("/logLevel?log=%s&level=%s", name, - level); - String hostName = getHost(conf, component); - HttpGet get = new HttpGet(hostName + request); - try { - HttpResponse execute = client.execute(get); - if (execute.getStatusLine().getStatusCode() != 200) { - throw new RuntimeException( - "Can't set the log level: " + hostName + " -> HTTP " + execute - .getStatusLine().getStatusCode()); - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } -} diff --git 
a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java deleted file mode 100644 index 180b3e818b5..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LoggerSource.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import org.apache.hadoop.ozone.insight.Component.Type; - -/** - * Definition of a log source. - */ -public class LoggerSource { - - /** - * Id of the component where the log is generated. - */ - private Component component; - - /** - * Log4j/slf4j logger name. - */ - private String loggerName; - - /** - * Log level. - */ - private Level level; - - public LoggerSource(Component component, String loggerName, Level level) { - this.component = component; - this.loggerName = loggerName; - this.level = level; - } - - public LoggerSource(Type componentType, Class loggerClass, - Level level) { - this(new Component(componentType), loggerClass.getCanonicalName(), level); - } - - public Component getComponent() { - return component; - } - - public String getLoggerName() { - return loggerName; - } - - public Level getLevel() { - return level; - } - - /** - * Log level definition. - */ - public enum Level { - TRACE, DEBUG, INFO, WARN, ERROR - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java deleted file mode 100644 index 395c14cca69..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricDisplay.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.util.HashMap; -import java.util.Map; - -/** - * Definition of one displayable hadoop metrics. 
- */ -public class MetricDisplay { - - /** - * Prometheus metrics name. - */ - private String id; - - /** - * Human readable definition of the metrics. - */ - private String description; - - /** - * Prometheus metrics tag to filter out the right metrics. - */ - private Map filter; - - public MetricDisplay(String description, String id) { - this(description, id, new HashMap<>()); - } - - public MetricDisplay(String description, String id, - Map filter) { - this.id = id; - this.description = description; - this.filter = filter; - } - - public String getId() { - return id; - } - - public String getDescription() { - return description; - } - - public Map getFilter() { - return filter; - } - - public boolean checkLine(String line) { - return false; - } -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java deleted file mode 100644 index 08fd60c48bb..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricGroupDisplay.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.ozone.insight.Component.Type; - -/** - * Definition of a group of metrics which can be displayed. - */ -public class MetricGroupDisplay { - - /** - * List of the included metrics. - */ - private List metrics = new ArrayList<>(); - - /** - * Name of the component which includes the metrics (scm, om,...). - */ - private Component component; - - /** - * Human readable description. - */ - private String description; - - public MetricGroupDisplay(Component component, String description) { - this.component = component; - this.description = description; - } - - public MetricGroupDisplay(Type componentType, String metricName) { - this(new Component(componentType), metricName); - } - - public List getMetrics() { - return metrics; - } - - public void addMetrics(MetricDisplay item) { - this.metrics.add(item); - } - - public String getDescription() { - return description; - } - - public Component getComponent() { - return component; - } -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java deleted file mode 100644 index d320c82b187..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.HttpClientBuilder; -import picocli.CommandLine; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.*; -import java.util.Map.Entry; -import java.util.concurrent.Callable; -import java.util.stream.Collectors; - -/** - * Command line interface to show metrics for a specific component. - */ -@CommandLine.Command( - name = "metrics", - aliases = "metric", - description = "Show available metrics.", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class MetricsSubCommand extends BaseInsightSubCommand - implements Callable { - - @CommandLine.Parameters(description = "Name of the insight point (use list " - + "to check the available options)") - private String insightName; - - @Override - public Void call() throws Exception { - OzoneConfiguration conf = - getInsightCommand().createOzoneConfiguration(); - InsightPoint insight = - getInsight(conf, insightName); - Set sources = - insight.getMetrics().stream().map(MetricGroupDisplay::getComponent) - .collect(Collectors.toSet()); - Map> metrics = getMetrics(conf, sources); - System.out.println( - "Metrics for `" + insightName + "` (" + insight.getDescription() + ")"); - System.out.println(); - for (MetricGroupDisplay group : insight.getMetrics()) { - System.out.println(group.getDescription()); - System.out.println(); - for (MetricDisplay display : group.getMetrics()) { - System.out.println(" " + display.getDescription() + ": " + selectValue( - metrics.get(group.getComponent()), display)); - } - System.out.println(); - System.out.println(); - - } - return null; - } - - private Map> getMetrics(OzoneConfiguration conf, - Collection sources) { - Map> result = new HashMap<>(); - for (Component source : sources) { - result.put(source, getMetrics(conf, source)); - } - return result; - } - - private String selectValue(List metrics, - MetricDisplay metricDisplay) { - for (String line : metrics) { - if (line.startsWith(metricDisplay.getId())) { - boolean filtered = false; - for (Entry filter : metricDisplay.getFilter() - .entrySet()) { - if (!line - .contains(filter.getKey() + "=\"" + filter.getValue() + "\"")) { - filtered = true; - } - } - if (!filtered) { - return line.split(" ")[1]; - } - } - } - return "???"; - } - - private List getMetrics(OzoneConfiguration conf, - Component component) { - HttpClient client = HttpClientBuilder.create().build(); - HttpGet get = new 
HttpGet(getHost(conf, component) + "/prom"); - try { - HttpResponse execute = client.execute(get); - if (execute.getStatusLine().getStatusCode() != 200) { - throw new RuntimeException( - "Can't read prometheus metrics endpoint" + execute.getStatusLine() - .getStatusCode()); - } - try (BufferedReader bufferedReader = new BufferedReader( - new InputStreamReader(execute.getEntity().getContent(), - StandardCharsets.UTF_8))) { - return bufferedReader.lines().collect(Collectors.toList()); - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java deleted file mode 100644 index b87955e8aaf..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/RatisInsight.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.datanode; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.InsightPoint; -import org.apache.hadoop.ozone.insight.LoggerSource; - -/** - * Insight definition for datanode/pipline metrics. 
- */ -public class RatisInsight extends BaseInsightPoint implements InsightPoint { - - private OzoneConfiguration conf; - - public RatisInsight(OzoneConfiguration conf) { - this.conf = conf; - } - - @Override - public List getRelatedLoggers(boolean verbose) { - List result = new ArrayList<>(); - try { - ScmClient scmClient = createScmClient(conf); - Pipeline pipeline = scmClient.listPipelines() - .stream() - .filter(d -> d.getNodes().size() > 1) - .findFirst() - .get(); - for (DatanodeDetails datanode : pipeline.getNodes()) { - Component dn = - new Component(Type.DATANODE, datanode.getUuid().toString(), - datanode.getHostName(), 9882); - result - .add(new LoggerSource(dn, "org.apache.ratis.server.impl", - defaultLevel(verbose))); - } - } catch (IOException e) { - throw new RuntimeException("Can't enumerate required logs", e); - } - - return result; - } - - @Override - public String getDescription() { - return "More information about one ratis datanode ring."; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java deleted file mode 100644 index 97dd4954789..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.insight.datanode; - -/** - * Insight points for the ozone datanodes. - */ \ No newline at end of file diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java deleted file mode 100644 index 515cf388640..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/KeyManagerInsight.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.insight.om; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricDisplay; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; -import org.apache.hadoop.ozone.om.KeyManagerImpl; - -/** - * Insight implementation for the key management related operations. - */ -public class KeyManagerInsight extends BaseInsightPoint { - - @Override - public List getMetrics() { - List display = new ArrayList<>(); - - MetricGroupDisplay state = - new MetricGroupDisplay(Type.OM, "Key related metrics"); - state - .addMetrics(new MetricDisplay("Number of keys", "om_metrics_num_keys")); - state.addMetrics(new MetricDisplay("Number of key operations", - "om_metrics_num_key_ops")); - - display.add(state); - - MetricGroupDisplay key = - new MetricGroupDisplay(Type.OM, "Key operation stats"); - for (String operation : new String[] {"allocate", "commit", "lookup", - "list", "delete"}) { - key.addMetrics(new MetricDisplay( - "Number of key " + operation + "s (failure + success)", - "om_metrics_num_key_" + operation)); - key.addMetrics( - new MetricDisplay("Number of failed key " + operation + "s", - "om_metrics_num_key_" + operation + "_fails")); - } - display.add(key); - - return display; - } - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.OM, KeyManagerImpl.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public String getDescription() { - return "OM Key Manager"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java deleted file mode 100644 index 502ba60dc86..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/OmProtocolInsight.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.om; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; - -/** - * Insight definition for the OM RPC server. 
- */ -public class OmProtocolInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.OM, - OzoneManagerProtocolServerSideTranslatorPB.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public List getMetrics() { - List metrics = new ArrayList<>(); - - Map filter = new HashMap<>(); - filter.put("servername", "OzoneManagerService"); - - addRpcMetrics(metrics, Type.OM, filter); - - addProtocolMessageMetrics(metrics, "om_client_protocol", Type.OM, - OzoneManagerProtocolProtos.Type.values()); - - return metrics; - } - - @Override - public String getDescription() { - return "Ozone Manager RPC endpoint"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java deleted file mode 100644 index c0dfc4d00e8..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/om/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.insight.om; - -/** - * Insight points for the Ozone Manager. - */ \ No newline at end of file diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java deleted file mode 100644 index a77524d7e18..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.insight; - -/** - * Framework to collect log/metrics and configuration for specified ozone - * components. - */ \ No newline at end of file diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java deleted file mode 100644 index 5a88cd29942..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/EventQueueInsight.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; - -/** - * Insight definition to check internal events. - */ -public class EventQueueInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers - .add(new LoggerSource(Type.SCM, EventQueue.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public String getDescription() { - return "Information about the internal async event delivery"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java deleted file mode 100644 index c4fb0258d8a..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/NodeManagerInsight.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricDisplay; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; - -/** - * Insight definition to check node manager / node report events. - */ -public class NodeManagerInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.SCM, SCMNodeManager.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public List getMetrics() { - List display = new ArrayList<>(); - - MetricGroupDisplay nodes = - new MetricGroupDisplay(Type.SCM, "Node counters"); - - nodes.addMetrics( - new MetricDisplay("Healthy Nodes", "scm_node_manager_healthy_nodes")); - nodes.addMetrics( - new MetricDisplay("Dead Nodes", "scm_node_manager_dead_nodes")); - - display.add(nodes); - - MetricGroupDisplay hb = - new MetricGroupDisplay(Type.SCM, "HB processing stats"); - hb.addMetrics( - new MetricDisplay("HB processed", "scm_node_manager_num_hb_processed")); - hb.addMetrics(new MetricDisplay("HB processing failed", - "scm_node_manager_num_hb_processing_failed")); - display.add(hb); - - return display; - } - - @Override - public String getDescription() { - return "SCM Datanode management related information."; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java deleted file mode 100644 index ec87f3f7727..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ReplicaManagerInsight.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; - -/** - * Insight definition to check the replication manager internal state. - */ -public class ReplicaManagerInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add(new LoggerSource(Type.SCM, ReplicationManager.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public List getMetrics() { - List display = new ArrayList<>(); - return display; - } - - @Override - public List getConfigurationClasses() { - List result = new ArrayList<>(); - result.add(ReplicationManager.ReplicationManagerConfiguration.class); - return result; - } - - @Override - public String getDescription() { - return "SCM closed container replication manager"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java deleted file mode 100644 index f67f64194b3..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolBlockLocationInsight.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.server.SCMBlockProtocolServer; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; - -/** - * Insight metric to check the SCM block location protocol behaviour. 
- */ -public class ScmProtocolBlockLocationInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.SCM, - ScmBlockLocationProtocolServerSideTranslatorPB.class, - defaultLevel(verbose))); - loggers.add(new LoggerSource(Type.SCM, - SCMBlockProtocolServer.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public List getMetrics() { - List metrics = new ArrayList<>(); - - Map filter = new HashMap<>(); - filter.put("servername", "StorageContainerLocationProtocolService"); - - addRpcMetrics(metrics, Type.SCM, filter); - - addProtocolMessageMetrics(metrics, "scm_block_location_protocol", - Type.SCM, ScmBlockLocationProtocolProtos.Type.values()); - - return metrics; - } - - @Override - public String getDescription() { - return "SCM Block location protocol endpoint"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java deleted file mode 100644 index d6db589ed82..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolContainerLocationInsight.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocolServerSideTranslatorPB; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; - -/** - * Insight metric to check the SCM block location protocol behaviour. 
- */ -public class ScmProtocolContainerLocationInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.SCM, - StorageContainerLocationProtocolServerSideTranslatorPB.class, - defaultLevel(verbose))); - new LoggerSource(Type.SCM, - StorageContainerLocationProtocolService.class, - defaultLevel(verbose)); - return loggers; - } - - @Override - public List getMetrics() { - List metrics = new ArrayList<>(); - - Map filter = new HashMap<>(); - filter.put("servername", "StorageContainerLocationProtocolService"); - - addRpcMetrics(metrics, Type.SCM, filter); - - addProtocolMessageMetrics(metrics, "scm_container_location_protocol", - Type.SCM, StorageContainerLocationProtocolProtos.Type.values()); - - return metrics; - } - - @Override - public String getDescription() { - return "SCM Container location protocol endpoint"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java deleted file mode 100644 index 289af89a7c5..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolDatanodeInsight.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; - -/** - * Insight metric to check the SCM datanode protocol behaviour. 
- */ -public class ScmProtocolDatanodeInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.SCM, - SCMDatanodeProtocolServer.class, - defaultLevel(verbose))); - loggers.add( - new LoggerSource(Type.SCM, - StorageContainerDatanodeProtocolServerSideTranslatorPB.class, - defaultLevel(verbose))); - return loggers; - } - - @Override - public List getMetrics() { - List metrics = new ArrayList<>(); - - Map filter = new HashMap<>(); - filter.put("servername", "StorageContainerDatanodeProtocolService"); - - addRpcMetrics(metrics, Type.SCM, filter); - - addProtocolMessageMetrics(metrics, "scm_datanode_protocol", - Type.SCM, StorageContainerDatanodeProtocolProtos.Type.values()); - - return metrics; - } - - @Override - public String getDescription() { - return "SCM Datanode protocol endpoint"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java deleted file mode 100644 index 734da34f8bb..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/ScmProtocolSecurityInsight.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.insight.scm; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; -import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdds.scm.server.SCMSecurityProtocolServer; -import org.apache.hadoop.ozone.insight.BaseInsightPoint; -import org.apache.hadoop.ozone.insight.Component.Type; -import org.apache.hadoop.ozone.insight.LoggerSource; -import org.apache.hadoop.ozone.insight.MetricGroupDisplay; - -/** - * Insight metric to check the SCM block location protocol behaviour. 
- */ -public class ScmProtocolSecurityInsight extends BaseInsightPoint { - - @Override - public List getRelatedLoggers(boolean verbose) { - List loggers = new ArrayList<>(); - loggers.add( - new LoggerSource(Type.SCM, - SCMSecurityProtocolServerSideTranslatorPB.class, - defaultLevel(verbose))); - new LoggerSource(Type.SCM, - SCMSecurityProtocolServer.class, - defaultLevel(verbose)); - return loggers; - } - - @Override - public List getMetrics() { - List metrics = new ArrayList<>(); - - Map filter = new HashMap<>(); - filter.put("servername", "SCMSecurityProtocolService"); - - addRpcMetrics(metrics, Type.SCM, filter); - - addProtocolMessageMetrics(metrics, "scm_security_protocol", - Type.SCM, SCMSecurityProtocolProtos.Type.values()); - - return metrics; - } - - @Override - public String getDescription() { - return "SCM Block location protocol endpoint"; - } - -} diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java deleted file mode 100644 index 0966fbda401..00000000000 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/scm/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.insight.scm; - -/** - * Insight points for the Storage Container Manager. - */ \ No newline at end of file diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java deleted file mode 100644 index 67c2f70e704..00000000000 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/LogSubcommandTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.insight; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Testing utility methods of the log subcommand test. - */ -public class LogSubcommandTest { - - @Test - public void filterLog() { - LogSubcommand logSubcommand = new LogSubcommand(); - String result = logSubcommand.processLogLine( - "2019-08-04 12:27:08,648 [TRACE|org.apache.hadoop.hdds.scm.node" - + ".SCMNodeManager|SCMNodeManager] HB is received from " - + "[datanode=localhost]: storageReport {\\n storageUuid: " - + "\"DS-29204db6-a615-4106-9dd4-ce294c2f4cf6\"\\n " - + "storageLocation: \"/tmp/hadoop-elek/dfs/data\"\\n capacity: " - + "8348086272\\n scmUsed: 4096\\n remaining: 8246956032n " - + "storageType: DISK\\n failed: falsen}\\n\n"); - Assert.assertEquals(3, result.split("\n").length); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml deleted file mode 100644 index 5593f28e9d4..00000000000 --- a/hadoop-ozone/integration-test/pom.xml +++ /dev/null @@ -1,136 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-integration-test - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Integration Tests - Apache Hadoop Ozone Integration Tests - jar - - - - - org.apache.hadoop - hadoop-ozone-common - - - org.apache.hadoop - hadoop-hdds-server-scm - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - org.apache.hadoop - hadoop-minikdc - test - - - - org.apache.hadoop - hadoop-ozone-s3gateway - - - org.apache.hadoop - hadoop-ozone-csi - - - org.apache.hadoop - hadoop-ozone-recon - - - org.apache.hadoop - hadoop-ozone-client - - - commons-lang - commons-lang - test - - - org.apache.hadoop - hadoop-ozone-ozone-manager - test - test-jar - - - junit - junit - test - - - org.openjdk.jmh - jmh-core - test - - - org.openjdk.jmh - jmh-generator-annprocess - test - - - org.mockito - mockito-all - test - - - org.apache.hadoop - hadoop-kms - test - - - org.apache.hadoop - hadoop-kms - test-jar - test - - - org.apache.hadoop - hadoop-hdds-server-scm - test - test-jar - - - org.apache.hadoop - hadoop-hdds-container-service - test - test-jar - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-hdfs - test - test-jar - - - - - diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh deleted file mode 100755 index 002fe9448a6..00000000000 --- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -date=$(date +"%Y-%m-%d--%H-%M-%S-%Z") -fileformat=".MiniOzoneChaosCluster.log" -heapformat=".dump" -current="/tmp/" -filename="${current}${date}${fileformat}" -heapdumpfile="${current}${date}${heapformat}" - -export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${heapdumpfile} -Dorg.apache.ratis.thirdparty.io.netty.allocator.useCacheForAllThreads=false" - -echo "logging to ${filename}" -echo "heapdump to ${heapdumpfile}" - -echo "Starting MiniOzoneChaosCluster" -mvn clean install -DskipTests > "${filename}" 2>&1 -mvn exec:java \ - -Dexec.mainClass="org.apache.hadoop.ozone.TestMiniChaosOzoneCluster" \ - -Dexec.classpathScope=test \ - -Dexec.args="$*" >> "${filename}" 2>&1 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java deleted file mode 100644 index e4f1a37fd62..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ /dev/null @@ -1,470 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.container; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.UUID; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeoutException; - -/** - * Tests for ContainerStateManager. - */ -public class TestContainerStateManagerIntegration { - - private static final Logger LOG = - LoggerFactory.getLogger(TestContainerStateManagerIntegration.class); - - private OzoneConfiguration conf; - private MiniOzoneCluster cluster; - private XceiverClientManager xceiverClientManager; - private StorageContainerManager scm; - private ContainerManager containerManager; - private ContainerStateManager containerStateManager; - private String containerOwner = "OZONE"; - private int numContainerPerOwnerInPipeline; - - - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - numContainerPerOwnerInPipeline = - conf.getInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - cluster.waitTobeOutOfSafeMode(); - xceiverClientManager = new XceiverClientManager(conf); - scm = cluster.getStorageContainerManager(); - containerManager = scm.getContainerManager(); - containerStateManager = ((SCMContainerManager)containerManager) - .getContainerStateManager(); - } - - @After - public void cleanUp() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testAllocateContainer() throws IOException { - // Allocate a container and verify the container info - ContainerWithPipeline container1 = scm.getClientProtocolServer() - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), - info.getContainerID()); - 
Assert.assertEquals(containerOwner, info.getOwner()); - Assert.assertEquals(xceiverClientManager.getType(), - info.getReplicationType()); - Assert.assertEquals(xceiverClientManager.getFactor(), - info.getReplicationFactor()); - Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, info.getState()); - - // Check there are two containers in ALLOCATED state after allocation - ContainerWithPipeline container2 = scm.getClientProtocolServer() - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - int numContainers = containerStateManager - .getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.OPEN).size(); - Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), - container2.getContainerInfo().getContainerID()); - Assert.assertEquals(3, numContainers); - } - - @Test - public void testAllocateContainerWithDifferentOwner() throws IOException { - - // Allocate a container and verify the container info - ContainerWithPipeline container1 = scm.getClientProtocolServer() - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertNotNull(info); - - String newContainerOwner = "OZONE_NEW"; - ContainerWithPipeline container2 = scm.getClientProtocolServer() - .allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), newContainerOwner); - ContainerInfo info2 = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, newContainerOwner, - container1.getPipeline()); - Assert.assertNotNull(info2); - - Assert.assertNotEquals(info.containerID(), info2.containerID()); - } - - @Test - public void testContainerStateManagerRestart() throws IOException, - TimeoutException, InterruptedException, AuthenticationException { - // Allocate 5 containers in ALLOCATED state and 5 in CREATING state - - for (int i = 0; i < 10; i++) { - - ContainerWithPipeline container = scm.getClientProtocolServer() - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - if (i >= 5) { - scm.getContainerManager().updateContainerState(container - .getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.FINALIZE); - } - } - - cluster.restartStorageContainerManager(true); - - List result = cluster.getStorageContainerManager() - .getContainerManager().listContainer(null, 100); - - long matchCount = result.stream() - .filter(info -> - info.getOwner().equals(containerOwner)) - .filter(info -> - info.getReplicationType() == xceiverClientManager.getType()) - .filter(info -> - info.getReplicationFactor() == xceiverClientManager.getFactor()) - .filter(info -> - info.getState() == HddsProtos.LifeCycleState.OPEN) - .count(); - Assert.assertEquals(5, matchCount); - matchCount = result.stream() - .filter(info -> - info.getOwner().equals(containerOwner)) - .filter(info -> - info.getReplicationType() == xceiverClientManager.getType()) - .filter(info -> - info.getReplicationFactor() == xceiverClientManager.getFactor()) - .filter(info -> - info.getState() == HddsProtos.LifeCycleState.CLOSING) - .count(); - Assert.assertEquals(5, matchCount); - } - - @Test - public void testGetMatchingContainer() throws IOException { - long cid; - ContainerWithPipeline container1 = scm.getClientProtocolServer(). 
- allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - cid = container1.getContainerInfo().getContainerID(); - - // each getMatchingContainer call allocates a container in the - // pipeline till the pipeline has numContainerPerOwnerInPipeline number of - // containers. - for (int i = 1; i < numContainerPerOwnerInPipeline; i++) { - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertTrue(info.getContainerID() > cid); - cid = info.getContainerID(); - } - - // At this point there are already three containers in the pipeline. - // next container should be the same as first container - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertEquals(container1.getContainerInfo().getContainerID(), - info.getContainerID()); - } - - @Test - public void testGetMatchingContainerWithExcludedList() throws IOException { - long cid; - ContainerWithPipeline container1 = scm.getClientProtocolServer(). - allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - cid = container1.getContainerInfo().getContainerID(); - - // each getMatchingContainer call allocates a container in the - // pipeline till the pipeline has numContainerPerOwnerInPipeline number of - // containers. - for (int i = 1; i < numContainerPerOwnerInPipeline; i++) { - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertTrue(info.getContainerID() > cid); - cid = info.getContainerID(); - } - - // At this point there are already three containers in the pipeline. - // next container should be the same as first container - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline(), Collections.singletonList(new - ContainerID(1))); - Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), - info.getContainerID()); - } - - - @Test - public void testCreateContainerLogicWithExcludedList() throws IOException { - long cid; - ContainerWithPipeline container1 = scm.getClientProtocolServer(). - allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - cid = container1.getContainerInfo().getContainerID(); - - for (int i = 1; i < numContainerPerOwnerInPipeline; i++) { - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - Assert.assertTrue(info.getContainerID() > cid); - cid = info.getContainerID(); - } - - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline(), Arrays.asList(new ContainerID(1), new - ContainerID(2), new ContainerID(3))); - Assert.assertEquals(info.getContainerID(), 4); - } - - @Test - @Ignore("TODO:HDDS-1159") - public void testGetMatchingContainerMultipleThreads() - throws IOException, InterruptedException { - ContainerWithPipeline container1 = scm.getClientProtocolServer(). 
- allocateContainer(xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - Map container2MatchedCount = new ConcurrentHashMap<>(); - - // allocate blocks using multiple threads - int numBlockAllocates = 100000; - for (int i = 0; i < numBlockAllocates; i++) { - CompletableFuture.supplyAsync(() -> { - ContainerInfo info = containerManager - .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, - container1.getPipeline()); - container2MatchedCount - .compute(info.getContainerID(), (k, v) -> v == null ? 1L : v + 1); - return null; - }); - } - - // make sure pipeline has has numContainerPerOwnerInPipeline number of - // containers. - Assert.assertEquals(scm.getPipelineManager() - .getNumberOfContainers(container1.getPipeline().getId()), - numContainerPerOwnerInPipeline); - Thread.sleep(5000); - long threshold = 2000; - // check the way the block allocations are distributed in the different - // containers. - for (Long matchedCount : container2MatchedCount.values()) { - // TODO: #CLUTIL Look at the division of block allocations in different - // containers. - LOG.error("Total allocated block = " + matchedCount); - Assert.assertTrue(matchedCount <= - numBlockAllocates / container2MatchedCount.size() + threshold - && matchedCount >= - numBlockAllocates / container2MatchedCount.size() - threshold); - } - } - - @Test - public void testUpdateContainerState() throws IOException { - NavigableSet containerList = containerStateManager - .getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.OPEN); - int containers = containerList == null ? 0 : containerList.size(); - Assert.assertEquals(0, containers); - - // Allocate container1 and update its state from - // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED - ContainerWithPipeline container1 = scm.getClientProtocolServer() - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.OPEN).size(); - Assert.assertEquals(1, containers); - - containerManager - .updateContainerState(container1.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.FINALIZE); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CLOSING).size(); - Assert.assertEquals(1, containers); - - containerManager - .updateContainerState(container1.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.CLOSE); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CLOSED).size(); - Assert.assertEquals(1, containers); - - containerManager - .updateContainerState(container1.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.DELETE); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.DELETING).size(); - Assert.assertEquals(1, containers); - - containerManager - .updateContainerState(container1.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.CLEANUP); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), 
xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.DELETED).size(); - Assert.assertEquals(1, containers); - - // Allocate container1 and update its state from - // OPEN -> CLOSING -> CLOSED - ContainerWithPipeline container3 = scm.getClientProtocolServer() - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - containerManager - .updateContainerState(container3.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.FINALIZE); - containerManager - .updateContainerState(container3.getContainerInfo().containerID(), - HddsProtos.LifeCycleEvent.CLOSE); - containers = containerStateManager.getMatchingContainerIDs(containerOwner, - xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HddsProtos.LifeCycleState.CLOSED).size(); - Assert.assertEquals(1, containers); - } - - - @Test - public void testReplicaMap() throws Exception { - DatanodeDetails dn1 = DatanodeDetails.newBuilder().setHostName("host1") - .setIpAddress("1.1.1.1") - .setUuid(UUID.randomUUID().toString()).build(); - DatanodeDetails dn2 = DatanodeDetails.newBuilder().setHostName("host2") - .setIpAddress("2.2.2.2") - .setUuid(UUID.randomUUID().toString()).build(); - - // Test 1: no replica's exist - ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong()); - Set replicaSet; - try { - containerStateManager.getContainerReplicas(containerID); - Assert.fail(); - } catch (ContainerNotFoundException ex) { - // expected. - } - - ContainerWithPipeline container = scm.getClientProtocolServer() - .allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), containerOwner); - - ContainerID id = container.getContainerInfo().containerID(); - - // Test 2: Add replica nodes and then test - ContainerReplica replicaOne = ContainerReplica.newBuilder() - .setContainerID(id) - .setContainerState(ContainerReplicaProto.State.OPEN) - .setDatanodeDetails(dn1) - .build(); - ContainerReplica replicaTwo = ContainerReplica.newBuilder() - .setContainerID(id) - .setContainerState(ContainerReplicaProto.State.OPEN) - .setDatanodeDetails(dn2) - .build(); - containerStateManager.updateContainerReplica(id, replicaOne); - containerStateManager.updateContainerReplica(id, replicaTwo); - replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); - - // Test 3: Remove one replica node and then test - containerStateManager.removeContainerReplica(id, replicaOne); - replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(1, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); - - // Test 3: Remove second replica node and then test - containerStateManager.removeContainerReplica(id, replicaTwo); - replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(0, replicaSet.size()); - Assert.assertFalse(replicaSet.contains(replicaOne)); - Assert.assertFalse(replicaSet.contains(replicaTwo)); - - // Test 4: Re-insert dn1 - containerStateManager.updateContainerReplica(id, replicaOne); - replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(1, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertFalse(replicaSet.contains(replicaTwo)); - - // Re-insert dn2 - containerStateManager.updateContainerReplica(id, replicaTwo); - 
replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); - - // Re-insert dn1 - containerStateManager.updateContainerReplica(id, replicaOne); - replicaSet = containerStateManager.getContainerReplicas(id); - Assert.assertEquals(2, replicaSet.size()); - Assert.assertTrue(replicaSet.contains(replicaOne)); - Assert.assertTrue(replicaSet.contains(replicaTwo)); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java deleted file mode 100644 index 5643cb6ef5d..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.scm.container.metrics; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.HashMap; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.fail; - -/** - * Class used to test {@link SCMContainerManagerMetrics}. - */ -public class TestSCMContainerManagerMetrics { - - private MiniOzoneCluster cluster; - private StorageContainerManager scm; - private String containerOwner = "OZONE"; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "3000s"); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - } - - - @After - public void teardown() { - cluster.shutdown(); - } - - @Test - public void testContainerOpsMetrics() throws IOException { - MetricsRecordBuilder metrics; - ContainerManager containerManager = scm.getContainerManager(); - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - long numSuccessfulCreateContainers = getLongCounter( - "NumSuccessfulCreateContainers", metrics); - - ContainerInfo containerInfo = containerManager.allocateContainer( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, containerOwner); - - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers", - metrics), ++numSuccessfulCreateContainers); - - try { - containerManager.allocateContainer( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - fail("testContainerOpsMetrics failed"); - } catch (IOException ex) { - // Here it should fail, so it should have the old metric value. 
- metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumSuccessfulCreateContainers", - metrics), numSuccessfulCreateContainers); - Assert.assertEquals(getLongCounter("NumFailureCreateContainers", - metrics), 1); - } - - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - long numSuccessfulDeleteContainers = getLongCounter( - "NumSuccessfulDeleteContainers", metrics); - - containerManager.deleteContainer( - new ContainerID(containerInfo.getContainerID())); - - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers", - metrics), numSuccessfulDeleteContainers + 1); - - - try { - // Give random container to delete. - containerManager.deleteContainer( - new ContainerID(RandomUtils.nextLong(10000, 20000))); - fail("testContainerOpsMetrics failed"); - } catch (IOException ex) { - // Here it should fail, so it should have the old metric value. - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers", - metrics), numSuccessfulCreateContainers); - Assert.assertEquals(getLongCounter("NumFailureDeleteContainers", - metrics), 1); - } - - containerManager.listContainer( - new ContainerID(containerInfo.getContainerID()), 1); - metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumListContainerOps", - metrics), 1); - - } - - @Test - public void testReportProcessingMetrics() throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String key = "key1"; - - MetricsRecordBuilder metrics = - getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - Assert.assertEquals(getLongCounter("NumContainerReportsProcessedSuccessful", - metrics), 1); - - // Create key should create container on DN. - cluster.getRpcClient().getObjectStore().getClientProxy() - .createVolume(volumeName); - cluster.getRpcClient().getObjectStore().getClientProxy() - .createBucket(volumeName, bucketName); - OzoneOutputStream ozoneOutputStream = cluster.getRpcClient() - .getObjectStore().getClientProxy().createKey(volumeName, bucketName, - key, 0, ReplicationType.RATIS, ReplicationFactor.ONE, - new HashMap<>()); - - String data = "file data"; - ozoneOutputStream.write(data.getBytes(), 0, data.length()); - ozoneOutputStream.close(); - - - GenericTestUtils.waitFor(() -> { - final MetricsRecordBuilder scmMetrics = - getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); - return getLongCounter("NumICRReportsProcessedSuccessful", - scmMetrics) == 1; - }, 1000, 500000); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java deleted file mode 100644 index c7470a3cb60..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.common.helpers - .ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationType.RATIS; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationFactor.THREE; - -/** - * Test for the Node2Pipeline map. - */ -public class TestNode2PipelineMap { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static ContainerWithPipeline ratisContainer; - private static ContainerManager containerManager; - private static PipelineManager pipelineManager; - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - containerManager = scm.getContainerManager(); - pipelineManager = scm.getPipelineManager(); - ContainerInfo containerInfo = containerManager.allocateContainer( - RATIS, THREE, "testOwner"); - ratisContainer = new ContainerWithPipeline(containerInfo, - pipelineManager.getPipeline(containerInfo.getPipelineID())); - pipelineManager = scm.getPipelineManager(); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testPipelineMap() throws IOException { - - Set set = pipelineManager - .getContainersInPipeline(ratisContainer.getPipeline().getId()); - - ContainerID cId = ratisContainer.getContainerInfo().containerID(); - Assert.assertEquals(1, set.size()); - set.forEach(containerID -> - Assert.assertEquals(containerID, cId)); - - List dns = ratisContainer.getPipeline().getNodes(); - Assert.assertEquals(3, dns.size()); - - // get pipeline details by dnid - Set pipelines = scm.getScmNodeManager() - .getPipelines(dns.get(0)); - Assert.assertTrue(pipelines.contains(ratisContainer.getPipeline().getId())); - - // Now close the container and it should not show up while fetching - // containers by pipeline - containerManager - .updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager - .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE); - Set set2 = pipelineManager.getContainersInPipeline( - ratisContainer.getPipeline().getId()); - Assert.assertEquals(0, set2.size()); - - pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); - pipelines = scm.getScmNodeManager() - .getPipelines(dns.get(0)); - Assert - .assertFalse(pipelines.contains(ratisContainer.getPipeline().getId())); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java deleted file mode 100644 index 3207878f9a4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos - .ReplicationType.RATIS; - -/** - * Test Node failure detection and handling in Ratis. 
- */ -public class TestNodeFailure { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static Pipeline ratisPipelineOne; - private static Pipeline ratisPipelineTwo; - private static ContainerManager containerManager; - private static PipelineManager pipelineManager; - private static long timeForFailure; - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - 10, TimeUnit.SECONDS); - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, - 10, TimeUnit.SECONDS); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(6) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - containerManager = scm.getContainerManager(); - pipelineManager = scm.getPipelineManager(); - ratisPipelineOne = pipelineManager.getPipeline( - containerManager.allocateContainer( - RATIS, THREE, "testOwner").getPipelineID()); - ratisPipelineTwo = pipelineManager.getPipeline( - containerManager.allocateContainer( - RATIS, THREE, "testOwner").getPipelineID()); - // At this stage, there should be 2 pipeline one with 1 open container each. - // Try closing the both the pipelines, one with a closed container and - // the other with an open container. - timeForFailure = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT - .getDuration(), TimeUnit.MILLISECONDS); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Ignore - // Enable this after we implement teardown pipeline logic once a datanode - // dies. - @Test(timeout = 300_000L) - public void testPipelineFail() throws InterruptedException, IOException, - TimeoutException { - Assert.assertEquals(ratisPipelineOne.getPipelineState(), - Pipeline.PipelineState.OPEN); - Pipeline pipelineToFail = ratisPipelineOne; - DatanodeDetails dnToFail = pipelineToFail.getFirstNode(); - cluster.shutdownHddsDatanode(dnToFail); - - // wait for sufficient time for the callback to be triggered - Thread.sleep(3 * timeForFailure); - - Assert.assertEquals(Pipeline.PipelineState.CLOSED, - pipelineManager.getPipeline(ratisPipelineOne.getId()) - .getPipelineState()); - Assert.assertEquals(Pipeline.PipelineState.OPEN, - pipelineManager.getPipeline(ratisPipelineTwo.getId()) - .getPipelineState()); - // Now restart the datanode and make sure that a new pipeline is created. 
- cluster.setWaitForClusterToBeReadyTimeout(300000); - cluster.restartHddsDatanode(dnToFail, true); - Pipeline ratisPipelineThree = pipelineManager.getPipeline( - containerManager.allocateContainer( - RATIS, THREE, "testOwner").getPipelineID()); - //Assert that new container is not created from the ratis 2 pipeline - Assert.assertNotEquals(ratisPipelineThree.getId(), - ratisPipelineTwo.getId()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java deleted file mode 100644 index c583559fd3a..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftGroupId; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; 
- -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; - -/** - * Tests for Pipeline Closing. - */ -public class TestPipelineClose { - - private MiniOzoneCluster cluster; - private OzoneConfiguration conf; - private StorageContainerManager scm; - private ContainerWithPipeline ratisContainer; - private ContainerManager containerManager; - private PipelineManager pipelineManager; - - private long pipelineDestroyTimeoutInMillis; - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, - TimeUnit.MILLISECONDS); - pipelineDestroyTimeoutInMillis = 5000; - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, - pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - containerManager = scm.getContainerManager(); - pipelineManager = scm.getPipelineManager(); - ContainerInfo containerInfo = containerManager - .allocateContainer(RATIS, THREE, "testOwner"); - ratisContainer = new ContainerWithPipeline(containerInfo, - pipelineManager.getPipeline(containerInfo.getPipelineID())); - pipelineManager = scm.getPipelineManager(); - // At this stage, there should be 2 pipeline one with 1 open container each. - // Try closing the both the pipelines, one with a closed container and - // the other with an open container. - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testPipelineCloseWithClosedContainer() throws IOException { - Set set = pipelineManager - .getContainersInPipeline(ratisContainer.getPipeline().getId()); - - ContainerID cId = ratisContainer.getContainerInfo().containerID(); - Assert.assertEquals(1, set.size()); - set.forEach(containerID -> Assert.assertEquals(containerID, cId)); - - // Now close the container and it should not show up while fetching - // containers by pipeline - containerManager - .updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager - .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE); - - Set setClosed = pipelineManager - .getContainersInPipeline(ratisContainer.getPipeline().getId()); - Assert.assertEquals(0, setClosed.size()); - - pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); - for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) { - // Assert that the pipeline has been removed from Node2PipelineMap as well - Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn) - .contains(ratisContainer.getPipeline().getId())); - } - } - - @Test - public void testPipelineCloseWithOpenContainer() - throws IOException, TimeoutException, InterruptedException { - Set setOpen = pipelineManager.getContainersInPipeline( - ratisContainer.getPipeline().getId()); - Assert.assertEquals(1, setOpen.size()); - - pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); - GenericTestUtils.waitFor(() -> { - try { - return containerManager - .getContainer(ratisContainer.getContainerInfo().containerID()) - .getState() == HddsProtos.LifeCycleState.CLOSING; - } catch (ContainerNotFoundException e) { - return false; 
- } - }, 100, 10000); - } - - @Test - public void testPipelineCloseWithPipelineAction() throws Exception { - List dns = ratisContainer.getPipeline().getNodes(); - PipelineActionsFromDatanode - pipelineActionsFromDatanode = TestUtils - .getPipelineActionFromDatanode(dns.get(0), - ratisContainer.getPipeline().getId()); - // send closing action for pipeline - PipelineActionHandler pipelineActionHandler = - new PipelineActionHandler(pipelineManager, conf); - pipelineActionHandler - .onMessage(pipelineActionsFromDatanode, new EventQueue()); - Thread.sleep((int) (pipelineDestroyTimeoutInMillis * 1.2)); - OzoneContainer ozoneContainer = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer(); - List pipelineReports = - ozoneContainer.getPipelineReport().getPipelineReportList(); - for (PipelineReport pipelineReport : pipelineReports) { - // ensure the pipeline is not reported by any dn - Assert.assertNotEquals( - PipelineID.getFromProtobuf(pipelineReport.getPipelineID()), - ratisContainer.getPipeline().getId()); - } - - try { - pipelineManager.getPipeline(ratisContainer.getPipeline().getId()); - Assert.fail("Pipeline should not exist in SCM"); - } catch (PipelineNotFoundException e) { - } - } - - @Test - public void testPipelineCloseWithLogFailure() throws IOException { - - EventQueue eventQ = (EventQueue) scm.getEventQueue(); - PipelineActionHandler pipelineActionTest = - Mockito.mock(PipelineActionHandler.class); - eventQ.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionTest); - ArgumentCaptor actionCaptor = - ArgumentCaptor.forClass(PipelineActionsFromDatanode.class); - - ContainerInfo containerInfo = containerManager - .allocateContainer(RATIS, THREE, "testOwner"); - ContainerWithPipeline containerWithPipeline = - new ContainerWithPipeline(containerInfo, - pipelineManager.getPipeline(containerInfo.getPipelineID())); - Pipeline openPipeline = containerWithPipeline.getPipeline(); - RaftGroupId groupId = RaftGroupId.valueOf(openPipeline.getId().getId()); - - try { - pipelineManager.getPipeline(openPipeline.getId()); - } catch (PipelineNotFoundException e) { - Assert.assertTrue("pipeline should exist", false); - } - - DatanodeDetails datanodeDetails = openPipeline.getNodes().get(0); - int index = cluster.getHddsDatanodeIndex(datanodeDetails); - - XceiverServerRatis xceiverRatis = - (XceiverServerRatis) cluster.getHddsDatanodes().get(index) - .getDatanodeStateMachine().getContainer().getWriteChannel(); - - /** - * Notify Datanode Ratis Server endpoint of a Ratis log failure. - * This is expected to trigger an immediate pipeline actions report to SCM - */ - xceiverRatis.handleNodeLogFailure(groupId, null); - - // verify SCM receives a pipeline action report "immediately" - Mockito.verify(pipelineActionTest, Mockito.timeout(100)) - .onMessage( - actionCaptor.capture(), - Mockito.any(EventPublisher.class)); - - PipelineActionsFromDatanode actionsFromDatanode = - actionCaptor.getValue(); - - // match the pipeline id - verifyCloseForPipeline(openPipeline, actionsFromDatanode); - } - - private boolean verifyCloseForPipeline(Pipeline pipeline, - PipelineActionsFromDatanode report) { - UUID uuidToFind = pipeline.getId().getId(); - - boolean found = false; - for (StorageContainerDatanodeProtocolProtos.PipelineAction action : - report.getReport().getPipelineActionsList()) { - if (action.getAction() == - StorageContainerDatanodeProtocolProtos.PipelineAction.Action.CLOSE) { - PipelineID closedPipelineId = PipelineID. 
- getFromProtobuf(action.getClosePipeline().getPipelineID()); - - if (closedPipelineId.getId().equals(uuidToFind)) { - found = true; - } - } - } - - Assert.assertTrue("SCM did not receive a Close action for the Pipeline", - found); - return found; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java deleted file mode 100644 index 0bbfb5312f3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java +++ /dev/null @@ -1,475 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Test for PipelineStateManager. 
- */ -public class TestPipelineStateManager { - - private PipelineStateManager stateManager; - - @Before - public void init() throws Exception { - Configuration conf = new OzoneConfiguration(); - stateManager = new PipelineStateManager(conf); - } - - private Pipeline createDummyPipeline(int numNodes) { - return createDummyPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, numNodes); - } - - private Pipeline createDummyPipeline(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor factor, int numNodes) { - List nodes = new ArrayList<>(); - for (int i = 0; i < numNodes; i++) { - nodes.add(TestUtils.randomDatanodeDetails()); - } - return Pipeline.newBuilder() - .setType(type) - .setFactor(factor) - .setNodes(nodes) - .setState(Pipeline.PipelineState.ALLOCATED) - .setId(PipelineID.randomId()) - .build(); - } - - @Test - public void testAddAndGetPipeline() throws IOException { - Pipeline pipeline = createDummyPipeline(0); - try { - stateManager.addPipeline(pipeline); - Assert.fail("Pipeline should not have been added"); - } catch (IllegalArgumentException e) { - // replication factor and number of nodes in the pipeline do not match - Assert.assertTrue(e.getMessage().contains("do not match")); - } - - // add a pipeline - pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - - try { - stateManager.addPipeline(pipeline); - Assert.fail("Pipeline should not have been added"); - } catch (IOException e) { - // Can not add a pipeline twice - Assert.assertTrue(e.getMessage().contains("Duplicate pipeline ID")); - } - - // verify pipeline returned is same - Pipeline pipeline1 = stateManager.getPipeline(pipeline.getId()); - Assert.assertTrue(pipeline == pipeline1); - - // clean up - removePipeline(pipeline); - } - - @Test - public void testGetPipelines() throws IOException { - // In start there should be no pipelines - Assert.assertTrue(stateManager.getPipelines().isEmpty()); - - Set pipelines = new HashSet<>(); - Pipeline pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - pipelines.add(pipeline); - pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - pipelines.add(pipeline); - - Set pipelines1 = new HashSet<>(stateManager.getPipelines( - HddsProtos.ReplicationType.RATIS)); - Assert.assertEquals(pipelines1.size(), pipelines.size()); - - pipelines1 = new HashSet<>(stateManager.getPipelines()); - Assert.assertEquals(pipelines1.size(), pipelines.size()); - - // clean up - for (Pipeline pipeline1 : pipelines) { - removePipeline(pipeline1); - } - } - - @Test - public void testGetPipelinesByTypeAndFactor() throws IOException { - Set pipelines = new HashSet<>(); - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor - .values()) { - for (int i = 0; i < 5; i++) { - // 5 pipelines in allocated state for each type and factor - Pipeline pipeline = - createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - pipelines.add(pipeline); - - // 5 pipelines in open state for each type and factor - pipeline = createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - pipelines.add(pipeline); - - // 5 pipelines in closed state for each type and factor - pipeline = createDummyPipeline(type, factor, 
factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.finalizePipeline(pipeline.getId()); - pipelines.add(pipeline); - } - } - } - - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor - .values()) { - // verify pipelines received - List pipelines1 = - stateManager.getPipelines(type, factor); - Assert.assertEquals(15, pipelines1.size()); - pipelines1.stream().forEach(p -> { - Assert.assertEquals(type, p.getType()); - Assert.assertEquals(factor, p.getFactor()); - }); - } - } - - //clean up - for (Pipeline pipeline : pipelines) { - removePipeline(pipeline); - } - } - - @Test - public void testGetPipelinesByTypeAndState() throws IOException { - Set pipelines = new HashSet<>(); - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - for (int i = 0; i < 5; i++) { - // 5 pipelines in allocated state for each type and factor - Pipeline pipeline = - createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - pipelines.add(pipeline); - - // 5 pipelines in open state for each type and factor - pipeline = createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - pipelines.add(pipeline); - - // 5 pipelines in closed state for each type and factor - pipeline = createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.finalizePipeline(pipeline.getId()); - pipelines.add(pipeline); - } - } - - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - // verify pipelines received - List pipelines1 = stateManager - .getPipelines(type, Pipeline.PipelineState.OPEN); - Assert.assertEquals(5, pipelines1.size()); - pipelines1.forEach(p -> { - Assert.assertEquals(type, p.getType()); - Assert.assertEquals(Pipeline.PipelineState.OPEN, p.getPipelineState()); - }); - - pipelines1 = stateManager - .getPipelines(type, Pipeline.PipelineState.OPEN, - Pipeline.PipelineState.CLOSED, Pipeline.PipelineState.ALLOCATED); - Assert.assertEquals(15, pipelines1.size()); - } - - //clean up - for (Pipeline pipeline : pipelines) { - removePipeline(pipeline); - } - } - - @Test - public void testGetPipelinesByTypeFactorAndState() throws IOException { - Set pipelines = new HashSet<>(); - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor - .values()) { - for (int i = 0; i < 5; i++) { - // 5 pipelines in allocated state for each type and factor - Pipeline pipeline = - createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - pipelines.add(pipeline); - - // 5 pipelines in open state for each type and factor - pipeline = createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - pipelines.add(pipeline); - - // 5 pipelines in dormant state for each type and factor - pipeline = createDummyPipeline(type, factor, factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - stateManager.deactivatePipeline(pipeline.getId()); - pipelines.add(pipeline); - - // 5 pipelines in closed state for each type and factor - pipeline = createDummyPipeline(type, factor, 
factor.getNumber()); - stateManager.addPipeline(pipeline); - stateManager.finalizePipeline(pipeline.getId()); - pipelines.add(pipeline); - } - } - } - - for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType - .values()) { - for (HddsProtos.ReplicationFactor factor : HddsProtos.ReplicationFactor - .values()) { - for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) { - // verify pipelines received - List pipelines1 = - stateManager.getPipelines(type, factor, state); - Assert.assertEquals(5, pipelines1.size()); - pipelines1.forEach(p -> { - Assert.assertEquals(type, p.getType()); - Assert.assertEquals(factor, p.getFactor()); - Assert.assertEquals(state, p.getPipelineState()); - }); - } - } - } - - //clean up - for (Pipeline pipeline : pipelines) { - removePipeline(pipeline); - } - } - - @Test - public void testAddAndGetContainer() throws IOException { - long containerID = 0; - Pipeline pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - pipeline = stateManager.getPipeline(pipeline.getId()); - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - - // move pipeline to open state - stateManager.openPipeline(pipeline.getId()); - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - - //verify the number of containers returned - Set containerIDs = - stateManager.getContainers(pipeline.getId()); - Assert.assertEquals(containerIDs.size(), containerID); - - removePipeline(pipeline); - try { - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - Assert.fail("Container should not have been added"); - } catch (IOException e) { - // Can not add a container to removed pipeline - Assert.assertTrue(e.getMessage().contains("not found")); - } - } - - @Test - public void testRemovePipeline() throws IOException { - Pipeline pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - // close the pipeline - stateManager.openPipeline(pipeline.getId()); - stateManager - .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); - - try { - stateManager.removePipeline(pipeline.getId()); - Assert.fail("Pipeline should not have been removed"); - } catch (IOException e) { - // can not remove a pipeline which already has containers - Assert.assertTrue(e.getMessage().contains("not yet closed")); - } - - // close the pipeline - stateManager.finalizePipeline(pipeline.getId()); - // remove containers and then remove the pipeline - removePipeline(pipeline); - } - - @Test - public void testRemoveContainer() throws IOException { - long containerID = 1; - Pipeline pipeline = createDummyPipeline(1); - // create an open pipeline in stateMap - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); - Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size()); - stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); - Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size()); - - // add two containers in the pipeline - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); - Assert.assertEquals(2, 
stateManager.getContainers(pipeline.getId()).size()); - - // move pipeline to closing state - stateManager.finalizePipeline(pipeline.getId()); - - stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); - stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(--containerID)); - Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size()); - - // clean up - stateManager.removePipeline(pipeline.getId()); - } - - @Test - public void testFinalizePipeline() throws IOException { - Pipeline pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - // finalize on ALLOCATED pipeline - stateManager.finalizePipeline(pipeline.getId()); - Assert.assertEquals(Pipeline.PipelineState.CLOSED, - stateManager.getPipeline(pipeline.getId()).getPipelineState()); - // clean up - removePipeline(pipeline); - - pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - // finalize on OPEN pipeline - stateManager.finalizePipeline(pipeline.getId()); - Assert.assertEquals(Pipeline.PipelineState.CLOSED, - stateManager.getPipeline(pipeline.getId()).getPipelineState()); - // clean up - removePipeline(pipeline); - - pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - stateManager.openPipeline(pipeline.getId()); - stateManager.finalizePipeline(pipeline.getId()); - // finalize should work on already closed pipeline - stateManager.finalizePipeline(pipeline.getId()); - Assert.assertEquals(Pipeline.PipelineState.CLOSED, - stateManager.getPipeline(pipeline.getId()).getPipelineState()); - // clean up - removePipeline(pipeline); - } - - @Test - public void testOpenPipeline() throws IOException { - Pipeline pipeline = createDummyPipeline(1); - stateManager.addPipeline(pipeline); - // open on ALLOCATED pipeline - stateManager.openPipeline(pipeline.getId()); - Assert.assertEquals(Pipeline.PipelineState.OPEN, - stateManager.getPipeline(pipeline.getId()).getPipelineState()); - - stateManager.openPipeline(pipeline.getId()); - // open should work on already open pipeline - Assert.assertEquals(Pipeline.PipelineState.OPEN, - stateManager.getPipeline(pipeline.getId()).getPipelineState()); - // clean up - removePipeline(pipeline); - } - - @Test - public void testQueryPipeline() throws IOException { - Pipeline pipeline = createDummyPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 3); - // pipeline in allocated state should not be reported - stateManager.addPipeline(pipeline); - Assert.assertEquals(0, stateManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size()); - - // pipeline in open state should be reported - stateManager.openPipeline(pipeline.getId()); - Assert.assertEquals(1, stateManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size()); - - Pipeline pipeline2 = createDummyPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 3); - pipeline2 = Pipeline.newBuilder(pipeline2) - .setState(Pipeline.PipelineState.OPEN) - .build(); - // pipeline in open state should be reported - stateManager.addPipeline(pipeline2); - Assert.assertEquals(2, stateManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size()); - - // pipeline in closed state should not be reported - 
stateManager.finalizePipeline(pipeline2.getId()); - Assert.assertEquals(1, stateManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size()); - - // clean up - removePipeline(pipeline); - removePipeline(pipeline2); - } - - private void removePipeline(Pipeline pipeline) throws IOException { - stateManager.finalizePipeline(pipeline.getId()); - stateManager.removePipeline(pipeline.getId()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java deleted file mode 100644 index 6ace90cb248..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests for RatisPipelineUtils. 
- */ -public class TestRatisPipelineCreateAndDestroy { - - private static MiniOzoneCluster cluster; - private OzoneConfiguration conf = new OzoneConfiguration(); - private static PipelineManager pipelineManager; - - public void init(int numDatanodes) throws Exception { - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - GenericTestUtils.getRandomizedTempPath()); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numDatanodes) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - pipelineManager = scm.getPipelineManager(); - } - - @After - public void cleanup() { - cluster.shutdown(); - } - - @Test(timeout = 180000) - public void testAutomaticPipelineCreationOnPipelineDestroy() - throws Exception { - init(6); - // make sure two pipelines are created - waitForPipelines(2); - List pipelines = pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); - for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - } - // make sure two pipelines are created - waitForPipelines(2); - } - - @Test(timeout = 180000) - public void testPipelineCreationOnNodeRestart() throws Exception { - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, - 5, TimeUnit.SECONDS); - init(3); - // make sure a pipelines is created - waitForPipelines(1); - List dns = new ArrayList<>(cluster.getHddsDatanodes()); - - List pipelines = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - for (HddsDatanodeService dn : dns) { - cluster.shutdownHddsDatanode(dn.getDatanodeDetails()); - } - - // try creating another pipeline now - try { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Assert.fail("pipeline creation should fail after shutting down pipeline"); - } catch (IOException ioe) { - // As now all datanodes are shutdown, they move to stale state, there - // will be no sufficient datanodes to create the pipeline. - Assert.assertTrue(ioe instanceof InsufficientDatanodesException); - } - - // make sure pipelines is destroyed - waitForPipelines(0); - for (HddsDatanodeService dn : dns) { - cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); - } - - // destroy the existing pipelines - for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - } - // make sure pipelines is created after node start - pipelineManager.triggerPipelineCreation(); - waitForPipelines(1); - } - - private void waitForPipelines(int numPipelines) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size() == numPipelines, 100, 40000); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java deleted file mode 100644 index 4b3d5d62b3d..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.commons.collections.CollectionUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Test for RatisPipelineProvider. - */ -public class TestRatisPipelineProvider { - - private NodeManager nodeManager; - private PipelineProvider provider; - private PipelineStateManager stateManager; - - @Before - public void init() throws Exception { - nodeManager = new MockNodeManager(true, 10); - stateManager = new PipelineStateManager(new OzoneConfiguration()); - provider = new MockRatisPipelineProvider(nodeManager, - stateManager, new OzoneConfiguration()); - } - - private void createPipelineAndAssertions( - HddsProtos.ReplicationFactor factor) throws IOException { - Pipeline pipeline = provider.create(factor); - stateManager.addPipeline(pipeline); - Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - Pipeline pipeline1 = provider.create(factor); - stateManager.addPipeline(pipeline1); - // New pipeline should not overlap with the previous created pipeline - Assert.assertTrue( - CollectionUtils.intersection(pipeline.getNodes(), pipeline1.getNodes()) - .isEmpty()); - Assert.assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline1.getFactor(), factor); - Assert.assertEquals(pipeline1.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber()); - } - - @Test - public void testCreatePipelineWithFactor() throws IOException { - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - Pipeline pipeline = provider.create(factor); - stateManager.addPipeline(pipeline); - Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - - factor = HddsProtos.ReplicationFactor.ONE; - Pipeline pipeline1 = provider.create(factor); - stateManager.addPipeline(pipeline1); - // New pipeline should overlap with the previous created pipeline, - // and 
one datanode should overlap between the two types. - Assert.assertEquals( - CollectionUtils.intersection(pipeline.getNodes(), - pipeline1.getNodes()).size(), 1); - Assert.assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline1.getFactor(), factor); - Assert.assertEquals(pipeline1.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber()); - } - - @Test - public void testCreatePipelineWithFactorThree() throws IOException { - createPipelineAndAssertions(HddsProtos.ReplicationFactor.THREE); - } - - @Test - public void testCreatePipelineWithFactorOne() throws IOException { - createPipelineAndAssertions(HddsProtos.ReplicationFactor.ONE); - } - - private List createListOfNodes(int nodeCount) { - List nodes = new ArrayList<>(); - for (int i = 0; i < nodeCount; i++) { - nodes.add(TestUtils.randomDatanodeDetails()); - } - return nodes; - } - - @Test - public void testCreatePipelineWithNodes() { - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - Pipeline pipeline = - provider.create(factor, createListOfNodes(factor.getNumber())); - Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals( - pipeline.getPipelineState(), Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - - factor = HddsProtos.ReplicationFactor.ONE; - pipeline = provider.create(factor, createListOfNodes(factor.getNumber())); - Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - } - - @Test - public void testCreatePipelinesDnExclude() throws IOException { - - // We need 9 Healthy DNs in MockNodeManager. - NodeManager mockNodeManager = new MockNodeManager(true, 12); - PipelineStateManager stateManagerMock = - new PipelineStateManager(new OzoneConfiguration()); - PipelineProvider providerMock = new MockRatisPipelineProvider( - mockNodeManager, stateManagerMock, new OzoneConfiguration()); - - // Use up first 3 DNs for an open pipeline. - List openPiplineDns = mockNodeManager.getAllNodes() - .subList(0, 3); - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - - Pipeline openPipeline = Pipeline.newBuilder() - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(factor) - .setNodes(openPiplineDns) - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .build(); - - stateManagerMock.addPipeline(openPipeline); - - // Use up next 3 DNs also for an open pipeline. - List moreOpenPiplineDns = mockNodeManager.getAllNodes() - .subList(3, 6); - Pipeline anotherOpenPipeline = Pipeline.newBuilder() - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(factor) - .setNodes(moreOpenPiplineDns) - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .build(); - stateManagerMock.addPipeline(anotherOpenPipeline); - - // Use up next 3 DNs also for a closed pipeline. 
- List closedPiplineDns = mockNodeManager.getAllNodes() - .subList(6, 9); - Pipeline anotherClosedPipeline = Pipeline.newBuilder() - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(factor) - .setNodes(closedPiplineDns) - .setState(Pipeline.PipelineState.CLOSED) - .setId(PipelineID.randomId()) - .build(); - stateManagerMock.addPipeline(anotherClosedPipeline); - - Pipeline pipeline = providerMock.create(factor); - Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - List pipelineNodes = pipeline.getNodes(); - - // Pipline nodes cannot be from open pipelines. - Assert.assertTrue( - pipelineNodes.parallelStream().filter(dn -> - (openPiplineDns.contains(dn) || moreOpenPiplineDns.contains(dn))) - .count() == 0); - - // Since we have only 9 Healthy DNs, at least 1 pipeline node should have - // been from the closed pipeline DN list. - Assert.assertTrue(pipelineNodes.parallelStream().filter( - closedPiplineDns::contains).count() > 0); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java deleted file mode 100644 index 2a486b1224e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ /dev/null @@ -1,317 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -/** - * Test cases to verify PipelineManager. - */ -public class TestSCMPipelineManager { - private static MockNodeManager nodeManager; - private static File testDir; - private static Configuration conf; - - @Before - public void setUp() throws Exception { - conf = new OzoneConfiguration(); - testDir = GenericTestUtils - .getTestDir(TestSCMPipelineManager.class.getSimpleName()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - boolean folderExisted = testDir.exists() || testDir.mkdirs(); - if (!folderExisted) { - throw new IOException("Unable to create test directory path"); - } - nodeManager = new MockNodeManager(true, 20); - } - - @After - public void cleanup() { - FileUtil.fullyDelete(testDir); - } - - @Test - public void testPipelineReload() throws IOException { - SCMPipelineManager pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), conf); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - Set pipelines = new HashSet<>(); - for (int i = 0; i < 5; i++) { - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - pipelines.add(pipeline); - } - pipelineManager.close(); - - // new pipeline manager should be able to load the pipelines from the db - pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), conf); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - for (Pipeline p : pipelines) { - pipelineManager.openPipeline(p.getId()); - } - List pipelineList = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS); - Assert.assertEquals(pipelines, new 
HashSet<>(pipelineList)); - - // clean up - for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - } - pipelineManager.close(); - } - - @Test - public void testRemovePipeline() throws IOException { - SCMPipelineManager pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), conf); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - pipelineManager.openPipeline(pipeline.getId()); - pipelineManager - .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - pipelineManager.close(); - - // new pipeline manager should not be able to load removed pipelines - pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - try { - pipelineManager.getPipeline(pipeline.getId()); - Assert.fail("Pipeline should not have been retrieved"); - } catch (IOException e) { - Assert.assertTrue(e.getMessage().contains("not found")); - } - - // clean up - pipelineManager.close(); - } - - @Test - public void testPipelineReport() throws IOException { - EventQueue eventQueue = new EventQueue(); - SCMPipelineManager pipelineManager = - new SCMPipelineManager(conf, nodeManager, eventQueue, null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), conf); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(new OzoneConfiguration(), - new ArrayList<>(), pipelineManager, eventQueue); - - // create a pipeline in allocated state with no dns yet reported - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Assert - .assertFalse(pipelineManager.getPipeline(pipeline.getId()).isHealthy()); - Assert - .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen()); - - // get pipeline report from each dn in the pipeline - PipelineReportHandler pipelineReportHandler = - new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf); - for (DatanodeDetails dn: pipeline.getNodes()) { - PipelineReportFromDatanode pipelineReportFromDatanode = - TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId()); - // pipeline is not healthy until all dns report - Assert.assertFalse( - pipelineManager.getPipeline(pipeline.getId()).isHealthy()); - pipelineReportHandler - .onMessage(pipelineReportFromDatanode, new EventQueue()); - } - - // pipeline is healthy when all dns report - Assert - .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isHealthy()); - // pipeline should now move to open state - Assert - .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen()); - - // close the pipeline - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); - - for (DatanodeDetails dn: pipeline.getNodes()) { - PipelineReportFromDatanode pipelineReportFromDatanode = - TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId()); - // pipeline report for destroyed pipeline should be ignored - pipelineReportHandler - .onMessage(pipelineReportFromDatanode, new EventQueue()); - } - - try { - 
pipelineManager.getPipeline(pipeline.getId()); - Assert.fail("Pipeline should not have been retrieved"); - } catch (IOException e) { - Assert.assertTrue(e.getMessage().contains("not found")); - } - - // clean up - pipelineManager.close(); - } - - @Test - public void testPipelineCreationFailedMetric() throws Exception { - MockNodeManager nodeManagerMock = new MockNodeManager(true, - 20); - SCMPipelineManager pipelineManager = - new SCMPipelineManager(conf, nodeManagerMock, new EventQueue(), null); - PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManagerMock, - pipelineManager.getStateManager(), conf); - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - MetricsRecordBuilder metrics = getMetrics( - SCMPipelineMetrics.class.getSimpleName()); - long numPipelineCreated = getLongCounter("NumPipelineCreated", - metrics); - Assert.assertTrue(numPipelineCreated == 0); - - // 3 DNs are unhealthy. - // Create 5 pipelines (Use up 15 Datanodes) - for (int i = 0; i < 5; i++) { - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Assert.assertNotNull(pipeline); - } - - metrics = getMetrics( - SCMPipelineMetrics.class.getSimpleName()); - numPipelineCreated = getLongCounter("NumPipelineCreated", metrics); - Assert.assertTrue(numPipelineCreated == 5); - - long numPipelineCreateFailed = getLongCounter( - "NumPipelineCreationFailed", metrics); - Assert.assertTrue(numPipelineCreateFailed == 0); - - //This should fail... - try { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Assert.fail(); - } catch (InsufficientDatanodesException idEx) { - Assert.assertEquals( - "Cannot create pipeline of factor 3 using 1 nodes.", - idEx.getMessage()); - } - - metrics = getMetrics( - SCMPipelineMetrics.class.getSimpleName()); - numPipelineCreated = getLongCounter("NumPipelineCreated", metrics); - Assert.assertTrue(numPipelineCreated == 5); - - numPipelineCreateFailed = getLongCounter( - "NumPipelineCreationFailed", metrics); - Assert.assertTrue(numPipelineCreateFailed == 0); - } - - @Test - public void testActivateDeactivatePipeline() throws IOException { - final SCMPipelineManager pipelineManager = - new SCMPipelineManager(conf, nodeManager, new EventQueue(), null); - final PipelineProvider mockRatisProvider = - new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), conf); - - pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, - mockRatisProvider); - - final Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - final PipelineID pid = pipeline.getId(); - - pipelineManager.openPipeline(pid); - pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1)); - - Assert.assertTrue(pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, - Pipeline.PipelineState.OPEN).contains(pipeline)); - - Assert.assertEquals(Pipeline.PipelineState.OPEN, - pipelineManager.getPipeline(pid).getPipelineState()); - - pipelineManager.deactivatePipeline(pid); - Assert.assertEquals(Pipeline.PipelineState.DORMANT, - pipelineManager.getPipeline(pid).getPipelineState()); - - Assert.assertFalse(pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, - Pipeline.PipelineState.OPEN).contains(pipeline)); - - 
pipelineManager.activatePipeline(pid); - - Assert.assertTrue(pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, - Pipeline.PipelineState.OPEN).contains(pipeline)); - - pipelineManager.close(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java deleted file mode 100644 index 459a67ae882..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.protocol.proto - .HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto - .HddsProtos.ReplicationType.RATIS; - -/** - * Test SCM restart and recovery wrt pipelines. - */ -public class TestSCMRestart { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static Pipeline ratisPipeline1; - private static Pipeline ratisPipeline2; - private static ContainerManager containerManager; - private static ContainerManager newContainerManager; - private static PipelineManager pipelineManager; - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - containerManager = scm.getContainerManager(); - pipelineManager = scm.getPipelineManager(); - ratisPipeline1 = pipelineManager.getPipeline( - containerManager.allocateContainer( - RATIS, THREE, "Owner1").getPipelineID()); - ratisPipeline2 = pipelineManager.getPipeline( - containerManager.allocateContainer( - RATIS, ONE, "Owner2").getPipelineID()); - // At this stage, there should be 2 pipeline one with 1 open container - // each. 
Try restarting the SCM and then discover that pipeline are in - // correct state. - cluster.restartStorageContainerManager(true); - newContainerManager = cluster.getStorageContainerManager() - .getContainerManager(); - pipelineManager = cluster.getStorageContainerManager().getPipelineManager(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testPipelineWithScmRestart() throws IOException { - // After restart make sure that the pipeline are still present - Pipeline ratisPipeline1AfterRestart = - pipelineManager.getPipeline(ratisPipeline1.getId()); - Pipeline ratisPipeline2AfterRestart = - pipelineManager.getPipeline(ratisPipeline2.getId()); - Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1); - Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2); - Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1); - Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2); - - // Try creating a new container, it should be from the same pipeline - // as was before restart - ContainerInfo containerInfo = newContainerManager - .allocateContainer(RATIS, THREE, "Owner1"); - Assert.assertEquals(containerInfo.getPipelineID(), ratisPipeline1.getId()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java deleted file mode 100644 index 22fd95b0955..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Test for SimplePipelineProvider. 
- */ -public class TestSimplePipelineProvider { - - private NodeManager nodeManager; - private PipelineProvider provider; - private PipelineStateManager stateManager; - - @Before - public void init() throws Exception { - nodeManager = new MockNodeManager(true, 10); - stateManager = new PipelineStateManager(new OzoneConfiguration()); - provider = new SimplePipelineProvider(nodeManager); - } - - @Test - public void testCreatePipelineWithFactor() throws IOException { - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - Pipeline pipeline = provider.create(factor); - stateManager.addPipeline(pipeline); - Assert.assertEquals(pipeline.getType(), - HddsProtos.ReplicationType.STAND_ALONE); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - - factor = HddsProtos.ReplicationFactor.ONE; - Pipeline pipeline1 = provider.create(factor); - stateManager.addPipeline(pipeline1); - Assert.assertEquals(pipeline1.getType(), - HddsProtos.ReplicationType.STAND_ALONE); - Assert.assertEquals(pipeline1.getFactor(), factor); - Assert.assertEquals(pipeline1.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber()); - } - - private List createListOfNodes(int nodeCount) { - List nodes = new ArrayList<>(); - for (int i = 0; i < nodeCount; i++) { - nodes.add(TestUtils.randomDatanodeDetails()); - } - return nodes; - } - - @Test - public void testCreatePipelineWithNodes() throws IOException { - HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE; - Pipeline pipeline = - provider.create(factor, createListOfNodes(factor.getNumber())); - Assert.assertEquals(pipeline.getType(), - HddsProtos.ReplicationType.STAND_ALONE); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - - factor = HddsProtos.ReplicationFactor.ONE; - pipeline = provider.create(factor, createListOfNodes(factor.getNumber())); - Assert.assertEquals(pipeline.getType(), - HddsProtos.ReplicationType.STAND_ALONE); - Assert.assertEquals(pipeline.getFactor(), factor); - Assert.assertEquals(pipeline.getPipelineState(), - Pipeline.PipelineState.OPEN); - Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java deleted file mode 100644 index f685b17cd33..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Package info tests. - */ -package org.apache.hadoop.hdds.scm.pipeline; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java deleted file mode 100644 index 7cfd555a509..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.safemode; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.util.List; -import java.util.concurrent.TimeoutException; - -import static org.junit.Assert.fail; - -/** - * This class tests SCM Safe mode with pipeline rules. - */ - -public class TestSCMSafeModeWithPipelineRules { - - private static MiniOzoneCluster cluster; - private OzoneConfiguration conf = new OzoneConfiguration(); - private PipelineManager pipelineManager; - private MiniOzoneCluster.Builder clusterBuilder; - - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - public void setup(int numDatanodes) throws Exception { - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - temporaryFolder.newFolder().toString()); - conf.setBoolean( - HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, - true); - conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "10s"); - conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - clusterBuilder = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numDatanodes) - .setHbInterval(1000) - .setHbProcessorInterval(1000); - - cluster = clusterBuilder.build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - pipelineManager = scm.getPipelineManager(); - } - - - @Test - public void testScmSafeMode() throws Exception { - - int datanodeCount = 6; - setup(datanodeCount); - - waitForRatis3NodePipelines(datanodeCount/3); - waitForRatis1NodePipelines(datanodeCount); - - int totalPipelineCount = datanodeCount + (datanodeCount/3); - - //Cluster is started successfully - cluster.stop(); - - cluster.restartOzoneManager(); - cluster.restartStorageContainerManager(false); - - pipelineManager = cluster.getStorageContainerManager().getPipelineManager(); - List pipelineList = - pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - - - pipelineList.get(0).getNodes().forEach(datanodeDetails -> { - try { - cluster.restartHddsDatanode(datanodeDetails, false); - } catch (Exception ex) { - fail("Datanode restart failed"); - } - }); - - - SCMSafeModeManager scmSafeModeManager = - cluster.getStorageContainerManager().getScmSafeModeManager(); - - - // Ceil(0.1 * 2) is 1, as one pipeline is healthy healthy pipeline rule is - // satisfied - - GenericTestUtils.waitFor(() -> - scmSafeModeManager.getHealthyPipelineSafeModeRule() - .validate(), 1000, 60000); - - // As Ceil(0.9 * 2) is 2, and from 
second pipeline no datanodes's are - // reported this rule is not met yet. - GenericTestUtils.waitFor(() -> - !scmSafeModeManager.getOneReplicaPipelineSafeModeRule() - .validate(), 1000, 60000); - - Assert.assertTrue(cluster.getStorageContainerManager().isInSafeMode()); - - DatanodeDetails restartedDatanode = pipelineList.get(1).getFirstNode(); - // Now restart one datanode from the 2nd pipeline - try { - cluster.restartHddsDatanode(restartedDatanode, false); - } catch (Exception ex) { - fail("Datanode restart failed"); - } - - - GenericTestUtils.waitFor(() -> - scmSafeModeManager.getOneReplicaPipelineSafeModeRule() - .validate(), 1000, 60000); - - GenericTestUtils.waitFor(() -> !scmSafeModeManager.getInSafeMode(), 1000, - 60000); - - // As after safemode wait time is not completed, we should have total - // pipeline's as original count 6(1 node pipelines) + 2 (3 node pipeline) - Assert.assertEquals(totalPipelineCount, - pipelineManager.getPipelines().size()); - - ReplicationManager replicationManager = - cluster.getStorageContainerManager().getReplicationManager(); - - GenericTestUtils.waitFor(() -> - replicationManager.isRunning(), 1000, 60000); - - - // As 4 datanodes are reported, 4 single node pipeline and 1 3 node - // pipeline. - - waitForRatis1NodePipelines(4); - waitForRatis3NodePipelines(1); - - // Restart other datanodes in the pipeline, and after some time we should - // have same count as original. - pipelineList.get(1).getNodes().forEach(datanodeDetails -> { - try { - if (!restartedDatanode.equals(datanodeDetails)) { - cluster.restartHddsDatanode(datanodeDetails, false); - } - } catch (Exception ex) { - fail("Datanode restart failed"); - } - }); - - waitForRatis1NodePipelines(datanodeCount); - waitForRatis3NodePipelines(datanodeCount/3); - - } - - @After - public void tearDown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - - private void waitForRatis3NodePipelines(int numPipelines) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN) - .size() == numPipelines, 100, 60000); - } - - private void waitForRatis1NodePipelines(int numPipelines) - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> pipelineManager - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, Pipeline.PipelineState.OPEN) - .size() == numPipelines, 100, 60000); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java deleted file mode 100644 index 2eef206dba4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ /dev/null @@ -1,281 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.log4j.Level; -import org.apache.ratis.grpc.client.GrpcClientProtocolClient; -import org.apache.ratis.util.LogUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.Executors; - -/** - * This class causes random failures in the chaos cluster. - */ -public class MiniOzoneChaosCluster extends MiniOzoneClusterImpl { - - static final Logger LOG = - LoggerFactory.getLogger(MiniOzoneChaosCluster.class); - - private final int numDatanodes; - private final ScheduledExecutorService executorService; - - private ScheduledFuture scheduledFuture; - - private enum FailureMode { - NODES_RESTART, - NODES_SHUTDOWN - } - - public MiniOzoneChaosCluster(OzoneConfiguration conf, - OzoneManager ozoneManager, - StorageContainerManager scm, - List hddsDatanodes) { - super(conf, ozoneManager, scm, hddsDatanodes); - - this.executorService = Executors.newSingleThreadScheduledExecutor(); - this.numDatanodes = getHddsDatanodes().size(); - LOG.info("Starting MiniOzoneChaosCluster with {} datanodes", numDatanodes); - LogUtils.setLogLevel(GrpcClientProtocolClient.LOG, Level.WARN); - } - - // Get the number of datanodes to fail in the cluster. - private int getNumberOfNodesToFail() { - return RandomUtils.nextBoolean() ? 1 : 2; - } - - // Should the failed node wait for SCM to register the even before - // restart, i.e fast restart or not. - private boolean isFastRestart() { - return RandomUtils.nextBoolean(); - } - - // Should the selected node be stopped or started. - private boolean shouldStop() { - return RandomUtils.nextBoolean(); - } - - // Get the datanode index of the datanode to fail. - private int getNodeToFail() { - return RandomUtils.nextInt() % numDatanodes; - } - - private void restartNodes() { - final int numNodesToFail = getNumberOfNodesToFail(); - LOG.info("Will restart {} nodes to simulate failure", numNodesToFail); - for (int i = 0; i < numNodesToFail; i++) { - boolean failureMode = isFastRestart(); - int failedNodeIndex = getNodeToFail(); - String failString = failureMode ? 
"Fast" : "Slow"; - DatanodeDetails dn = - getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails(); - try { - LOG.info("{} Restarting DataNode: {}", failString, dn.getUuid()); - restartHddsDatanode(failedNodeIndex, failureMode); - LOG.info("{} Completed restarting Datanode: {}", failString, - dn.getUuid()); - } catch (Exception e) { - LOG.error("Failed to restartNodes Datanode {}", dn.getUuid(), e); - } - } - } - - private void shutdownNodes() { - final int numNodesToFail = getNumberOfNodesToFail(); - LOG.info("Will shutdown {} nodes to simulate failure", numNodesToFail); - for (int i = 0; i < numNodesToFail; i++) { - boolean shouldStop = shouldStop(); - int failedNodeIndex = getNodeToFail(); - String stopString = shouldStop ? "Stopping" : "Restarting"; - DatanodeDetails dn = - getHddsDatanodes().get(failedNodeIndex).getDatanodeDetails(); - try { - LOG.info("{} DataNode {}", stopString, dn.getUuid()); - - if (shouldStop) { - shutdownHddsDatanode(failedNodeIndex); - } else { - restartHddsDatanode(failedNodeIndex, true); - } - LOG.info("Completed {} DataNode {}", stopString, dn.getUuid()); - - } catch (Exception e) { - LOG.error("Failed {} Datanode {}", stopString, dn.getUuid(), e); - } - } - } - - private FailureMode getFailureMode() { - return FailureMode. - values()[RandomUtils.nextInt() % FailureMode.values().length]; - } - - // Fail nodes randomly at configured timeout period. - private void fail() { - FailureMode mode = getFailureMode(); - switch (mode) { - case NODES_RESTART: - restartNodes(); - break; - case NODES_SHUTDOWN: - shutdownNodes(); - break; - - default: - LOG.error("invalid failure mode:{}", mode); - break; - } - } - - void startChaos(long initialDelay, long period, TimeUnit timeUnit) { - LOG.info("Starting Chaos with failure period:{} unit:{} numDataNodes:{}", - period, timeUnit, numDatanodes); - scheduledFuture = executorService.scheduleAtFixedRate(this::fail, - initialDelay, period, timeUnit); - } - - void stopChaos() throws Exception { - if (scheduledFuture != null) { - scheduledFuture.cancel(false); - scheduledFuture.get(); - } - } - - public void shutdown() { - try { - stopChaos(); - executorService.shutdown(); - executorService.awaitTermination(1, TimeUnit.DAYS); - //this should be called after stopChaos to be sure that the - //datanode collection is not modified during the shutdown - super.shutdown(); - } catch (Exception e) { - LOG.error("failed to shutdown MiniOzoneChaosCluster", e); - } - } - - /** - * Builder for configuring the MiniOzoneChaosCluster to run. - */ - public static class Builder extends MiniOzoneClusterImpl.Builder { - - /** - * Creates a new Builder. - * - * @param conf configuration - */ - public Builder(OzoneConfiguration conf) { - super(conf); - } - - /** - * Sets the number of HddsDatanodes to be started as part of - * MiniOzoneChaosCluster. 
- * - * @param val number of datanodes - * - * @return MiniOzoneChaosCluster.Builder - */ - public Builder setNumDatanodes(int val) { - super.setNumDatanodes(val); - return this; - } - - @Override - void initializeConfiguration() throws IOException { - super.initializeConfiguration(); - conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - 2, StorageUnit.KB); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, - 16, StorageUnit.KB); - conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE, - 4, StorageUnit.KB); - conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE, - 8, StorageUnit.KB); - conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, - 1, StorageUnit.MB); - conf.setTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT, 1000, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 10, - TimeUnit.SECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 20, - TimeUnit.SECONDS); - conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, - TimeUnit.SECONDS); - conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1, - TimeUnit.SECONDS); - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, 5, - TimeUnit.SECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 1, TimeUnit.SECONDS); - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); - conf.setInt("hdds.scm.replication.thread.interval", 10 * 1000); - conf.setInt("hdds.scm.replication.event.timeout", 20 * 1000); - conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); - } - - @Override - public MiniOzoneChaosCluster build() throws IOException { - DefaultMetricsSystem.setMiniClusterMode(true); - initializeConfiguration(); - StorageContainerManager scm; - OzoneManager om; - try { - scm = createSCM(); - scm.start(); - om = createOM(); - if(certClient != null) { - om.setCertClient(certClient); - } - } catch (AuthenticationException ex) { - throw new IOException("Unable to build MiniOzoneCluster. ", ex); - } - - om.start(); - final List hddsDatanodes = createHddsDatanodes(scm); - MiniOzoneChaosCluster cluster = - new MiniOzoneChaosCluster(conf, om, scm, hddsDatanodes); - if (startDataNodes) { - cluster.startHddsDatanodes(); - } - return cluster; - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java deleted file mode 100644 index 0aba9689ffb..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ /dev/null @@ -1,472 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; - -import java.io.IOException; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeoutException; - -/** - * Interface used for MiniOzoneClusters. - */ -public interface MiniOzoneCluster { - - /** - * Returns the Builder to construct MiniOzoneCluster. - * - * @param conf OzoneConfiguration - * - * @return MiniOzoneCluster builder - */ - static Builder newBuilder(OzoneConfiguration conf) { - return new MiniOzoneClusterImpl.Builder(conf); - } - - /** - * Returns the Builder to construct MiniOzoneHACluster. - * - * @param conf OzoneConfiguration - * - * @return MiniOzoneCluster builder - */ - static Builder newHABuilder(OzoneConfiguration conf) { - return new MiniOzoneHAClusterImpl.Builder(conf); - } - - /** - * Returns the configuration object associated with the MiniOzoneCluster. - * - * @return Configuration - */ - Configuration getConf(); - - /** - * Waits for the cluster to be ready, this call blocks till all the - * configured {@link HddsDatanodeService} registers with - * {@link StorageContainerManager}. - * - * @throws TimeoutException In case of timeout - * @throws InterruptedException In case of interrupt while waiting - */ - void waitForClusterToBeReady() throws TimeoutException, InterruptedException; - - /** - * Sets the timeout value after which - * {@link MiniOzoneCluster#waitForClusterToBeReady} times out. - * - * @param timeoutInMs timeout value in milliseconds - */ - void setWaitForClusterToBeReadyTimeout(int timeoutInMs); - - /** - * Waits/blocks till the cluster is out of safe mode. - * - * @throws TimeoutException TimeoutException In case of timeout - * @throws InterruptedException In case of interrupt while waiting - */ - void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException; - - /** - * Returns OzoneManager Service ID. - * - * @return Service ID String - */ - String getServiceId(); - - /** - * Returns {@link StorageContainerManager} associated with this - * {@link MiniOzoneCluster} instance. - * - * @return {@link StorageContainerManager} instance - */ - StorageContainerManager getStorageContainerManager(); - - /** - * Returns {@link OzoneManager} associated with this - * {@link MiniOzoneCluster} instance. - * - * @return {@link OzoneManager} instance - */ - OzoneManager getOzoneManager(); - - /** - * Returns the list of {@link HddsDatanodeService} which are part of this - * {@link MiniOzoneCluster} instance. 
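Read together, these accessors describe the usual test lifecycle. A minimal sketch, using only methods declared in this interface (checked exceptions omitted):

    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();        // blocks until every datanode registers with SCM
    try (OzoneClient client = cluster.getClient()) {
      // exercise volumes, buckets and keys through the client
    } finally {
      cluster.shutdown();                     // stops the daemons and deletes the storage dirs
    }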
- * - * @return List of {@link HddsDatanodeService} - */ - List getHddsDatanodes(); - - /** - * Returns an {@link OzoneClient} to access the {@link MiniOzoneCluster}. - * - * @return {@link OzoneClient} - * @throws IOException - */ - OzoneClient getClient() throws IOException; - - /** - * Returns an RPC based {@link OzoneClient} to access the - * {@link MiniOzoneCluster}. - * - * @return {@link OzoneClient} - * @throws IOException - */ - OzoneClient getRpcClient() throws IOException; - - /** - * Returns StorageContainerLocationClient to communicate with - * {@link StorageContainerManager} associated with the MiniOzoneCluster. - * - * @return StorageContainerLocation Client - * @throws IOException - */ - StorageContainerLocationProtocolClientSideTranslatorPB - getStorageContainerLocationClient() throws IOException; - - /** - * Restarts StorageContainerManager instance. - * - * @param waitForDatanode - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException - */ - void restartStorageContainerManager(boolean waitForDatanode) - throws InterruptedException, TimeoutException, IOException, - AuthenticationException; - - /** - * Restarts OzoneManager instance. - * - * @throws IOException - */ - void restartOzoneManager() throws IOException; - - /** - * Restart a particular HddsDatanode. - * - * @param i index of HddsDatanode in the MiniOzoneCluster - */ - void restartHddsDatanode(int i, boolean waitForDatanode) - throws InterruptedException, TimeoutException; - - int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException; - - /** - * Restart a particular HddsDatanode. - * - * @param dn HddsDatanode in the MiniOzoneCluster - */ - void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode) - throws InterruptedException, TimeoutException, IOException; - /** - * Shutdown a particular HddsDatanode. - * - * @param i index of HddsDatanode in the MiniOzoneCluster - */ - void shutdownHddsDatanode(int i); - - /** - * Shutdown a particular HddsDatanode. - * - * @param dn HddsDatanode in the MiniOzoneCluster - */ - void shutdownHddsDatanode(DatanodeDetails dn) throws IOException; - - /** - * Shutdown the MiniOzoneCluster and delete the storage dirs. - */ - void shutdown(); - - /** - * Stop the MiniOzoneCluster without any cleanup. - */ - void stop(); - - /** - * Start Scm. - */ - void startScm() throws IOException; - - /** - * Start DataNodes. - */ - void startHddsDatanodes(); - - /** - * Shuts down all the DataNodes. - */ - void shutdownHddsDatanodes(); - - /** - * Builder class for MiniOzoneCluster. 
- */ - @SuppressWarnings("visibilitymodifier") - abstract class Builder { - - protected static final int DEFAULT_HB_INTERVAL_MS = 1000; - protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100; - protected static final int ACTIVE_OMS_NOT_SET = -1; - - protected final OzoneConfiguration conf; - protected String path; - - protected String clusterId; - protected String omServiceId; - protected int numOfOMs; - protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET; - - protected Optional enableTrace = Optional.of(false); - protected Optional hbInterval = Optional.empty(); - protected Optional hbProcessorInterval = Optional.empty(); - protected Optional scmId = Optional.empty(); - protected Optional omId = Optional.empty(); - - protected Boolean ozoneEnabled = true; - protected Boolean randomContainerPort = true; - protected Optional chunkSize = Optional.empty(); - protected Optional streamBufferFlushSize = Optional.empty(); - protected Optional streamBufferMaxSize = Optional.empty(); - protected Optional blockSize = Optional.empty(); - protected Optional streamBufferSizeUnit = Optional.empty(); - // Use relative smaller number of handlers for testing - protected int numOfOmHandlers = 20; - protected int numOfScmHandlers = 20; - protected int numOfDatanodes = 1; - protected boolean startDataNodes = true; - protected CertificateClient certClient; - - protected Builder(OzoneConfiguration conf) { - this.conf = conf; - setClusterId(UUID.randomUUID().toString()); - } - - /** - * Sets the cluster Id. - * - * @param id cluster Id - * - * @return MiniOzoneCluster.Builder - */ - public Builder setClusterId(String id) { - clusterId = id; - path = GenericTestUtils.getTempPath( - MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId); - return this; - } - - public Builder setStartDataNodes(boolean nodes) { - this.startDataNodes = nodes; - return this; - } - - /** - * Sets the certificate client. - * - * @param client - * - * @return MiniOzoneCluster.Builder - */ - public Builder setCertificateClient(CertificateClient client) { - this.certClient = client; - return this; - } - - /** - * Sets the SCM id. - * - * @param id SCM Id - * - * @return MiniOzoneCluster.Builder - */ - public Builder setScmId(String id) { - scmId = Optional.of(id); - return this; - } - - /** - * Sets the OM id. - * - * @param id OM Id - * - * @return MiniOzoneCluster.Builder - */ - public Builder setOmId(String id) { - omId = Optional.of(id); - return this; - } - - /** - * If set to true container service will be started in a random port. - * - * @param randomPort enable random port - * - * @return MiniOzoneCluster.Builder - */ - public Builder setRandomContainerPort(boolean randomPort) { - randomContainerPort = randomPort; - return this; - } - - /** - * Sets the number of HddsDatanodes to be started as part of - * MiniOzoneCluster. - * - * @param val number of datanodes - * - * @return MiniOzoneCluster.Builder - */ - public Builder setNumDatanodes(int val) { - numOfDatanodes = val; - return this; - } - - /** - * Sets the number of HeartBeat Interval of Datanodes, the value should be - * in MilliSeconds. - * - * @param val HeartBeat interval in milliseconds - * - * @return MiniOzoneCluster.Builder - */ - public Builder setHbInterval(int val) { - hbInterval = Optional.of(val); - return this; - } - - /** - * Sets the number of HeartBeat Processor Interval of Datanodes, - * the value should be in MilliSeconds. 
- * - * @param val HeartBeat Processor interval in milliseconds - * - * @return MiniOzoneCluster.Builder - */ - public Builder setHbProcessorInterval(int val) { - hbProcessorInterval = Optional.of(val); - return this; - } - - /** - * When set to true, enables trace level logging. - * - * @param trace true or false - * - * @return MiniOzoneCluster.Builder - */ - public Builder setTrace(Boolean trace) { - enableTrace = Optional.of(trace); - return this; - } - - /** - * Modifies the configuration such that Ozone will be disabled. - * - * @return MiniOzoneCluster.Builder - */ - public Builder disableOzone() { - ozoneEnabled = false; - return this; - } - - /** - * Sets the chunk size. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setChunkSize(int size) { - chunkSize = Optional.of(size); - return this; - } - - /** - * Sets the flush size for stream buffer. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setStreamBufferFlushSize(long size) { - streamBufferFlushSize = Optional.of(size); - return this; - } - - /** - * Sets the max size for stream buffer. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setStreamBufferMaxSize(long size) { - streamBufferMaxSize = Optional.of(size); - return this; - } - - /** - * Sets the block size for stream buffer. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setBlockSize(long size) { - blockSize = Optional.of(size); - return this; - } - - public Builder setNumOfOzoneManagers(int numOMs) { - this.numOfOMs = numOMs; - return this; - } - - public Builder setNumOfActiveOMs(int numActiveOMs) { - this.numOfActiveOMs = numActiveOMs; - return this; - } - - public Builder setStreamBufferSizeUnit(StorageUnit unit) { - this.streamBufferSizeUnit = Optional.of(unit); - return this; - } - - public Builder setOMServiceId(String serviceId) { - this.omServiceId = serviceId; - return this; - } - - /** - * Constructs and returns MiniOzoneCluster. - * - * @return {@link MiniOzoneCluster} - * - * @throws IOException - */ - public abstract MiniOzoneCluster build() throws IOException; - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java deleted file mode 100644 index ac76482bd90..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ /dev/null @@ -1,663 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone; - -import java.io.File; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.slf4j.event.Level; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .DFS_CONTAINER_IPC_RANDOM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; - -/** - * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for - * running tests. The cluster consists of a OzoneManager, - * StorageContainerManager and multiple DataNodes. - */ -@InterfaceAudience.Private -public class MiniOzoneClusterImpl implements MiniOzoneCluster { - - private static final Logger LOG = - LoggerFactory.getLogger(MiniOzoneClusterImpl.class); - - private final OzoneConfiguration conf; - private StorageContainerManager scm; - private OzoneManager ozoneManager; - private final List hddsDatanodes; - - // Timeout for the cluster to be ready - private int waitForClusterToBeReadyTimeout = 60000; // 1 min - private CertificateClient caClient; - - /** - * Creates a new MiniOzoneCluster. 
- * - * @throws IOException if there is an I/O error - */ - MiniOzoneClusterImpl(OzoneConfiguration conf, - OzoneManager ozoneManager, - StorageContainerManager scm, - List hddsDatanodes) { - this.conf = conf; - this.ozoneManager = ozoneManager; - this.scm = scm; - this.hddsDatanodes = hddsDatanodes; - } - - /** - * Creates a new MiniOzoneCluster without the OzoneManager. This is used by - * {@link MiniOzoneHAClusterImpl} for starting multiple OzoneManagers. - * @param conf - * @param scm - * @param hddsDatanodes - */ - MiniOzoneClusterImpl(OzoneConfiguration conf, StorageContainerManager scm, - List hddsDatanodes) { - this.conf = conf; - this.scm = scm; - this.hddsDatanodes = hddsDatanodes; - } - - public OzoneConfiguration getConf() { - return conf; - } - - public String getServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - - /** - * Waits for the Ozone cluster to be ready for processing requests. - */ - @Override - public void waitForClusterToBeReady() - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> { - final int healthy = scm.getNodeCount(HEALTHY); - final boolean isReady = healthy == hddsDatanodes.size(); - LOG.info("{}. Got {} of {} DN Heartbeats.", - isReady? "Cluster is ready" : "Waiting for cluster to be ready", - healthy, hddsDatanodes.size()); - return isReady; - }, 1000, waitForClusterToBeReadyTimeout); - } - - /** - * Sets the timeout value after which - * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out. - * - * @param timeoutInMs timeout value in milliseconds - */ - @Override - public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) { - waitForClusterToBeReadyTimeout = timeoutInMs; - } - - /** - * Waits for SCM to be out of Safe Mode. Many tests can be run iff we are out - * of Safe mode. - * - * @throws TimeoutException - * @throws InterruptedException - */ - @Override - public void waitTobeOutOfSafeMode() - throws TimeoutException, InterruptedException { - GenericTestUtils.waitFor(() -> { - if (!scm.isInSafeMode()) { - return true; - } - LOG.info("Waiting for cluster to be ready. No datanodes found"); - return false; - }, 100, 1000 * 45); - } - - @Override - public StorageContainerManager getStorageContainerManager() { - return this.scm; - } - - @Override - public OzoneManager getOzoneManager() { - return this.ozoneManager; - } - - @Override - public List getHddsDatanodes() { - return hddsDatanodes; - } - - @Override - public int getHddsDatanodeIndex(DatanodeDetails dn) throws IOException { - for (HddsDatanodeService service : hddsDatanodes) { - if (service.getDatanodeDetails().equals(dn)) { - return hddsDatanodes.indexOf(service); - } - } - throw new IOException( - "Not able to find datanode with datanode Id " + dn.getUuid()); - } - - @Override - public OzoneClient getClient() throws IOException { - return OzoneClientFactory.getClient(conf); - } - - @Override - public OzoneClient getRpcClient() throws IOException { - return OzoneClientFactory.getRpcClient(conf); - } - - /** - * Returns an RPC proxy connected to this cluster's StorageContainerManager - * for accessing container location information. Callers take ownership of - * the proxy and must close it when done. 
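For example, a test that needs to issue container location calls against SCM directly could use it as follows (a sketch; cluster is the MiniOzoneCluster under test, and a close() method is assumed from the ownership contract stated above):

    StorageContainerLocationProtocolClientSideTranslatorPB scmLocationClient =
        cluster.getStorageContainerLocationClient();
    try {
      // query or manipulate container locations through scmLocationClient here
    } finally {
      scmLocationClient.close();   // release the underlying RPC proxy
    }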
- * - * @return RPC proxy for accessing container location information - * @throws IOException if there is an I/O error - */ - @Override - public StorageContainerLocationProtocolClientSideTranslatorPB - getStorageContainerLocationClient() throws IOException { - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); - InetSocketAddress address = scm.getClientRpcAddress(); - LOG.info( - "Creating StorageContainerLocationProtocol RPC client with address {}", - address); - return new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - } - - @Override - public void restartStorageContainerManager(boolean waitForDatanode) - throws TimeoutException, InterruptedException, IOException, - AuthenticationException { - scm.stop(); - scm.join(); - scm = StorageContainerManager.createSCM(conf); - scm.start(); - if (waitForDatanode) { - waitForClusterToBeReady(); - } - } - - @Override - public void restartOzoneManager() throws IOException { - ozoneManager.stop(); - ozoneManager.restart(); - } - - @Override - public void restartHddsDatanode(int i, boolean waitForDatanode) - throws InterruptedException, TimeoutException { - HddsDatanodeService datanodeService = hddsDatanodes.get(i); - datanodeService.stop(); - datanodeService.join(); - // ensure same ports are used across restarts. - OzoneConfiguration config = datanodeService.getConf(); - int currentPort = datanodeService.getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); - config.setInt(DFS_CONTAINER_IPC_PORT, currentPort); - config.setBoolean(DFS_CONTAINER_IPC_RANDOM_PORT, false); - int ratisPort = datanodeService.getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.RATIS).getValue(); - config.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort); - config.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false); - hddsDatanodes.remove(i); - if (waitForDatanode) { - // wait for node to be removed from SCM healthy node list. - waitForClusterToBeReady(); - } - String[] args = new String[]{}; - HddsDatanodeService service = - HddsDatanodeService.createHddsDatanodeService(args); - hddsDatanodes.add(i, service); - service.start(config); - if (waitForDatanode) { - // wait for the node to be identified as a healthy node again. 
- waitForClusterToBeReady(); - } - } - - @Override - public void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode) - throws InterruptedException, TimeoutException, IOException { - restartHddsDatanode(getHddsDatanodeIndex(dn), waitForDatanode); - } - - @Override - public void shutdownHddsDatanode(int i) { - hddsDatanodes.get(i).stop(); - } - - @Override - public void shutdownHddsDatanode(DatanodeDetails dn) throws IOException { - shutdownHddsDatanode(getHddsDatanodeIndex(dn)); - } - - @Override - public void shutdown() { - try { - LOG.info("Shutting down the Mini Ozone Cluster"); - - File baseDir = new File(GenericTestUtils.getTempPath( - MiniOzoneClusterImpl.class.getSimpleName() + "-" + - scm.getClientProtocolServer().getScmInfo().getClusterId())); - stop(); - FileUtils.deleteDirectory(baseDir); - ContainerCache.getInstance(conf).shutdownCache(); - DefaultMetricsSystem.shutdown(); - } catch (IOException e) { - LOG.error("Exception while shutting down the cluster.", e); - } - } - - @Override - public void stop() { - LOG.info("Stopping the Mini Ozone Cluster"); - stopOM(ozoneManager); - stopDatanodes(hddsDatanodes); - stopSCM(scm); - } - - /** - * Start Scm. - */ - @Override - public void startScm() throws IOException { - scm.start(); - } - - /** - * Start DataNodes. - */ - @Override - public void startHddsDatanodes() { - hddsDatanodes.forEach((datanode) -> { - datanode.setCertificateClient(getCAClient()); - datanode.start(); - }); - } - - @Override - public void shutdownHddsDatanodes() { - hddsDatanodes.forEach((datanode) -> { - try { - shutdownHddsDatanode(datanode.getDatanodeDetails()); - } catch (IOException e) { - LOG.error("Exception while trying to shutdown datanodes:", e); - } - }); - } - - private CertificateClient getCAClient() { - return this.caClient; - } - - private void setCAClient(CertificateClient client) { - this.caClient = client; - } - - private static void stopDatanodes( - Collection hddsDatanodes) { - if (!hddsDatanodes.isEmpty()) { - LOG.info("Stopping the HddsDatanodes"); - hddsDatanodes.parallelStream() - .forEach(MiniOzoneClusterImpl::stopDatanode); - } - } - - private static void stopDatanode(HddsDatanodeService dn) { - if (dn != null) { - dn.stop(); - dn.join(); - } - } - - private static void stopSCM(StorageContainerManager scm) { - if (scm != null) { - LOG.info("Stopping the StorageContainerManager"); - scm.stop(); - scm.join(); - } - } - - private static void stopOM(OzoneManager om) { - if (om != null) { - LOG.info("Stopping the OzoneManager"); - om.stop(); - om.join(); - } - } - - /** - * Builder for configuring the MiniOzoneCluster to run. - */ - public static class Builder extends MiniOzoneCluster.Builder { - - /** - * Creates a new Builder. 
- * - * @param conf configuration - */ - public Builder(OzoneConfiguration conf) { - super(conf); - } - - @Override - public MiniOzoneCluster build() throws IOException { - DefaultMetricsSystem.setMiniClusterMode(true); - initializeConfiguration(); - StorageContainerManager scm = null; - OzoneManager om = null; - List hddsDatanodes = Collections.emptyList(); - try { - scm = createSCM(); - scm.start(); - om = createOM(); - if(certClient != null) { - om.setCertClient(certClient); - } - om.start(); - - hddsDatanodes = createHddsDatanodes(scm); - MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, om, scm, - hddsDatanodes); - cluster.setCAClient(certClient); - if (startDataNodes) { - cluster.startHddsDatanodes(); - } - return cluster; - } catch (Exception ex) { - stopOM(om); - if (startDataNodes) { - stopDatanodes(hddsDatanodes); - } - stopSCM(scm); - removeConfiguration(); - - if (ex instanceof IOException) { - throw (IOException) ex; - } - if (ex instanceof RuntimeException) { - throw (RuntimeException) ex; - } - throw new IOException("Unable to build MiniOzoneCluster. ", ex); - } - } - - /** - * Initializes the configuration required for starting MiniOzoneCluster. - * - * @throws IOException - */ - void initializeConfiguration() throws IOException { - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, ozoneEnabled); - Path metaDir = Paths.get(path, "ozone-meta"); - Files.createDirectories(metaDir); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - if (!chunkSize.isPresent()) { - //set it to 1MB by default in tests - chunkSize = Optional.of(1); - } - if (!streamBufferFlushSize.isPresent()) { - streamBufferFlushSize = Optional.of((long)chunkSize.get()); - } - if (!streamBufferMaxSize.isPresent()) { - streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get()); - } - if (!blockSize.isPresent()) { - blockSize = Optional.of(2 * streamBufferMaxSize.get()); - } - - if (!streamBufferSizeUnit.isPresent()) { - streamBufferSizeUnit = Optional.of(StorageUnit.MB); - } - conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, - chunkSize.get(), streamBufferSizeUnit.get()); - conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE, - streamBufferFlushSize.get(), streamBufferSizeUnit.get()); - conf.setStorageSize(OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE, - streamBufferMaxSize.get(), streamBufferSizeUnit.get()); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(), - streamBufferSizeUnit.get()); - configureTrace(); - } - - void removeConfiguration() { - FileUtils.deleteQuietly(new File(path)); - } - - /** - * Creates a new StorageContainerManager instance. 
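(Worked through, the size defaults in initializeConfiguration above mean that a test which sets none of the size knobs ends up with a 1 MB chunk size, a 1 MB stream buffer flush size, a 2 MB stream buffer max size and a 4 MB block size, all expressed in StorageUnit.MB.)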
- * - * @return {@link StorageContainerManager} - * - * @throws IOException - */ - StorageContainerManager createSCM() - throws IOException, AuthenticationException { - configureSCM(); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - initializeScmStorage(scmStore); - return StorageContainerManager.createSCM(conf); - } - - private void initializeScmStorage(SCMStorageConfig scmStore) - throws IOException { - if (scmStore.getState() == StorageState.INITIALIZED) { - return; - } - scmStore.setClusterId(clusterId); - if (!scmId.isPresent()) { - scmId = Optional.of(UUID.randomUUID().toString()); - } - scmStore.setScmId(scmId.get()); - scmStore.initialize(); - } - - void initializeOmStorage(OMStorage omStorage) throws IOException{ - if (omStorage.getState() == StorageState.INITIALIZED) { - return; - } - omStorage.setClusterId(clusterId); - omStorage.setScmId(scmId.get()); - omStorage.setOmId(omId.orElse(UUID.randomUUID().toString())); - // Initialize ozone certificate client if security is enabled. - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - OzoneManager.initializeSecurity(conf, omStorage); - } - omStorage.initialize(); - } - - /** - * Creates a new OzoneManager instance. - * - * @return {@link OzoneManager} - * - * @throws IOException - */ - OzoneManager createOM() - throws IOException, AuthenticationException { - configureOM(); - OMStorage omStore = new OMStorage(conf); - initializeOmStorage(omStore); - return OzoneManager.createOm(conf); - } - - /** - * Creates HddsDatanodeService(s) instance. - * - * @return List of HddsDatanodeService - * - * @throws IOException - */ - List createHddsDatanodes( - StorageContainerManager scm) throws IOException { - configureHddsDatanodes(); - String scmAddress = scm.getDatanodeRpcAddress().getHostString() + - ":" + scm.getDatanodeRpcAddress().getPort(); - String[] args = new String[] {}; - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); - List hddsDatanodes = new ArrayList<>(); - for (int i = 0; i < numOfDatanodes; i++) { - OzoneConfiguration dnConf = new OzoneConfiguration(conf); - String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); - Path metaDir = Paths.get(datanodeBaseDir, "meta"); - Path dataDir = Paths.get(datanodeBaseDir, "data", "containers"); - Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis"); - Path wrokDir = Paths.get(datanodeBaseDir, "data", "replication", - "work"); - Files.createDirectories(metaDir); - Files.createDirectories(dataDir); - Files.createDirectories(ratisDir); - Files.createDirectories(wrokDir); - dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.toString()); - dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, - ratisDir.toString()); - dnConf.set(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR, - wrokDir.toString()); - - HddsDatanodeService datanode - = HddsDatanodeService.createHddsDatanodeService(args); - datanode.setConfiguration(dnConf); - hddsDatanodes.add(datanode); - } - return hddsDatanodes; - } - - private void configureSCM() { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers); - conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, - "3s"); - 
configureSCMheartbeat(); - } - - private void configureSCMheartbeat() { - if (hbInterval.isPresent()) { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - hbInterval.get(), TimeUnit.MILLISECONDS); - - } else { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - DEFAULT_HB_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } - - if (hbProcessorInterval.isPresent()) { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - hbProcessorInterval.get(), - TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - DEFAULT_HB_PROCESSOR_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } - } - - - private void configureOM() { - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); - } - - private void configureHddsDatanodes() { - conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - randomContainerPort); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - randomContainerPort); - } - - private void configureTrace() { - if (enableTrace.isPresent()) { - conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, - enableTrace.get()); - GenericTestUtils.setRootLogLevel(Level.TRACE); - } - GenericTestUtils.setRootLogLevel(Level.INFO); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java deleted file mode 100644 index 006d85458e5..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.BindException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * MiniOzoneHAClusterImpl creates a complete in-process Ozone cluster - * with OM HA suitable for running tests. The cluster consists of a set of - * OzoneManagers, StorageContainerManager and multiple DataNodes. - */ -public final class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl { - - private static final Logger LOG = - LoggerFactory.getLogger(MiniOzoneHAClusterImpl.class); - - private Map ozoneManagerMap; - private List ozoneManagers; - private String omServiceId; - - // Active OMs denote OMs which are up and running - private List activeOMs; - private List inactiveOMs; - - private static final Random RANDOM = new Random(); - private static final int RATIS_LEADER_ELECTION_TIMEOUT = 1000; // 1 seconds - - public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds - - /** - * Creates a new MiniOzoneCluster with OM HA. - * - * @throws IOException if there is an I/O error - */ - - private MiniOzoneHAClusterImpl( - OzoneConfiguration conf, - Map omMap, - List activeOMList, - List inactiveOMList, - StorageContainerManager scm, - List hddsDatanodes, - String omServiceId) { - super(conf, scm, hddsDatanodes); - this.ozoneManagerMap = omMap; - this.ozoneManagers = new ArrayList<>(omMap.values()); - this.activeOMs = activeOMList; - this.inactiveOMs = inactiveOMList; - this.omServiceId = omServiceId; - } - - @Override - public String getServiceId() { - return omServiceId; - } - - /** - * Returns the first OzoneManager from the list. - * @return - */ - @Override - public OzoneManager getOzoneManager() { - return this.ozoneManagers.get(0); - } - - @Override - public OzoneClient getRpcClient() throws IOException { - return OzoneClientFactory.getRpcClient(getServiceId(), getConf()); - } - - public boolean isOMActive(String omNodeId) { - return activeOMs.contains(ozoneManagerMap.get(omNodeId)); - } - - public OzoneManager getOzoneManager(int index) { - return this.ozoneManagers.get(index); - } - - public OzoneManager getOzoneManager(String omNodeId) { - return this.ozoneManagerMap.get(omNodeId); - } - - /** - * Start a previously inactive OM. 
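A usage sketch, assuming a cluster built with three OMs of which two are active (the om-service-test service id is hypothetical; node ids follow the omNode-N pattern used by the builder below, and checked exceptions are omitted):

    MiniOzoneCluster cluster = MiniOzoneCluster.newHABuilder(conf)
        .setOMServiceId("om-service-test")
        .setNumOfOzoneManagers(3)
        .setNumOfActiveOMs(2)
        .build();
    MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
    haCluster.stopOzoneManager("omNode-1");   // simulate an active OM going down
    haCluster.startInactiveOM("omNode-3");    // bring up the OM that was initialized but left inactive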
- */ - public void startInactiveOM(String omNodeID) throws IOException { - OzoneManager ozoneManager = ozoneManagerMap.get(omNodeID); - if (!inactiveOMs.contains(ozoneManager)) { - throw new IOException("OM is already active."); - } else { - ozoneManager.start(); - activeOMs.add(ozoneManager); - inactiveOMs.remove(ozoneManager); - } - } - - @Override - public void restartOzoneManager() throws IOException { - for (OzoneManager ozoneManager : ozoneManagers) { - ozoneManager.stop(); - ozoneManager.restart(); - } - } - - @Override - public void stop() { - for (OzoneManager ozoneManager : ozoneManagers) { - if (ozoneManager != null) { - LOG.info("Stopping the OzoneManager " + ozoneManager.getOMNodeId()); - ozoneManager.stop(); - ozoneManager.join(); - } - } - super.stop(); - } - - public void stopOzoneManager(int index) { - ozoneManagers.get(index).stop(); - } - - public void stopOzoneManager(String omNodeId) { - ozoneManagerMap.get(omNodeId).stop(); - } - - /** - * Builder for configuring the MiniOzoneCluster to run. - */ - public static class Builder extends MiniOzoneClusterImpl.Builder { - - private final String nodeIdBaseStr = "omNode-"; - private List activeOMs = new ArrayList<>(); - private List inactiveOMs = new ArrayList<>(); - - /** - * Creates a new Builder. - * - * @param conf configuration - */ - public Builder(OzoneConfiguration conf) { - super(conf); - } - - @Override - public MiniOzoneCluster build() throws IOException { - if (numOfActiveOMs > numOfOMs) { - throw new IllegalArgumentException("Number of active OMs cannot be " + - "more than the total number of OMs"); - } - - // If num of ActiveOMs is not set, set it to numOfOMs. - if (numOfActiveOMs == ACTIVE_OMS_NOT_SET) { - numOfActiveOMs = numOfOMs; - } - DefaultMetricsSystem.setMiniClusterMode(true); - initializeConfiguration(); - StorageContainerManager scm; - Map omMap; - try { - scm = createSCM(); - scm.start(); - omMap = createOMService(); - } catch (AuthenticationException ex) { - throw new IOException("Unable to build MiniOzoneCluster. ", ex); - } - - final List hddsDatanodes = createHddsDatanodes(scm); - MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl( - conf, omMap, activeOMs, inactiveOMs, scm, hddsDatanodes, omServiceId); - if (startDataNodes) { - cluster.startHddsDatanodes(); - } - return cluster; - } - - /** - * Initialize OM configurations. - * @throws IOException - */ - @Override - void initializeConfiguration() throws IOException { - super.initializeConfiguration(); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers); - conf.setTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - RATIS_LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - conf.setTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY, - NODE_FAILURE_TIMEOUT, TimeUnit.MILLISECONDS); - conf.setInt(OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, - 10); - } - - /** - * Start OM service with multiple OMs. 
- * @return list of OzoneManagers - * @throws IOException - * @throws AuthenticationException - */ - private Map createOMService() throws IOException, - AuthenticationException { - - Map omMap = new HashMap<>(); - - int retryCount = 0; - int basePort = 10000; - - while (true) { - try { - basePort = 10000 + RANDOM.nextInt(1000) * 4; - initHAConfig(basePort); - - for (int i = 1; i<= numOfOMs; i++) { - // Set nodeId - String nodeId = nodeIdBaseStr + i; - OzoneConfiguration config = new OzoneConfiguration(conf); - config.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId); - // Set the OM http(s) address to null so that the cluster picks - // up the address set with service ID and node ID in initHAConfig - config.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, ""); - config.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, ""); - - // Set metadata/DB dir base path - String metaDirPath = path + "/" + nodeId; - config.set(OZONE_METADATA_DIRS, metaDirPath); - OMStorage omStore = new OMStorage(config); - initializeOmStorage(omStore); - - OzoneManager om = OzoneManager.createOm(config); - om.setCertClient(certClient); - omMap.put(nodeId, om); - - if (i <= numOfActiveOMs) { - om.start(); - activeOMs.add(om); - LOG.info("Started OzoneManager RPC server at " + - om.getOmRpcServerAddr()); - } else { - inactiveOMs.add(om); - LOG.info("Intialized OzoneManager at " + om.getOmRpcServerAddr() - + ". This OM is currently inactive (not running)."); - } - } - - // Set default OM address to point to the first OM. Clients would - // try connecting to this address by default - conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - NetUtils.getHostPortString(omMap.get(nodeIdBaseStr + 1) - .getOmRpcServerAddr())); - - break; - } catch (BindException e) { - for (OzoneManager om : omMap.values()) { - om.stop(); - om.join(); - LOG.info("Stopping OzoneManager server at " + - om.getOmRpcServerAddr()); - } - omMap.clear(); - ++retryCount; - LOG.info("MiniOzoneHACluster port conflicts, retried " + - retryCount + " times"); - } - } - return omMap; - } - - /** - * Initialize HA related configurations. 
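Concretely, for a hypothetical service id of om-service-test, three OMs and a basePort of 10000, the method below would publish omNode-1 at 127.0.0.1:10000 (HTTP 10002, HTTPS 10003, Ratis 10004), omNode-2 at 127.0.0.1:10006 and omNode-3 at 127.0.0.1:10012; each node's port block is offset by 6 from the previous one.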
- */ - private void initHAConfig(int basePort) throws IOException { - // Set configurations required for starting OM HA service - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId); - String omNodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); - StringBuilder omNodesKeyValue = new StringBuilder(); - - int port = basePort; - - for (int i = 1; i <= numOfOMs; i++, port+=6) { - String omNodeId = nodeIdBaseStr + i; - omNodesKeyValue.append(",").append(omNodeId); - String omAddrKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpAddrKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpsAddrKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId); - String omRatisPortKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); - - conf.set(omAddrKey, "127.0.0.1:" + port); - conf.set(omHttpAddrKey, "127.0.0.1:" + (port + 2)); - conf.set(omHttpsAddrKey, "127.0.0.1:" + (port + 3)); - conf.setInt(omRatisPortKey, port + 4); - } - - conf.set(omNodesKey, omNodesKeyValue.substring(1)); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java deleted file mode 100644 index 6ced6d64fe3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java +++ /dev/null @@ -1,268 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.chaos.TestProbability; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * A Simple Load generator for testing. - */ -public class MiniOzoneLoadGenerator { - - private static final Logger LOG = - LoggerFactory.getLogger(MiniOzoneLoadGenerator.class); - - private static String keyNameDelimiter = "_"; - - private ThreadPoolExecutor writeExecutor; - private int numWriteThreads; - // number of buffer to be allocated, each is allocated with length which - // is multiple of 2, each buffer is populated with random data. - private int numBuffers; - private List buffers; - - private AtomicBoolean isWriteThreadRunning; - - private final List ozoneBuckets; - - private final AtomicInteger agedFileWrittenIndex; - private final ExecutorService agedFileExecutor; - private final OzoneBucket agedLoadBucket; - private final TestProbability agedWriteProbability; - - MiniOzoneLoadGenerator(List bucket, - OzoneBucket agedLoadBucket, int numThreads, - int numBuffers) { - this.ozoneBuckets = bucket; - this.numWriteThreads = numThreads; - this.numBuffers = numBuffers; - this.writeExecutor = new ThreadPoolExecutor(numThreads, numThreads, 100, - TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024), - new ThreadPoolExecutor.CallerRunsPolicy()); - this.writeExecutor.prestartAllCoreThreads(); - - this.agedFileWrittenIndex = new AtomicInteger(0); - this.agedFileExecutor = Executors.newSingleThreadExecutor(); - this.agedLoadBucket = agedLoadBucket; - this.agedWriteProbability = TestProbability.valueOf(10); - - this.isWriteThreadRunning = new AtomicBoolean(false); - - // allocate buffers and populate random data. - buffers = new ArrayList<>(); - for (int i = 0; i < numBuffers; i++) { - int size = (int) StorageUnit.KB.toBytes(1 << i); - ByteBuffer buffer = ByteBuffer.allocate(size); - buffer.put(RandomUtils.nextBytes(size)); - buffers.add(buffer); - } - } - - // Start IO load on an Ozone bucket. 
- private void load(long runTimeMillis) { - long threadID = Thread.currentThread().getId(); - LOG.info("Started IO Thread:{}.", threadID); - String threadName = Thread.currentThread().getName(); - long startTime = Time.monotonicNow(); - - while (isWriteThreadRunning.get() && - (Time.monotonicNow() < startTime + runTimeMillis)) { - OzoneBucket bucket = - ozoneBuckets.get((int) (Math.random() * ozoneBuckets.size())); - try { - int index = RandomUtils.nextInt(); - String keyName = writeData(index, bucket, threadName); - - readData(bucket, keyName, index); - - deleteKey(bucket, keyName); - } catch (Exception e) { - LOG.error("LOADGEN: Exiting due to exception", e); - break; - } - } - // This will terminate other threads too. - isWriteThreadRunning.set(false); - LOG.info("Terminating IO thread:{}.", threadID); - } - - - private String writeData(int keyIndex, OzoneBucket bucket, String threadName) - throws Exception { - // choose a random buffer. - ByteBuffer buffer = buffers.get(keyIndex % numBuffers); - int bufferCapacity = buffer.capacity(); - - String keyName = getKeyName(keyIndex, threadName); - LOG.trace("LOADGEN: Writing key {}", keyName); - try (OzoneOutputStream stream = bucket.createKey(keyName, - bufferCapacity, ReplicationType.RATIS, ReplicationFactor.THREE, - new HashMap<>())) { - stream.write(buffer.array()); - LOG.trace("LOADGEN: Written key {}", keyName); - } catch (Throwable t) { - LOG.error("LOADGEN: Create key:{} failed with exception, skipping", - keyName, t); - throw t; - } - - return keyName; - } - - private void readData(OzoneBucket bucket, String keyName, int index) - throws Exception { - LOG.trace("LOADGEN: Reading key {}", keyName); - - ByteBuffer buffer = buffers.get(index % numBuffers); - int bufferCapacity = buffer.capacity(); - - try (OzoneInputStream stream = bucket.readKey(keyName)) { - byte[] readBuffer = new byte[bufferCapacity]; - int readLen = stream.read(readBuffer); - - if (readLen < bufferCapacity) { - throw new IOException("Read mismatch, key:" + keyName + - " read data length:" + readLen + - " is smaller than excepted:" + bufferCapacity); - } - - if (!Arrays.equals(readBuffer, buffer.array())) { - throw new IOException("Read mismatch, key:" + keyName + - " read data does not match the written data"); - } - LOG.trace("LOADGEN: Read key {}", keyName); - } catch (Throwable t) { - LOG.error("LOADGEN: Read key:{} failed with exception", keyName, t); - throw t; - } - } - - private void deleteKey(OzoneBucket bucket, String keyName) throws Exception { - LOG.trace("LOADGEN: Deleting key {}", keyName); - try { - bucket.deleteKey(keyName); - LOG.trace("LOADGEN: Deleted key {}", keyName); - } catch (Throwable t) { - LOG.error("LOADGEN: Unable to delete key:{}", keyName, t); - throw t; - } - } - - private Optional randomKeyToRead() { - int currentIndex = agedFileWrittenIndex.get(); - return currentIndex != 0 - ? 
Optional.of(RandomUtils.nextInt(0, currentIndex)) - : Optional.empty(); - } - - private void startAgedFilesLoad(long runTimeMillis) { - long threadID = Thread.currentThread().getId(); - LOG.info("AGED LOADGEN: Started Aged IO Thread:{}.", threadID); - String threadName = Thread.currentThread().getName(); - long startTime = Time.monotonicNow(); - - while (isWriteThreadRunning.get() && - (Time.monotonicNow() < startTime + runTimeMillis)) { - - String keyName = null; - try { - if (agedWriteProbability.isTrue()) { - keyName = writeData(agedFileWrittenIndex.getAndIncrement(), - agedLoadBucket, threadName); - } else { - Optional index = randomKeyToRead(); - if (index.isPresent()) { - keyName = getKeyName(index.get(), threadName); - readData(agedLoadBucket, keyName, index.get()); - } - } - } catch (Throwable t) { - LOG.error("AGED LOADGEN: {} Exiting due to exception", keyName, t); - break; - } - } - // This will terminate other threads too. - isWriteThreadRunning.set(false); - LOG.info("Terminating IO thread:{}.", threadID); - } - - void startIO(long time, TimeUnit timeUnit) { - List> writeFutures = new ArrayList<>(); - LOG.info("Starting MiniOzoneLoadGenerator for time {}:{} with {} buffers " + - "and {} threads", time, timeUnit, numBuffers, numWriteThreads); - if (isWriteThreadRunning.compareAndSet(false, true)) { - // Start the IO thread - for (int i = 0; i < numWriteThreads; i++) { - writeFutures.add( - CompletableFuture.runAsync(() -> load(timeUnit.toMillis(time)), - writeExecutor)); - } - - writeFutures.add(CompletableFuture.runAsync(() -> - startAgedFilesLoad(timeUnit.toMillis(time)), agedFileExecutor)); - - // Wait for IO to complete - for (CompletableFuture f : writeFutures) { - try { - f.get(); - } catch (Throwable t) { - LOG.error("startIO failed with exception", t); - } - } - } - } - - public void shutdownLoadGenerator() { - try { - writeExecutor.shutdown(); - writeExecutor.awaitTermination(1, TimeUnit.DAYS); - } catch (Exception e) { - LOG.error("error while closing ", e); - } - } - - private static String getKeyName(int keyIndex, String threadName) { - return threadName + keyNameDelimiter + keyIndex; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java deleted file mode 100644 index 2023e0e4cef..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.test.LambdaTestUtils.VoidCallable; - -import org.apache.ratis.util.function.CheckedConsumer; -import org.junit.Assert; - -/** - * Helper class for Tests. - */ -public final class OzoneTestUtils { - /** - * Never Constructed. - */ - private OzoneTestUtils() { - } - - /** - * Close containers which contain the blocks listed in - * omKeyLocationInfoGroups. - * - * @param omKeyLocationInfoGroups locationInfos for a key. - * @param scm StorageContainerManager instance. - * @return true if close containers is successful. - * @throws IOException - */ - public static void closeContainers( - List omKeyLocationInfoGroups, - StorageContainerManager scm) throws Exception { - performOperationOnKeyContainers((blockID) -> { - if (scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) - .getState() == HddsProtos.LifeCycleState.OPEN) { - scm.getContainerManager() - .updateContainerState(ContainerID.valueof(blockID.getContainerID()), - HddsProtos.LifeCycleEvent.FINALIZE); - } - if (scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) - .getState() == HddsProtos.LifeCycleState.CLOSING) { - scm.getContainerManager() - .updateContainerState(ContainerID.valueof(blockID.getContainerID()), - HddsProtos.LifeCycleEvent.CLOSE); - } - Assert.assertFalse(scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) - .isOpen()); - }, omKeyLocationInfoGroups); - } - - /** - * Performs the provided consumer on containers which contain the blocks - * listed in omKeyLocationInfoGroups. - * - * @param consumer Consumer which accepts BlockID as argument. - * @param omKeyLocationInfoGroups locationInfos for a key. 
- * @throws IOException - */ - public static void performOperationOnKeyContainers( - CheckedConsumer consumer, - List omKeyLocationInfoGroups) throws Exception { - - for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : - omKeyLocationInfoGroups) { - List omKeyLocationInfos = - omKeyLocationInfoGroup.getLocationList(); - for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) { - BlockID blockID = omKeyLocationInfo.getBlockID(); - consumer.accept(blockID); - } - } - } - - public static void expectOmException( - OMException.ResultCodes code, - VoidCallable eval) - throws Exception { - try { - eval.call(); - Assert.fail("OMException is expected"); - } catch (OMException ex) { - Assert.assertEquals(code, ex.getResult()); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java deleted file mode 100644 index 4e127a39213..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rpc.RpcClient; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Helpers for Ratis tests. - */ -public interface RatisTestHelper { - Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); - - /** For testing Ozone with Ratis. */ - class RatisTestSuite implements Closeable { - static final RpcType RPC = SupportedRpcType.GRPC; - static final int NUM_DATANODES = 3; - - private final OzoneConfiguration conf; - private final MiniOzoneCluster cluster; - - /** - * Create a {@link MiniOzoneCluster} for testing by setting. 
- * OZONE_ENABLED = true - * RATIS_ENABLED = true - */ - public RatisTestSuite() - throws IOException, TimeoutException, InterruptedException { - conf = newOzoneConfiguration(RPC); - - cluster = newMiniOzoneCluster(NUM_DATANODES, conf); - } - - public OzoneConfiguration getConf() { - return conf; - } - - public MiniOzoneCluster getCluster() { - return cluster; - } - - public ClientProtocol newOzoneClient() - throws IOException { - return new RpcClient(conf, null); - } - - @Override - public void close() { - cluster.shutdown(); - } - - public int getDatanodeOzoneRestPort() { - return cluster.getHddsDatanodes().get(0).getDatanodeDetails() - .getPort(DatanodeDetails.Port.Name.REST).getValue(); - } - } - - static OzoneConfiguration newOzoneConfiguration(RpcType rpc) { - final OzoneConfiguration conf = new OzoneConfiguration(); - initRatisConf(rpc, conf); - return conf; - } - - static void initRatisConf(RpcType rpc, Configuration conf) { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - LOG.info(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY - + " = " + rpc.name()); - } - - static MiniOzoneCluster newMiniOzoneCluster( - int numDatanodes, OzoneConfiguration conf) - throws IOException, TimeoutException, InterruptedException { - final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(1000) - .setNumDatanodes(numDatanodes).build(); - cluster.waitForClusterToBeReady(); - return cluster; - } - - static void initXceiverServerRatis( - RpcType rpc, DatanodeDetails dd, Pipeline pipeline) throws IOException { - final RaftPeer p = RatisHelper.toRaftPeer(dd); - final OzoneConfiguration conf = new OzoneConfiguration(); - final int maxOutstandingRequests = - HddsClientUtils.getMaxOutstandingRequests(conf); - final TimeDuration requestTimeout = - RatisHelper.getClientRequestTimeout(conf); - final RaftClient client = - newRaftClient(rpc, p, RatisHelper.createRetryPolicy(conf), - maxOutstandingRequests, requestTimeout); - client.groupAdd(RatisHelper.newRaftGroup(pipeline), p.getId()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java deleted file mode 100644 index 129cf0488e3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/** - * This class tests container operations (TODO currently only supports create) - * from cblock clients. - */ -public class TestContainerOperations { - - private static ScmClient storageClient; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConf; - - @BeforeClass - public static void setup() throws Exception { - int containerSizeGB = 5; - ContainerOperationClient.setContainerSizeB( - containerSizeGB * OzoneConsts.GB); - ozoneConf = new OzoneConfiguration(); - ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(1).build(); - StorageContainerLocationProtocolClientSideTranslatorPB client = - cluster.getStorageContainerLocationClient(); - RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - storageClient = new ContainerOperationClient( - client, new XceiverClientManager(ozoneConf)); - cluster.waitForClusterToBeReady(); - } - - @AfterClass - public static void cleanup() throws Exception { - if(cluster != null) { - cluster.shutdown(); - } - } - - /** - * A simple test to create a container with {@link ContainerOperationClient}. 
- * @throws Exception - */ - @Test - public void testCreate() throws Exception { - ContainerWithPipeline container = storageClient.createContainer(HddsProtos - .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor - .ONE, "OZONE"); - assertEquals(container.getContainerInfo().getContainerID(), storageClient - .getContainer(container.getContainerInfo().getContainerID()) - .getContainerID()); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java deleted file mode 100644 index 2d2d028884a..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.container.placement.algorithms. - ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms. - SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.protocolPB. - StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; - -/** - * Tests the idempotent operations in ContainerStateMachine. - */ -public class TestContainerStateMachineIdempotency { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - - @BeforeClass - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - cluster = - MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - xceiverClientManager = new XceiverClientManager(ozoneConfig); - } - - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.cleanupWithLogger(null, storageContainerLocationClient); - } - - @Test - public void testContainerStateMachineIdempotency() throws Exception { - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, containerOwner); - long containerID = container.getContainerInfo().getContainerID(); - Pipeline pipeline = container.getPipeline(); - XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); - try { - //create the container - ContainerProtocolCalls.createContainer(client, containerID, null); - // call create Container again - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - 
ContainerTestHelper - .getWriteChunkRequest(container.getPipeline(), blockID, - data.length); - client.sendCommand(writeChunkRequest); - - //Make the write chunk request again without requesting for overWrite - client.sendCommand(writeChunkRequest); - // Now, explicitly make a putKey request for the block. - ContainerProtos.ContainerCommandRequestProto putKeyRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - client.sendCommand(putKeyRequest).getPutBlock(); - // send the putBlock again - client.sendCommand(putKeyRequest); - - // close container call - ContainerProtocolCalls.closeContainer(client, containerID, null); - ContainerProtocolCalls.closeContainer(client, containerID, null); - } catch (IOException ioe) { - Assert.fail("Container operation failed" + ioe); - } - xceiverClientManager.releaseClient(client, false); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java deleted file mode 100644 index e27aa857258..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.HashMap; -import java.util.Scanner; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; - -import org.apache.commons.lang3.RandomStringUtils; - -/** - * Utility to help to generate test data. 
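A minimal round trip with these helpers, assuming an already running MiniOzoneCluster referenced as cluster (hypothetical test body):

    OzoneBucket bucket =
        TestDataUtil.createVolumeAndBucket(cluster, "vol1", "bucket1");
    TestDataUtil.createKey(bucket, "key1", "sample content");
    assertEquals("sample content", TestDataUtil.getKey(bucket, "key1"));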
- */ -public final class TestDataUtil { - - private TestDataUtil() { - } - - public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster, - String volumeName, String bucketName) throws IOException { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - - OzoneClient client = cluster.getClient(); - - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .setAdmin(adminName) - .setOwner(userName) - .build(); - - ObjectStore objectStore = client.getObjectStore(); - - objectStore.createVolume(volumeName, volumeArgs); - - OzoneVolume volume = objectStore.getVolume(volumeName); - - BucketArgs omBucketArgs = BucketArgs.newBuilder() - .setStorageType(StorageType.DISK) - .build(); - - volume.createBucket(bucketName, omBucketArgs); - return volume.getBucket(bucketName); - - } - - public static void createKey(OzoneBucket bucket, String keyName, - String content) throws IOException { - try (OutputStream stream = bucket - .createKey(keyName, content.length(), ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>())) { - stream.write(content.getBytes()); - } - } - - public static String getKey(OzoneBucket bucket, String keyName) - throws IOException { - try (InputStream stream = bucket.readKey(keyName)) { - return new Scanner(stream).useDelimiter("\\A").next(); - } - } - - public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster) - throws IOException { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - return createVolumeAndBucket(cluster, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java deleted file mode 100644 index bb66474ed4c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java +++ /dev/null @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -/** - * Test Read Write with Mini Ozone Chaos Cluster. 
- */ -@Command(description = "Starts IO with MiniOzoneChaosCluster", - name = "chaos", mixinStandardHelpOptions = true) -public class TestMiniChaosOzoneCluster implements Runnable { - - @Option(names = {"-d", "--numDatanodes"}, - description = "num of datanodes") - private static int numDatanodes = 20; - - @Option(names = {"-t", "--numThreads"}, - description = "num of IO threads") - private static int numThreads = 10; - - @Option(names = {"-b", "--numBuffers"}, - description = "num of IO buffers") - private static int numBuffers = 16; - - @Option(names = {"-m", "--numMinutes"}, - description = "total run time") - private static int numMinutes = 1440; // 1 day by default - - @Option(names = {"-n", "--numClients"}, - description = "no of clients writing to OM") - private static int numClients = 3; - - @Option(names = {"-i", "--failureInterval"}, - description = "time between failure events in seconds") - private static int failureInterval = 300; // 5 second period between failures. - - private static MiniOzoneChaosCluster cluster; - private static MiniOzoneLoadGenerator loadGenerator; - - @BeforeClass - public static void init() throws Exception { - cluster = new MiniOzoneChaosCluster.Builder(new OzoneConfiguration()) - .setNumDatanodes(numDatanodes).build(); - cluster.waitForClusterToBeReady(); - - String volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); - String bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); - ObjectStore store = cluster.getRpcClient().getObjectStore(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - List ozoneBuckets = new ArrayList<>(numClients); - for (int i = 0; i < numClients; i++) { - ozoneBuckets.add(volume.getBucket(bucketName)); - } - - String agedBucketName = - RandomStringUtils.randomAlphabetic(10).toLowerCase(); - - volume.createBucket(agedBucketName); - OzoneBucket agedLoadBucket = volume.getBucket(agedBucketName); - loadGenerator = - new MiniOzoneLoadGenerator(ozoneBuckets, agedLoadBucket, numThreads, - numBuffers); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (loadGenerator != null) { - loadGenerator.shutdownLoadGenerator(); - } - - if (cluster != null) { - cluster.shutdown(); - } - } - - public void run() { - try { - init(); - cluster.startChaos(failureInterval, failureInterval, TimeUnit.SECONDS); - loadGenerator.startIO(numMinutes, TimeUnit.MINUTES); - } catch (Exception e) { - } finally { - shutdown(); - } - } - - public static void main(String... args) { - CommandLine.run(new TestMiniChaosOzoneCluster(), System.err, args); - } - - @Test - public void testReadWriteWithChaosCluster() { - cluster.startChaos(5, 10, TimeUnit.SECONDS); - loadGenerator.startIO(1, TimeUnit.MINUTES); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java deleted file mode 100644 index efc2736af71..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ /dev/null @@ -1,326 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; -import org.apache.hadoop.test.PathUtils; -import org.apache.hadoop.test.TestGenericTestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.yaml.snakeyaml.Yaml; - -/** - * Test cases for mini ozone cluster. 
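All of the cases below share the same cluster lifecycle; a condensed sketch of that pattern (builder options vary per test):

    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();   // block until SCM sees the datanodes
    try {
      // ... exercise datanodes, pipelines, or the SCM ...
    } finally {
      cluster.shutdown();
    }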
- */ -public class TestMiniOzoneCluster { - - private MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - - private final static File TEST_ROOT = TestGenericTestUtils.getTestDir(); - private final static File WRITE_TMP = new File(TEST_ROOT, "write"); - private final static File READ_TMP = new File(TEST_ROOT, "read"); - - @BeforeClass - public static void setup() { - conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, TEST_ROOT.toString()); - conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - WRITE_TMP.mkdirs(); - READ_TMP.mkdirs(); - } - - @After - public void cleanup() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @AfterClass - public static void afterClass() { - FileUtils.deleteQuietly(WRITE_TMP); - FileUtils.deleteQuietly(READ_TMP); - } - - @Test(timeout = 30000) - public void testStartMultipleDatanodes() throws Exception { - final int numberOfNodes = 3; - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numberOfNodes) - .build(); - cluster.waitForClusterToBeReady(); - List datanodes = cluster.getHddsDatanodes(); - assertEquals(numberOfNodes, datanodes.size()); - for(HddsDatanodeService dn : datanodes) { - // Create a single member pipe line - List dns = new ArrayList<>(); - dns.add(dn.getDatanodeDetails()); - Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setNodes(dns) - .build(); - - // Verify client is able to connect to the container - try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)){ - client.connect(); - assertTrue(client.isConnected(pipeline.getFirstNode())); - } - } - } - - @Test - public void testDatanodeIDPersistent() throws Exception { - // Generate IDs for testing - DatanodeDetails id1 = TestUtils.randomDatanodeDetails(); - DatanodeDetails id2 = TestUtils.randomDatanodeDetails(); - DatanodeDetails id3 = TestUtils.randomDatanodeDetails(); - id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1)); - id2.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 2)); - id3.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 3)); - - // Add certificate serial id. 
- String certSerialId = "" + RandomUtils.nextLong(); - id1.setCertSerialId(certSerialId); - - // Write a single ID to the file and read it out - File validIdsFile = new File(WRITE_TMP, "valid-values.id"); - validIdsFile.delete(); - ContainerUtils.writeDatanodeDetailsTo(id1, validIdsFile); - // Validate using yaml parser - Yaml yaml = new Yaml(); - try { - yaml.load(new FileReader(validIdsFile)); - } catch (Exception e) { - Assert.fail("Failed parsing datanode id yaml."); - } - DatanodeDetails validId = ContainerUtils.readDatanodeDetailsFrom( - validIdsFile); - - assertEquals(validId.getCertSerialId(), certSerialId); - assertEquals(id1, validId); - assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage()); - - // Read should return an empty value if file doesn't exist - File nonExistFile = new File(READ_TMP, "non_exist.id"); - nonExistFile.delete(); - try { - ContainerUtils.readDatanodeDetailsFrom(nonExistFile); - Assert.fail(); - } catch (Exception e) { - assertTrue(e instanceof IOException); - } - - // Read should fail if the file is malformed - File malformedFile = new File(READ_TMP, "malformed.id"); - createMalformedIDFile(malformedFile); - try { - ContainerUtils.readDatanodeDetailsFrom(malformedFile); - fail("Read a malformed ID file should fail"); - } catch (Exception e) { - assertTrue(e instanceof IOException); - } - - // Test upgrade scenario - protobuf file instead of yaml - File protoFile = new File(WRITE_TMP, "valid-proto.id"); - try (FileOutputStream out = new FileOutputStream(protoFile)) { - HddsProtos.DatanodeDetailsProto proto = id1.getProtoBufMessage(); - proto.writeTo(out); - } - validId = ContainerUtils.readDatanodeDetailsFrom(protoFile); - assertEquals(validId.getCertSerialId(), certSerialId); - assertEquals(id1, validId); - assertEquals(id1.getProtoBufMessage(), validId.getProtoBufMessage()); - } - - @Test - public void testContainerRandomPort() throws IOException { - Configuration ozoneConf = SCMTestUtils.getConf(); - File testDir = PathUtils.getTestDir(TestOzoneContainer.class); - ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); - ozoneConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - TEST_ROOT.toString()); - - // Each instance of SM will create an ozone container - // that bounds to a random port. 
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - true); - List stateMachines = new ArrayList<>(); - try { - - for (int i = 0; i < 3; i++) { - stateMachines.add(new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), ozoneConf, null, null)); - } - - //we need to start all the servers to get the fix ports - for (DatanodeStateMachine dsm : stateMachines) { - dsm.getContainer().getReadChannel().start(); - dsm.getContainer().getWriteChannel().start(); - - } - - for (DatanodeStateMachine dsm : stateMachines) { - dsm.getContainer().getWriteChannel().stop(); - dsm.getContainer().getReadChannel().stop(); - - } - - //after the start the real port numbers should be available AND unique - HashSet ports = new HashSet(); - for (DatanodeStateMachine dsm : stateMachines) { - int readPort = dsm.getContainer().getReadChannel().getIPCPort(); - - assertNotEquals("Port number of the service is not updated", 0, - readPort); - - assertTrue("Port of datanode service is conflicted with other server.", - ports.add(readPort)); - - int writePort = dsm.getContainer().getWriteChannel().getIPCPort(); - - assertNotEquals("Port number of the service is not updated", 0, - writePort); - assertTrue("Port of datanode service is conflicted with other server.", - ports.add(writePort)); - } - - } finally { - for (DatanodeStateMachine dsm : stateMachines) { - dsm.close(); - } - } - - // Turn off the random port flag and test again - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - try ( - DatanodeStateMachine sm1 = new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), ozoneConf, null, null); - DatanodeStateMachine sm2 = new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), ozoneConf, null, null); - DatanodeStateMachine sm3 = new DatanodeStateMachine( - TestUtils.randomDatanodeDetails(), ozoneConf, null, null); - ) { - HashSet ports = new HashSet(); - assertTrue(ports.add(sm1.getContainer().getReadChannel().getIPCPort())); - assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort())); - assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort())); - assertEquals(ports.iterator().next().intValue(), - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT)); - } - } - - private void createMalformedIDFile(File malformedFile) - throws IOException{ - malformedFile.delete(); - DatanodeDetails id = TestUtils.randomDatanodeDetails(); - ContainerUtils.writeDatanodeDetailsTo(id, malformedFile); - - FileOutputStream out = new FileOutputStream(malformedFile); - out.write("malformed".getBytes()); - out.close(); - } - - /** - * Test that a DN can register with SCM even if it was started before the SCM. - * @throws Exception - */ - @Test (timeout = 300_000) - public void testDNstartAfterSCM() throws Exception { - // Start a cluster with 1 DN - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - - // Stop the SCM - StorageContainerManager scm = cluster.getStorageContainerManager(); - scm.stop(); - - // Restart DN - cluster.restartHddsDatanode(0, false); - - // DN should be in GETVERSION state till the SCM is restarted. 
- // Check DN endpoint state for 20 seconds - DatanodeStateMachine dnStateMachine = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine(); - for (int i = 0; i < 20; i++) { - for (EndpointStateMachine endpoint : - dnStateMachine.getConnectionManager().getValues()) { - Assert.assertEquals( - EndpointStateMachine.EndPointStates.GETVERSION, - endpoint.getState()); - } - Thread.sleep(1000); - } - - // DN should successfully register with the SCM after SCM is restarted. - // Restart the SCM - cluster.restartStorageContainerManager(true); - // Wait for DN to register - cluster.waitForClusterToBeReady(); - // DN should be in HEARTBEAT state after registering with the SCM - for (EndpointStateMachine endpoint : - dnStateMachine.getConnectionManager().getValues()) { - Assert.assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - endpoint.getState()); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java deleted file mode 100644 index fa0e73d9354..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone; - -import org.apache.hadoop.conf.TestConfigurationFieldsBase; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; -import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys; - -import java.util.Arrays; - -/** - * Tests if configuration constants documented in ozone-defaults.xml. - */ -public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase { - - @Override - public void initializeMemberVariables() { - xmlFilename = "ozone-default.xml"; - configurationClasses = - new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class, - OMConfigKeys.class, HddsConfigKeys.class, - ReconServerConfigKeys.class, - S3GatewayConfigKeys.class - }; - errorIfMissingConfigProps = true; - errorIfMissingXmlProps = true; - xmlPropsToSkipCompare.add("hadoop.tags.custom"); - xmlPropsToSkipCompare.add("ozone.om.nodes.EXAMPLEOMSERVICEID"); - addPropertiesNotInXml(); - } - - private void addPropertiesNotInXml() { - configurationPropsToSkipCompare.addAll(Arrays.asList( - HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, - HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT, - HddsConfigKeys.HDDS_KEY_ALGORITHM, - HddsConfigKeys.HDDS_SECURITY_PROVIDER, - OMConfigKeys.OZONE_OM_NODES_KEY, - OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE, - OzoneConfigKeys.OZONE_S3_TOKEN_MAX_LIFETIME_KEY - )); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java deleted file mode 100644 index ca1f1793458..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ /dev/null @@ -1,896 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivilegedExceptionAction; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.Callable; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.minikdc.MiniKdc; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.ServerSocketUtil; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.KerberosAuthException; -import org.apache.hadoop.security.SaslRpcServer.AuthMethod; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.test.LambdaTestUtils; -import org.bouncycastle.asn1.x500.RDN; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x500.style.BCStyle; -import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; -import java.util.Date; - -import static junit.framework.TestCase.assertNotNull; -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import 
static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.slf4j.event.Level.INFO; - -/** - * Test class to for security enabled Ozone cluster. - */ -@InterfaceAudience.Private -public final class TestSecureOzoneCluster { - - private static final String TEST_USER = "testUgiUser@EXAMPLE.COM"; - private static final String COMPONENT = "test"; - private static final int CLIENT_TIMEOUT = 2 * 1000; - private Logger logger = LoggerFactory - .getLogger(TestSecureOzoneCluster.class); - - @Rule - public Timeout timeout = new Timeout(80000); - - private MiniKdc miniKdc; - private OzoneConfiguration conf; - private File workDir; - private static Properties securityProperties; - private File scmKeytab; - private File spnegoKeytab; - private File omKeyTab; - private File testUserKeytab; - private String curUser; - private String testUserPrincipal; - private UserGroupInformation testKerberosUgi; - private StorageContainerManager scm; - private OzoneManager om; - private String host; - - private static String clusterId; - private static String scmId; - private static String omId; - private OzoneManagerProtocolClientSideTranslatorPB omClient; - private KeyPair keyPair; - private Path metaDirPath; - @Rule - public TemporaryFolder folder= new TemporaryFolder(); - private String omCertSerialId = "9879877970576"; - - @Before - public void init() { - try { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); - - conf.setInt(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY, ServerSocketUtil - .getPort(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, ServerSocketUtil - .getPort(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT, 100)); - conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY, - ServerSocketUtil.getPort(ScmConfigKeys - .OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY, - ServerSocketUtil.getPort(ScmConfigKeys - .OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100)); - - DefaultMetricsSystem.setMiniClusterMode(true); - final String path = folder.newFolder().toString(); - metaDirPath = Paths.get(path, "om-meta"); - conf.set(OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, - KERBEROS.toString()); - - startMiniKdc(); - setSecureConfig(conf); - createCredentialsInKDC(conf, miniKdc); - generateKeyPair(conf); -// OzoneManager.setTestSecureOmFlag(true); - } catch (IOException e) { - logger.error("Failed to initialize TestSecureOzoneCluster", e); - } catch (Exception e) { - 
logger.error("Failed to initialize TestSecureOzoneCluster", e); - } - } - - @After - public void stop() { - try { - stopMiniKdc(); - if (scm != null) { - scm.stop(); - } - if (om != null) { - om.stop(); - } - if (omClient != null) { - omClient.close(); - } - } catch (Exception e) { - logger.error("Failed to stop TestSecureOzoneCluster", e); - } - } - - private void createCredentialsInKDC(Configuration configuration, - MiniKdc kdc) throws Exception { - createPrincipal(scmKeytab, - configuration.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)); - createPrincipal(spnegoKeytab, - configuration.get(ScmConfigKeys - .HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY)); - createPrincipal(testUserKeytab, testUserPrincipal); - createPrincipal(omKeyTab, - configuration.get(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)); - } - - private void createPrincipal(File keytab, String... principal) - throws Exception { - miniKdc.createPrincipal(keytab, principal); - } - - private void startMiniKdc() throws Exception { - workDir = GenericTestUtils - .getTestDir(TestSecureOzoneCluster.class.getSimpleName()); - securityProperties = MiniKdc.createConf(); - miniKdc = new MiniKdc(securityProperties, workDir); - miniKdc.start(); - } - - private void stopMiniKdc() { - miniKdc.stop(); - } - - private void setSecureConfig(Configuration configuration) throws IOException { - configuration.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - configuration.setBoolean(OZONE_ENABLED, true); - host = InetAddress.getLocalHost().getCanonicalHostName() - .toLowerCase(); - String realm = miniKdc.getRealm(); - curUser = UserGroupInformation.getCurrentUser() - .getUserName(); - configuration.set( - CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); - configuration.set(OZONE_ADMINISTRATORS, curUser); - - configuration.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, - "scm/" + host + "@" + realm); - configuration.set(ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, - "HTTP_SCM/" + host + "@" + realm); - - configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, - "om/" + host + "@" + realm); - configuration.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, - "HTTP_OM/" + host + "@" + realm); - - scmKeytab = new File(workDir, "scm.keytab"); - spnegoKeytab = new File(workDir, "http.keytab"); - omKeyTab = new File(workDir, "om.keytab"); - testUserKeytab = new File(workDir, "testuser.keytab"); - testUserPrincipal = "test@" + realm; - - configuration.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, - scmKeytab.getAbsolutePath()); - configuration.set( - ScmConfigKeys.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY, - spnegoKeytab.getAbsolutePath()); - configuration.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, - omKeyTab.getAbsolutePath()); - conf.set(OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE, - spnegoKeytab.getAbsolutePath()); - } - - @Test - public void testSecureScmStartupSuccess() throws Exception { - - initSCM(); - scm = StorageContainerManager.createSCM(conf); - //Reads the SCM Info from SCM instance - ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); - Assert.assertEquals(clusterId, scmInfo.getClusterId()); - Assert.assertEquals(scmId, scmInfo.getScmId()); - } - - @Test - public void testSCMSecurityProtocol() throws Exception { - - initSCM(); - scm = HddsTestUtils.getScm(conf); - //Reads the SCM Info from SCM instance - try { - scm.start(); - - // Case 1: User with Kerberos credentials should succeed. 
- UserGroupInformation ugi = - UserGroupInformation.loginUserFromKeytabAndReturnUGI( - testUserPrincipal, testUserKeytab.getCanonicalPath()); - ugi.setAuthenticationMethod(KERBEROS); - SCMSecurityProtocol scmSecurityProtocolClient = - HddsClientUtils.getScmSecurityClient(conf, ugi); - assertNotNull(scmSecurityProtocolClient); - String caCert = scmSecurityProtocolClient.getCACertificate(); - LambdaTestUtils.intercept(RemoteException.class, "Certificate not found", - () -> scmSecurityProtocolClient.getCertificate("1")); - assertNotNull(caCert); - - // Case 2: User without Kerberos credentials should fail. - ugi = UserGroupInformation.createRemoteUser("test"); - ugi.setAuthenticationMethod(AuthMethod.TOKEN); - SCMSecurityProtocol finalScmSecurityProtocolClient = - HddsClientUtils.getScmSecurityClient(conf, ugi); - - LambdaTestUtils.intercept(IOException.class, "Client cannot" + - " authenticate via:[KERBEROS]", - () -> finalScmSecurityProtocolClient.getCACertificate()); - LambdaTestUtils.intercept(IOException.class, "Client cannot" + - " authenticate via:[KERBEROS]", - () -> finalScmSecurityProtocolClient.getCertificate("1")); - } finally { - if (scm != null) { - scm.stop(); - } - } - } - - private void initSCM() - throws IOException, AuthenticationException { - - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - - final String path = folder.newFolder().toString(); - Path scmPath = Paths.get(path, "scm-meta"); - File temp = scmPath.toFile(); - if(!temp.exists()) { - temp.mkdirs(); - } - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - - @Test - public void testSecureScmStartupFailure() throws Exception { - initSCM(); - conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, ""); - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, - "kerberos"); - - LambdaTestUtils.intercept(IOException.class, - "Running in secure mode, but config doesn't have a keytab", - () -> { - StorageContainerManager.createSCM(conf); - }); - - conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, - "scm/_HOST@EXAMPLE.com"); - conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, - "/etc/security/keytabs/scm.keytab"); - - testCommonKerberosFailures( - () -> StorageContainerManager.createSCM(conf)); - - } - - private void testCommonKerberosFailures(Callable callable) throws Exception { - LambdaTestUtils.intercept(KerberosAuthException.class, "failure " - + "to login: for principal:", callable); - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, - "OAuth2"); - - LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid" - + " attribute value for hadoop.security.authentication of OAuth2", - callable); - - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, - "KERBEROS_SSL"); - LambdaTestUtils.intercept(AuthenticationException.class, - "KERBEROS_SSL authentication method not", - callable); - } - - /** - * Tests the secure om Initialization Failure. 
- * - * @throws IOException - */ - @Test - public void testSecureOMInitializationFailure() throws Exception { - initSCM(); - // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(conf); - setupOm(conf); - conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, - "non-existent-user@EXAMPLE.com"); - testCommonKerberosFailures(() -> OzoneManager.createOm(conf)); - } - - /** - * Tests the secure om Initialization success. - * - * @throws IOException - */ - @Test - public void testSecureOmInitializationSuccess() throws Exception { - initSCM(); - // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(conf); - LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG); - GenericTestUtils.setLogLevel(OzoneManager.LOG, INFO); - - setupOm(conf); - try { - om.start(); - } catch (Exception ex) { - // Expects timeout failure from scmClient in om but om user login via - // kerberos should succeed. - assertTrue(logs.getOutput().contains("Ozone Manager login" - + " successful")); - } - } - - /** - * Performs following tests for delegation token. - * 1. Get valid delegation token - * 2. Test successful token renewal. - * 3. Client can authenticate using token. - * 4. Delegation token renewal without Kerberos auth fails. - * 5. Test success of token cancellation. - * 5. Test failure of token cancellation. - * - * @throws Exception - */ - @Test - public void testDelegationToken() throws Exception { - - // Capture logs for assertions - LogCapturer logs = LogCapturer.captureLogs(Server.AUDITLOG); - LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger()); - GenericTestUtils - .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO); - - // Setup secure OM for start - setupOm(conf); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - try { - // Start OM - om.setCertClient(new CertificateClientTestImpl(conf)); - om.start(); - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - String username = ugi.getUserName(); - - // Get first OM client which will authenticate via Kerberos - omClient = new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - OmUtils.getOmAddress(conf), ugi, conf, - NetUtils.getDefaultSocketFactory(conf), - CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5)); - - // Assert if auth was successful via Kerberos - assertFalse(logs.getOutput().contains( - "Auth successful for " + username + " (auth:KERBEROS)")); - - // Case 1: Test successful delegation token. - Token token = omClient - .getDelegationToken(new Text("om")); - - // Case 2: Test successful token renewal. 
- long renewalTime = omClient.renewDelegationToken(token); - assertTrue(renewalTime > 0); - - // Check if token is of right kind and renewer is running om instance - Assert.assertEquals(token.getKind().toString(), "OzoneToken"); - Assert.assertEquals(token.getService().toString(), - OmUtils.getOmRpcAddress(conf)); - omClient.close(); - - // Create a remote ugi and set its authentication method to Token - UserGroupInformation testUser = UserGroupInformation - .createRemoteUser(TEST_USER); - testUser.addToken(token); - testUser.setAuthenticationMethod(AuthMethod.TOKEN); - UserGroupInformation.setLoginUser(testUser); - - // Get Om client, this time authentication should happen via Token - testUser.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - omClient = new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - OmUtils.getOmAddress(conf), testUser, conf, - NetUtils.getDefaultSocketFactory(conf), CLIENT_TIMEOUT), - RandomStringUtils.randomAscii(5)); - return null; - } - }); - - // Case 3: Test Client can authenticate using token. - assertFalse(logs.getOutput().contains( - "Auth successful for " + username + " (auth:TOKEN)")); - OzoneTestUtils.expectOmException(VOLUME_NOT_FOUND, - () -> omClient.deleteVolume("vol1")); - assertTrue(logs.getOutput().contains("Auth successful for " - + username + " (auth:TOKEN)")); - - // Case 4: Test failure of token renewal. - // Call to renewDelegationToken will fail but it will confirm that - // initial connection via DT succeeded - omLogs.clearOutput(); - - LambdaTestUtils.intercept(OMException.class, "INVALID_AUTH_METHOD", - () -> { - try { - omClient.renewDelegationToken(token); - } catch (OMException ex) { - assertTrue(ex.getResult().equals(INVALID_AUTH_METHOD)); - throw ex; - } - }); - assertTrue(logs.getOutput().contains( - "Auth successful for " + username + " (auth:TOKEN)")); - omLogs.clearOutput(); - //testUser.setAuthenticationMethod(AuthMethod.KERBEROS); - UserGroupInformation.setLoginUser(ugi); - omClient = new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - OmUtils.getOmAddress(conf), ugi, conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5)); - - // Case 5: Test success of token cancellation. - omClient.cancelDelegationToken(token); - omClient.close(); - - // Wait for client to timeout - Thread.sleep(CLIENT_TIMEOUT); - - assertFalse(logs.getOutput().contains("Auth failed for")); - - // Case 6: Test failure of token cancellation. - // Get Om client, this time authentication using Token will fail as - // token is not in cache anymore. 
- omClient = new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - OmUtils.getOmAddress(conf), testUser, conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf)), RandomStringUtils.randomAscii(5)); - LambdaTestUtils.intercept(OMException.class, "Cancel delegation " + - "token failed", - () -> { - try { - omClient.cancelDelegationToken(token); - } catch (OMException ex) { - assertTrue(ex.getResult().equals(TOKEN_ERROR_OTHER)); - throw ex; - } - }); - - assertTrue(logs.getOutput().contains("Auth failed for")); - } finally { - om.stop(); - om.join(); - } - } - - private void generateKeyPair(OzoneConfiguration config) throws Exception { - HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(conf); - keyPair = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(new SecurityConfig(config), COMPONENT); - pemWriter.writeKey(keyPair, true); - } - - /** - * Tests delegation token renewal. - * - * @throws Exception - */ - @Test - public void testDelegationTokenRenewal() throws Exception { - GenericTestUtils - .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO); - LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger()); - - // Setup secure OM for start. - OzoneConfiguration newConf = new OzoneConfiguration(conf); - newConf.setLong(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY, 500); - setupOm(newConf); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - OzoneManager.setTestSecureOmFlag(true); - // Start OM - - try { - om.setCertClient(new CertificateClientTestImpl(conf)); - om.start(); - - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - - // Get first OM client which will authenticate via Kerberos - omClient = new OzoneManagerProtocolClientSideTranslatorPB(RPC.getProxy( - OzoneManagerProtocolPB.class, omVersion, OmUtils.getOmAddress(conf), - ugi, conf, NetUtils.getDefaultSocketFactory(conf), - CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5)); - - // Since client is already connected get a delegation token - Token token = omClient.getDelegationToken( - new Text("om")); - - // Check if token is of right kind and renewer is running om instance - Assert.assertEquals(token.getKind().toString(), "OzoneToken"); - Assert.assertEquals(token.getService().toString(), OmUtils - .getOmRpcAddress(conf)); - - // Renew delegation token - long expiryTime = omClient.renewDelegationToken(token); - assertTrue(expiryTime > 0); - omLogs.clearOutput(); - - // Test failure of delegation renewal - // 1. When token maxExpiryTime exceeds - Thread.sleep(500); - LambdaTestUtils.intercept(OMException.class, - "TOKEN_EXPIRED", - () -> { - try { - omClient.renewDelegationToken(token); - } catch (OMException ex) { - assertTrue(ex.getResult().equals(TOKEN_EXPIRED)); - throw ex; - } - }); - - omLogs.clearOutput(); - - // 2. When renewer doesn't match (implicitly covers when renewer is - // null or empty ) - Token token2 = omClient.getDelegationToken(new Text("randomService")); - LambdaTestUtils.intercept(OMException.class, - "Delegation token renewal failed", - () -> omClient.renewDelegationToken(token2)); - assertTrue(omLogs.getOutput().contains(" with non-matching " + - "renewer randomService")); - omLogs.clearOutput(); - - // 3. 
Test tampered token - OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf( - token.getIdentifier()); - tokenId.setRenewer(new Text("om")); - tokenId.setMaxDate(System.currentTimeMillis() * 2); - Token tamperedToken = new Token<>( - tokenId.getBytes(), token2.getPassword(), token2.getKind(), - token2.getService()); - LambdaTestUtils.intercept(OMException.class, - "Delegation token renewal failed", - () -> omClient.renewDelegationToken(tamperedToken)); - assertTrue(omLogs.getOutput().contains("can't be found in " + - "cache")); - omLogs.clearOutput(); - - } finally { - om.stop(); - om.join(); - } - } - - private void setupOm(OzoneConfiguration config) throws Exception { - OMStorage omStore = new OMStorage(config); - omStore.setClusterId("testClusterId"); - omStore.setScmId("testScmId"); - omStore.setOmCertSerialId(omCertSerialId); - // writes the version file properties - omStore.initialize(); - OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(config); - } - - @Test - public void testGetS3Secret() throws Exception { - - // Setup secure OM for start - setupOm(conf); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); - try { - // Start OM - om.setCertClient(new CertificateClientTestImpl(conf)); - om.start(); - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - String username = ugi.getUserName(); - - // Get first OM client which will authenticate via Kerberos - omClient = new OzoneManagerProtocolClientSideTranslatorPB( - RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, - OmUtils.getOmAddress(conf), ugi, conf, - NetUtils.getDefaultSocketFactory(conf), - CLIENT_TIMEOUT), RandomStringUtils.randomAscii(5)); - - //Creates a secret since it does not exist - S3SecretValue firstAttempt = omClient - .getS3Secret(UserGroupInformation.getCurrentUser().getUserName()); - - //Fetches the secret from db since it was created in previous step - S3SecretValue secondAttempt = omClient - .getS3Secret(UserGroupInformation.getCurrentUser().getUserName()); - - //secret fetched on both attempts must be same - assertTrue(firstAttempt.getAwsSecret() - .equals(secondAttempt.getAwsSecret())); - - //access key fetched on both attempts must be same - assertTrue(firstAttempt.getAwsAccessKey() - .equals(secondAttempt.getAwsAccessKey())); - - - try { - omClient.getS3Secret("HADOOP/JOHNDOE"); - fail("testGetS3Secret failed"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("USER_MISMATCH", ex); - } - } finally { - if(om != null){ - om.stop(); - } - } - } - - /** - * Tests functionality to init secure OM when it is already initialized. - */ - @Test - public void testSecureOmReInit() throws Exception { - LogCapturer omLogs = - LogCapturer.captureLogs(OzoneManager.getLogger()); - omLogs.clearOutput(); - - /** - * As all these processes run inside the same JVM, there are issues around - * the Hadoop UGI if different processes run with different principals. - * In this test, the OM has to contact the SCM to download certs. SCM runs - * as scm/host@REALM, but the OM logs in as om/host@REALM, and then the test - * fails, and the OM is unable to contact the SCM due to kerberos login - * issues. To work around that, have the OM run as the same principal as the - * SCM, and then the test passes. - * - * TODO: Need to look into this further to see if there is a better way to - * address this problem. 
- */ - String realm = miniKdc.getRealm(); - conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY, - "scm/" + host + "@" + realm); - omKeyTab = new File(workDir, "scm.keytab"); - conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, - omKeyTab.getAbsolutePath()); - - initSCM(); - try { - scm = HddsTestUtils.getScm(conf); - scm.start(); - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false); - OMStorage omStore = new OMStorage(conf); - initializeOmStorage(omStore); - OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(conf); - - assertNull(om.getCertificateClient()); - assertFalse(omLogs.getOutput().contains("Init response: GETCERT")); - assertFalse(omLogs.getOutput().contains("Successfully stored " + - "SCM signed certificate")); - - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - OzoneManager.omInit(conf); - om.stop(); - om = OzoneManager.createOm(conf); - - Assert.assertNotNull(om.getCertificateClient()); - Assert.assertNotNull(om.getCertificateClient().getPublicKey()); - Assert.assertNotNull(om.getCertificateClient().getPrivateKey()); - Assert.assertNotNull(om.getCertificateClient().getCertificate()); - assertTrue(omLogs.getOutput().contains("Init response: GETCERT")); - assertTrue(omLogs.getOutput().contains("Successfully stored " + - "SCM signed certificate")); - X509Certificate certificate = om.getCertificateClient().getCertificate(); - validateCertificate(certificate); - - } finally { - if (scm != null) { - scm.stop(); - } - } - - } - - /** - * Test functionality to get SCM signed certificate for OM. - */ - @Test - public void testSecureOmInitSuccess() throws Exception { - LogCapturer omLogs = - LogCapturer.captureLogs(OzoneManager.getLogger()); - omLogs.clearOutput(); - initSCM(); - try { - scm = HddsTestUtils.getScm(conf); - scm.start(); - - OMStorage omStore = new OMStorage(conf); - initializeOmStorage(omStore); - OzoneManager.setTestSecureOmFlag(true); - om = OzoneManager.createOm(conf); - - Assert.assertNotNull(om.getCertificateClient()); - Assert.assertNotNull(om.getCertificateClient().getPublicKey()); - Assert.assertNotNull(om.getCertificateClient().getPrivateKey()); - Assert.assertNotNull(om.getCertificateClient().getCertificate()); - assertTrue(omLogs.getOutput().contains("Init response: GETCERT")); - assertTrue(omLogs.getOutput().contains("Successfully stored " + - "SCM signed certificate")); - X509Certificate certificate = om.getCertificateClient().getCertificate(); - validateCertificate(certificate); - String pemEncodedCACert = - scm.getSecurityProtocolServer().getCACertificate(); - X509Certificate caCert = CertificateCodec.getX509Cert(pemEncodedCACert); - X509Certificate caCertStored = om.getCertificateClient() - .getCertificate(caCert.getSerialNumber().toString()); - assertEquals(caCert, caCertStored); - } finally { - if (scm != null) { - scm.stop(); - } - if (om != null) { - om.stop(); - } - - } - - } - - public void validateCertificate(X509Certificate cert) throws Exception { - - // Assert that we indeed have a self signed certificate. - X500Name x500Issuer = new JcaX509CertificateHolder(cert).getIssuer(); - RDN cn = x500Issuer.getRDNs(BCStyle.CN)[0]; - String hostName = InetAddress.getLocalHost().getHostName(); - String scmUser = "scm@" + hostName; - Assert.assertEquals(scmUser, cn.getFirst().getValue().toString()); - - // Subject name should be om login user in real world but in this test - // UGI has scm user context. 
- Assert.assertEquals(scmUser, cn.getFirst().getValue().toString()); - - LocalDate today = LocalDateTime.now().toLocalDate(); - Date invalidDate; - - // Make sure the end date is honored. - invalidDate = java.sql.Date.valueOf(today.plus(1, ChronoUnit.DAYS)); - assertTrue(cert.getNotAfter().after(invalidDate)); - - invalidDate = java.sql.Date.valueOf(today.plus(400, ChronoUnit.DAYS)); - assertTrue(cert.getNotAfter().before(invalidDate)); - - assertTrue(cert.getSubjectDN().toString().contains(scmId)); - assertTrue(cert.getSubjectDN().toString().contains(clusterId)); - - assertTrue(cert.getIssuerDN().toString().contains(scmUser)); - assertTrue(cert.getIssuerDN().toString().contains(scmId)); - assertTrue(cert.getIssuerDN().toString().contains(clusterId)); - - // Verify that certificate matches the public key. - String encodedKey1 = cert.getPublicKey().toString(); - String encodedKey2 = om.getCertificateClient().getPublicKey().toString(); - Assert.assertEquals(encodedKey1, encodedKey2); - } - - private void initializeOmStorage(OMStorage omStorage) throws IOException { - if (omStorage.getState() == Storage.StorageState.INITIALIZED) { - return; - } - omStorage.setClusterId(clusterId); - omStorage.setScmId(scmId); - omStorage.setOmId(omId); - // Initialize ozone certificate client if security is enabled. - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - OzoneManager.initializeSecurity(conf, omStorage); - } - omStorage.initialize(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java deleted file mode 100644 index ba072f81e9f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ /dev/null @@ -1,656 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
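The validateCertificate() helper above reads the issuer CN with BouncyCastle's JcaX509CertificateHolder. The same lookup, reduced to a stand-alone sketch (class and method names are assumptions made for illustration):

import java.security.cert.CertificateEncodingException;
import java.security.cert.X509Certificate;

import org.bouncycastle.asn1.x500.RDN;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x500.style.BCStyle;
import org.bouncycastle.cert.jcajce.JcaX509CertificateHolder;

/** Hypothetical helper: extracts the CN attribute of a certificate's issuer DN. */
public final class IssuerCnSketch {
  private IssuerCnSketch() {
  }

  public static String issuerCn(X509Certificate cert)
      throws CertificateEncodingException {
    X500Name issuer = new JcaX509CertificateHolder(cert).getIssuer();
    RDN cn = issuer.getRDNs(BCStyle.CN)[0];        // first CN relative distinguished name
    return cn.getFirst().getValue().toString();    // e.g. "scm@host" in the test above
  }
}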
- */ -package org.apache.hadoop.ozone; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic - .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.File; -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; -import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.node.DatanodeInfo; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.StaticMapping; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.HddsVersionInfo; -import org.junit.Assert; -import org.junit.AfterClass; -import 
org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.mockito.ArgumentMatcher; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -/** - * Test class that exercises the StorageContainerManager. - */ -public class TestStorageContainerManager { - private static XceiverClientManager xceiverClientManager; - private static final Logger LOG = LoggerFactory.getLogger( - TestStorageContainerManager.class); - - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Rule - public TemporaryFolder folder= new TemporaryFolder(); - - @BeforeClass - public static void setup() throws IOException { - xceiverClientManager = new XceiverClientManager(new OzoneConfiguration()); - } - - @AfterClass - public static void cleanup() { - if (xceiverClientManager != null) { - xceiverClientManager.close(); - } - } - - @Test - public void testRpcPermission() throws Exception { - // Test with default configuration - OzoneConfiguration defaultConf = new OzoneConfiguration(); - testRpcPermissionWithConf(defaultConf, "unknownUser", true); - - // Test with ozone.administrators defined in configuration - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, - "adminUser1, adminUser2"); - // Non-admin user will get permission denied. - testRpcPermissionWithConf(ozoneConf, "unknownUser", true); - // Admin user will pass the permission check. - testRpcPermissionWithConf(ozoneConf, "adminUser2", false); - } - - private void testRpcPermissionWithConf( - OzoneConfiguration ozoneConf, String fakeRemoteUsername, - boolean expectPermissionDenied) throws Exception { - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build(); - cluster.waitForClusterToBeReady(); - try { - - SCMClientProtocolServer mockClientServer = Mockito.spy( - cluster.getStorageContainerManager().getClientProtocolServer()); - when(mockClientServer.getRpcRemoteUsername()) - .thenReturn(fakeRemoteUsername); - - try { - mockClientServer.deleteContainer( - ContainerTestHelper.getTestContainerID()); - fail("Operation should fail, expecting an IOException here."); - } catch (Exception e) { - if (expectPermissionDenied) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } else { - // If passes permission check, it should fail with - // container not exist exception. 
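The permission checks in testRpcPermissionWithConf above hinge on Mockito's spy-and-stub idiom: wrap the real SCMClientProtocolServer and override only getRpcRemoteUsername() so the caller identity can be faked while every other method keeps its real behaviour. A minimal sketch of the idiom on a hypothetical class:

import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

/** Hypothetical class used only to illustrate spying. */
class Greeter {
  String callerName() {
    return "real-user";
  }

  String greet() {
    return "hello " + callerName();   // delegates to the (possibly stubbed) method
  }
}

class GreeterSpySketch {
  static void demo() {
    Greeter spied = spy(new Greeter());
    when(spied.callerName()).thenReturn("fake-user");  // stub one method only
    assert "hello fake-user".equals(spied.greet());    // the rest keeps its real behaviour
  }
}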
- Assert.assertTrue(e.getMessage() - .contains("container doesn't exist")); - } - } - - try { - ContainerWithPipeline container2 = mockClientServer - .allocateContainer(xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, "OZONE"); - if (expectPermissionDenied) { - fail("Operation should fail, expecting an IOException here."); - } else { - Assert.assertEquals(1, container2.getPipeline().getNodes().size()); - } - } catch (Exception e) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } - - try { - ContainerWithPipeline container3 = mockClientServer - .allocateContainer(xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, "OZONE"); - if (expectPermissionDenied) { - fail("Operation should fail, expecting an IOException here."); - } else { - Assert.assertEquals(1, container3.getPipeline().getNodes().size()); - } - } catch (Exception e) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } - - try { - mockClientServer.getContainer( - ContainerTestHelper.getTestContainerID()); - fail("Operation should fail, expecting an IOException here."); - } catch (Exception e) { - if (expectPermissionDenied) { - verifyPermissionDeniedException(e, fakeRemoteUsername); - } else { - // If passes permission check, it should fail with - // key not exist exception. - Assert.assertTrue(e instanceof ContainerNotFoundException); - } - } - } finally { - cluster.shutdown(); - } - } - - private void verifyPermissionDeniedException(Exception e, String userName) { - String expectedErrorMessage = "Access denied for user " - + userName + ". " + "Superuser privilege is required."; - Assert.assertTrue(e instanceof IOException); - Assert.assertEquals(expectedErrorMessage, e.getMessage()); - } - - @Test - public void testBlockDeletionTransactions() throws Exception { - int numKeys = 5; - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 3000, - TimeUnit.MILLISECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 1, TimeUnit.SECONDS); - // Reset container provision size, otherwise only one container - // is created by default. - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numKeys); - - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(100) - .build(); - cluster.waitForClusterToBeReady(); - - try { - DeletedBlockLog delLog = cluster.getStorageContainerManager() - .getScmBlockManager().getDeletedBlockLog(); - Assert.assertEquals(0, delLog.getNumOfValidTransactions()); - - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); - // Wait for container report - Thread.sleep(1000); - for (OmKeyInfo keyInfo : keyLocations.values()) { - OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), - cluster.getStorageContainerManager()); - } - - Map> containerBlocks = createDeleteTXLog(delLog, - keyLocations, helper); - Set containerIDs = containerBlocks.keySet(); - - // Verify a few TX gets created in the TX log. 
- Assert.assertTrue(delLog.getNumOfValidTransactions() > 0); - - // Once TXs are written into the log, SCM starts to fetch TX - // entries from the log and schedule block deletions in HB interval, - // after sometime, all the TX should be proceed and by then - // the number of containerBlocks of all known containers will be - // empty again. - GenericTestUtils.waitFor(() -> { - try { - return delLog.getNumOfValidTransactions() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 10000); - Assert.assertTrue(helper.getAllBlocks(containerIDs).isEmpty()); - - // Continue the work, add some TXs that with known container names, - // but unknown block IDs. - for (Long containerID : containerBlocks.keySet()) { - // Add 2 TXs per container. - delLog.addTransaction(containerID, - Collections.singletonList(RandomUtils.nextLong())); - delLog.addTransaction(containerID, - Collections.singletonList(RandomUtils.nextLong())); - } - - // Verify a few TX gets created in the TX log. - Assert.assertTrue(delLog.getNumOfValidTransactions() > 0); - - // These blocks cannot be found in the container, skip deleting them - // eventually these TX will success. - GenericTestUtils.waitFor(() -> { - try { - return delLog.getFailedTransactions().size() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 10000); - } finally { - cluster.shutdown(); - } - } - - @Test - public void testBlockDeletingThrottling() throws Exception { - int numKeys = 15; - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 100, TimeUnit.MILLISECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numKeys); - - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) - .build(); - cluster.waitForClusterToBeReady(); - - try { - DeletedBlockLog delLog = cluster.getStorageContainerManager() - .getScmBlockManager().getDeletedBlockLog(); - Assert.assertEquals(0, delLog.getNumOfValidTransactions()); - - int limitSize = 1; - // Reset limit value to 1, so that we only allow one TX is dealt per - // datanode. - SCMBlockDeletingService delService = cluster.getStorageContainerManager() - .getScmBlockManager().getSCMBlockDeletingService(); - delService.setBlockDeleteTXNum(limitSize); - - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); - // Wait for container report - Thread.sleep(5000); - for (OmKeyInfo keyInfo : keyLocations.values()) { - OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), - cluster.getStorageContainerManager()); - } - - createDeleteTXLog(delLog, keyLocations, helper); - // Verify a few TX gets created in the TX log. - Assert.assertTrue(delLog.getNumOfValidTransactions() > 0); - - // Verify the size in delete commands is expected. 
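The block-deletion assertions above, like the delete-command check that follows, poll cluster state with GenericTestUtils.waitFor instead of sleeping for a fixed period. A stripped-down sketch of that polling pattern, with a counter standing in for the real cluster state:

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.test.GenericTestUtils;

/** Hypothetical example of the waitFor polling idiom. */
final class WaitForSketch {
  private WaitForSketch() {
  }

  static void demo() throws TimeoutException, InterruptedException {
    AtomicInteger processedTransactions = new AtomicInteger();
    // Re-evaluate the condition every 100 ms, giving up after 10 s.
    GenericTestUtils.waitFor(
        () -> processedTransactions.incrementAndGet() >= 5, 100, 10000);
  }
}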
- GenericTestUtils.waitFor(() -> { - NodeManager nodeManager = cluster.getStorageContainerManager() - .getScmNodeManager(); - List commands = nodeManager.processHeartbeat( - nodeManager.getNodes(NodeState.HEALTHY).get(0)); - - if (commands != null) { - for (SCMCommand cmd : commands) { - if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) { - List deletedTXs = - ((DeleteBlocksCommand) cmd).blocksTobeDeleted(); - return deletedTXs != null && deletedTXs.size() == limitSize; - } - } - } - return false; - }, 500, 10000); - } finally { - cluster.shutdown(); - } - } - - private Map> createDeleteTXLog(DeletedBlockLog delLog, - Map keyLocations, - TestStorageContainerManagerHelper helper) throws IOException { - // These keys will be written into a bunch of containers, - // gets a set of container names, verify container containerBlocks - // on datanodes. - Set containerNames = new HashSet<>(); - for (Map.Entry entry : keyLocations.entrySet()) { - entry.getValue().getLatestVersionLocations().getLocationList() - .forEach(loc -> containerNames.add(loc.getContainerID())); - } - - // Total number of containerBlocks of these containers should be equal to - // total number of containerBlocks via creation call. - int totalCreatedBlocks = 0; - for (OmKeyInfo info : keyLocations.values()) { - totalCreatedBlocks += info.getKeyLocationVersions().size(); - } - Assert.assertTrue(totalCreatedBlocks > 0); - Assert.assertEquals(totalCreatedBlocks, - helper.getAllBlocks(containerNames).size()); - - // Create a deletion TX for each key. - Map> containerBlocks = Maps.newHashMap(); - for (OmKeyInfo info : keyLocations.values()) { - List list = - info.getLatestVersionLocations().getLocationList(); - list.forEach(location -> { - if (containerBlocks.containsKey(location.getContainerID())) { - containerBlocks.get(location.getContainerID()) - .add(location.getBlockID().getLocalID()); - } else { - List blks = Lists.newArrayList(); - blks.add(location.getBlockID().getLocalID()); - containerBlocks.put(location.getContainerID(), blks); - } - }); - } - for (Map.Entry> tx : containerBlocks.entrySet()) { - delLog.addTransaction(tx.getKey(), tx.getValue()); - } - - return containerBlocks; - } - - @Test - public void testSCMInitialization() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - - // This will initialize SCM - StorageContainerManager.scmInit(conf, "testClusterId"); - - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); - Assert.assertEquals("testClusterId", scmStore.getClusterID()); - StorageContainerManager.scmInit(conf, "testClusterIdNew"); - Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); - Assert.assertEquals("testClusterId", scmStore.getClusterID()); - } - - @Test - public void testSCMReinitialization() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - try { - // This will initialize SCM - 
StorageContainerManager.scmInit(conf, "testClusterId"); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - Assert.assertEquals(NodeType.SCM, scmStore.getNodeType()); - Assert.assertNotEquals("testClusterId", scmStore.getClusterID()); - } finally { - cluster.shutdown(); - } - } - - @Test - public void testSCMInitializationFailure() - throws IOException, AuthenticationException { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - exception.expect(SCMException.class); - exception.expectMessage( - "SCM not initialized due to storage config failure"); - StorageContainerManager.createSCM(conf); - } - - @Test - public void testScmInfo() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - try { - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - StorageContainerManager scm = StorageContainerManager.createSCM(conf); - //Reads the SCM Info from SCM instance - ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); - Assert.assertEquals(clusterId, scmInfo.getClusterId()); - Assert.assertEquals(scmId, scmInfo.getScmId()); - - String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); - String actualVersion = scm.getSoftwareVersion(); - Assert.assertEquals(expectedVersion, actualVersion); - } finally { - FileUtils.deleteQuietly(new File(path)); - } - } - - /** - * Test datanode heartbeat well processed with a 4-layer network topology. 
- */ - @Test(timeout = 60000) - public void testScmProcessDatanodeHeartbeat() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - String scmId = UUID.randomUUID().toString(); - conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - StaticMapping.class, DNSToSwitchMapping.class); - StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), - "/rack1"); - - final int datanodeNum = 3; - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(datanodeNum) - .setScmId(scmId) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - - try { - // first sleep 10s - Thread.sleep(10000); - // verify datanode heartbeats are well processed - long heartbeatCheckerIntervalMs = - MiniOzoneCluster.Builder.DEFAULT_HB_INTERVAL_MS; - long start = Time.monotonicNow(); - Thread.sleep(heartbeatCheckerIntervalMs * 2); - - List allNodes = scm.getScmNodeManager().getAllNodes(); - Assert.assertEquals(datanodeNum, allNodes.size()); - for (DatanodeDetails node : allNodes) { - DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager() - .getNodeByUuid(node.getUuidString()); - Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start); - Assert.assertEquals(datanodeInfo.getUuidString(), - datanodeInfo.getNetworkName()); - Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation()); - } - } finally { - cluster.shutdown(); - } - } - - @Test - @SuppressWarnings("unchecked") - public void testCloseContainerCommandOnRestart() throws Exception { - int numKeys = 15; - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 100, TimeUnit.MILLISECONDS); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numKeys); - - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) - .setTrace(false) - .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - - try { - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - - helper.createKeys(10, 4096); - Thread.sleep(5000); - - StorageContainerManager scm = cluster.getStorageContainerManager(); - List containers = cluster.getStorageContainerManager() - .getContainerManager().getContainers(); - Assert.assertNotNull(containers); - ContainerInfo selectedContainer = containers.iterator().next(); - - // Stop processing HB - scm.getDatanodeProtocolServer().stop(); - - scm.getContainerManager().updateContainerState(selectedContainer - .containerID(), HddsProtos.LifeCycleEvent.FINALIZE); - cluster.restartStorageContainerManager(true); - scm = cluster.getStorageContainerManager(); - EventPublisher publisher = mock(EventPublisher.class); - ReplicationManager replicationManager = scm.getReplicationManager(); - Field f = ReplicationManager.class.getDeclaredField("eventPublisher"); - f.setAccessible(true); - Field modifiersField = Field.class.getDeclaredField("modifiers"); - modifiersField.setAccessible(true); - modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL); - f.set(replicationManager, publisher); - scm.getReplicationManager().start(); - Thread.sleep(2000); - - UUID dnUuid = cluster.getHddsDatanodes().iterator().next() - 
.getDatanodeDetails().getUuid(); - - CloseContainerCommand closeContainerCommand = - new CloseContainerCommand(selectedContainer.getContainerID(), - selectedContainer.getPipelineID(), false); - - CommandForDatanode commandForDatanode = new CommandForDatanode( - dnUuid, closeContainerCommand); - - verify(publisher).fireEvent(eq(SCMEvents.DATANODE_COMMAND), argThat(new - CloseContainerCommandMatcher(dnUuid, commandForDatanode))); - } finally { - cluster.shutdown(); - } - } - - @SuppressWarnings("visibilitymodifier") - static class CloseContainerCommandMatcher - extends ArgumentMatcher { - - private final CommandForDatanode cmd; - private final UUID uuid; - - CloseContainerCommandMatcher(UUID uuid, CommandForDatanode cmd) { - this.uuid = uuid; - this.cmd = cmd; - } - - @Override - public boolean matches(Object argument) { - CommandForDatanode cmdRight = (CommandForDatanode) argument; - CloseContainerCommand left = (CloseContainerCommand) cmd.getCommand(); - CloseContainerCommand right = - (CloseContainerCommand) cmdRight.getCommand(); - return cmdRight.getDatanodeId().equals(uuid) - && left.getContainerID() == right.getContainerID() - && left.getPipelineID().equals(right.getPipelineID()) - && left.getType() == right.getType() - && left.getProto().equals(right.getProto()); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java deleted file mode 100644 index 9beddd4a71b..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ /dev/null @@ -1,153 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
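testCloseContainerCommandOnRestart above swaps the ReplicationManager's private final eventPublisher for a mock by clearing the FINAL modifier through reflection. The trick in isolation, as a hedged sketch: the helper name is an assumption, and the approach relies on the Java 8 runtime these tests targeted (newer JDKs restrict access to Field.modifiers).

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

/** Hypothetical helper: overwrite a private final instance field for a test. */
public final class FinalFieldSketch {
  private FinalFieldSketch() {
  }

  public static void setFinalField(Object target, String fieldName, Object newValue)
      throws ReflectiveOperationException {
    Field field = target.getClass().getDeclaredField(fieldName);
    field.setAccessible(true);

    Field modifiers = Field.class.getDeclaredField("modifiers");
    modifiers.setAccessible(true);
    modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL); // drop the final flag

    field.set(target, newValue);
  }
}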
- */ -package org.apache.hadoop.ozone; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.RandomStringUtils; - -/** - * A helper class used by {@link TestStorageContainerManager} to generate - * some keys and helps to verify containers and blocks locations. - */ -public class TestStorageContainerManagerHelper { - - private final MiniOzoneCluster cluster; - private final Configuration conf; - - public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, - Configuration conf) throws IOException { - this.cluster = cluster; - this.conf = conf; - } - - public Map createKeys(int numOfKeys, int keySize) - throws Exception { - Map keyLocationMap = Maps.newHashMap(); - - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - // Write 20 keys in bucketName. 
- Set keyNames = Sets.newHashSet(); - for (int i = 0; i < numOfKeys; i++) { - String keyName = RandomStringUtils.randomAlphabetic(5) + i; - keyNames.add(keyName); - - TestDataUtil - .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); - } - - for (String key : keyNames) { - OmKeyArgs arg = new OmKeyArgs.Builder() - .setVolumeName(bucket.getVolumeName()) - .setBucketName(bucket.getName()) - .setKeyName(key) - .setRefreshPipeline(true) - .build(); - OmKeyInfo location = cluster.getOzoneManager() - .lookupKey(arg); - keyLocationMap.put(key, location); - } - return keyLocationMap; - } - - public List getPendingDeletionBlocks(Long containerID) - throws IOException { - List pendingDeletionBlocks = Lists.newArrayList(); - ReferenceCountedDB meta = getContainerMetadata(containerID); - KeyPrefixFilter filter = - new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX); - List> kvs = meta.getStore() - .getRangeKVs(null, Integer.MAX_VALUE, filter); - kvs.forEach(entry -> { - String key = DFSUtil.bytes2String(entry.getKey()); - pendingDeletionBlocks - .add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, "")); - }); - meta.close(); - return pendingDeletionBlocks; - } - - public List getAllBlocks(Set containerIDs) - throws IOException { - List allBlocks = Lists.newArrayList(); - for (Long containerID : containerIDs) { - allBlocks.addAll(getAllBlocks(containerID)); - } - return allBlocks; - } - - public List getAllBlocks(Long containeID) throws IOException { - List allBlocks = Lists.newArrayList(); - ReferenceCountedDB meta = getContainerMetadata(containeID); - List> kvs = - meta.getStore().getRangeKVs(null, Integer.MAX_VALUE, - MetadataKeyFilters.getNormalKeyFilter()); - kvs.forEach(entry -> { - allBlocks.add(Longs.fromByteArray(entry.getKey())); - }); - meta.close(); - return allBlocks; - } - - private ReferenceCountedDB getContainerMetadata(Long containerID) - throws IOException { - ContainerWithPipeline containerWithPipeline = cluster - .getStorageContainerManager().getClientProtocolServer() - .getContainerWithPipeline(containerID); - - DatanodeDetails dn = - containerWithPipeline.getPipeline().getFirstNode(); - OzoneContainer containerServer = - getContainerServerByDatanodeUuid(dn.getUuidString()); - KeyValueContainerData containerData = - (KeyValueContainerData) containerServer.getContainerSet() - .getContainer(containerID).getContainerData(); - return BlockUtils.getDB(containerData, conf); - } - - private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID) - throws IOException { - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) { - return dn.getDatanodeStateMachine().getContainer(); - } - } - throw new IOException("Unable to get the ozone container " - + "for given datanode ID " + dnUUID); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java deleted file mode 100644 index 41b8e56b12c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/chaos/TestProbability.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.chaos; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; - -/** - * This class is used to find out if a certain event is true. - * Every event is assigned a propbability and the isTrue function returns true - * when the probability has been met. - */ -final public class TestProbability { - private int pct; - - private TestProbability(int pct) { - Preconditions.checkArgument(pct <= 100 && pct > 0); - this.pct = pct; - } - - public boolean isTrue() { - return (RandomUtils.nextInt(0, 100) <= pct); - } - - public static TestProbability valueOf(int pct) { - return new TestProbability(pct); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java deleted file mode 100644 index d05093f2893..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
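TestProbability above gives the chaos tests a weighted coin: valueOf(pct) fixes the percentage and isTrue() flips it. A short, hypothetical usage sketch:

import org.apache.hadoop.ozone.chaos.TestProbability;

/** Hypothetical usage of the weighted-coin helper defined above. */
final class ProbabilitySketch {
  private ProbabilitySketch() {
  }

  static void demo() {
    TestProbability restartChance = TestProbability.valueOf(30);
    int fired = 0;
    for (int i = 0; i < 1000; i++) {
      if (restartChance.isTrue()) {   // true roughly 30% of the time
        fired++;
      }
    }
    System.out.println("fired " + fired + " times out of 1000");
  }
}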
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; - -import java.io.InputStream; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.CertStore; -import java.security.cert.X509Certificate; -import java.time.LocalDate; -import java.time.temporal.ChronoUnit; -import java.util.List; - -/** - * Test implementation for CertificateClient. To be used only for test - * purposes. - */ - -public class CertificateClientTestImpl implements CertificateClient { - - private final SecurityConfig securityConfig; - private final KeyPair keyPair; - private final Configuration config; - private final X509Certificate x509Certificate; - - public CertificateClientTestImpl(OzoneConfiguration conf) throws Exception { - securityConfig = new SecurityConfig(conf); - HDDSKeyGenerator keyGen = - new HDDSKeyGenerator(securityConfig.getConfiguration()); - keyPair = keyGen.generateKey(); - config = conf; - SelfSignedCertificate.Builder builder = - SelfSignedCertificate.newBuilder() - .setBeginDate(LocalDate.now()) - .setEndDate(LocalDate.now().plus(365, ChronoUnit.DAYS)) - .setClusterID("cluster1") - .setKey(keyPair) - .setSubject("localhost") - .setConfiguration(config) - .setScmID("TestScmId1") - .makeCA(); - X509CertificateHolder certificateHolder = null; - certificateHolder = builder.build(); - x509Certificate = new JcaX509CertificateConverter().getCertificate( - certificateHolder); - } - - @Override - public PrivateKey getPrivateKey() { - return keyPair.getPrivate(); - } - - @Override - public PublicKey getPublicKey() { - return keyPair.getPublic(); - } - - /** - * Returns the certificate of the specified component if it exists on the - * local system. - * - * @return certificate or Null if there is no data. 
- */ - @Override - public X509Certificate getCertificate(String certSerialId) - throws CertificateException { - return x509Certificate; - } - - @Override - public X509Certificate getCertificate() { - return x509Certificate; - } - - @Override - public X509Certificate getCACertificate() { - return x509Certificate; - } - - @Override - public boolean verifyCertificate(X509Certificate certificate) { - return true; - } - - @Override - public byte[] signDataStream(InputStream stream) - throws CertificateException { - return new byte[0]; - } - - @Override - public byte[] signData(byte[] data) throws CertificateException { - return new byte[0]; - } - - @Override - public boolean verifySignature(InputStream stream, byte[] signature, - X509Certificate cert) throws CertificateException { - return true; - } - - @Override - public boolean verifySignature(byte[] data, byte[] signature, - X509Certificate cert) throws CertificateException { - return true; - } - - @Override - public CertificateSignRequest.Builder getCSRBuilder() { - return new CertificateSignRequest.Builder(); - } - - @Override - public X509Certificate queryCertificate(String query) { - return null; - } - - @Override - public void storeCertificate(String cert, boolean force) - throws CertificateException { - } - - @Override - public void storeCertificate(String cert, boolean force, boolean caCert) - throws CertificateException { - } - - /** - * Stores the trusted chain of certificates for a specific component. - * - * @param keyStore - Cert Store. - * @throws CertificateException - on Error. - */ - @Override - public void storeTrustChain(CertStore keyStore) throws CertificateException { - - } - - @Override - public void storeTrustChain(List certificates) - throws CertificateException { - - } - - @Override - public InitResponse init() throws CertificateException { - return null; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index b1023e824c3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -/** - * Ozone Client tests. - */ -package org.apache.hadoop.ozone.client; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java deleted file mode 100644 index cf570d28f7c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; - -/** - * This class tests the 2 way commit in Ratis. - */ -public class Test2WayCommitInRatis { - - private MiniOzoneCluster cluster; - private OzoneClient client; - private ObjectStore objectStore; - private String volumeName; - private String bucketName; - private int chunkSize; - private int flushSize; - private int maxFlushSize; - private int blockSize; - private StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String containerOwner = "OZONE"; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - private void startCluster(OzoneConfiguration conf) throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - 1, TimeUnit.SECONDS); - - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(7) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - volumeName = "watchforcommithandlingtest"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - - /** - * Shutdown MiniDFSCluster. - */ - private void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - - @Test - public void test2WayCommitForRetryfailure() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - startCluster(conf); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG); - XceiverClientManager clientManager = new XceiverClientManager(conf); - - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - XceiverClientSpi xceiverClient = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - reply = xceiverClient.sendCommandAsync(ContainerTestHelper - .getCloseContainer(pipeline, - container1.getContainerInfo().getContainerID())); - reply.getResponse().get(); - xceiverClient.watchForCommit(reply.getLogIndex(), 20000); - - // commitInfo Map will be reduced to 2 here - Assert.assertEquals(2, ratisClient.getCommitInfoMap().size()); - clientManager.releaseClient(xceiverClient, false); - Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed")); - Assert - .assertTrue(logCapturer.getOutput().contains("Committed by majority")); - logCapturer.stopCapturing(); - shutdown(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java deleted file 
mode 100644 index 623b11d22d2..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys. - HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys. - HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests the validity BCSID of a container. - */ -public class TestBCSID { - - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static MiniOzoneCluster cluster; - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - String path = GenericTestUtils - .getTempPath(TestBCSID.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - volumeName = "bcsid"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testBCSID() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - key.write("ratis".getBytes()); - key.close(); - - // get the name of a valid container. - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName). - setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName("ratis") - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - List keyLocationInfos = - keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly(); - Assert.assertEquals(1, keyLocationInfos.size()); - OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(0); - - long blockCommitSequenceId = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerReport().getBlockCommitSequenceId(); - Assert.assertTrue(blockCommitSequenceId > 0); - - // make sure the persisted block Id in OM is same as that seen in the - // container report to be reported to SCM. - Assert.assertEquals(blockCommitSequenceId, - omKeyLocationInfo.getBlockCommitSequenceId()); - - // verify that on restarting the datanode, it reloads the BCSID correctly. - cluster.restartHddsDatanode(0, true); - Assert.assertEquals(blockCommitSequenceId, - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerReport().getBlockCommitSequenceId()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java deleted file mode 100644 index 399b977d333..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ /dev/null @@ -1,696 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientMetrics; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests BlockOutputStream class. - */ -public class TestBlockOutputStream { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(7) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testblockoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testBufferCaching() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = 50; - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data less than a chunk size, the data will just sit - // in the buffer, with only one buffer being allocated in the buffer pool - - Assert.assertEquals(1, blockOutputStream.getBufferPool().getSize()); - //Just the writtenDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - // no data will be flushed till now - Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength()); - Assert.assertEquals(pendingWriteChunkCount, - XceiverClientManager.getXceiverClientMetrics() - .getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - 
Assert.assertEquals(pendingPutBlockCount, - XceiverClientManager.getXceiverClientMetrics() - .getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // commitIndex2FlushedData Map will be empty here - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().isEmpty()); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - - // flush is a sync call, all pending operations will complete - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - // we have just written data less than a chunk size, the data will just sit - // in the buffer, with only one buffer being allocated in the buffer pool - - Assert.assertEquals(1, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(0, - blockOutputStream.getBufferPool().getBuffer(0).position()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - // flush ensures watchForCommit updates the total length acknowledged - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - - Assert.assertEquals(1, keyOutputStream.getStreamEntries().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 2, - metrics.getTotalOpCount()); - - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - @Test - public void testFlushChunk() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = flushSize; - // write data equal to 2 chunks - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertEquals(pendingWriteChunkCount + 2, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount + 1, - 
metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data equal flush Size = 2 chunks, at this time - // buffer pool will have 2 buffers allocated worth of chunk size - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength()); - - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - Assert.assertEquals(1, keyOutputStream.getStreamEntries().size()); - // flush is a sync call, all pending operations will complete - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - // flush ensures watchForCommit updates the total length acknowledged - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 3, - metrics.getTotalOpCount()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - @Test - public void testMultiChunkWrite() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - 
ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = chunkSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertEquals(pendingWriteChunkCount + 1, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data equal flush Size > 1 chunk, at this time - // buffer pool will have 2 buffers allocated worth of chunk size - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - // since data written is still less than flushLength, flushLength will - // still be 0. - Assert.assertEquals(0, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength()); - - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
- key.flush(); - Assert.assertEquals(writeChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - // flush ensures watchForCommit updates the total length acknowledged - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 3, - metrics.getTotalOpCount()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - @Test - public void testMultiChunkWrite2() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = flushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertEquals(pendingWriteChunkCount + 2, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount + 1, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; 
- - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - - Assert.assertEquals(3, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(flushSize, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength()); - - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - Assert.assertEquals(flushSize, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertEquals(0, - blockOutputStream.getCommitIndex2flushedDataMap().size()); - - Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength()); - Assert.assertEquals(1, keyOutputStream.getStreamEntries().size()); - key.close(); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 5, - metrics.getTotalOpCount()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - @Test - public void testFullBufferCondition() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - 
BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - Assert.assertEquals(1, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, - metrics.getTotalOpCount()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - @Test - public void testWriteWithExceedingMaxBufferLimit() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 
chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, - metrics.getTotalOpCount()); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
- key.flush(); - Assert.assertEquals(1, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, - metrics.getTotalOpCount()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - validateData(keyName, data1); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java deleted file mode 100644 index 8649837a0cd..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ /dev/null @@ -1,1218 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientMetrics; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests failure detection and handling in BlockOutputStream Class. - */ -public class TestBlockOutputStreamWithFailures { - - private static MiniOzoneCluster cluster; - private OzoneConfiguration conf = new OzoneConfiguration(); - private OzoneClient client; - private ObjectStore objectStore; - private int chunkSize; - private int flushSize; - private int maxFlushSize; - private int blockSize; - private String volumeName; - private String bucketName; - private String keyString; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "1s"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7) - .setBlockSize(blockSize).setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES).build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testblockoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testWatchForCommitWithCloseContainerException() - throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have 
just written data more than flush Size(2 chunks), at this time - // buffer pool will have 4 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // flush is a sync call, all pending operations will complete - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // Close the containers on the Datanode and write more data - ContainerTestHelper.waitForContainerClose(key, cluster); - // 4 writeChunks = maxFlushSize + 2 putBlocks will be discarded here - // once exception is hit - key.write(data1); - - // As a part of handling the exception, 4 failed writeChunks will be - // rewritten plus one partial chunk plus two putBlocks for flushSize - // and one flush for partial chunk - key.flush(); - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // commitInfoMap will remain intact as there is no server failure - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - // make sure the bufferPool is empty - Assert 
- .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount()); - // Written the same data twice - String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void testWatchForCommitDatanodeFailure() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes at least putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - // since data written is still less than flushLength, flushLength will - // still be 0. 
- Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast flushSize worth of data buffer - // where each entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - - // again write data with more than max buffer limit. This will call - // watchForCommit again. 
Since the commit will happen 2 way, the - // commitInfoMap will get updated for servers which are alive - key.write(data1); - - key.flush(); - Assert.assertEquals(2, raftClient.getCommitInfoMap().size()); - - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(blockSize, blockOutputStream.getTotalAckDataLength()); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // in total, there are 8 full write chunks + 2 partial chunks written - Assert.assertEquals(writeChunkCount + 10, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - // 4 flushes at flushSize boundaries + 2 flush for partial chunks - Assert.assertEquals(putBlockCount + 6, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 16, metrics.getTotalOpCount()); - // Written the same data twice - String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void test2DatanodesFailure() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream 
instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // acked by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - // again write data with more than max buffer limit. This will call - // watchForCommit again. Since the commit will happen 2 way, the - // commitInfoMap will get updated for servers which are alive - - // 4 writeChunks = maxFlushSize + 2 putBlocks will be discarded here - // once exception is hit - key.write(data1); - - // As a part of handling the exception, 4 failed writeChunks will be - // rewritten plus one partial chunk plus two putBlocks for flushSize - // and one flush for partial chunk - key.flush(); - - Throwable ioException = HddsClientUtils.checkForException( - blockOutputStream.getIoException()); - // Since, 2 datanodes went down, - // a) if the pipeline gets destroyed quickly it will hit - // GroupMismatchException. - // b) will hit close container exception if the container is closed - // but pipeline is still not destroyed. - // c) will fail with RaftRetryFailureException if the leader election - // did not finish before the request retry count finishes. 
- Assert.assertTrue(ioException instanceof RaftRetryFailureException - || ioException instanceof GroupMismatchException - || ioException instanceof ContainerNotOpenException); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // now close the stream, It will update the ack length after watchForCommit - - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - key.close(); - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); - validateData(keyName, data1); - } - - @Test - public void testFailureWithPrimeSizedData() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = 167; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - == pendingWriteChunkCount + 1); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - == pendingPutBlockCount); - Assert.assertEquals(writeChunkCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 1, metrics.getTotalOpCount()); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, 
blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); - - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() == 0); - - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 3, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // Close the containers on the Datanode and write more data - ContainerTestHelper.waitForContainerClose(key, cluster); - key.write(data1); - - // As a part of handling the exception, 2 failed writeChunks will be - // rewritten plus 1 putBlocks for flush - // and one flush for partial chunk - key.flush(); - - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - - // commitInfoMap will remain intact as there is no server failure - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 6, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 9, metrics.getTotalOpCount()); - Assert.assertTrue(keyOutputStream.getLocationInfoList().size() == 0); - // Written the same data twice - String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void testExceptionDuringClose() throws Exception { - 
XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = 167; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - == pendingWriteChunkCount + 1); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - == pendingPutBlockCount); - Assert.assertEquals(writeChunkCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 1, metrics.getTotalOpCount()); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(0, blockOutputStream.getTotalDataFlushedLength()); - - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() == 0); - - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
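[Editor's note] All the metric assertions in this class compare against counters captured at the top of each test method rather than absolute values, so the tests stay independent of how many operations earlier tests issued. A minimal, Ozone-free sketch of that baseline-and-delta pattern; the Metrics type here is a made-up stand-in for XceiverClientMetrics:

    import java.util.concurrent.atomic.AtomicLong;

    // Hypothetical counter holder standing in for XceiverClientMetrics.
    final class Metrics {
      final AtomicLong writeChunkOps = new AtomicLong();
      final AtomicLong putBlockOps = new AtomicLong();
      long total() { return writeChunkOps.get() + putBlockOps.get(); }
    }

    public final class MetricsDeltaExample {
      public static void main(String[] args) {
        Metrics metrics = new Metrics();
        // Capture the baseline before doing any I/O.
        long writeChunkBefore = metrics.writeChunkOps.get();
        long putBlockBefore = metrics.putBlockOps.get();
        long totalBefore = metrics.total();

        // Pretend a small write plus flush happened: one more WriteChunk for
        // the partial chunk and one PutBlock for the flush.
        metrics.writeChunkOps.addAndGet(2);
        metrics.putBlockOps.addAndGet(1);

        // Assert on deltas, never on absolute counter values.
        System.out.println(metrics.writeChunkOps.get() == writeChunkBefore + 2);
        System.out.println(metrics.putBlockOps.get() == putBlockBefore + 1);
        System.out.println(metrics.total() == totalBefore + 3);
      }
    }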
- key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 3, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(2, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // Close the containers on the Datanode and write more data - ContainerTestHelper.waitForContainerClose(key, cluster); - key.write(data1); - - // commitInfoMap will remain intact as there is no server failure - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - // now close the stream, It will hit exception - key.close(); - - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 6, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 9, metrics.getTotalOpCount()); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); - // Written the same data twice - String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void testWatchForCommitWithSingleNodeRatis() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 0, ReplicationFactor.ONE); - 
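[Editor's note] The key created just below uses a factor-ONE Ratis pipeline, which is why the commitInfoMap assertions later in this test expect exactly one entry rather than three. A minimal sketch of creating such a key, assuming the OzoneBucket#createKey overload that takes a replication type, factor and metadata map (the helper name and simplified volume/bucket handling are illustrative):

    import java.util.HashMap;
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    public final class SingleNodeKeyExample {
      // Creates a RATIS key on a single-datanode pipeline; with factor ONE the
      // Raft commitInfoMap seen by XceiverClientRatis has exactly one entry.
      public static OzoneOutputStream createRatisOneKey(ObjectStore store,
          String volume, String bucket, String key, long sizeHint)
          throws Exception {
        OzoneBucket ozoneBucket = store.getVolume(volume).getBucket(bucket);
        return ozoneBucket.createKey(key, sizeHint, ReplicationType.RATIS,
            ReplicationFactor.ONE, new HashMap<>());
      }
    }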
int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 4 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
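[Editor's note] The buffer-pool numbers asserted above follow from the sizing convention these tests share with the TestCommitWatcher setup further down (flushSize = 2 x chunkSize, maxFlushSize = 2 x flushSize). A worked example of that arithmetic, assuming a 1 MB chunk size; plain arithmetic only, no Ozone classes:

    public final class BufferPoolMath {
      public static void main(String[] args) {
        final int chunkSize = 1 << 20;           // 1 MB (assumed)
        final int flushSize = 2 * chunkSize;     // 2 MB
        final int maxFlushSize = 2 * flushSize;  // 4 MB
        final int dataLength = maxFlushSize + 50;

        // Buffers are chunk-sized and the pool is capped at maxFlushSize worth
        // of them, so the 50-byte tail reuses a buffer released by
        // watchForCommit instead of growing the pool past 4.
        int buffersAllocated = Math.min(
            (dataLength + chunkSize - 1) / chunkSize, maxFlushSize / chunkSize);

        // Everything up to the maxFlushSize boundary is flushed automatically;
        // the remainder stays buffered until an explicit flush() or close().
        int flushedLength = (dataLength / maxFlushSize) * maxFlushSize;

        System.out.println(buffersAllocated);              // 4
        System.out.println(flushedLength == maxFlushSize); // true
        System.out.println(dataLength - flushedLength);    // 50 bytes buffered
      }
    }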
- key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // flush is a sync call, all pending operations will complete - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - // Close the containers on the Datanode and write more data - ContainerTestHelper.waitForContainerClose(key, cluster); - // 4 writeChunks = maxFlushSize + 2 putBlocks will be discarded here - // once exception is hit - key.write(data1); - - // As a part of handling the exception, 4 failed writeChunks will be - // rewritten plus one partial chunk plus two putBlocks for flushSize - // and one flush for partial chunk - key.flush(); - - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // commitInfoMap will remain intact as there is no server failure - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount()); - // Written the same data twice - String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void 
testDatanodeFailureWithSingleNodeRatis() throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 0, ReplicationFactor.ONE); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes at least putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast flushSize worth of data buffer - // where each entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
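[Editor's note] The blocking behaviour described in the comments above, where a write that fills the buffer limit waits until at least flushSize worth of data is acknowledged, is easiest to picture as a semaphore over chunk-sized buffers. This is an illustrative model only, not the real BlockOutputStream:

    import java.util.concurrent.Semaphore;

    public final class BackPressureSketch {
      private final int chunkSize;
      private final Semaphore freeChunks; // one permit per chunk-sized buffer
      private long ackedBytes;

      BackPressureSketch(int chunkSize, int maxOutstandingChunks) {
        this.chunkSize = chunkSize;
        this.freeChunks = new Semaphore(maxOutstandingChunks);
      }

      // Blocks when all buffers are in flight, mirroring the "blocking call"
      // the test comments describe.
      void writeChunk(byte[] chunk) throws InterruptedException {
        freeChunks.acquire();
        // ... hand the chunk to an asynchronous WriteChunk here ...
      }

      // Invoked when the pipeline acknowledges data up to a flush boundary.
      synchronized void onAck(int chunksAcked) {
        freeChunks.release(chunksAcked);
        ackedBytes += (long) chunksAcked * chunkSize;
      }

      synchronized long getAckedBytes() {
        return ackedBytes;
      }
    }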
- key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - - // again write data with more than max buffer limit. This will call - // watchForCommit again. No write will happen in the current block and - // data will be rewritten to the next block. - - key.write(data1); - - key.flush(); - - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof RaftRetryFailureException); - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // in total, there are 14 full write chunks, 5 before the failure injection, - // 4 chunks after which we detect the failure and then 5 again on the next - // block - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - // 3 flushes at flushSize boundaries before failure injection + 2 - // flush failed + 3 more flushes for the next block - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount()); - Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); - // Written the same data twice - String dataString = new String(data1, UTF_8); - cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - @Test - public void testDatanodeFailureWithPreAllocation() throws 
Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); - long putBlockCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 3 * blockSize, - ReplicationFactor.ONE); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes at least putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, metrics.getTotalOpCount()); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 3); - OutputStream stream = - keyOutputStream.getStreamEntries().get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // ack'd by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - - // watchForCommit will clean up atleast flushSize worth of data buffer - // where each entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - - // Now do a flush. This will flush the data and update the flush length and - // the map. 
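[Editor's note] This test asks for 3 x blockSize up front, which is why getStreamEntries() reports three pre-allocated block streams before any failure is injected. A back-of-the-envelope helper for that expectation, assuming pre-allocation simply covers the size hint with ceiling division (the zero-hint behaviour is an assumption based on the single-entry assertions elsewhere in this class):

    public final class PreallocationMath {
      static int preallocatedBlocks(long sizeHint, long blockSize) {
        if (sizeHint <= 0) {
          return 1; // a zero size hint still ends up with one open block
        }
        return (int) ((sizeHint + blockSize - 1) / blockSize); // ceiling
      }

      public static void main(String[] args) {
        long blockSize = 8L << 20; // 8 MB, i.e. 2 * maxFlushSize (assumed)
        System.out.println(preallocatedBlocks(3 * blockSize, blockSize)); // 3
        System.out.println(preallocatedBlocks(4 * blockSize, blockSize)); // 4
      }
    }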
- key.flush(); - - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, metrics.getTotalOpCount()); - - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() == 0); - - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - - // again write data with more than max buffer limit. This will call - // watchForCommit again. No write will happen and - - key.write(data1); - - key.flush(); - - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof RaftRetryFailureException); - - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - Assert.assertEquals(1, raftClient.getCommitInfoMap().size()); - - // now close the stream, It will update the ack length after watchForCommit - key.close(); - Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - - // in total, there are 14 full write chunks, 5 before the failure injection, - // 4 chunks after which we detect the failure and then 5 again on the next - // block - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - - // 3 flushes at flushSize boundaries before failure injection + 2 - // flush failed + 3 more flushes for the next block - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, metrics.getTotalOpCount()); - // Written the same data twice - String dataString = new String(data1, UTF_8); - cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return createKey(keyName, type, size, ReplicationFactor.THREE); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size, 
ReplicationFactor factor) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, factor, size, objectStore, volumeName, - bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java deleted file mode 100644 index e551ab1ae20..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ /dev/null @@ -1,474 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Tests Close Container Exception handling by Ozone Client. - */ -public class TestCloseContainerHandlingByClient { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - chunkSize = (int) OzoneConsts.MB; - blockSize = 4 * chunkSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7).build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "closecontainerexceptionhandlingtest"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testBlockWritesWithFlushAndClose() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - // write data more than 1 chunk - byte[] data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize + chunkSize / 2) - .getBytes(UTF_8); - key.write(data); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - key.write(data); - key.flush(); - key.close(); - // read the key from OM again and match the length.The length will still - // be the equal to the original data size. - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(2 * data.length, keyInfo.getDataSize()); - - // Written the same data twice - String dataString = new String(data, UTF_8); - dataString = dataString.concat(dataString); - validateData(keyName, dataString.getBytes(UTF_8)); - } - - @Test - public void testBlockWritesCloseConsistency() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - // write data more than 1 chunk - byte[] data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize + chunkSize / 2) - .getBytes(UTF_8); - key.write(data); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName) - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - key.close(); - // read the key from OM again and match the length.The length will still - // be the equal to the original data size. 
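[Editor's note] The validateData helper used throughout this class boils down to reading the key back through the bucket and comparing bytes with what was written (here, the same payload written twice). A self-contained sketch of such a readback check, using the bucket.readKey API that these tests already rely on:

    import java.io.InputStream;
    import java.util.Arrays;
    import org.apache.hadoop.ozone.client.OzoneBucket;

    public final class ReadbackCheck {
      // Returns true only if the key contains exactly the expected bytes.
      public static boolean matches(OzoneBucket bucket, String keyName,
          byte[] expected) throws Exception {
        byte[] actual = new byte[expected.length];
        try (InputStream in = bucket.readKey(keyName)) {
          int off = 0;
          while (off < actual.length) {
            int n = in.read(actual, off, actual.length - off);
            if (n < 0) {
              return false; // key is shorter than expected
            }
            off += n;
          }
          return in.read() == -1 && Arrays.equals(expected, actual);
        }
      }
    }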
- OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(data.length, keyInfo.getDataSize()); - validateData(keyName, data); - } - - @Test - public void testMultiBlockWrites() throws Exception { - - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, (3 * blockSize)); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - // With the initial size provided, it should have preallocated 4 blocks - Assert.assertEquals(3, keyOutputStream.getStreamEntries().size()); - // write data more than 1 block - byte[] data = - ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize)) - .getBytes(UTF_8); - Assert.assertEquals(data.length, 3 * blockSize); - key.write(data); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - // write 1 more block worth of data. It will fail and new block will be - // allocated - key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize) - .getBytes(UTF_8)); - - key.close(); - // read the key from OM again and match the length.The length will still - // be the equal to the original data size. - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - List keyLocationInfos = - keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly(); - // Though we have written only block initially, the close will hit - // closeContainerException and remaining data in the chunkOutputStream - // buffer will be copied into a different allocated block and will be - // committed. - Assert.assertEquals(4, keyLocationInfos.size()); - Assert.assertEquals(4 * blockSize, keyInfo.getDataSize()); - for (OmKeyLocationInfo locationInfo : keyLocationInfos) { - Assert.assertEquals(blockSize, locationInfo.getLength()); - } - } - - @Test - public void testMultiBlockWrites2() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 2 * blockSize); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - // With the initial size provided, it should have pre allocated 2 blocks - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - String dataString = - ContainerTestHelper.getFixedLengthString(keyString, (2 * blockSize)); - byte[] data = dataString.getBytes(UTF_8); - key.write(data); - // 2 block are completely written to the DataNode in 3 blocks. 
- // Data of length half of chunkSize resides in the chunkOutput stream buffer - String dataString2 = - ContainerTestHelper.getFixedLengthString(keyString, chunkSize); - key.write(dataString2.getBytes(UTF_8)); - key.flush(); - - String dataString3 = - ContainerTestHelper.getFixedLengthString(keyString, chunkSize); - key.write(dataString3.getBytes(UTF_8)); - key.flush(); - - String dataString4 = - ContainerTestHelper.getFixedLengthString(keyString, chunkSize * 1 / 2); - key.write(dataString4.getBytes(UTF_8)); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - - key.close(); - // read the key from OM again and match the length.The length will still - // be the equal to the original data size. - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - // Though we have written only block initially, the close will hit - // closeContainerException and remaining data in the chunkOutputStream - // buffer will be copied into a different allocated block and will be - // committed. - - String dataCommitted = - dataString.concat(dataString2).concat(dataString3).concat(dataString4); - Assert.assertEquals(dataCommitted.getBytes(UTF_8).length, - keyInfo.getDataSize()); - validateData(keyName, dataCommitted.getBytes(UTF_8)); - } - - @Test - public void testMultiBlockWrites3() throws Exception { - - String keyName = getKeyName(); - int keyLen = 4 * blockSize; - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, keyLen); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - // With the initial size provided, it should have preallocated 4 blocks - Assert.assertEquals(4, keyOutputStream.getStreamEntries().size()); - // write data 4 blocks and one more chunk - byte[] writtenData = - ContainerTestHelper.getFixedLengthString(keyString, keyLen) - .getBytes(UTF_8); - byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize); - Assert.assertEquals(data.length, 3 * blockSize + chunkSize); - key.write(data); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - // write 3 more chunks worth of data. It will fail and new block will be - // allocated. This write completes 4 blocks worth of data written to key - data = Arrays.copyOfRange(writtenData, 3 * blockSize + chunkSize, keyLen); - key.write(data); - - key.close(); - // read the key from OM again and match the length and data. 
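[Editor's note] The verification later in this test sums the per-block lengths reported by OM and compares the total with the key's data size. The same check, factored into a standalone helper; the OM helper types and accessors are the ones this test already imports and calls:

    import java.util.List;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

    public final class KeyLengthCheck {
      // The sum of per-block lengths in the latest version must equal the
      // data size OM reports for the key.
      public static void assertLengthsConsistent(OmKeyInfo keyInfo) {
        List<OmKeyLocationInfo> blocks = keyInfo.getKeyLocationVersions().get(0)
            .getBlocksLatestVersionOnly();
        long summed =
            blocks.stream().mapToLong(OmKeyLocationInfo::getLength).sum();
        if (summed != keyInfo.getDataSize()) {
          throw new AssertionError("block lengths " + summed
              + " do not add up to key size " + keyInfo.getDataSize());
        }
      }
    }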
- OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - List keyLocationInfos = - keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly(); - OzoneVolume volume = objectStore.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneInputStream inputStream = bucket.readKey(keyName); - byte[] readData = new byte[keyLen]; - inputStream.read(readData); - Assert.assertArrayEquals(writtenData, readData); - - // Though we have written only block initially, the close will hit - // closeContainerException and remaining data in the chunkOutputStream - // buffer will be copied into a different allocated block and will be - // committed. - long length = 0; - for (OmKeyLocationInfo locationInfo : keyLocationInfos) { - length += locationInfo.getLength(); - } - Assert.assertEquals(4 * blockSize, length); - } - - private void waitForContainerClose(OzoneOutputStream outputStream) - throws Exception { - ContainerTestHelper - .waitForContainerClose(outputStream, cluster); - } - - @Ignore // test needs to be fixed after close container is handled for - // non-existent containers on datanode. Test closes pre allocated containers - // on the datanode. - @Test - public void testDiscardPreallocatedBlocks() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 2 * blockSize); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - // With the initial size provided, it should have pre allocated 4 blocks - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - String dataString = - ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize)); - byte[] data = dataString.getBytes(UTF_8); - key.write(data); - List locationInfos = - new ArrayList<>(keyOutputStream.getLocationInfoList()); - long containerID = locationInfos.get(0).getContainerID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(1, datanodes.size()); - waitForContainerClose(key); - dataString = - ContainerTestHelper.getFixedLengthString(keyString, (1 * blockSize)); - data = dataString.getBytes(UTF_8); - key.write(data); - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - - // the 1st block got written. 
Now all the containers are closed, so the 2nd - // pre allocated block will be removed from the list and new block should - // have been allocated - Assert.assertTrue( - keyOutputStream.getLocationInfoList().get(0).getBlockID() - .equals(locationInfos.get(0).getBlockID())); - Assert.assertFalse( - keyOutputStream.getLocationInfoList().get(1).getBlockID() - .equals(locationInfos.get(1).getBlockID())); - key.close(); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } - - @Test - public void testBlockWriteViaRatis() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - byte[] data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize + chunkSize / 2) - .getBytes(UTF_8); - key.write(data); - - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName). - setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - waitForContainerClose(key); - // Again Write the Data. This will throw an exception which will be handled - // and new blocks will be allocated - key.write(data); - key.flush(); - // The write will fail but exception will be handled and length will be - // updated correctly in OzoneManager once the steam is closed - key.close(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - String dataString = new String(data, UTF_8); - dataString = dataString.concat(dataString); - Assert.assertEquals(2 * data.length, keyInfo.getDataSize()); - validateData(keyName, dataString.getBytes(UTF_8)); - } - - @Test - public void testBlockWrites() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, 2 * chunkSize) - .getBytes(UTF_8); - key.write(data1); - - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - waitForContainerClose(key); - byte[] data2 = - ContainerTestHelper.getFixedLengthString(keyString, 3 * chunkSize) - .getBytes(UTF_8); - key.write(data2); - key.flush(); - key.close(); - // read the key from OM again and match the length.The length will still - // be the equal to the original data size. 
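[Editor's note] Every test in this class calls a waitForContainerClose helper before re-writing data; conceptually it is a poll-until-closed loop with a deadline. A hypothetical stand-in that only shows the polling shape (the real helper lives in ContainerTestHelper and queries SCM for the container state):

    import java.util.function.Supplier;

    public final class WaitForState {
      // Polls the supplied condition until it holds or the deadline passes.
      public static void waitUntil(Supplier<Boolean> closed, long timeoutMillis)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!closed.get()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("container did not close in time");
          }
          Thread.sleep(100);
        }
      }
    }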
- OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(5 * chunkSize, keyInfo.getDataSize()); - - // Written the same data twice - String dataString = new String(data1, UTF_8); - // Written the same data twice - String dataString2 = new String(data2, UTF_8); - dataString = dataString.concat(dataString2); - validateData(keyName, dataString.getBytes(UTF_8)); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java deleted file mode 100644 index ea519009718..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ /dev/null @@ -1,296 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientReply; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.BufferPool; -import org.apache.hadoop.hdds.scm.storage.CommitWatcher; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Class to test CommitWatcher functionality. - */ -public class TestCommitWatcher { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static long flushSize; - private static long maxFlushSize; - private static long blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String containerOwner = "OZONE"; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - chunkSize = (int)(1 * OzoneConsts.MB); - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(7) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testblockoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testReleaseBuffers() throws Exception { - int capacity = 2; - BufferPool bufferPool = new BufferPool(chunkSize, capacity); - XceiverClientManager clientManager = new XceiverClientManager(conf); - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - Pipeline pipeline = container.getPipeline(); - long containerId = container.getContainerInfo().getContainerID(); - XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertTrue(xceiverClient instanceof XceiverClientRatis); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000); - BlockID blockID = ContainerTestHelper.getTestBlockID(containerId); - List bufferList = new ArrayList<>(); - List replies = new ArrayList<>(); - long length = 0; - List> - futures = new ArrayList<>(); - for (int i = 0; i < capacity; i++) { - bufferList.clear(); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper - .getWriteChunkRequest(pipeline, blockID, chunkSize); - // add the data to the buffer pool - ByteBuffer byteBuffer = bufferPool.allocateBufferIfNeeded().put( - writeChunkRequest.getWriteChunk().getData().asReadOnlyByteBuffer()); - ratisClient.sendCommandAsync(writeChunkRequest); - ContainerProtos.ContainerCommandRequestProto putBlockRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest); - bufferList.add(byteBuffer); - length += byteBuffer.position(); - CompletableFuture future = - reply.getResponse().thenApply(v -> { - watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList); - return v; - }); - futures.add(future); - 
watcher.getFutureMap().put(length, future); - replies.add(reply); - } - - Assert.assertTrue(replies.size() == 2); - // wait on the 1st putBlock to complete - CompletableFuture future1 = - futures.get(0); - CompletableFuture future2 = - futures.get(1); - future1.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize))); - Assert.assertTrue( - watcher.getFutureMap().get(new Long(chunkSize)).equals(future1)); - // wait on 2nd putBlock to complete - future2.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(2 * chunkSize))); - Assert.assertTrue( - watcher.getFutureMap().get(new Long(2 * chunkSize)).equals(future2)); - Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2); - watcher.watchOnFirstIndex(); - Assert.assertFalse(watcher.getCommitIndex2flushedDataMap() - .containsKey(replies.get(0).getLogIndex())); - Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize)); - Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize); - watcher.watchOnLastIndex(); - Assert.assertFalse(watcher.getCommitIndex2flushedDataMap() - .containsKey(replies.get(1).getLogIndex())); - Assert.assertFalse(watcher.getFutureMap().containsKey(2 * chunkSize)); - Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize); - Assert.assertTrue(watcher.getFutureMap().isEmpty()); - Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty()); - } - - @Test - public void testReleaseBuffersOnException() throws Exception { - int capacity = 2; - BufferPool bufferPool = new BufferPool(chunkSize, capacity); - XceiverClientManager clientManager = new XceiverClientManager(conf); - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - Pipeline pipeline = container.getPipeline(); - long containerId = container.getContainerInfo().getContainerID(); - XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertTrue(xceiverClient instanceof XceiverClientRatis); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient, 10000); - BlockID blockID = ContainerTestHelper.getTestBlockID(containerId); - List bufferList = new ArrayList<>(); - List replies = new ArrayList<>(); - long length = 0; - List> - futures = new ArrayList<>(); - for (int i = 0; i < capacity; i++) { - bufferList.clear(); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper - .getWriteChunkRequest(pipeline, blockID, chunkSize); - // add the data to the buffer pool - ByteBuffer byteBuffer = bufferPool.allocateBufferIfNeeded().put( - writeChunkRequest.getWriteChunk().getData().asReadOnlyByteBuffer()); - ratisClient.sendCommandAsync(writeChunkRequest); - ContainerProtos.ContainerCommandRequestProto putBlockRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest); - bufferList.add(byteBuffer); - length += byteBuffer.position(); - CompletableFuture future = - reply.getResponse().thenApply(v -> { - watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList); - return v; - }); - futures.add(future); - watcher.getFutureMap().put(length, future); - replies.add(reply); - } - - Assert.assertTrue(replies.size() == 2); - // wait on the 1st putBlock to complete - 
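[Editor's note] Both tests in this class exercise the same bookkeeping: each putBlock reply carries a Raft log index, the watcher remembers which buffers were flushed up to that index, and it releases them (crediting the acknowledged byte count) once the index is known to be committed. A miniature model of that idea; it is an illustrative stand-in, not the real org.apache.hadoop.hdds.scm.storage.CommitWatcher:

    import java.util.List;
    import java.util.concurrent.ConcurrentSkipListMap;

    public final class MiniCommitWatcher {
      private final ConcurrentSkipListMap<Long, List<byte[]>> indexToBuffers =
          new ConcurrentSkipListMap<>();
      private long totalAckDataLength;

      // Record which buffers were flushed at a given Raft log index.
      public synchronized void updateCommitInfoMap(long logIndex,
          List<byte[]> buffers) {
        indexToBuffers.put(logIndex, buffers);
      }

      // Called once the pipeline confirms everything up to commitIndex.
      public synchronized void releaseUpTo(long commitIndex) {
        while (!indexToBuffers.isEmpty()
            && indexToBuffers.firstKey() <= commitIndex) {
          List<byte[]> released = indexToBuffers.pollFirstEntry().getValue();
          for (byte[] buffer : released) {
            totalAckDataLength += buffer.length;
          }
        }
      }

      public synchronized long getTotalAckDataLength() {
        return totalAckDataLength;
      }

      public synchronized int pendingEntries() {
        return indexToBuffers.size();
      }
    }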
CompletableFuture future1 = - futures.get(0); - CompletableFuture future2 = - futures.get(1); - future1.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize))); - Assert.assertTrue( - watcher.getFutureMap().get(new Long(chunkSize)).equals(future1)); - // wait on 2nd putBlock to complete - future2.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(2 * chunkSize))); - Assert.assertTrue( - watcher.getFutureMap().get(new Long(2 * chunkSize)).equals(future2)); - Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2); - watcher.watchOnFirstIndex(); - Assert.assertFalse(watcher.getCommitIndex2flushedDataMap() - .containsKey(replies.get(0).getLogIndex())); - Assert.assertFalse(watcher.getFutureMap().containsKey(chunkSize)); - Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - try { - // just watch for a higher index so as to ensure, it does an actual - // call to Ratis. Otherwise, it may just return in case the commitInfoMap - // is updated to the latest index in putBlock response. - watcher.watchForCommit(replies.get(1).getLogIndex() + 1); - } catch(IOException ioe) { - Assert.assertTrue(ioe.getCause() instanceof TimeoutException); - } - long lastIndex = replies.get(1).getLogIndex(); - // Depending on the last successfully replicated commitIndex, either we - // discard only 1st buffer or both buffers - Assert.assertTrue(ratisClient.getReplicatedMinCommitIndex() <= lastIndex); - if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1) - .getLogIndex()) { - Assert.assertTrue(watcher.getTotalAckDataLength() == chunkSize); - Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 1); - Assert.assertTrue(watcher.getFutureMap().size() == 1); - } else { - Assert.assertTrue(watcher.getTotalAckDataLength() == 2 * chunkSize); - Assert.assertTrue(watcher.getFutureMap().isEmpty()); - Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty()); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java deleted file mode 100644 index 0886d26fe64..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
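[Editorial aside] Both tests above drive the CommitWatcher by pairing each WriteChunk with a PutBlock, registering the PutBlock future in the watcher's future map keyed by the running byte count, and then releasing buffers as commit indexes are acknowledged. Purely as a standalone illustration of that offset-keyed bookkeeping pattern, and not the Ozone CommitWatcher implementation itself, a minimal sketch (all names below are hypothetical) could look like this:

  import java.util.concurrent.CompletableFuture;
  import java.util.concurrent.ConcurrentNavigableMap;
  import java.util.concurrent.ConcurrentSkipListMap;

  /** Hypothetical sketch: offset-keyed acknowledgement tracking. */
  class AckTracker {
    // running byte count at which a PutBlock was issued -> future for its reply
    private final ConcurrentSkipListMap<Long, CompletableFuture<Void>> pending =
        new ConcurrentSkipListMap<>();
    private long totalAckedBytes;

    void register(long bytesWrittenSoFar, CompletableFuture<Void> replyFuture) {
      pending.put(bytesWrittenSoFar, replyFuture);
    }

    // Called once the pipeline has acknowledged everything up to ackedBytes.
    synchronized void ack(long ackedBytes) {
      ConcurrentNavigableMap<Long, CompletableFuture<Void>> done =
          pending.headMap(ackedBytes, true);
      done.values().forEach(f -> f.complete(null));
      done.clear();               // the view is backed by 'pending', so this evicts them
      totalAckedBytes = Math.max(totalAckedBytes, ackedBytes);
    }

    synchronized long getTotalAckedBytes() {
      return totalAckedBytes;
    }
  }

In this sketch register() plays the role of the getFutureMap().put(length, future) calls in the tests, and ack() the release that watchOnFirstIndex()/watchOnLastIndex() trigger once an index is replicated.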
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; - -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests delete key operation with a slow follower in the datanode - * pipeline. - */ -public class TestContainerReplicationEndToEnd { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - private static String path; - private static XceiverClientManager xceiverClientManager; - private static long containerReportInterval; - - /** - * Create a MiniDFSCluster for testing. 
- * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - containerReportInterval = 2000; - - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, - containerReportInterval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, containerReportInterval, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, - 2 * containerReportInterval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, - TimeUnit.SECONDS); - conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - 1000, TimeUnit.SECONDS); - conf.setLong("hdds.scm.replication.thread.interval", - containerReportInterval); - - conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4).setHbInterval(200) - .build(); - cluster.waitForClusterToBeReady(); - cluster.getStorageContainerManager().getReplicationManager().start(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - xceiverClientManager = new XceiverClientManager(conf); - volumeName = "testcontainerstatemachinefailures"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * The test simulates end to end container replication. - */ - @Test - public void testContainerReplication() throws Exception { - String keyName = "testContainerReplication"; - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, 0, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); - // First write and flush creates a container in the datanode - key.write(testData); - key.flush(); - - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - long containerID = omKeyLocationInfo.getContainerID(); - PipelineID pipelineID = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getPipelineID(); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(pipelineID); - key.close(); - - if (cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getState() != - HddsProtos.LifeCycleState.CLOSING) { - cluster.getStorageContainerManager().getContainerManager() - .updateContainerState(new ContainerID(containerID), - HddsProtos.LifeCycleEvent.FINALIZE); - } - // wait for container to move to OPEN state in SCM - Thread.sleep(2 * containerReportInterval); - DatanodeDetails oldReplicaNode = pipeline.getFirstNode(); - // now move the container to the closed on the datanode. 
- XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - request.setCmdType(ContainerProtos.Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - xceiverClient.sendCommand(request.build()); - // wait for container to move to closed state in SCM - Thread.sleep(2 * containerReportInterval); - Assert.assertTrue( - cluster.getStorageContainerManager().getContainerInfo(containerID) - .getState() == HddsProtos.LifeCycleState.CLOSED); - // shutdown the replica node - cluster.shutdownHddsDatanode(oldReplicaNode); - // now the container is under replicated and will be moved to a different dn - HddsDatanodeService dnService = null; - - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - Predicate p = - i -> i.getUuid().equals(dn.getDatanodeDetails().getUuid()); - if (!pipeline.getNodes().stream().anyMatch(p)) { - dnService = dn; - } - } - - Assert.assertNotNull(dnService); - final HddsDatanodeService newReplicaNode = dnService; - // wait for the container to get replicated - GenericTestUtils.waitFor(() -> { - return newReplicaNode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID) != null; - }, 500, 100000); - Assert.assertTrue(newReplicaNode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID).getContainerData() - .getBlockCommitSequenceId() > 0); - // wait for SCM to update the replica Map - Thread.sleep(5 * containerReportInterval); - // now shutdown the other two dns of the original pipeline and try reading - // the key again - for (DatanodeDetails dn : pipeline.getNodes()) { - cluster.shutdownHddsDatanode(dn); - } - // This will try to read the data from the dn to which the container got - // replicated after the container got closed. - ContainerTestHelper - .validateData(keyName, testData, objectStore, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java deleted file mode 100644 index 19a17079731..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
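[Editorial aside] The raw CloseContainer command sent at the start of this test is assembled with exactly the same builder calls again in TestContainerStateMachineFailures and TestDeleteWithSlowFollower later in this patch. Purely as a consolidation sketch (helper name hypothetical, builder calls identical to the inline blocks in these tests):

  // Hypothetical helper; same protobuf builder calls as the inline blocks above.
  private static ContainerProtos.ContainerCommandRequestProto closeContainerCommand(
      Pipeline pipeline, long containerID) {
    return ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
        .setCmdType(ContainerProtos.Type.CloseContainer)
        .setContainerID(containerID)
        .setCloseContainer(
            ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
        .build();
  }

Each call site would then reduce to xceiverClient.sendCommand(closeContainerCommand(pipeline, containerID)).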
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.RatisServerConfiguration; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests the containerStateMachine failure handling. - */ - -public class TestContainerStateMachine { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - private static String path; - - /** - * Create a MiniDFSCluster for testing. 
- * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - path = GenericTestUtils - .getTempPath(TestContainerStateMachine.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - - conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); - // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.setQuietMode(false); - OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); - // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); - cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) - .setCertificateClient(new CertificateClientTestImpl(conf)) - .build(); - cluster.waitForClusterToBeReady(); - cluster.getOzoneManager().startSecretManager(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - volumeName = "testcontainerstatemachinefailures"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testContainerStateMachineFailures() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); - key.flush(); - key.write("ratis".getBytes()); - - //get the name of a valid container - KeyOutputStream groupOutputStream = - (KeyOutputStream) key.getOutputStream(); - - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - - // delete the container dir - FileUtil.fullyDelete(new File( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()).getContainerData() - .getContainerPath())); - - key.close(); - // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); - } - - @Test - public void testRatisSnapshotRetention() throws Exception { - - ContainerStateMachine stateMachine = - (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster); - SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - Assert.assertNull(storage.findLatestSnapshot()); - - // Write 10 keys. Num snapshots should be equal to config value. 
- for (int i = 1; i <= 10; i++) { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(("ratis" + i), 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write(("ratis" + i).getBytes()); - key.flush(); - key.write(("ratis" + i).getBytes()); - } - - RatisServerConfiguration ratisServerConfiguration = - conf.getObject(RatisServerConfiguration.class); - - stateMachine = - (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster); - storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - Path parentPath = storage.findLatestSnapshot().getFile().getPath(); - int numSnapshots = parentPath.getParent().toFile().listFiles().length; - Assert.assertTrue(Math.abs(ratisServerConfiguration - .getNumSnapshotsRetained() - numSnapshots) <= 1); - - // Write 10 more keys. Num Snapshots should remain the same. - for (int i = 11; i <= 20; i++) { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(("ratis" + i), 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write(("ratis" + i).getBytes()); - key.flush(); - key.write(("ratis" + i).getBytes()); - } - stateMachine = - (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster); - storage = (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - parentPath = storage.findLatestSnapshot().getFile().getPath(); - numSnapshots = parentPath.getParent().toFile().listFiles().length; - Assert.assertTrue(Math.abs(ratisServerConfiguration - .getNumSnapshotsRetained() - numSnapshots) <= 1); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java deleted file mode 100644 index 9ac45b88116..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ /dev/null @@ -1,504 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
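[Editorial aside] The retention check in testRatisSnapshotRetention above takes the snapshot count from listFiles().length on the parent directory of the latest snapshot file. File.listFiles() can return null, so a slightly more defensive form of the same check, reusing the storage and ratisServerConfiguration objects from the test, would be:

  // Same check as above, with the directory listing null-guarded.
  File snapshotDir = storage.findLatestSnapshot().getFile().getPath()
      .getParent().toFile();
  File[] snapshotFiles = snapshotDir.listFiles();
  int numSnapshots = snapshotFiles == null ? 0 : snapshotFiles.length;
  Assert.assertTrue(Math.abs(
      ratisServerConfiguration.getNumSnapshotsRetained() - numSnapshots) <= 1);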
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import com.google.common.primitives.Longs; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.server.storage.FileInfo; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys. - HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys. - HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - ContainerDataProto.State.UNHEALTHY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; - -/** - * Tests the containerStateMachine failure handling. 
- */ - -public class TestContainerStateMachineFailures { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - private static String path; - private static XceiverClientManager xceiverClientManager; - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - - - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 10, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - 1, TimeUnit.SECONDS); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); - conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - xceiverClientManager = new XceiverClientManager(conf); - volumeName = "testcontainerstatemachinefailures"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testContainerStateMachineFailures() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); - // First write and flush creates a container in the datanode - key.write(testData); - key.flush(); - key.write(testData); - KeyOutputStream groupOutputStream = - (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - // delete the container dir - FileUtil.fullyDelete(new File( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()).getContainerData() - .getContainerPath())); - try { - // there is only 1 datanode in the pipeline, the pipeline will be closed - // and allocation to new pipeline will fail as there is no other dn in - // the cluster - key.close(); - } catch(IOException ioe) { - Assert.assertTrue(ioe instanceof OMException); - } - long containerID = omKeyLocationInfo.getContainerID(); - - // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); - OzoneContainer ozoneContainer = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer(); - // make sure the missing containerSet is empty - HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); - Assert.assertTrue(dispatcher.getMissingContainerSet().isEmpty()); - - // restart the hdds datanode, container should not in the regular set - cluster.restartHddsDatanode(0, true); - ozoneContainer = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer(); - Assert - .assertNull(ozoneContainer.getContainerSet().getContainer(containerID)); - } - - @Test - public void testUnhealthyContainer() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); - key.flush(); - key.write("ratis".getBytes()); - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - ContainerData containerData = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); - KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; - // delete the container db file - FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath())); - try { - // there is only 1 datanode in the pipeline, the pipeline will be closed - // and allocation to new pipeline will fail as there is no other dn in - // 
the cluster - key.close(); - } catch(IOException ioe) { - Assert.assertTrue(ioe instanceof OMException); - } - - long containerID = omKeyLocationInfo.getContainerID(); - - // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); - // Check metadata in the .container file - File containerFile = new File(keyValueContainerData.getMetadataPath(), - containerID + OzoneConsts.CONTAINER_EXTENSION); - - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - - // restart the hdds datanode and see if the container is listed in the - // in the missing container set and not in the regular set - cluster.restartHddsDatanode(0, true); - // make sure the container state is still marked unhealthy after restart - keyValueContainerData = (KeyValueContainerData) ContainerDataYaml - .readContainerFile(containerFile); - assertThat(keyValueContainerData.getState(), is(UNHEALTHY)); - - OzoneContainer ozoneContainer; - ozoneContainer = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer(); - HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); - ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid( - cluster.getHddsDatanodes().get(0).getDatanodeDetails().getUuidString()); - Assert.assertEquals(ContainerProtos.Result.CONTAINER_UNHEALTHY, - dispatcher.dispatch(request.build(), null).getResult()); - } - - @Test - public void testApplyTransactionFailure() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); - key.flush(); - key.write("ratis".getBytes()); - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - ContainerData containerData = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); - KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; - key.close(); - ContainerStateMachine stateMachine = - (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster); - SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - Path parentPath = storage.findLatestSnapshot().getFile().getPath(); - // Since the snapshot threshold is set to 1, since there are - // applyTransactions, we should see snapshots - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - FileInfo snapshot = storage.findLatestSnapshot().getFile(); - 
Assert.assertNotNull(snapshot); - long containerID = omKeyLocationInfo.getContainerID(); - // delete the container db file - FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath())); - Pipeline pipeline = cluster.getStorageContainerLocationClient() - .getContainerWithPipeline(containerID).getPipeline(); - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - request.setCmdType(ContainerProtos.Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - // close container transaction will fail over Ratis and will initiate - // a pipeline close action - - // Since the applyTransaction failure is propagated to Ratis, - // stateMachineUpdater will it exception while taking the next snapshot - // and should shutdown the RaftServerImpl. The client request will fail - // with RaftRetryFailureException. - try { - xceiverClient.sendCommand(request.build()); - Assert.fail("Expected exception not thrown"); - } catch (IOException e) { - Assert.assertTrue(HddsClientUtils - .checkForException(e) instanceof RaftRetryFailureException); - } - // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); - try { - // try to take a new snapshot, ideally it should just fail - stateMachine.takeSnapshot(); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof StateMachineException); - } - // Make sure the latest snapshot is same as the previous one - FileInfo latestSnapshot = storage.findLatestSnapshot().getFile(); - Assert.assertTrue(snapshot.getPath().equals(latestSnapshot.getPath())); - } - - @Test - public void testApplyTransactionIdempotencyWithClosedContainer() - throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); - key.flush(); - key.write("ratis".getBytes()); - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - ContainerData containerData = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); - key.close(); - ContainerStateMachine stateMachine = - (ContainerStateMachine) ContainerTestHelper.getStateMachine(cluster); - SimpleStateMachineStorage storage = - (SimpleStateMachineStorage) stateMachine.getStateMachineStorage(); - Path parentPath = storage.findLatestSnapshot().getFile().getPath(); - // Since the snapshot threshold is set to 1, since there are - // applyTransactions, we should see snapshots - Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - FileInfo snapshot = 
storage.findLatestSnapshot().getFile(); - Assert.assertNotNull(snapshot); - long containerID = omKeyLocationInfo.getContainerID(); - Pipeline pipeline = cluster.getStorageContainerLocationClient() - .getContainerWithPipeline(containerID).getPipeline(); - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - request.setCmdType(ContainerProtos.Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - try { - xceiverClient.sendCommand(request.build()); - } catch (IOException e) { - Assert.fail("Exception should not be thrown"); - } - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.CLOSED); - Assert.assertTrue(stateMachine.isStateMachineHealthy()); - try { - stateMachine.takeSnapshot(); - } catch (IOException ioe) { - Assert.fail("Exception should not be thrown"); - } - FileInfo latestSnapshot = storage.findLatestSnapshot().getFile(); - Assert.assertFalse(snapshot.getPath().equals(latestSnapshot.getPath())); - } - - @Test - public void testValidateBCSIDOnDnRestart() throws Exception { - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); - key.flush(); - key.write("ratis".getBytes()); - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - ContainerData containerData = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()) - .getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); - KeyValueContainerData keyValueContainerData = - (KeyValueContainerData) containerData; - key.close(); - - long containerID = omKeyLocationInfo.getContainerID(); - cluster.shutdownHddsDatanode( - cluster.getHddsDatanodes().get(0).getDatanodeDetails()); - // delete the container db file - FileUtil.fullyDelete(new File(keyValueContainerData.getContainerPath())); - cluster.restartHddsDatanode( - cluster.getHddsDatanodes().get(0).getDatanodeDetails(), true); - OzoneContainer ozoneContainer = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer(); - // make sure the missing containerSet is not empty - HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); - Assert.assertTrue(!dispatcher.getMissingContainerSet().isEmpty()); - Assert - .assertTrue(dispatcher.getMissingContainerSet().contains(containerID)); - // write a new key - key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, - new HashMap<>()); - // First write and flush creates a container in the datanode - key.write("ratis1".getBytes()); - key.flush(); - groupOutputStream = (KeyOutputStream) 
key.getOutputStream(); - locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - omKeyLocationInfo = locationInfoList.get(0); - key.close(); - containerID = omKeyLocationInfo.getContainerID(); - containerData = cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(omKeyLocationInfo.getContainerID()).getContainerData(); - Assert.assertTrue(containerData instanceof KeyValueContainerData); - keyValueContainerData = (KeyValueContainerData) containerData; - ReferenceCountedDB db = BlockUtils. - getDB(keyValueContainerData, conf); - byte[] blockCommitSequenceIdKey = - DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX); - - // modify the bcsid for the container in the ROCKS DB tereby inducing - // corruption - db.getStore().put(blockCommitSequenceIdKey, Longs.toByteArray(0)); - db.decrementReference(); - // shutdown of dn will take a snapsot which will persist the valid BCSID - // recorded in the container2BCSIDMap in ContainerStateMachine - cluster.shutdownHddsDatanode( - cluster.getHddsDatanodes().get(0).getDatanodeDetails()); - // after the restart, there will be a mismatch in BCSID of what is recorded - // in the and what is there in RockSDB and hence the container would be - // marked unhealthy - cluster.restartHddsDatanode( - cluster.getHddsDatanodes().get(0).getDatanodeDetails(), true); - // Make sure the container is marked unhealthy - Assert.assertTrue( - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerState() - == ContainerProtos.ContainerDataProto.State.UNHEALTHY); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java deleted file mode 100644 index 30c2624fbf5..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ /dev/null @@ -1,291 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
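[Editorial aside] testValidateBCSIDOnDnRestart above injects corruption by overwriting the persisted block commit sequence id with zero and then releasing the container DB handle. The calls below are exactly the ones used in the test; the only change in this sketch is that the reference release is moved into a finally block so the count stays balanced even if the put throws:

  ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, conf);
  try {
    // overwrite the persisted BCSID with 0 to induce a mismatch on restart
    db.getStore().put(
        DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX),
        Longs.toByteArray(0));
  } finally {
    db.decrementReference();
  }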
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests delete key operation with a slow follower in the datanode - * pipeline. - */ -public class TestDeleteWithSlowFollower { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - private static String path; - private static XceiverClientManager xceiverClientManager; - - /** - * Create a MiniDFSCluster for testing. 
- * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - // Make the stale, dead and server failure timeout higher so that a dead - // node is not detecte at SCM as well as the pipeline close action - // never gets initiated early at Datanode in the test. - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000, TimeUnit.SECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000, - TimeUnit.SECONDS); - conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, - TimeUnit.SECONDS); - conf.setTimeDuration(OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY, - 1000, TimeUnit.SECONDS); - conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - 1, TimeUnit.SECONDS); - - conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setHbInterval(100) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - xceiverClientManager = new XceiverClientManager(conf); - volumeName = "testcontainerstatemachinefailures"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * The test simulates a slow follower by first writing key thereby creating a - * a container on 3 dns of the cluster. Then, a dn is shutdown and a close - * container cmd gets issued so that in the leader and the alive follower, - * container gets closed. And then, key is deleted and - * the node is started up again so that it - * rejoins the ring and starts applying the transaction from where it left - * by fetching the entries from the leader. Until and unless this follower - * catches up and its replica gets closed, - * the data is not deleted from any of the nodes which have the - * closed replica. - */ - @Test - public void testDeleteKeyWithSlowFollower() throws Exception { - - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey("ratis", 0, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); - // First write and flush creates a container in the datanode - key.write(testData); - key.flush(); - - KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); - OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); - long containerID = omKeyLocationInfo.getContainerID(); - // A container is created on the datanode. Now figure out a follower node to - // kill/slow down. 
- HddsDatanodeService follower = null; - HddsDatanodeService leader = null; - - List pipelineList = - cluster.getStorageContainerManager().getPipelineManager() - .getPipelines(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); - Assert.assertTrue(pipelineList.size() == 1); - Pipeline pipeline = pipelineList.get(0); - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - if (ContainerTestHelper.isRatisFollower(dn, pipeline)) { - follower = dn; - } else if (ContainerTestHelper.isRatisLeader(dn, pipeline)) { - leader = dn; - } - } - Assert.assertNotNull(follower); - Assert.assertNotNull(leader); - // shutdown the slow follower - cluster.shutdownHddsDatanode(follower.getDatanodeDetails()); - key.write(testData); - key.close(); - - // now move the container to the closed on the datanode. - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - ContainerProtos.ContainerCommandRequestProto.Builder request = - ContainerProtos.ContainerCommandRequestProto.newBuilder(); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - request.setCmdType(ContainerProtos.Type.CloseContainer); - request.setContainerID(containerID); - request.setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()); - xceiverClient.sendCommand(request.build()); - - ContainerStateMachine stateMachine = - (ContainerStateMachine) ContainerTestHelper - .getStateMachine(leader, pipeline); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName). - setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName("ratis") - .build(); - OmKeyInfo info = cluster.getOzoneManager().lookupKey(keyArgs); - BlockID blockID = - info.getKeyLocationVersions().get(0).getLocationList().get(0) - .getBlockID(); - OzoneContainer ozoneContainer; - final DatanodeStateMachine dnStateMachine = - leader.getDatanodeStateMachine(); - ozoneContainer = dnStateMachine.getContainer(); - KeyValueHandler keyValueHandler = - (KeyValueHandler) ozoneContainer.getDispatcher() - .getHandler(ContainerProtos.ContainerType.KeyValueContainer); - Container container = - ozoneContainer.getContainerSet().getContainer(blockID.getContainerID()); - KeyValueContainerData containerData = - ((KeyValueContainerData) container.getContainerData()); - long delTrxId = containerData.getDeleteTransactionId(); - long numPendingDeletionBlocks = containerData.getNumPendingDeletionBlocks(); - BlockData blockData = - keyValueHandler.getBlockManager().getBlock(container, blockID); - cluster.getOzoneManager().deleteKey(keyArgs); - GenericTestUtils.waitFor(() -> { - return - dnStateMachine.getCommandDispatcher().getDeleteBlocksCommandHandler() - .getInvocationCount() >= 1; - }, 500, 100000); - Assert.assertTrue(containerData.getDeleteTransactionId() > delTrxId); - Assert.assertTrue( - containerData.getNumPendingDeletionBlocks() > numPendingDeletionBlocks); - // make sure the chunk was never deleted on the leader even though - // deleteBlock handler is invoked - try { - for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { - keyValueHandler.getChunkManager() - .readChunk(container, blockID, ChunkInfo.getFromProtoBuf(chunkInfo), - null); - } - } catch (IOException ioe) { - Assert.fail("Exception should not be thrown."); - - } - long numReadStateMachineOps = - stateMachine.getMetrics().getNumReadStateMachineOps(); - Assert.assertTrue( - stateMachine.getMetrics().getNumReadStateMachineFails() == 0); - 
stateMachine.evictStateMachineCache(); - cluster.restartHddsDatanode(follower.getDatanodeDetails(), false); - // wait for the raft server to come up and join the ratis ring - Thread.sleep(10000); - - // Make sure the readStateMachine call got triggered after the follower - // caught up - Assert.assertTrue(stateMachine.getMetrics().getNumReadStateMachineOps() - > numReadStateMachineOps); - Assert.assertTrue( - stateMachine.getMetrics().getNumReadStateMachineFails() == 0); - // wait for the chunk to get deleted now - Thread.sleep(10000); - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - keyValueHandler = - (KeyValueHandler) dn.getDatanodeStateMachine().getContainer() - .getDispatcher() - .getHandler(ContainerProtos.ContainerType.KeyValueContainer); - // make sure the chunk is now deleted on the all dns - try { - for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) { - keyValueHandler.getChunkManager().readChunk(container, blockID, - ChunkInfo.getFromProtoBuf(chunkInfo), null); - } - Assert.fail("Expected exception is not thrown"); - } catch (IOException ioe) { - Assert.assertTrue(ioe instanceof StorageContainerException); - Assert.assertTrue(((StorageContainerException) ioe).getResult() - == ContainerProtos.Result.UNABLE_TO_FIND_CHUNK); - } - } - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java deleted file mode 100644 index edb796b8799..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ /dev/null @@ -1,415 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
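[Editorial aside] The two fixed Thread.sleep(10000) waits above (for the restarted follower to rejoin the Ratis ring and for the deleted chunks to be purged) are more timing-sensitive than the GenericTestUtils.waitFor polling this test already uses for the delete-command counter. A plain-Java polling helper in the same spirit (a sketch only, name hypothetical) would be:

  // Poll a condition until it holds or a deadline passes.
  private static void awaitTrue(java.util.function.BooleanSupplier condition,
      long intervalMillis, long timeoutMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError(
            "condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(intervalMillis);
    }
  }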
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.StaticMapping; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic - .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests Close Container Exception handling by Ozone Client. - */ -public class TestFailureHandlingByClient { - - private MiniOzoneCluster cluster; - private OzoneConfiguration conf; - private OzoneClient client; - private ObjectStore objectStore; - private int chunkSize; - private int blockSize; - private String volumeName; - private String bucketName; - private String keyString; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - private void init() throws Exception { - conf = new OzoneConfiguration(); - chunkSize = (int) OzoneConsts.MB; - blockSize = 4 * chunkSize; - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5, - TimeUnit.SECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - 1, TimeUnit.SECONDS); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - 1, TimeUnit.SECONDS); - conf.setBoolean( - OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); - - conf.setQuietMode(false); - conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - StaticMapping.class, DNSToSwitchMapping.class); - StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), - "/rack1"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10).build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "datanodefailurehandlingtest"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - private void startCluster() throws Exception { - init(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testBlockWritesWithDnFailures() throws Exception { - startCluster(); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - byte[] data = - ContainerTestHelper - .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes(); - key.write(data); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream groupOutputStream = - (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertTrue(locationInfoList.size() == 1); - long containerId = locationInfoList.get(0).getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - cluster.shutdownHddsDatanode(datanodes.get(0)); - cluster.shutdownHddsDatanode(datanodes.get(1)); - // The write will fail but exception will be handled and length will be - // updated correctly in OzoneManager once the steam is closed - key.close(); - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(data.length, keyInfo.getDataSize()); - 
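- // Reading the key back (below) confirms that the data written before the two datanode shutdowns is still readable.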
validateData(keyName, data); - } - - @Test - public void testWriteSmallFile() throws Exception { - startCluster(); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 0); - String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize/2); - key.write(data.getBytes()); - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - keyOutputStream.getLocationInfoList(); - long containerId = locationInfoList.get(0).getContainerID(); - BlockID blockId = locationInfoList.get(0).getBlockID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - - cluster.shutdownHddsDatanode(datanodes.get(0)); - cluster.shutdownHddsDatanode(datanodes.get(1)); - key.close(); - // this will throw AlreadyClosedException and and current stream - // will be discarded and write a new block - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - - // Make sure a new block is written - Assert.assertNotEquals( - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) - .getBlockID(), blockId); - Assert.assertEquals(data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.getBytes()); - } - - - @Test - public void testContainerExclusionWithClosedContainerException() - throws Exception { - startCluster(); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, blockSize); - String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - - // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); - key.flush(); - long containerId = streamEntryList.get(0).getBlockID().getContainerID(); - BlockID blockId = streamEntryList.get(0).getBlockID(); - List containerIdList = new ArrayList<>(); - containerIdList.add(containerId); - - // below check will assert if the container does not get closed - ContainerTestHelper - .waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); - - // This write will hit ClosedContainerException and this container should - // will be added in the excludelist - key.write(data.getBytes()); - key.flush(); - - Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds() - .contains(ContainerID.valueof(containerId))); - Assert.assertTrue( - keyOutputStream.getExcludeList().getDatanodes().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); - - // The close will just write to the buffer - key.close(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) 
- .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - - // Make sure a new block is written - Assert.assertNotEquals( - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) - .getBlockID(), blockId); - Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).getBytes()); - } - - @Test - public void testDatanodeExclusionWithMajorityCommit() throws Exception { - startCluster(); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, blockSize); - String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - - // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); - key.flush(); - long containerId = streamEntryList.get(0).getBlockID().getContainerID(); - BlockID blockId = streamEntryList.get(0).getBlockID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - - // shutdown 1 datanode. This will make sure the 2 way commit happens for - // next write ops. 
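- // With one follower down, subsequent writes can still commit on the remaining two replicas (a majority),
- // and the stopped datanode is expected to land in the client's exclude list, as asserted further down.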
- cluster.shutdownHddsDatanode(datanodes.get(0)); - - key.write(data.getBytes()); - key.write(data.getBytes()); - key.flush(); - - Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes() - .contains(datanodes.get(0))); - Assert.assertTrue( - keyOutputStream.getExcludeList().getContainerIds().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getPipelineIds().isEmpty()); - // The close will just write to the buffer - key.close(); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - - // Make sure a new block is written - Assert.assertNotEquals( - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) - .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); - } - - - @Test - public void testPipelineExclusionWithPipelineFailure() throws Exception { - startCluster(); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, blockSize); - String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - - // Assert that 1 block will be preallocated - Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); - key.flush(); - long containerId = streamEntryList.get(0).getBlockID().getContainerID(); - BlockID blockId = streamEntryList.get(0).getBlockID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - - // Two nodes, next write will hit AlreadyClosedException , the pipeline - // will be added in the exclude list - cluster.shutdownHddsDatanode(datanodes.get(0)); - cluster.shutdownHddsDatanode(datanodes.get(1)); - - key.write(data.getBytes()); - key.write(data.getBytes()); - key.flush(); - Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() - .contains(pipeline.getId())); - Assert.assertTrue( - keyOutputStream.getExcludeList().getContainerIds().isEmpty()); - Assert.assertTrue( - keyOutputStream.getExcludeList().getDatanodes().isEmpty()); - // The close will just write to the buffer - key.close(); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - - // Make sure a new block is written - Assert.assertNotEquals( - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) - .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); - } - - private OzoneOutputStream createKey(String 
keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java deleted file mode 100644 index 47a716e85ca..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.HashMap; - -/** - * Tests Hybrid Pipeline Creation and IO on same set of Datanodes. - */ -public class TestHybridPipelineOnDatanode { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * Tests reading a corrputed chunk file throws checksum exception. - * @throws IOException - */ - @Test - public void testHybridPipelineOnDatanode() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = UUID.randomUUID().toString(); - byte[] data = value.getBytes(); - objectStore.createVolume(volumeName); - OzoneVolume volume = objectStore.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName1 = UUID.randomUUID().toString(); - - // Write data into a key - OzoneOutputStream out = bucket - .createKey(keyName1, data.length, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - - String keyName2 = UUID.randomUUID().toString(); - - // Write data into a key - out = bucket - .createKey(keyName2, data.length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - - // We need to find the location of the chunk file corresponding to the - // data we just wrote. - OzoneKey key1 = bucket.getKey(keyName1); - long containerID1 = - ((OzoneKeyDetails) key1).getOzoneKeyLocations().get(0).getContainerID(); - - OzoneKey key2 = bucket.getKey(keyName2); - long containerID2 = - ((OzoneKeyDetails) key2).getOzoneKeyLocations().get(0).getContainerID(); - - PipelineID pipelineID1 = - cluster.getStorageContainerManager().getContainerInfo(containerID1) - .getPipelineID(); - PipelineID pipelineID2 = - cluster.getStorageContainerManager().getContainerInfo(containerID2) - .getPipelineID(); - Pipeline pipeline1 = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(pipelineID1); - List dns = pipeline1.getNodes(); - Assert.assertTrue(dns.size() == 1); - - Pipeline pipeline2 = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(pipelineID2); - Assert.assertFalse(pipeline1.getFactor().equals(pipeline2.getFactor())); - Assert.assertTrue(pipeline1.getType() == HddsProtos.ReplicationType.RATIS); - Assert.assertTrue(pipeline1.getType() == pipeline2.getType()); - // assert that the pipeline Id1 and pipelineId2 are on the same node - // but different replication factor - Assert.assertTrue(pipeline2.getNodes().contains(dns.get(0))); - byte[] b1 = new byte[data.length]; - byte[] b2 = new byte[data.length]; - // now try to read both the keys - OzoneInputStream is = bucket.readKey(keyName1); - is.read(b1); - is.close(); - - // now try to read both the keys - is = bucket.readKey(keyName2); - is.read(b2); - is.close(); - Assert.assertTrue(Arrays.equals(b1, data)); - Assert.assertTrue(Arrays.equals(b1, b2)); - } -} - diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java deleted file mode 100644 index fa8a289ea81..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientMetrics; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests {@link KeyInputStream}. - */ -public class TestKeyInputStream { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - chunkSize = 100; - flushSize = 4 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "test-key-input-stream-volume"; - bucketName = "test-key-input-stream-bucket"; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - @Test - public void testSeek() throws Exception { - XceiverClientMetrics metrics = XceiverClientManager - .getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long readChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.ReadChunk); - - String keyName = getKeyName(); - OzoneOutputStream key = ContainerTestHelper.createKey(keyName, - ReplicationType.RATIS, 0, objectStore, volumeName, bucketName); - - // write data spanning 3 chunks - int dataLength = (2 * chunkSize) + (chunkSize / 2); - byte[] inputData = ContainerTestHelper.getFixedLengthString( - keyString, dataLength).getBytes(UTF_8); - key.write(inputData); - key.close(); - - Assert.assertEquals(writeChunkCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - - KeyInputStream keyInputStream = (KeyInputStream) objectStore - .getVolume(volumeName).getBucket(bucketName).readKey(keyName) - .getInputStream(); - - // Seek to position 150 - keyInputStream.seek(150); - - Assert.assertEquals(150, keyInputStream.getPos()); - - // Seek operation should not result in any readChunk operation. - Assert.assertEquals(readChunkCount, metrics - .getContainerOpsMetrics(ContainerProtos.Type.ReadChunk)); - Assert.assertEquals(readChunkCount, metrics - .getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk)); - - byte[] readData = new byte[chunkSize]; - keyInputStream.read(readData, 0, chunkSize); - - // Since we reading data from index 150 to 250 and the chunk boundary is - // 100 bytes, we need to read 2 chunks. 
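- // (chunkSize is 100, so chunk 1 covers offsets 100-199 and chunk 2 covers 200-299;
- // a 100-byte read starting at offset 150 therefore touches both chunks.)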
- Assert.assertEquals(readChunkCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.ReadChunk)); - - keyInputStream.close(); - - // Verify that the data read matches with the input data at corresponding - // indices. - for (int i = 0; i < chunkSize; i++) { - Assert.assertEquals(inputData[chunkSize + 50 + i], readData[i]); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java deleted file mode 100644 index 96662471a3e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ /dev/null @@ -1,220 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * Tests MultiBlock Writes with Dn failures by Ozone Client. - */ -public class TestMultiBlockWritesWithDnFailures { - - private MiniOzoneCluster cluster; - private OzoneConfiguration conf; - private OzoneClient client; - private ObjectStore objectStore; - private int chunkSize; - private int blockSize; - private String volumeName; - private String bucketName; - private String keyString; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - private void startCluster(int datanodes) throws Exception { - conf = new OzoneConfiguration(); - chunkSize = (int) OzoneConsts.MB; - blockSize = 4 * chunkSize; - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 5, - TimeUnit.SECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 10); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - 1, TimeUnit.SECONDS); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - 1, TimeUnit.SECONDS); - - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(datanodes).build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "datanodefailurehandlingtest"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testMultiBlockWritesWithDnFailures() throws Exception { - startCluster(6); - String keyName = "ratis3"; - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - String data = - ContainerTestHelper - .getFixedLengthString(keyString, blockSize + chunkSize); - key.write(data.getBytes()); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream groupOutputStream = - (KeyOutputStream) key.getOutputStream(); - List locationInfoList = - groupOutputStream.getLocationInfoList(); - Assert.assertTrue(locationInfoList.size() == 2); - long containerId = locationInfoList.get(1).getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - cluster.shutdownHddsDatanode(datanodes.get(0)); - cluster.shutdownHddsDatanode(datanodes.get(1)); - - // The write will fail but exception will be handled and length will be - // updated correctly in OzoneManager once the steam is closed - key.write(data.getBytes()); - key.close(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).getBytes()); - } - - @Test - public void testMultiBlockWritesWithIntermittentDnFailures() - throws Exception { - startCluster(10); - String keyName = UUID.randomUUID().toString(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 6 * blockSize); - String data = ContainerTestHelper - .getFixedLengthString(keyString, 
blockSize + chunkSize); - key.write(data.getBytes()); - - // get the name of a valid container - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = - (KeyOutputStream) key.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - - // Assert that 6 block will be preallocated - Assert.assertEquals(6, streamEntryList.size()); - key.write(data.getBytes()); - key.flush(); - long containerId = streamEntryList.get(0).getBlockID().getContainerID(); - BlockID blockId = streamEntryList.get(0).getBlockID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - cluster.shutdownHddsDatanode(datanodes.get(0)); - - // The write will fail but exception will be handled and length will be - // updated correctly in OzoneManager once the steam is closed - key.write(data.getBytes()); - - // shutdown the second datanode - cluster.shutdownHddsDatanode(datanodes.get(1)); - key.write(data.getBytes()); - key.close(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, - data.concat(data).concat(data).concat(data).getBytes()); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java deleted file mode 100644 index 2d96b8d6ec5..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.kms.KMSClientProvider; -import org.apache.hadoop.crypto.key.kms.server.MiniKMS; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import java.util.HashMap; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * This class is to test all the public facing APIs of Ozone Client. - */ -public class TestOzoneAtRestEncryption extends TestOzoneRpcClient { - - private static MiniOzoneCluster cluster = null; - private static MiniKMS miniKMS; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static OzoneManager ozoneManager; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - - private static final String SCM_ID = UUID.randomUUID().toString(); - private static File testDir; - private static OzoneConfiguration conf; - private static final String TEST_KEY = "key1"; - - - /** - * Create a MiniOzoneCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); - - File kmsDir = new File(testDir, UUID.randomUUID().toString()); - Assert.assertTrue(kmsDir.mkdirs()); - MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder(); - miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build(); - miniKMS.start(); - - OzoneManager.setTestSecureOmFlag(true); - conf = new OzoneConfiguration(); - conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, - getKeyProviderURI(miniKMS)); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - CertificateClientTestImpl certificateClientTest = - new CertificateClientTestImpl(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .setScmId(SCM_ID) - .setCertificateClient(certificateClientTest) - .build(); - cluster.getOzoneManager().startSecretManager(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - ozoneManager = cluster.getOzoneManager(); - TestOzoneRpcClient.setCluster(cluster); - TestOzoneRpcClient.setOzClient(ozClient); - TestOzoneRpcClient.setOzoneManager(ozoneManager); - TestOzoneRpcClient.setStorageContainerLocationClient( - storageContainerLocationClient); - TestOzoneRpcClient.setStore(store); - TestOzoneRpcClient.setScmId(SCM_ID); - - // create test key - createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); - } - - - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. 
- */ - @AfterClass - public static void shutdown() throws IOException { - if(ozClient != null) { - ozClient.close(); - } - - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - - if (cluster != null) { - cluster.shutdown(); - } - - if (miniKMS != null) { - miniKMS.stop(); - } - } - - @Test - public void testPutKeyWithEncryption() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs bucketArgs = BucketArgs.newBuilder() - .setBucketEncryptionKey(TEST_KEY).build(); - volume.createBucket(bucketName, bucketArgs); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 1; i++) { - String keyName = UUID.randomUUID().toString(); - - try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes("UTF-8").length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>())) { - out.write(value.getBytes("UTF-8")); - } - - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - byte[] fileContent; - int len = 0; - - try(OzoneInputStream is = bucket.readKey(keyName)) { - fileContent = new byte[value.getBytes("UTF-8").length]; - len = is.read(fileContent); - } - - Assert.assertEquals(len, value.length()); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE)); - Assert.assertEquals(value, new String(fileContent, "UTF-8")); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - } - - private boolean verifyRatisReplication(String volumeName, String bucketName, - String keyName, ReplicationType type, ReplicationFactor factor) - throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - HddsProtos.ReplicationType replicationType = - HddsProtos.ReplicationType.valueOf(type.toString()); - HddsProtos.ReplicationFactor replicationFactor = - HddsProtos.ReplicationFactor.valueOf(factor.getValue()); - OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); - for (OmKeyLocationInfo info: - keyInfo.getLatestVersionLocations().getLocationList()) { - ContainerInfo container = - storageContainerLocationClient.getContainer(info.getContainerID()); - if (!container.getReplicationFactor().equals(replicationFactor) || ( - container.getReplicationType() != replicationType)) { - return false; - } - } - return true; - } - - private static String getKeyProviderURI(MiniKMS kms) { - return KMSClientProvider.SCHEME_NAME + "://" + - kms.getKMSUrl().toExternalForm().replace("://", "@"); - } - - private static void createKey(String keyName, KeyProvider - provider, Configuration config) - throws NoSuchAlgorithmException, IOException { - final KeyProvider.Options options = KeyProvider.options(config); - options.setDescription(keyName); - options.setBitLength(128); - provider.createKey(keyName, options); - provider.flush(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java deleted file mode 100644 index 
5f6d494a24b..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java +++ /dev/null @@ -1,233 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.ratis.protocol.GroupMismatchException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; - -/** - * Tests failure detection and handling in BlockOutputStream Class. - */ -public class TestOzoneClientRetriesOnException { - - private static MiniOzoneCluster cluster; - private OzoneConfiguration conf = new OzoneConfiguration(); - private OzoneClient client; - private ObjectStore objectStore; - private int chunkSize; - private int flushSize; - private int maxFlushSize; - private int blockSize; - private String volumeName; - private String bucketName; - private String keyString; - private XceiverClientManager xceiverClientManager; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - conf.set(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, "5000ms"); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - // conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE"); - conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, 3); - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(7) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - xceiverClientManager = new XceiverClientManager(conf); - keyString = UUID.randomUUID().toString(); - volumeName = "testblockoutputstreamwithretries"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testGroupMismatchExceptionHandling() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - long containerID = - keyOutputStream.getStreamEntries().get(0).getBlockID().getContainerID(); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - ContainerTestHelper.waitForPipelineClose(key, cluster, true); - key.flush(); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof GroupMismatchException); - Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() - .contains(pipeline.getId())); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2); - key.close(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); - validateData(keyName, data1); - } - - @Test - public void testMaxRetriesByOzoneClient() throws Exception { - String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, 4 * blockSize); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = 
(KeyOutputStream) key.getOutputStream(); - List entries = keyOutputStream.getStreamEntries(); - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 4); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - long containerID; - List containerList = new ArrayList<>(); - for (BlockOutputStreamEntry entry : entries) { - containerID = entry.getBlockID().getContainerID(); - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - XceiverClientSpi xceiverClient = - xceiverClientManager.acquireClient(pipeline); - if (!containerList.contains(containerID)) { - xceiverClient.sendCommand(ContainerTestHelper - .getCreateContainerRequest(containerID, pipeline)); - } - xceiverClientManager.releaseClient(xceiverClient, false); - } - key.write(data1); - OutputStream stream = entries.get(0).getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - ContainerTestHelper.waitForContainerClose(key, cluster); - try { - key.write(data1); - Assert.fail("Expected exception not thrown"); - } catch (IOException ioe) { - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof ContainerNotOpenException); - Assert.assertTrue(ioe.getMessage().contains( - "Retry request failed. retries get failed due to exceeded maximum " - + "allowed retries number: 3")); - } - try { - key.flush(); - Assert.fail("Expected exception not thrown"); - } catch (IOException ioe) { - Assert.assertTrue(ioe.getMessage().contains("Stream is closed")); - } - try { - key.close(); - } catch (IOException ioe) { - Assert.fail("Expected should not be thrown"); - } - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java deleted file mode 100644 index 8ecddacdcc0..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - - -/** - * This class is to test all the public facing APIs of Ozone Client. - */ -public class TestOzoneRpcClient extends TestOzoneRpcClientAbstract { - - /** - * Create a MiniOzoneCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - startCluster(conf); - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. - */ - @AfterClass - public static void shutdown() throws IOException { - shutdownCluster(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java deleted file mode 100644 index 9189c2fad18..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ /dev/null @@ -1,2810 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.TreeMap; -import java.util.UUID; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneKeyLocation; -import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.ha.OMProxyInfo; 
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.s3.util.OzoneS3Util; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; - -import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; - -import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.either; - -import org.junit.Assert; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import org.junit.Ignore; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This is an abstract class to test all the public facing APIs of Ozone - * Client, w/o OM Ratis server. - * {@link TestOzoneRpcClient} tests the Ozone Client by submitting the - * requests directly to OzoneManager. {@link TestOzoneRpcClientWithRatis} - * tests the Ozone Client by submitting requests to OM's Ratis server. 
- */ -public abstract class TestOzoneRpcClientAbstract { - - static final Logger LOG = - LoggerFactory.getLogger(TestOzoneRpcClientAbstract.class); - private static MiniOzoneCluster cluster = null; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static OzoneManager ozoneManager; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String remoteUserName = "remoteUser"; - private static String remoteGroupName = "remoteGroup"; - private static OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteGroupName, - READ, DEFAULT); - private static OzoneAcl inheritedUserAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); - private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, - remoteGroupName, READ, ACCESS); - - private static String scmId = UUID.randomUUID().toString(); - - /** - * Create a MiniOzoneCluster for testing. - * @param conf Configurations to start the cluster. - * @throws Exception - */ - static void startCluster(OzoneConfiguration conf) throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setScmId(scmId) - .build(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - ozoneManager = cluster.getOzoneManager(); - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. - */ - static void shutdownCluster() throws IOException { - if(ozClient != null) { - ozClient.close(); - } - - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - - if (cluster != null) { - cluster.shutdown(); - } - } - - public static void setCluster(MiniOzoneCluster cluster) { - TestOzoneRpcClientAbstract.cluster = cluster; - } - - public static void setOzClient(OzoneClient ozClient) { - TestOzoneRpcClientAbstract.ozClient = ozClient; - } - - public static void setOzoneManager(OzoneManager ozoneManager){ - TestOzoneRpcClientAbstract.ozoneManager = ozoneManager; - } - - public static void setStorageContainerLocationClient( - StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient) { - TestOzoneRpcClientAbstract.storageContainerLocationClient = - storageContainerLocationClient; - } - - public static void setStore(ObjectStore store) { - TestOzoneRpcClientAbstract.store = store; - } - - public static ObjectStore getStore() { - return TestOzoneRpcClientAbstract.store; - } - - public static void setScmId(String scmId){ - TestOzoneRpcClientAbstract.scmId = scmId; - } - - /** - * Test OM Proxy Provider. - */ - @Test - public void testOMClientProxyProvider() { - OMFailoverProxyProvider omFailoverProxyProvider = store.getClientProxy() - .getOMProxyProvider(); - List omProxies = omFailoverProxyProvider.getOMProxyInfos(); - - // For a non-HA OM service, there should be only one OM proxy. - Assert.assertEquals(1, omProxies.size()); - // The address in OMProxyInfo object, which client will connect to, - // should match the OM's RPC address. 
- Assert.assertTrue(omProxies.get(0).getAddress().equals( - ozoneManager.getOmRpcServerAddr())); - } - - @Test - public void testSetVolumeQuota() - throws IOException { - String volumeName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota("100000000 BYTES")); - OzoneVolume volume = store.getVolume(volumeName); - Assert.assertEquals(100000000L, volume.getQuota()); - } - - @Test - public void testDeleteVolume() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - Assert.assertNotNull(volume); - store.deleteVolume(volumeName); - OzoneTestUtils.expectOmException(ResultCodes.VOLUME_NOT_FOUND, - () -> store.getVolume(volumeName)); - - } - - @Test - public void testCreateVolumeWithMetadata() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - VolumeArgs volumeArgs = VolumeArgs.newBuilder() - .addMetadata("key1", "val1") - .build(); - store.createVolume(volumeName, volumeArgs); - OzoneVolume volume = store.getVolume(volumeName); - - Assert.assertEquals("val1", volume.getMetadata().get("key1")); - Assert.assertEquals(volumeName, volume.getName()); - } - - @Test - public void testCreateBucketWithMetadata() - throws IOException, OzoneClientException { - long currentTime = Time.now(); - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs args = BucketArgs.newBuilder() - .addMetadata("key1", "value1").build(); - volume.createBucket(bucketName, args); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertNotNull(bucket.getMetadata()); - Assert.assertEquals("value1", bucket.getMetadata().get("key1")); - - } - - - @Test - public void testCreateBucket() - throws IOException, OzoneClientException { - long currentTime = Time.now(); - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertTrue(bucket.getCreationTime() >= currentTime); - Assert.assertTrue(volume.getCreationTime() >= currentTime); - } - - @Test - public void testCreateS3Bucket() - throws IOException, OzoneClientException { - long currentTime = Time.now(); - String userName = UserGroupInformation.getCurrentUser().getUserName(); - String bucketName = UUID.randomUUID().toString(); - store.createS3Bucket(userName, bucketName); - String volumeName = store.getOzoneVolumeName(bucketName); - OzoneVolume volume = store.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertTrue(bucket.getCreationTime() >= currentTime); - Assert.assertTrue(volume.getCreationTime() >= currentTime); - } - - @Test - public void testCreateSecureS3Bucket() throws IOException { - long currentTime = Time.now(); - String userName = "ozone/localhost@EXAMPLE.COM"; - String bucketName = UUID.randomUUID().toString(); - String s3VolumeName = OzoneS3Util.getVolumeName(userName); - store.createS3Bucket(s3VolumeName, bucketName); - String volumeName = 
store.getOzoneVolumeName(bucketName); - assertEquals(volumeName, "s3" + s3VolumeName); - - OzoneVolume volume = store.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertTrue(bucket.getCreationTime() >= currentTime); - Assert.assertTrue(volume.getCreationTime() >= currentTime); - } - - - @Test - public void testListS3Buckets() - throws IOException, OzoneClientException { - String userName = "ozone100"; - String bucketName1 = UUID.randomUUID().toString(); - String bucketName2 = UUID.randomUUID().toString(); - store.createS3Bucket(userName, bucketName1); - store.createS3Bucket(userName, bucketName2); - Iterator iterator = store.listS3Buckets(userName, - null); - - while (iterator.hasNext()) { - assertThat(iterator.next().getName(), either(containsString(bucketName1)) - .or(containsString(bucketName2))); - } - - } - - @Test - public void testListS3BucketsFail() - throws IOException, OzoneClientException { - String userName = "randomUser"; - Iterator iterator = store.listS3Buckets(userName, - null); - - Assert.assertFalse(iterator.hasNext()); - - } - - @Test - public void testDeleteS3Bucket() - throws Exception { - long currentTime = Time.now(); - String userName = "ozone1"; - String bucketName = UUID.randomUUID().toString(); - store.createS3Bucket(userName, bucketName); - String volumeName = store.getOzoneVolumeName(bucketName); - OzoneVolume volume = store.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertTrue(bucket.getCreationTime() >= currentTime); - Assert.assertTrue(volume.getCreationTime() >= currentTime); - store.deleteS3Bucket(bucketName); - - OzoneTestUtils.expectOmException(ResultCodes.S3_BUCKET_NOT_FOUND, - () -> store.getOzoneVolumeName(bucketName)); - } - - @Test - public void testDeleteS3NonExistingBucket() { - try { - store.deleteS3Bucket(UUID.randomUUID().toString()); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("NOT_FOUND", ex); - } - } - - @Test - public void testCreateS3BucketMapping() - throws IOException, OzoneClientException { - long currentTime = Time.now(); - String userName = "ozone"; - String bucketName = UUID.randomUUID().toString(); - store.createS3Bucket(userName, bucketName); - String volumeName = store.getOzoneVolumeName(bucketName); - OzoneVolume volume = store.getVolume(volumeName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - - String mapping = store.getOzoneBucketMapping(bucketName); - Assert.assertEquals("s3"+userName+"/"+bucketName, mapping); - Assert.assertEquals(bucketName, store.getOzoneBucketName(bucketName)); - Assert.assertEquals("s3"+userName, store.getOzoneVolumeName(bucketName)); - - } - - @Test - public void testCreateBucketWithVersioning() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setVersioning(true); - volume.createBucket(bucketName, builder.build()); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertEquals(true, bucket.getVersioning()); - } - - @Test - public void testCreateBucketWithStorageType() - throws IOException, OzoneClientException { - 
String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setStorageType(StorageType.SSD); - volume.createBucket(bucketName, builder.build()); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertEquals(StorageType.SSD, bucket.getStorageType()); - } - - @Test - public void testCreateBucketWithAcls() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(USER, "test", - READ, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); - volume.createBucket(bucketName, builder.build()); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertTrue(bucket.getAcls().contains(userAcl)); - } - - @Test - public void testCreateBucketWithAllArgument() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setVersioning(true) - .setStorageType(StorageType.SSD) - .setAcls(acls); - volume.createBucket(bucketName, builder.build()); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertEquals(true, bucket.getVersioning()); - Assert.assertEquals(StorageType.SSD, bucket.getStorageType()); - Assert.assertTrue(bucket.getAcls().contains(userAcl)); - } - - @Test - public void testInvalidBucketCreation() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = "invalid#bucket"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - LambdaTestUtils.intercept(IllegalArgumentException.class, - "Bucket or Volume name has an unsupported" + - " character : #", - () -> volume.createBucket(bucketName)); - - } - - @Test - public void testAddBucketAcl() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACLType.ALL, ACCESS)); - OzoneBucket bucket = volume.getBucket(bucketName); - for (OzoneAcl acl : acls) { - assertTrue(bucket.addAcls(acl)); - } - OzoneBucket newBucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, newBucket.getName()); - Assert.assertTrue(bucket.getAcls().contains(acls.get(0))); - } - - @Test - public void testRemoveBucketAcl() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - 
acls.add(userAcl); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); - volume.createBucket(bucketName, builder.build()); - OzoneBucket bucket = volume.getBucket(bucketName); - for (OzoneAcl acl : acls) { - assertTrue(bucket.removeAcls(acl)); - } - OzoneBucket newBucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, newBucket.getName()); - Assert.assertTrue(!bucket.getAcls().contains(acls.get(0))); - } - - @Test - public void testRemoveBucketAclUsingRpcClientRemoveAcl() - throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); - acls.add(new OzoneAcl(USER, "test1", - ACLType.ALL, ACCESS)); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); - volume.createBucket(bucketName, builder.build()); - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setStoreType(OzoneObj.StoreType.OZONE) - .setResType(OzoneObj.ResourceType.BUCKET).build(); - - // Remove the 2nd acl added to the list. - boolean remove = store.removeAcl(ozoneObj, acls.get(1)); - Assert.assertTrue(remove); - Assert.assertFalse(store.getAcl(ozoneObj).contains(acls.get(1))); - - remove = store.removeAcl(ozoneObj, acls.get(0)); - Assert.assertTrue(remove); - Assert.assertFalse(store.getAcl(ozoneObj).contains(acls.get(0))); - } - - @Test - public void testSetBucketVersioning() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - bucket.setVersioning(true); - OzoneBucket newBucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, newBucket.getName()); - Assert.assertEquals(true, newBucket.getVersioning()); - } - - @Test - public void testAclsAfterCallingSetBucketProperty() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - - OzoneBucket ozoneBucket = volume.getBucket(bucketName); - List currentAcls = ozoneBucket.getAcls(); - - ozoneBucket.setVersioning(true); - - OzoneBucket newBucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, newBucket.getName()); - Assert.assertEquals(true, newBucket.getVersioning()); - - List aclsAfterSet = newBucket.getAcls(); - Assert.assertEquals(currentAcls, aclsAfterSet); - - } - - @Test - public void testSetBucketStorageType() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - bucket.setStorageType(StorageType.SSD); - OzoneBucket newBucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, newBucket.getName()); - 
Assert.assertEquals(StorageType.SSD, newBucket.getStorageType()); - } - - - @Test - public void testDeleteBucket() - throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertNotNull(bucket); - volume.deleteBucket(bucketName); - - OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, - () -> volume.getBucket(bucketName) - ); - } - - private boolean verifyRatisReplication(String volumeName, String bucketName, - String keyName, ReplicationType type, ReplicationFactor factor) - throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - HddsProtos.ReplicationType replicationType = - HddsProtos.ReplicationType.valueOf(type.toString()); - HddsProtos.ReplicationFactor replicationFactor = - HddsProtos.ReplicationFactor.valueOf(factor.getValue()); - OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); - for (OmKeyLocationInfo info: - keyInfo.getLatestVersionLocations().getLocationList()) { - ContainerInfo container = - storageContainerLocationClient.getContainer(info.getContainerID()); - if (!container.getReplicationFactor().equals(replicationFactor) || ( - container.getReplicationType() != replicationType)) { - return false; - } - } - return true; - } - - @Test - public void testPutKey() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; - is.read(fileContent); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, - ONE)); - Assert.assertEquals(value, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - } - - @Test - public void testValidateBlockLengthWithCommitKey() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = RandomStringUtils.random(RandomUtils.nextInt(0, 1024)); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - - // create the initial key with size 0, write will allocate the first block. 
- OzoneOutputStream out = bucket.createKey(keyName, 0, - STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); - builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); - OmKeyInfo keyInfo = ozoneManager.lookupKey(builder.build()); - - List locationInfoList = - keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); - // LocationList should have only 1 block - Assert.assertEquals(1, locationInfoList.size()); - // make sure the data block size is updated - Assert.assertEquals(value.getBytes().length, - locationInfoList.get(0).getLength()); - // make sure the total data size is set correctly - Assert.assertEquals(value.getBytes().length, keyInfo.getDataSize()); - } - - @Test - public void testPutKeyRatisOneNode() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; - is.read(fileContent); - is.close(); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.RATIS, ONE)); - Assert.assertEquals(value, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - } - - @Test - public void testPutKeyRatisThreeNodes() - throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; - is.read(fileContent); - is.close(); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.RATIS, - ReplicationFactor.THREE)); - Assert.assertEquals(value, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - } - - - @Ignore("Debug Jenkins Timeout") - @Test - public void testPutKeyRatisThreeNodesParallel() throws IOException, - InterruptedException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = 
UUID.randomUUID().toString(); - long currentTime = Time.now(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - CountDownLatch latch = new CountDownLatch(2); - AtomicInteger failCount = new AtomicInteger(0); - - Runnable r = () -> { - try { - for (int i = 0; i < 5; i++) { - String keyName = UUID.randomUUID().toString(); - String data = generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong()).toString(); - OzoneOutputStream out = bucket.createKey(keyName, - data.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - out.write(data.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; - is.read(fileContent); - is.close(); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.RATIS, - ReplicationFactor.THREE)); - Assert.assertEquals(data, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - latch.countDown(); - } catch (IOException ex) { - latch.countDown(); - failCount.incrementAndGet(); - } - }; - - Thread thread1 = new Thread(r); - Thread thread2 = new Thread(r); - - thread1.start(); - thread2.start(); - - latch.await(600, TimeUnit.SECONDS); - - if (failCount.get() > 0) { - fail("testPutKeyRatisThreeNodesParallel failed"); - } - - } - - - @Test - public void testReadKeyWithVerifyChecksumFlagEnable() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - // Create and corrupt key - createAndCorruptKey(volumeName, bucketName, keyName); - - // read corrupt key with verify checksum enabled - readCorruptedKey(volumeName, bucketName, keyName, true); - - } - - - @Test - public void testReadKeyWithVerifyChecksumFlagDisable() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - // Create and corrupt key - createAndCorruptKey(volumeName, bucketName, keyName); - - // read corrupt key with verify checksum disabled - readCorruptedKey(volumeName, bucketName, keyName, false); - - } - - private void createAndCorruptKey(String volumeName, String bucketName, - String keyName) throws IOException { - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - // Write data into a key - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - - // We need to find the location of the chunk file corresponding to the - // data we just wrote. - OzoneKey key = bucket.getKey(keyName); - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - - // Get the container by traversing the datanodes. At least one of the - // datanodes must have this container.
- Container container = null; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - break; - } - } - Assert.assertNotNull("Container not found", container); - corruptData(container, key); - } - - - private void readCorruptedKey(String volumeName, String bucketName, - String keyName, boolean verifyChecksum) throws IOException { - try { - Configuration configuration = cluster.getConf(); - configuration.setBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM, - verifyChecksum); - RpcClient client = new RpcClient(configuration, null); - OzoneInputStream is = client.getKey(volumeName, bucketName, keyName); - is.read(new byte[100]); - is.close(); - if (verifyChecksum) { - fail("Reading corrupted data should fail, as verify checksum is " + - "enabled"); - } - } catch (IOException e) { - if (!verifyChecksum) { - fail("Reading corrupted data should not fail, as verify checksum is " + - "disabled"); - } - } - } - - - private void readKey(OzoneBucket bucket, String keyName, String data) - throws IOException { - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; - is.read(fileContent); - is.close(); - } - - @Test - public void testGetKeyDetails() throws IOException, OzoneClientException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - String keyValue = RandomStringUtils.random(128); - //String keyValue = "this is a test value.glx"; - // create the initial key with size 0, write will allocate the first block. - OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(keyValue.getBytes()); - out.close(); - - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[32]; - is.read(fileContent); - - // First, confirm the key info from the client matches the info in OM. - OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); - builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); - OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()). - getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0); - long containerID = keyInfo.getContainerID(); - long localID = keyInfo.getLocalID(); - OzoneKeyDetails keyDetails = (OzoneKeyDetails)bucket.getKey(keyName); - Assert.assertEquals(keyName, keyDetails.getName()); - - List keyLocations = keyDetails.getOzoneKeyLocations(); - Assert.assertEquals(1, keyLocations.size()); - Assert.assertEquals(containerID, keyLocations.get(0).getContainerID()); - Assert.assertEquals(localID, keyLocations.get(0).getLocalID()); - - // Make sure that the data size matched. - Assert.assertEquals(keyValue.getBytes().length, - keyLocations.get(0).getLength()); - - // Second, sum the data size from chunks in Container via containerID - // and localID, make sure the size equals to the size from keyDetails. 
- ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); - - DatanodeDetails datanodeDetails = datanodes.get(0); - Assert.assertNotNull(datanodeDetails); - HddsDatanodeService datanodeService = null; - for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) { - if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) { - datanodeService = datanodeServiceItr; - break; - } - } - KeyValueContainerData containerData = - (KeyValueContainerData)(datanodeService.getDatanodeStateMachine() - .getContainer().getContainerSet().getContainer(containerID) - .getContainerData()); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); - try(KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator( - containerID, new File(containerPath))) { - while (keyValueBlockIterator.hasNext()) { - BlockData blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - long length = 0; - List chunks = blockData.getChunks(); - for (ContainerProtos.ChunkInfo chunk : chunks) { - length += chunk.getLen(); - } - Assert.assertEquals(length, keyValue.getBytes().length); - break; - } - } - } - } - - /** - * Tests that reading a corrupted chunk file throws a checksum exception. - * @throws IOException - */ - @Test - public void testReadKeyWithCorruptedData() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - - // Write data into a key - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - - // We need to find the location of the chunk file corresponding to the - // data we just wrote. - OzoneKey key = bucket.getKey(keyName); - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - - // Get the container by traversing the datanodes. At least one of the - // datanodes must have this container. - Container container = null; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - break; - } - } - Assert.assertNotNull("Container not found", container); - corruptData(container, key); - - // Try reading the key. Since the chunk file is corrupted, it should - // throw a checksum mismatch exception. - try { - OzoneInputStream is = bucket.readKey(keyName); - is.read(new byte[100]); - fail("Reading corrupted data should fail."); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("Checksum mismatch", e); - } - } - - /** - * Tests that reading a corrupted chunk file throws a checksum exception.
- * @throws IOException - */ - @Test - public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = "sample value"; - byte[] data = value.getBytes(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - - // Write data into a key - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - - // We need to find the location of the chunk file corresponding to the - // data we just wrote. - OzoneKey key = bucket.getKey(keyName); - List keyLocation = - ((OzoneKeyDetails) key).getOzoneKeyLocations(); - Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty()); - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - - // Get the container by traversing the datanodes. - List containerList = new ArrayList<>(); - Container container; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - containerList.add(container); - if (containerList.size() == 3) { - break; - } - } - } - Assert.assertTrue("Container not found", !containerList.isEmpty()); - corruptData(containerList.get(0), key); - // Try reading the key. Read will fail on the first node and will eventually - // failover to next replica - try { - OzoneInputStream is = bucket.readKey(keyName); - byte[] b = new byte[data.length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, data)); - } catch (OzoneChecksumException e) { - fail("Reading corrupted data should not fail."); - } - corruptData(containerList.get(1), key); - // Try reading the key. Read will fail on the first node and will eventually - // failover to next replica - try { - OzoneInputStream is = bucket.readKey(keyName); - byte[] b = new byte[data.length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, data)); - } catch (OzoneChecksumException e) { - fail("Reading corrupted data should not fail."); - } - corruptData(containerList.get(2), key); - // Try reading the key. Read will fail here as all the replica are corrupt - try { - OzoneInputStream is = bucket.readKey(keyName); - byte[] b = new byte[data.length]; - is.read(b); - fail("Reading corrupted data should fail."); - } catch (IOException e) { - GenericTestUtils.assertExceptionContains("Checksum mismatch", e); - } - } - - private void corruptData(Container container, OzoneKey key) - throws IOException { - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getLocalID(); - // From the containerData, get the block iterator for all the blocks in - // the container. - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - String containerPath = - new File(containerData.getMetadataPath()).getParent(); - try (KeyValueBlockIterator keyValueBlockIterator = - new KeyValueBlockIterator(containerID, new File(containerPath))) { - - // Find the block corresponding to the key we put. 
We use the localID of - // the BlockData to identify out key. - BlockData blockData = null; - while (keyValueBlockIterator.hasNext()) { - blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - break; - } - } - Assert.assertNotNull("Block not found", blockData); - - // Get the location of the chunk file - String chunkName = blockData.getChunks().get(0).getChunkName(); - String containreBaseDir = - container.getContainerData().getVolume().getHddsRootDir().getPath(); - File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, scmId, containerID); - File chunkFile = new File(chunksLocationPath, chunkName); - - // Corrupt the contents of the chunk file - String newData = new String("corrupted data"); - FileUtils.writeByteArrayToFile(chunkFile, newData.getBytes()); - } - } - - @Test - public void testDeleteKey() - throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - bucket.deleteKey(keyName); - - OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, - () -> bucket.getKey(keyName)); - } - - @Test - public void testRenameKey() - throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String fromKeyName = UUID.randomUUID().toString(); - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(fromKeyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(fromKeyName); - Assert.assertEquals(fromKeyName, key.getName()); - - // Rename to empty string should fail. - OMException oe = null; - String toKeyName = ""; - try { - bucket.renameKey(fromKeyName, toKeyName); - } catch (OMException e) { - oe = e; - } - Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, oe.getResult()); - - toKeyName = UUID.randomUUID().toString(); - bucket.renameKey(fromKeyName, toKeyName); - - // Lookup for old key should fail. - try { - bucket.getKey(fromKeyName); - } catch (OMException e) { - oe = e; - } - Assert.assertEquals(ResultCodes.KEY_NOT_FOUND, oe.getResult()); - - key = bucket.getKey(toKeyName); - Assert.assertEquals(toKeyName, key.getName()); - } - - // Listing all volumes in the cluster feature has to be fixed after HDDS-357. 
- // TODO: fix this - @Ignore - @Test - public void testListVolume() throws IOException { - String volBase = "vol-" + RandomStringUtils.randomNumeric(3); - //Create 10 volume vol--a-0- to vol--a-9- - String volBaseNameA = volBase + "-a-"; - for(int i = 0; i < 10; i++) { - store.createVolume( - volBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); - } - //Create 10 volume vol--b-0- to vol--b-9- - String volBaseNameB = volBase + "-b-"; - for(int i = 0; i < 10; i++) { - store.createVolume( - volBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); - } - Iterator volIterator = store.listVolumes(volBase); - int totalVolumeCount = 0; - while(volIterator.hasNext()) { - volIterator.next(); - totalVolumeCount++; - } - Assert.assertEquals(20, totalVolumeCount); - Iterator volAIterator = store.listVolumes( - volBaseNameA); - for(int i = 0; i < 10; i++) { - Assert.assertTrue(volAIterator.next().getName() - .startsWith(volBaseNameA + i + "-")); - } - Assert.assertFalse(volAIterator.hasNext()); - Iterator volBIterator = store.listVolumes( - volBaseNameB); - for(int i = 0; i < 10; i++) { - Assert.assertTrue(volBIterator.next().getName() - .startsWith(volBaseNameB + i + "-")); - } - Assert.assertFalse(volBIterator.hasNext()); - Iterator iter = store.listVolumes(volBaseNameA + - "1-"); - Assert.assertTrue(iter.next().getName().startsWith(volBaseNameA + "1-")); - Assert.assertFalse(iter.hasNext()); - } - - @Test - public void testListBucket() - throws IOException { - String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5); - String volumeB = "vol-b-" + RandomStringUtils.randomNumeric(5); - store.createVolume(volumeA); - store.createVolume(volumeB); - OzoneVolume volA = store.getVolume(volumeA); - OzoneVolume volB = store.getVolume(volumeB); - - //Create 10 buckets in vol-a- and 10 in vol-b- - String bucketBaseNameA = "bucket-a-"; - for(int i = 0; i < 10; i++) { - volA.createBucket( - bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); - volB.createBucket( - bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); - } - //Create 10 buckets in vol-a- and 10 in vol-b- - String bucketBaseNameB = "bucket-b-"; - for(int i = 0; i < 10; i++) { - volA.createBucket( - bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); - volB.createBucket( - bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); - } - Iterator volABucketIter = - volA.listBuckets("bucket-"); - int volABucketCount = 0; - while(volABucketIter.hasNext()) { - volABucketIter.next(); - volABucketCount++; - } - Assert.assertEquals(20, volABucketCount); - Iterator volBBucketIter = - volA.listBuckets("bucket-"); - int volBBucketCount = 0; - while(volBBucketIter.hasNext()) { - volBBucketIter.next(); - volBBucketCount++; - } - Assert.assertEquals(20, volBBucketCount); - - Iterator volABucketAIter = - volA.listBuckets("bucket-a-"); - int volABucketACount = 0; - while(volABucketAIter.hasNext()) { - volABucketAIter.next(); - volABucketACount++; - } - Assert.assertEquals(10, volABucketACount); - Iterator volBBucketBIter = - volA.listBuckets("bucket-b-"); - int volBBucketBCount = 0; - while(volBBucketBIter.hasNext()) { - volBBucketBIter.next(); - volBBucketBCount++; - } - Assert.assertEquals(10, volBBucketBCount); - Iterator volABucketBIter = volA.listBuckets( - "bucket-b-"); - for(int i = 0; i < 10; i++) { - Assert.assertTrue(volABucketBIter.next().getName() - .startsWith(bucketBaseNameB + i + "-")); - } - Assert.assertFalse(volABucketBIter.hasNext()); - Iterator volBBucketAIter = volB.listBuckets( - 
"bucket-a-"); - for(int i = 0; i < 10; i++) { - Assert.assertTrue(volBBucketAIter.next().getName() - .startsWith(bucketBaseNameA + i + "-")); - } - Assert.assertFalse(volBBucketAIter.hasNext()); - - } - - @Test - public void testListBucketsOnEmptyVolume() - throws IOException { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - store.createVolume(volume); - OzoneVolume vol = store.getVolume(volume); - Iterator buckets = vol.listBuckets(""); - while(buckets.hasNext()) { - fail(); - } - } - - @Test - public void testListKey() - throws IOException { - String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5); - String volumeB = "vol-b-" + RandomStringUtils.randomNumeric(5); - String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5); - String bucketB = "buc-b-" + RandomStringUtils.randomNumeric(5); - store.createVolume(volumeA); - store.createVolume(volumeB); - OzoneVolume volA = store.getVolume(volumeA); - OzoneVolume volB = store.getVolume(volumeB); - volA.createBucket(bucketA); - volA.createBucket(bucketB); - volB.createBucket(bucketA); - volB.createBucket(bucketB); - OzoneBucket volAbucketA = volA.getBucket(bucketA); - OzoneBucket volAbucketB = volA.getBucket(bucketB); - OzoneBucket volBbucketA = volB.getBucket(bucketA); - OzoneBucket volBbucketB = volB.getBucket(bucketB); - - /* - Create 10 keys in vol-a-/buc-a-, - vol-a-/buc-b-, vol-b-/buc-a- and - vol-b-/buc-b- - */ - String keyBaseA = "key-a-"; - for (int i = 0; i < 10; i++) { - byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); - OzoneOutputStream one = volAbucketA.createKey( - keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - one.write(value); - one.close(); - OzoneOutputStream two = volAbucketB.createKey( - keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - two.write(value); - two.close(); - OzoneOutputStream three = volBbucketA.createKey( - keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - three.write(value); - three.close(); - OzoneOutputStream four = volBbucketB.createKey( - keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - four.write(value); - four.close(); - } - /* - Create 10 keys in vol-a-/buc-a-, - vol-a-/buc-b-, vol-b-/buc-a- and - vol-b-/buc-b- - */ - String keyBaseB = "key-b-"; - for (int i = 0; i < 10; i++) { - byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); - OzoneOutputStream one = volAbucketA.createKey( - keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - one.write(value); - one.close(); - OzoneOutputStream two = volAbucketB.createKey( - keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - two.write(value); - two.close(); - OzoneOutputStream three = volBbucketA.createKey( - keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - three.write(value); - three.close(); - OzoneOutputStream four = volBbucketB.createKey( - keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, - new HashMap<>()); - four.write(value); - four.close(); - } - Iterator volABucketAIter = - volAbucketA.listKeys("key-"); - int volABucketAKeyCount = 0; - while(volABucketAIter.hasNext()) { - volABucketAIter.next(); - volABucketAKeyCount++; - 
} - Assert.assertEquals(20, volABucketAKeyCount); - Iterator volABucketBIter = - volAbucketB.listKeys("key-"); - int volABucketBKeyCount = 0; - while(volABucketBIter.hasNext()) { - volABucketBIter.next(); - volABucketBKeyCount++; - } - Assert.assertEquals(20, volABucketBKeyCount); - Iterator volBBucketAIter = - volBbucketA.listKeys("key-"); - int volBBucketAKeyCount = 0; - while(volBBucketAIter.hasNext()) { - volBBucketAIter.next(); - volBBucketAKeyCount++; - } - Assert.assertEquals(20, volBBucketAKeyCount); - Iterator volBBucketBIter = - volBbucketB.listKeys("key-"); - int volBBucketBKeyCount = 0; - while(volBBucketBIter.hasNext()) { - volBBucketBIter.next(); - volBBucketBKeyCount++; - } - Assert.assertEquals(20, volBBucketBKeyCount); - Iterator volABucketAKeyAIter = - volAbucketA.listKeys("key-a-"); - int volABucketAKeyACount = 0; - while(volABucketAKeyAIter.hasNext()) { - volABucketAKeyAIter.next(); - volABucketAKeyACount++; - } - Assert.assertEquals(10, volABucketAKeyACount); - Iterator volABucketAKeyBIter = - volAbucketA.listKeys("key-b-"); - for(int i = 0; i < 10; i++) { - Assert.assertTrue(volABucketAKeyBIter.next().getName() - .startsWith("key-b-" + i + "-")); - } - Assert.assertFalse(volABucketBIter.hasNext()); - } - - @Test - public void testListKeyOnEmptyBucket() - throws IOException { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buc-" + RandomStringUtils.randomNumeric(5); - store.createVolume(volume); - OzoneVolume vol = store.getVolume(volume); - vol.createBucket(bucket); - OzoneBucket buc = vol.getBucket(bucket); - Iterator keys = buc.listKeys(""); - while(keys.hasNext()) { - fail(); - } - } - - @Test - public void testInitiateMultipartUploadWithReplicationInformationSet() throws - IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); - - assertNotNull(multipartInfo); - String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotNull(multipartInfo.getUploadID()); - - // Call initiate multipart upload for the same key again, this should - // generate a new uploadID. 
- multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); - - assertNotNull(multipartInfo); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotEquals(multipartInfo.getUploadID(), uploadID); - assertNotNull(multipartInfo.getUploadID()); - } - - - @Test - public void testInitiateMultipartUploadWithDefaultReplication() throws - IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName); - - assertNotNull(multipartInfo); - String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotNull(multipartInfo.getUploadID()); - - // Call initiate multipart upload for the same key again, this should - // generate a new uploadID. - multipartInfo = bucket.initiateMultipartUpload(keyName); - - assertNotNull(multipartInfo); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotEquals(multipartInfo.getUploadID(), uploadID); - assertNotNull(multipartInfo.getUploadID()); - } - - - @Test - public void testUploadPartWithNoOverride() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String sampleData = "sample Value"; - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); - - assertNotNull(multipartInfo); - String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotNull(multipartInfo.getUploadID()); - - OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), 1, uploadID); - ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, - sampleData.length()); - ozoneOutputStream.close(); - - OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); - - assertNotNull(commitUploadPartInfo); - String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); - - } - - @Test - public void testUploadPartOverrideWithStandAlone() throws IOException { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String sampleData = "sample Value"; - int partNumber = 1; - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = 
volume.getBucket(bucketName); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); - - assertNotNull(multipartInfo); - String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotNull(multipartInfo.getUploadID()); - - OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), partNumber, uploadID); - ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, - sampleData.length()); - ozoneOutputStream.close(); - - OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); - - assertNotNull(commitUploadPartInfo); - String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); - - //Overwrite the part by creating part key with same part number. - sampleData = "sample Data Changed"; - ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), partNumber, uploadID); - ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name" - .length()); - ozoneOutputStream.close(); - - commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); - - assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); - - // PartName should be different from old part Name. - assertNotEquals("Part names should be different", partName, - commitUploadPartInfo.getPartName()); - } - - @Test - public void testUploadPartOverrideWithRatis() throws IOException { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String sampleData = "sample Value"; - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - ReplicationType.RATIS, ReplicationFactor.THREE); - - assertNotNull(multipartInfo); - String uploadID = multipartInfo.getUploadID(); - Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); - Assert.assertEquals(bucketName, multipartInfo.getBucketName()); - Assert.assertEquals(keyName, multipartInfo.getKeyName()); - assertNotNull(multipartInfo.getUploadID()); - - int partNumber = 1; - - OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), partNumber, uploadID); - ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, - sampleData.length()); - ozoneOutputStream.close(); - - OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); - - assertNotNull(commitUploadPartInfo); - String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); - - //Overwrite the part by creating part key with same part number. 
- sampleData = "sample Data Changed"; - ozoneOutputStream = bucket.createMultipartKey(keyName, - sampleData.length(), partNumber, uploadID); - ozoneOutputStream.write(DFSUtil.string2Bytes(sampleData), 0, "name" - .length()); - ozoneOutputStream.close(); - - commitUploadPartInfo = ozoneOutputStream - .getCommitUploadPartInfo(); - - assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); - - // PartName should be different from old part Name. - assertNotEquals("Part names should be different", partName, - commitUploadPartInfo.getPartName()); - } - - @Test - public void testNoSuchUploadError() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String sampleData = "sample Value"; - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = "random"; - OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> - bucket - .createMultipartKey(keyName, sampleData.length(), 1, uploadID)); - } - - @Test - public void testMultipartUpload() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - doMultipartUpload(bucket, keyName, (byte)98); - } - - - @Test - public void testMultipartUploadOverride() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - doMultipartUpload(bucket, keyName, (byte)96); - - // Initiate Multipart upload again, now we should read latest version, as - // read always reads latest blocks. 
- doMultipartUpload(bucket, keyName, (byte)97); - - } - - - @Test - public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - - // Upload Parts - Map partsMap = new TreeMap<>(); - // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); - - partName = uploadPart(bucket, keyName, uploadID, 2, - "data".getBytes(UTF_8)); - partsMap.put(2, partName); - - - // Complete multipart upload - - OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); - - } - @Test - public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - - // We have not uploaded any parts, but passing some list it should throw - // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); - - OzoneTestUtils.expectOmException(ResultCodes.MISMATCH_MULTIPART_LIST, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); - - } - - @Test - public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - - uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); - // We have not uploaded any parts, but passing some list it should throw - // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); - - OzoneTestUtils.expectOmException(ResultCodes.MISMATCH_MULTIPART_LIST, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); - - } - - @Test - public void testMultipartUploadWithMissingParts() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - - uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); - // We have not uploaded any parts, but passing some list it should throw - // error. 
- TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); - - OzoneTestUtils.expectOmException(ResultCodes.MISSING_UPLOAD_PARTS, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); - } - - @Test - public void testAbortUploadFail() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - OzoneTestUtils.expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, - () -> bucket.abortMultipartUpload(keyName, "random")); - } - - - @Test - public void testAbortUploadSuccessWithOutAnyParts() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - bucket.abortMultipartUpload(keyName, uploadID); - } - - @Test - public void testAbortUploadSuccessWithParts() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); - bucket.abortMultipartUpload(keyName, uploadID); - } - - @Test - public void testListMultipartUploadParts() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); - - String partName2 =uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); - - String partName3 =uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, uploadID, 0, 3); - - Assert.assertEquals(STAND_ALONE, - ozoneMultipartUploadPartListParts.getReplicationType()); - Assert.assertEquals(3, - ozoneMultipartUploadPartListParts.getPartInfoList().size()); - - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(0).getPartNumber()), - ozoneMultipartUploadPartListParts.getPartInfoList().get(0) - .getPartName()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(1).getPartNumber()), - 
ozoneMultipartUploadPartListParts.getPartInfoList().get(1) - .getPartName()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(2).getPartNumber()), - ozoneMultipartUploadPartListParts.getPartInfoList().get(2) - .getPartName()); - - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); - } - - @Test - public void testListMultipartUploadPartsWithContinuation() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); - - String partName2 =uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); - - String partName3 =uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, uploadID, 0, 2); - - Assert.assertEquals(STAND_ALONE, - ozoneMultipartUploadPartListParts.getReplicationType()); - - Assert.assertEquals(2, - ozoneMultipartUploadPartListParts.getPartInfoList().size()); - - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(0).getPartNumber()), - ozoneMultipartUploadPartListParts.getPartInfoList().get(0) - .getPartName()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(1).getPartNumber()), - ozoneMultipartUploadPartListParts.getPartInfoList().get(1) - .getPartName()); - - // Get remaining - Assert.assertTrue(ozoneMultipartUploadPartListParts.isTruncated()); - ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, - ozoneMultipartUploadPartListParts.getNextPartNumberMarker(), 2); - - Assert.assertEquals(1, - ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(partsMap.get(ozoneMultipartUploadPartListParts - .getPartInfoList().get(0).getPartNumber()), - ozoneMultipartUploadPartListParts.getPartInfoList().get(0) - .getPartName()); - - - // As we don't have any parts for this, we should get false here - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); - - } - - @Test - public void testListPartsInvalidPartMarker() throws Exception { - try { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, "random", -1, 2); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Should be greater than or " + - "equal to zero", ex); - } - } - - @Test - public void testListPartsInvalidMaxParts() throws Exception { - try { - String volumeName = UUID.randomUUID().toString(); - String bucketName = 
UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, "random", 1, -1); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Max Parts Should be greater " + - "than zero", ex); - } - } - - @Test - public void testListPartsWithPartMarkerGreaterThanPartCount() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, - ONE); - uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, uploadID, 100, 2); - - // Should return empty - - Assert.assertEquals(0, - ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(STAND_ALONE, - ozoneMultipartUploadPartListParts.getReplicationType()); - - // As we don't have any parts with greater than partNumberMarker and list - // is not truncated, so it should return false here. - Assert.assertFalse(ozoneMultipartUploadPartListParts.isTruncated()); - - } - - @Test - public void testListPartsWithInvalidUploadID() throws Exception { - OzoneTestUtils - .expectOmException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR, () -> { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - bucket.listParts(keyName, "random", 100, 2); - }); - } - - @Test - public void testNativeAclsForVolume() throws Exception { - String volumeName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - - OzoneObj ozObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - validateOzoneAccessAcl(ozObj); - } - - @Test - public void testNativeAclsForBucket() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - assertNotNull("Bucket creation failed", bucket); - - OzoneObj ozObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - validateOzoneAccessAcl(ozObj); - - OzoneObj volObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - validateDefaultAcls(volObj, ozObj, volume, null); - } - - 
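For reference, a minimal sketch of the native ACL calls exercised by the volume and bucket ACL tests above, assuming the test class's `store` fixture and its static imports (USER, ACCESS, ACLType); the volume name is a placeholder:

    // Illustrative sketch only, not part of the removed test sources.
    OzoneObj volObj = new OzoneObjInfo.Builder()
        .setVolumeName("vol1")
        .setResType(OzoneObj.ResourceType.VOLUME)
        .setStoreType(OzoneObj.StoreType.OZONE)
        .build();

    OzoneAcl readAcl = new OzoneAcl(USER, "user1", ACLType.READ, ACCESS);

    store.addAcl(volObj, readAcl);               // grant one ACL
    List<OzoneAcl> acls = store.getAcl(volObj);  // list current ACLs
    store.removeAcl(volObj, readAcl);            // revoke it again
    store.setAcl(volObj, acls);                  // replace the whole ACL list
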
private void validateDefaultAcls(OzoneObj parentObj, OzoneObj childObj, - OzoneVolume volume, OzoneBucket bucket) throws Exception { - assertTrue(store.addAcl(parentObj, defaultUserAcl)); - assertTrue(store.addAcl(parentObj, defaultGroupAcl)); - if (volume != null) { - volume.deleteBucket(childObj.getBucketName()); - volume.createBucket(childObj.getBucketName()); - } else { - if (childObj.getResourceType().equals(OzoneObj.ResourceType.KEY)) { - bucket.deleteKey(childObj.getKeyName()); - writeKey(childObj.getKeyName(), bucket); - } else { - store.setAcl(childObj, getAclList(new OzoneConfiguration())); - } - } - List acls = store.getAcl(parentObj); - assertTrue("Current acls: " + StringUtils.join(",", acls) + - " inheritedUserAcl: " + inheritedUserAcl, - acls.contains(defaultUserAcl)); - assertTrue("Current acls: " + StringUtils.join(",", acls) + - " inheritedGroupAcl: " + inheritedGroupAcl, - acls.contains(defaultGroupAcl)); - - acls = store.getAcl(childObj); - assertTrue("Current acls:" + StringUtils.join(",", acls) + - " inheritedUserAcl:" + inheritedUserAcl, - acls.contains(inheritedUserAcl)); - assertTrue("Current acls:" + StringUtils.join(",", acls) + - " inheritedGroupAcl:" + inheritedGroupAcl, - acls.contains(inheritedGroupAcl)); - } - - @Test - public void testNativeAclsForKey() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String key1 = "dir1/dir2" + UUID.randomUUID().toString(); - String key2 = "dir1/dir2" + UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - assertNotNull("Bucket creation failed", bucket); - - writeKey(key1, bucket); - writeKey(key2, bucket); - - OzoneObj ozObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key1) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - // Validates access acls. - validateOzoneAccessAcl(ozObj); - - // Check default acls inherited from bucket. - OzoneObj buckObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key1) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - validateDefaultAcls(buckObj, ozObj, null, bucket); - - // Check default acls inherited from prefix. - OzoneObj prefixObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key1) - .setPrefixName("dir1/") - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - store.setAcl(prefixObj, getAclList(new OzoneConfiguration())); - // Prefix should inherit DEFAULT acl from bucket. - - List acls = store.getAcl(prefixObj); - assertTrue("Current acls:" + StringUtils.join(",", acls), - acls.contains(inheritedUserAcl)); - assertTrue("Current acls:" + StringUtils.join(",", acls), - acls.contains(inheritedGroupAcl)); - // Remove inherited acls from prefix. 
- assertTrue(store.removeAcl(prefixObj, inheritedUserAcl)); - assertTrue(store.removeAcl(prefixObj, inheritedGroupAcl)); - - validateDefaultAcls(prefixObj, ozObj, null, bucket); - } - - @Test - public void testNativeAclsForPrefix() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String prefix1 = "PF" + UUID.randomUUID().toString() + "/"; - String key1 = prefix1 + "KEY" + UUID.randomUUID().toString(); - - String prefix2 = "PF" + UUID.randomUUID().toString() + "/"; - String key2 = prefix2 + "KEY" + UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - assertNotNull("Bucket creation failed", bucket); - - writeKey(key1, bucket); - writeKey(key2, bucket); - - OzoneObj prefixObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(prefix1) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - OzoneObj prefixObj2 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(prefix2) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - // add acl - BitSet aclRights1 = new BitSet(); - aclRights1.set(READ.ordinal()); - OzoneAcl user1Acl = new OzoneAcl(USER, - "user1", aclRights1, ACCESS); - assertTrue(store.addAcl(prefixObj, user1Acl)); - - // get acl - List aclsGet = store.getAcl(prefixObj); - Assert.assertEquals(1, aclsGet.size()); - Assert.assertEquals(user1Acl, aclsGet.get(0)); - - // remove acl - Assert.assertTrue(store.removeAcl(prefixObj, user1Acl)); - aclsGet = store.getAcl(prefixObj); - Assert.assertEquals(0, aclsGet.size()); - - // set acl - BitSet aclRights2 = new BitSet(); - aclRights2.set(ACLType.ALL.ordinal()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, - "group1", aclRights2, ACCESS); - List acls = new ArrayList<>(); - acls.add(user1Acl); - acls.add(group1Acl); - Assert.assertTrue(store.setAcl(prefixObj, acls)); - - // get acl - aclsGet = store.getAcl(prefixObj); - Assert.assertEquals(2, aclsGet.size()); - - OzoneObj keyObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key1) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - // Check default acls inherited from prefix. - validateDefaultAcls(prefixObj, keyObj, null, bucket); - - // Check default acls inherited from bucket when prefix does not exist. - validateDefaultAcls(prefixObj2, keyObj, null, bucket); - } - - /** - * Helper function to get default acl list for current user. - * - * @return list of default Acls. 
- * @throws IOException - * */ - private List getAclList(OzoneConfiguration conf) - throws IOException { - List listOfAcls = new ArrayList<>(); - //User ACL - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - OzoneAclConfig aclConfig = conf.getObject(OzoneAclConfig.class); - ACLType userRights = aclConfig.getUserDefaultRights(); - ACLType groupRights = aclConfig.getGroupDefaultRights(); - - listOfAcls.add(new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); - //Group ACLs of the User - List userGroups = Arrays.asList(ugi.getGroupNames()); - userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); - return listOfAcls; - } - - /** - * Helper function to validate ozone Acl for given object. - * @param ozObj - * */ - private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { - // Get acls for volume. - List expectedAcls = getAclList(new OzoneConfiguration()); - - // Case:1 Add new acl permission to existing acl. - if(expectedAcls.size()>0) { - OzoneAcl oldAcl = expectedAcls.get(0); - OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); - // Verify that operation successful. - assertTrue(store.addAcl(ozObj, newAcl)); - - assertEquals(expectedAcls.size(), store.getAcl(ozObj).size()); - final Optional readAcl = store.getAcl(ozObj).stream() - .filter(acl -> acl.getName().equals(newAcl.getName()) - && acl.getType().equals(newAcl.getType())) - .findFirst(); - assertTrue("New acl expected but not found.", readAcl.isPresent()); - assertTrue("READ_ACL should exist in current acls:" - + readAcl.get(), - readAcl.get().getAclList().contains(ACLType.READ_ACL)); - - - // Case:2 Remove newly added acl permission. - assertTrue(store.removeAcl(ozObj, newAcl)); - - assertEquals(expectedAcls.size(), store.getAcl(ozObj).size()); - final Optional nonReadAcl = store.getAcl(ozObj).stream() - .filter(acl -> acl.getName().equals(newAcl.getName()) - && acl.getType().equals(newAcl.getType())) - .findFirst(); - assertTrue("New acl expected but not found.", nonReadAcl.isPresent()); - assertFalse("READ_ACL should not exist in current acls:" - + nonReadAcl.get(), - nonReadAcl.get().getAclList().contains(ACLType.READ_ACL)); - } else { - fail("Default acl should not be empty."); - } - - List keyAcls = store.getAcl(ozObj); - expectedAcls.forEach(a -> assertTrue(keyAcls.contains(a))); - - // Remove all acl's. - for (OzoneAcl a : expectedAcls) { - store.removeAcl(ozObj, a); - } - List newAcls = store.getAcl(ozObj); - assertEquals(0, newAcls.size()); - - // Add acl's and then call getAcl. - int aclCount = 0; - for (OzoneAcl a : expectedAcls) { - aclCount++; - assertTrue(store.addAcl(ozObj, a)); - assertEquals(aclCount, store.getAcl(ozObj).size()); - } - newAcls = store.getAcl(ozObj); - assertEquals(expectedAcls.size(), newAcls.size()); - List finalNewAcls = newAcls; - expectedAcls.forEach(a -> assertTrue(finalNewAcls.contains(a))); - - // Reset acl's. 
- OzoneAcl ua = new OzoneAcl(USER, "userx", - ACLType.READ_ACL, ACCESS); - OzoneAcl ug = new OzoneAcl(GROUP, "userx", - ACLType.ALL, ACCESS); - store.setAcl(ozObj, Arrays.asList(ua, ug)); - newAcls = store.getAcl(ozObj); - assertEquals(2, newAcls.size()); - assertTrue(newAcls.contains(ua)); - assertTrue(newAcls.contains(ug)); - } - - private void writeKey(String key1, OzoneBucket bucket) throws IOException { - OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE, - ONE, new HashMap<>()); - out.write(RandomStringUtils.random(1024).getBytes()); - out.close(); - } - - private byte[] generateData(int size, byte val) { - byte[] chars = new byte[size]; - Arrays.fill(chars, val); - return chars; - } - - - private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val) - throws Exception { - // Initiate Multipart upload request - String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType - .RATIS, ReplicationFactor.THREE); - - // Upload parts - Map partsMap = new TreeMap<>(); - - // get 5mb data, as each part should be of min 5mb, last part can be less - // than 5mb - int length = 0; - byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); - length += data.length; - - - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); - length += data.length; - - String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( - UTF_8)); - partsMap.put(3, partName); - length += part3.getBytes(UTF_8).length; - - - // Complete multipart upload request - completeMultipartUpload(bucket, keyName, uploadID, partsMap); - - - //Now Read the key which has been completed multipart upload. - byte[] fileContent = new byte[data.length + data.length + part3.getBytes( - UTF_8).length]; - OzoneInputStream inputStream = bucket.readKey(keyName); - inputStream.read(fileContent); - - Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), - bucket.getName(), keyName, ReplicationType.RATIS, - ReplicationFactor.THREE)); - - StringBuilder sb = new StringBuilder(length); - - // Combine all parts data, and check is it matching with get key data. 
- String part1 = new String(data); - String part2 = new String(data); - sb.append(part1); - sb.append(part2); - sb.append(part3); - Assert.assertEquals(sb.toString(), new String(fileContent)); - } - - - private String initiateMultipartUpload(OzoneBucket bucket, String keyName, - ReplicationType replicationType, ReplicationFactor replicationFactor) - throws Exception { - OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - replicationType, replicationFactor); - - String uploadID = multipartInfo.getUploadID(); - Assert.assertNotNull(uploadID); - return uploadID; - } - - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { - OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, - data.length, partNumber, uploadID); - ozoneOutputStream.write(data, 0, - data.length); - ozoneOutputStream.close(); - - OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - ozoneOutputStream.getCommitUploadPartInfo(); - - Assert.assertNotNull(omMultipartCommitUploadPartInfo); - Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); - - } - - private void completeMultipartUpload(OzoneBucket bucket, String keyName, - String uploadID, Map<Integer, String> partsMap) throws Exception { - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket - .completeMultipartUpload(keyName, uploadID, partsMap); - - Assert.assertNotNull(omMultipartUploadCompleteInfo); - Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), bucket - .getName()); - Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), bucket - .getVolumeName()); - Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), keyName); - Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash()); - } - - /** - * Tests GDPR encryption/decryption. - * 1. Create GDPR Enabled bucket. - * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey. - * 3. Read key and validate the content/metadata is as expected because the - * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo - * Metadata. - * 4. To check encryption, we forcibly update KeyInfo Metadata and remove the - * gdprEnabled flag. - * 5. When we now read the key, {@link RpcClient} checks for GDPR Flag in - * method createInputStream. If the gdprEnabled flag in metadata is set to - * true, it decrypts using the GDPRSymmetricKey. Since we removed that flag - * from metadata for this key, it will read the encrypted data as-is. - * 6. Thus, when we compare this content with expected text, it should - * not match as the decryption has not been performed.
- * @throws Exception - */ - @Test - public void testKeyReadWriteForGDPR() throws Exception { - //Step 1 - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs args = BucketArgs.newBuilder() - .addMetadata(OzoneConsts.GDPR_FLAG, "true").build(); - volume.createBucket(bucketName, args); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertNotNull(bucket.getMetadata()); - Assert.assertEquals("true", - bucket.getMetadata().get(OzoneConsts.GDPR_FLAG)); - - //Step 2 - String text = "hello world"; - Map keyMetadata = new HashMap<>(); - keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); - OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes().length, STAND_ALONE, ONE, keyMetadata); - out.write(text.getBytes()); - out.close(); - - //Step 3 - OzoneKeyDetails key = bucket.getKey(keyName); - - Assert.assertEquals(keyName, key.getName()); - Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG)); - Assert.assertEquals("AES", - key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM)); - Assert.assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null); - - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[text.getBytes().length]; - is.read(fileContent); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, - ONE)); - Assert.assertEquals(text, new String(fileContent)); - - //Step 4 - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - OmKeyInfo omKeyInfo = - omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName)); - - omKeyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG); - - omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName), omKeyInfo); - - //Step 5 - key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - Assert.assertEquals(null, key.getMetadata().get(OzoneConsts.GDPR_FLAG)); - is = bucket.readKey(keyName); - fileContent = new byte[text.getBytes().length]; - is.read(fileContent); - - //Step 6 - Assert.assertNotEquals(text, new String(fileContent)); - - } - - /** - * Tests deletedKey for GDPR. - * 1. Create GDPR Enabled bucket. - * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey. - * 3. Read key and validate the content/metadata is as expected because the - * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo - * Metadata. - * 4. Delete this key in GDPR enabled bucket - * 5. Confirm the deleted key metadata in deletedTable does not contain the - * GDPR encryption details (flag, secret, algorithm). 
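For reference, a minimal sketch of the GDPR-enabled bucket and key setup that the two GDPR tests described above rely on, assuming the test class's `volume` fixture, its imports, and the client calls used in the removed code; bucket and key names are placeholders:

    // Illustrative sketch only, not part of the removed test sources.
    BucketArgs args = BucketArgs.newBuilder()
        .addMetadata(OzoneConsts.GDPR_FLAG, "true")
        .build();
    volume.createBucket("gdprBucket", args);
    OzoneBucket bucket = volume.getBucket("gdprBucket");

    Map<String, String> keyMetadata = new HashMap<>();
    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
    byte[] text = "hello world".getBytes(UTF_8);
    try (OzoneOutputStream out = bucket.createKey(
        "gdprKey", text.length, STAND_ALONE, ONE, keyMetadata)) {
      out.write(text);  // written through GDPRSymmetricKey encryption
    }

    // While the gdprEnabled flag is present in the key metadata, readKey
    // decrypts transparently; GDPR_SECRET and GDPR_ALGORITHM ("AES") are
    // dropped from the deletedTable entry once the key is deleted.
    OzoneKeyDetails key = bucket.getKey("gdprKey");
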
- * @throws Exception - */ - @Test - public void testDeletedKeyForGDPR() throws Exception { - //Step 1 - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - BucketArgs args = BucketArgs.newBuilder() - .addMetadata(OzoneConsts.GDPR_FLAG, "true").build(); - volume.createBucket(bucketName, args); - OzoneBucket bucket = volume.getBucket(bucketName); - Assert.assertEquals(bucketName, bucket.getName()); - Assert.assertNotNull(bucket.getMetadata()); - Assert.assertEquals("true", - bucket.getMetadata().get(OzoneConsts.GDPR_FLAG)); - - //Step 2 - String text = "hello world"; - Map keyMetadata = new HashMap<>(); - keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); - OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes().length, STAND_ALONE, ONE, keyMetadata); - out.write(text.getBytes()); - out.close(); - - //Step 3 - OzoneKeyDetails key = bucket.getKey(keyName); - - Assert.assertEquals(keyName, key.getName()); - Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG)); - Assert.assertEquals("AES", - key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM)); - Assert.assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null); - - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[text.getBytes().length]; - is.read(fileContent); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, - ONE)); - Assert.assertEquals(text, new String(fileContent)); - - //Step 4 - bucket.deleteKey(keyName); - - //Step 5 - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - RepeatedOmKeyInfo deletedKeys = - omMetadataManager.getDeletedTable().get(objectKey); - Map deletedKeyMetadata = - deletedKeys.getOmKeyInfoList().get(0).getMetadata(); - Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG)); - Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET)); - Assert.assertFalse( - deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM)); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java deleted file mode 100644 index 0b424b1b171..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import net.jcip.annotations.NotThreadSafe; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.FixMethodOrder; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runners.MethodSorters; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.junit.Assert.assertTrue; - -/** - * This class is to test audit logs for xxxACL APIs of Ozone Client. - * It is annotated as NotThreadSafe intentionally since this test reads from - * the generated audit logs to verify the operations. Since the - * maven test plugin will trigger parallel test execution, there is a - * possibility of other audit events being logged and leading to failure of - * all assertion based test in this class. - */ -@NotThreadSafe -@FixMethodOrder(MethodSorters.NAME_ASCENDING) -@Ignore("Fix this after adding audit support for HA Acl code. 
This will be " + - "fixed by HDDS-2038") -public class TestOzoneRpcClientForAclAuditLog { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneRpcClientForAclAuditLog.class); - private static UserGroupInformation ugi; - private static final OzoneAcl USER_ACL = - new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS); - private static final OzoneAcl USER_ACL_2 = - new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "jane", IAccessAuthorizer.ACLType.ALL, ACCESS); - private static List aclListToAdd = new ArrayList<>(); - private static MiniOzoneCluster cluster = null; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String scmId = UUID.randomUUID().toString(); - - - /** - * Create a MiniOzoneCluster for testing. - * - * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - System.setProperty("log4j.configurationFile", "auditlog.properties"); - ugi = UserGroupInformation.getCurrentUser(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - conf.set(OZONE_ACL_AUTHORIZER_CLASS, - OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - startCluster(conf); - aclListToAdd.add(USER_ACL); - aclListToAdd.add(USER_ACL_2); - emptyAuditLog(); - } - - /** - * Create a MiniOzoneCluster for testing. - * @param conf Configurations to start the cluster. - * @throws Exception - */ - private static void startCluster(OzoneConfiguration conf) throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setScmId(scmId) - .build(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. - */ - @AfterClass - public static void teardown() throws IOException { - shutdownCluster(); - deleteAuditLog(); - } - - private static void deleteAuditLog() throws IOException { - File file = new File("audit.log"); - if (FileUtils.deleteQuietly(file)) { - LOG.info(file.getName() + - " has been deleted."); - } else { - LOG.info("audit.log could not be deleted."); - } - } - - private static void emptyAuditLog() throws IOException { - File file = new File("audit.log"); - FileUtils.writeLines(file, new ArrayList<>(), false); - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. 
- */ - private static void shutdownCluster() throws IOException { - if(ozClient != null) { - ozClient.close(); - } - - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testXXXAclSuccessAudits() throws Exception { - - String userName = ugi.getUserName(); - String adminName = ugi.getUserName(); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setAdmin(adminName) - .setOwner(userName) - .build(); - store.createVolume(volumeName, createVolumeArgs); - verifyLog(OMAction.CREATE_VOLUME.name(), volumeName, - AuditEventStatus.SUCCESS.name()); - OzoneVolume retVolumeinfo = store.getVolume(volumeName); - verifyLog(OMAction.READ_VOLUME.name(), volumeName, - AuditEventStatus.SUCCESS.name()); - Assert.assertTrue(retVolumeinfo.getName().equalsIgnoreCase(volumeName)); - - OzoneObj volObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(VOLUME) - .setStoreType(OZONE) - .build(); - - //Testing getAcl - List acls = store.getAcl(volObj); - verifyLog(OMAction.GET_ACL.name(), volumeName, - AuditEventStatus.SUCCESS.name()); - Assert.assertTrue(acls.size() > 0); - - //Testing addAcl - store.addAcl(volObj, USER_ACL); - verifyLog(OMAction.ADD_ACL.name(), volumeName, "johndoe", - AuditEventStatus.SUCCESS.name()); - - //Testing removeAcl - store.removeAcl(volObj, USER_ACL); - verifyLog(OMAction.REMOVE_ACL.name(), volumeName, "johndoe", - AuditEventStatus.SUCCESS.name()); - - //Testing setAcl - store.setAcl(volObj, aclListToAdd); - verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane", - AuditEventStatus.SUCCESS.name()); - - } - - @Test - public void testXXXAclFailureAudits() throws Exception { - - String userName = "bilbo"; - String adminName = "bilbo"; - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setAdmin(adminName) - .setOwner(userName) - .build(); - store.createVolume(volumeName, createVolumeArgs); - verifyLog(OMAction.CREATE_VOLUME.name(), volumeName, - AuditEventStatus.SUCCESS.name()); - - OzoneObj volObj = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(VOLUME) - .setStoreType(OZONE) - .build(); - - // xxxAcl will fail as current ugi user doesn't have the required access - // for volume - try{ - List acls = store.getAcl(volObj); - } catch (Exception ex) { - verifyLog(OMAction.GET_ACL.name(), volumeName, - AuditEventStatus.FAILURE.name()); - } - - try{ - store.addAcl(volObj, USER_ACL); - } catch (Exception ex) { - verifyLog(OMAction.ADD_ACL.name(), volumeName, - AuditEventStatus.FAILURE.name()); - } - - try{ - store.removeAcl(volObj, USER_ACL); - } catch (Exception ex) { - verifyLog(OMAction.REMOVE_ACL.name(), volumeName, - AuditEventStatus.FAILURE.name()); - } - - try{ - store.setAcl(volObj, aclListToAdd); - } catch (Exception ex) { - verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane", - AuditEventStatus.FAILURE.name()); - } - - } - - private void verifyLog(String... expected) throws Exception { - File file = new File("audit.log"); - final List lines = FileUtils.readLines(file, (String)null); - GenericTestUtils.waitFor(() -> - (lines != null) ? 
true : false, 100, 60000); - - try{ - // When log entry is expected, the log file will contain one line and - // that must be equal to the expected string - assertTrue(lines.size() != 0); - for(String exp: expected){ - assertTrue(lines.get(0).contains(exp)); - } - } catch (AssertionError ex){ - LOG.error("Error occurred in log verification", ex); - if(lines.size() != 0){ - LOG.error("Actual line ::: " + lines.get(0)); - LOG.error("Expected tokens ::: " + Arrays.toString(expected)); - } - throw ex; - } finally { - emptyAuditLog(); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java deleted file mode 100644 index 73a7de5147b..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.UUID; - -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import static org.junit.Assert.fail; - -/** - * This class is to test all the public facing APIs of Ozone Client with an - * active OM Ratis server. - */ -public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract { - private static OzoneConfiguration conf; - /** - * Create a MiniOzoneCluster for testing. - * Ozone is made active by setting OZONE_ENABLED = true. 
- * Ozone OM Ratis server is made active by setting - * OZONE_OM_RATIS_ENABLE = true; - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - true); - startCluster(conf); - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. - */ - @AfterClass - public static void shutdown() throws IOException { - shutdownCluster(); - } - - /** - * Tests get the information of key with network topology awareness enabled. - * @throws IOException - */ - @Test - public void testGetKeyAndFileWithNetworkTopology() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = "sample value"; - getStore().createVolume(volumeName); - OzoneVolume volume = getStore().getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - - // Write data into a key - try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - THREE, new HashMap<>())) { - out.write(value.getBytes()); - } - - // Since the rpc client is outside of cluster, then getFirstNode should be - // equal to getClosestNode. - OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); - builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); - - // read key with topology aware read enabled - try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); - } catch (OzoneChecksumException e) { - fail("Read key should succeed"); - } - - // read file with topology aware read enabled - try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); - } catch (OzoneChecksumException e) { - fail("Read file should succeed"); - } - - // read key with topology aware read disabled - conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - false); - try (OzoneClient newClient = OzoneClientFactory.getRpcClient(conf)) { - ObjectStore newStore = newClient.getObjectStore(); - OzoneBucket newBucket = - newStore.getVolume(volumeName).getBucket(bucketName); - try (OzoneInputStream is = newBucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); - } catch (OzoneChecksumException e) { - fail("Read key should succeed"); - } - - // read file with topology aware read disabled - try (OzoneInputStream is = newBucket.readFile(keyName)) { - byte[] b = new byte[value.getBytes().length]; - is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); - } catch (OzoneChecksumException e) { - fail("Read file should succeed"); - } - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java deleted file mode 100644 index 1343a03a2a9..00000000000 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneKeyLocation; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.Rule; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Test; - -import org.junit.rules.ExpectedException; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * Test read retries from multiple nodes in the pipeline. - */ -public class TestReadRetries { - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private static MiniOzoneCluster cluster = null; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static OzoneManager ozoneManager; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - - private static final String SCM_ID = UUID.randomUUID().toString(); - - - /** - * Create a MiniOzoneCluster for testing. - * @throws Exception - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .setScmId(SCM_ID) - .build(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - ozoneManager = cluster.getOzoneManager(); - } - - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. 
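For reference, a minimal sketch of how the read-retry test below locates the pipeline behind a key and then removes replicas, assuming the `cluster`, `bucket`, `keyName` and `containerID` values set up in the removed test method and the SCM accessors it uses:

    // Illustrative sketch only, not part of the removed test sources.
    // Resolve the container and pipeline that hold the key's block.
    ContainerInfo container = cluster.getStorageContainerManager()
        .getContainerManager().getContainer(ContainerID.valueof(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager()
        .getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();

    // With RATIS/THREE replication the container stays OPEN while replicas
    // are lost, so reads keep succeeding (falling back to the Standalone
    // read path) until the last datanode in the pipeline is stopped.
    cluster.shutdownHddsDatanode(datanodes.get(0));
    try (OzoneInputStream in = bucket.readKey(keyName)) {
      in.read(new byte[1024]);  // still readable with one replica down
    }
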
- */ - @AfterClass - public static void shutdown() throws IOException { - if(ozClient != null) { - ozClient.close(); - } - - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - - if (cluster != null) { - cluster.shutdown(); - } - } - - - @Test - public void testPutKeyAndGetKeyThreeNodes() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket - .createKey(keyName, value.getBytes().length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - KeyOutputStream groupOutputStream = - (KeyOutputStream) out.getOutputStream(); - XceiverClientManager manager = groupOutputStream.getXceiverClientManager(); - out.write(value.getBytes()); - out.close(); - // First, confirm the key info from the client matches the info in OM. - OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); - builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); - OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()). - getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0); - long containerID = keyInfo.getContainerID(); - long localID = keyInfo.getLocalID(); - OzoneKeyDetails keyDetails = bucket.getKey(keyName); - Assert.assertEquals(keyName, keyDetails.getName()); - - List keyLocations = keyDetails.getOzoneKeyLocations(); - Assert.assertEquals(1, keyLocations.size()); - Assert.assertEquals(containerID, keyLocations.get(0).getContainerID()); - Assert.assertEquals(localID, keyLocations.get(0).getLocalID()); - - // Make sure that the data size matched. 
- Assert - .assertEquals(value.getBytes().length, keyLocations.get(0).getLength()); - - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - - DatanodeDetails datanodeDetails = datanodes.get(0); - Assert.assertNotNull(datanodeDetails); - - XceiverClientSpi clientSpi = manager.acquireClient(pipeline); - Assert.assertTrue(clientSpi instanceof XceiverClientRatis); - XceiverClientRatis ratisClient = (XceiverClientRatis)clientSpi; - - ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId(), 5000); - // shutdown the datanode - cluster.shutdownHddsDatanode(datanodeDetails); - - Assert.assertTrue(container.getState() - == HddsProtos.LifeCycleState.OPEN); - // try to read, this shouls be successful - readKey(bucket, keyName, value); - - Assert.assertTrue(container.getState() - == HddsProtos.LifeCycleState.OPEN); - // shutdown the second datanode - datanodeDetails = datanodes.get(1); - cluster.shutdownHddsDatanode(datanodeDetails); - Assert.assertTrue(container.getState() - == HddsProtos.LifeCycleState.OPEN); - - // the container is open and with loss of 2 nodes we still should be able - // to read via Standalone protocol - // try to read - readKey(bucket, keyName, value); - - // shutdown the 3rd datanode - datanodeDetails = datanodes.get(2); - cluster.shutdownHddsDatanode(datanodeDetails); - try { - // try to read - readKey(bucket, keyName, value); - fail("Expected exception not thrown"); - } catch (IOException e) { - // it should throw an ioException as none of the servers - // are available - } - manager.releaseClient(clientSpi, false); - } - - private void readKey(OzoneBucket bucket, String keyName, String data) - throws IOException { - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; - is.read(fileContent); - is.close(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java deleted file mode 100644 index 2ed24a2f0d5..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ /dev/null @@ -1,258 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.security.token.BlockTokenVerifier; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * This class is to test all the public facing APIs of Ozone Client. - */ -public class TestSecureOzoneRpcClient extends TestOzoneRpcClient { - - private static MiniOzoneCluster cluster = null; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static OzoneManager ozoneManager; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - - private static final String SCM_ID = UUID.randomUUID().toString(); - private static File testDir; - private static OzoneConfiguration conf; - private static OzoneBlockTokenSecretManager secretManager; - - /** - * Create a MiniOzoneCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - testDir = GenericTestUtils.getTestDir( - TestSecureOzoneRpcClient.class.getSimpleName()); - OzoneManager.setTestSecureOmFlag(true); - conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); - conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - CertificateClientTestImpl certificateClientTest = - new CertificateClientTestImpl(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .setScmId(SCM_ID) - .setCertificateClient(certificateClientTest) - .build(); - String user = UserGroupInformation.getCurrentUser().getShortUserName(); - secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - 60 *60, certificateClientTest.getCertificate(). - getSerialNumber().toString()); - secretManager.start(certificateClientTest); - Token token = secretManager.generateToken( - user, EnumSet.allOf(AccessModeProto.class), 60*60); - UserGroupInformation.getCurrentUser().addToken(token); - cluster.getOzoneManager().startSecretManager(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - ozoneManager = cluster.getOzoneManager(); - TestOzoneRpcClient.setCluster(cluster); - TestOzoneRpcClient.setOzClient(ozClient); - TestOzoneRpcClient.setOzoneManager(ozoneManager); - TestOzoneRpcClient.setStorageContainerLocationClient( - storageContainerLocationClient); - TestOzoneRpcClient.setStore(store); - TestOzoneRpcClient.setScmId(SCM_ID); - } - - /** - * Tests successful completion of following operations when grpc block - * token is used. - * 1. getKey - * 2. writeChunk - * */ - @Test - public void testPutKeySuccessWithBlockToken() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>())) { - out.write(value.getBytes()); - } - - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - byte[] fileContent; - try(OzoneInputStream is = bucket.readKey(keyName)) { - fileContent = new byte[value.getBytes().length]; - is.read(fileContent); - } - - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE)); - Assert.assertEquals(value, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - } - - /** - * Tests failure in following operations when grpc block token is - * not present. - * 1. getKey - * 2. 
writeChunk - * */ - @Test - @Ignore("Needs to be moved out of this class as client setup is static") - public void testKeyOpFailureWithoutBlockToken() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String value = "sample value"; - BlockTokenVerifier.setTestStub(true); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>())) { - LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " + - "to find any token ", - () -> out.write(value.getBytes())); - } - - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - LambdaTestUtils.intercept(IOException.class, "Failed to authenticate" + - " with GRPC XceiverServer with Ozone block token.", - () -> bucket.readKey(keyName)); - } - BlockTokenVerifier.setTestStub(false); - } - - private boolean verifyRatisReplication(String volumeName, String bucketName, - String keyName, ReplicationType type, ReplicationFactor factor) - throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - HddsProtos.ReplicationType replicationType = - HddsProtos.ReplicationType.valueOf(type.toString()); - HddsProtos.ReplicationFactor replicationFactor = - HddsProtos.ReplicationFactor.valueOf(factor.getValue()); - OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); - for (OmKeyLocationInfo info: - keyInfo.getLatestVersionLocations().getLocationList()) { - ContainerInfo container = - storageContainerLocationClient.getContainer(info.getContainerID()); - if (!container.getReplicationFactor().equals(replicationFactor) || ( - container.getReplicationType() != replicationType)) { - return false; - } - } - return true; - } - - /** - * Close OzoneClient and shutdown MiniOzoneCluster. - */ - @AfterClass - public static void shutdown() throws IOException { - if(ozClient != null) { - ozClient.close(); - } - - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - - if (cluster != null) { - cluster.shutdown(); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java deleted file mode 100644 index 9b593491194..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ /dev/null @@ -1,463 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.*; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.GroupMismatchException; -import org.apache.ratis.protocol.RaftRetryFailureException; -import org.junit.Assert; -import org.junit.Test; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; - -/** - * This class verifies the watchForCommit Handling by xceiverClient. - */ -public class TestWatchForCommit { - - private MiniOzoneCluster cluster; - private OzoneClient client; - private ObjectStore objectStore; - private String volumeName; - private String bucketName; - private String keyString; - private int chunkSize; - private int flushSize; - private int maxFlushSize; - private int blockSize; - private StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String containerOwner = "OZONE"; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - private void startCluster(OzoneConfiguration conf) throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - 1, TimeUnit.SECONDS); - - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(7) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setStreamBufferSizeUnit(StorageUnit.BYTES) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "watchforcommithandlingtest"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - - /** - * Shutdown MiniDFSCluster. - */ - private void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - private String getKeyName() { - return UUID.randomUUID().toString(); - } - - @Test - public void testWatchForCommitWithKeyWrite() throws Exception { - // in this case, watch request should fail with RaftRetryFailureException - // and will be captured in keyOutputStream and the failover will happen - // to a different block - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - 1, TimeUnit.SECONDS); - startCluster(conf); - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingWriteChunkCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.WriteChunk); - long pendingPutBlockCount = metrics.getContainerOpsMetrics( - ContainerProtos.Type.PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - String keyName = getKeyName(); - OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); - int dataLength = maxFlushSize + 50; - // write data more than 1 chunk - byte[] data1 = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(data1); - // since its hitting the full bufferCondition, it will call watchForCommit - // and completes atleast putBlock for first flushSize worth of data - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk) - <= pendingWriteChunkCount + 2); - Assert.assertTrue( - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock) - <= pendingPutBlockCount + 1); - Assert.assertEquals(writeChunkCount + 4, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 6, - 
metrics.getTotalOpCount()); - Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); - KeyOutputStream keyOutputStream = (KeyOutputStream)key.getOutputStream(); - - Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); - OutputStream stream = keyOutputStream.getStreamEntries().get(0) - .getOutputStream(); - Assert.assertTrue(stream instanceof BlockOutputStream); - BlockOutputStream blockOutputStream = (BlockOutputStream) stream; - // we have just written data more than flush Size(2 chunks), at this time - // buffer pool will have 3 buffers allocated worth of chunk size - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - // writtenDataLength as well flushedDataLength will be updated here - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - Assert.assertEquals(maxFlushSize, - blockOutputStream.getTotalDataFlushedLength()); - // since data equals to maxBufferSize is written, this will be a blocking - // call and hence will wait for atleast flushSize worth of data to get - // acked by all servers right here - Assert.assertTrue(blockOutputStream.getTotalAckDataLength() >= flushSize); - // watchForCommit will clean up atleast one entry from the map where each - // entry corresponds to flushSize worth of data - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 1); - // Now do a flush. This will flush the data and update the flush length and - // the map. - key.flush(); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 3, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 8, - metrics.getTotalOpCount()); - // Since the data in the buffer is already flushed, flush here will have - // no impact on the counters and data structures - Assert.assertEquals(4, blockOutputStream.getBufferPool().getSize()); - Assert.assertEquals(dataLength, blockOutputStream.getWrittenDataLength()); - Assert.assertEquals(dataLength, - blockOutputStream.getTotalDataFlushedLength()); - // flush will make sure one more entry gets updated in the map - Assert.assertTrue( - blockOutputStream.getCommitIndex2flushedDataMap().size() <= 2); - XceiverClientRatis raftClient = - (XceiverClientRatis) blockOutputStream.getXceiverClient(); - Assert.assertEquals(3, raftClient.getCommitInfoMap().size()); - Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - // again write data with more than max buffer limit. This will call - // watchForCommit again. 
Since the commit will happen 2 way, the - // commitInfoMap will get updated for servers which are alive - // 4 writeChunks = maxFlushSize + 2 putBlocks will be discarded here - // once exception is hit - key.write(data1); - // As a part of handling the exception, 4 failed writeChunks will be - // rewritten plus one partial chunk plus two putBlocks for flushSize - // and one flush for partial chunk - key.flush(); - Assert.assertTrue(HddsClientUtils.checkForException(blockOutputStream - .getIoException()) instanceof RaftRetryFailureException); - // Make sure the retryCount is reset after the exception is handled - Assert.assertTrue(keyOutputStream.getRetryCount() == 0); - // now close the stream, It will update the ack length after watchForCommit - Assert.assertEquals(2, keyOutputStream.getStreamEntries().size()); - key.close(); - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert - .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); - Assert.assertEquals(pendingWriteChunkCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(pendingPutBlockCount, - metrics.getContainerOpsMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(writeChunkCount + 14, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); - Assert.assertEquals(putBlockCount + 8, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)); - Assert.assertEquals(totalOpCount + 22, - metrics.getTotalOpCount()); - Assert - .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); - // make sure the bufferPool is empty - Assert - .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); - Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap()); - validateData(keyName, data1); - shutdown(); - } - - @Test - public void testWatchForCommitWithSmallerTimeoutValue() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 3, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - startCluster(conf); - XceiverClientManager clientManager = new XceiverClientManager(conf); - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - XceiverClientSpi xceiverClient = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - long index = reply.getLogIndex(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - try { - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. 
- xceiverClient - .watchForCommit(index + new Random().nextInt(100) + 10, 3000); - Assert.fail("expected exception not thrown"); - } catch (Exception e) { - Assert.assertTrue( - HddsClientUtils.checkForException(e) instanceof TimeoutException); - } - // After releasing the xceiverClient, this connection should be closed - // and any container operations should fail - clientManager.releaseClient(xceiverClient, false); - shutdown(); - } - - @Test - public void testWatchForCommitForRetryfailure() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, - 100, TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - startCluster(conf); - XceiverClientManager clientManager = new XceiverClientManager(conf); - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - XceiverClientSpi xceiverClient = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - long index = reply.getLogIndex(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); - // again write data with more than max buffer limit. This wi - try { - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. 
- xceiverClient - .watchForCommit(index + new Random().nextInt(100) + 10, 20000); - Assert.fail("expected exception not thrown"); - } catch (Exception e) { - Assert.assertTrue(e instanceof ExecutionException); - // since the timeout value is quite long, the watch request will either - // fail with NotReplicated exceptio, RetryFailureException or - // RuntimeException - Assert.assertFalse(HddsClientUtils - .checkForException(e) instanceof TimeoutException); - } - clientManager.releaseClient(xceiverClient, false); - shutdown(); - } - - @Test - public void test2WayCommitForTimeoutException() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 3, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - startCluster(conf); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG); - XceiverClientManager clientManager = new XceiverClientManager(conf); - - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - XceiverClientSpi xceiverClient = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest( - container1.getContainerInfo().getContainerID(), - xceiverClient.getPipeline())); - reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - reply = xceiverClient.sendCommandAsync(ContainerTestHelper - .getCloseContainer(pipeline, - container1.getContainerInfo().getContainerID())); - reply.getResponse().get(); - xceiverClient.watchForCommit(reply.getLogIndex(), 3000); - - // commitInfo Map will be reduced to 2 here - Assert.assertEquals(2, ratisClient.getCommitInfoMap().size()); - clientManager.releaseClient(xceiverClient, false); - Assert.assertTrue(logCapturer.getOutput().contains("3 way commit failed")); - Assert.assertTrue(logCapturer.getOutput().contains("TimeoutException")); - Assert - .assertTrue(logCapturer.getOutput().contains("Committed by majority")); - logCapturer.stopCapturing(); - shutdown(); - } - - @Test - public void testWatchForCommitForGroupMismatchException() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT, 20, - TimeUnit.SECONDS); - conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 20); - - // mark the node stale early so that pipleline gets destroyed quickly - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); - startCluster(conf); - GenericTestUtils.LogCapturer logCapturer = - GenericTestUtils.LogCapturer.captureLogs(XceiverClientRatis.LOG); - XceiverClientManager clientManager = new XceiverClientManager(conf); - - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, containerOwner); - XceiverClientSpi xceiverClient = clientManager - 
.acquireClient(container1.getPipeline()); - Assert.assertEquals(1, xceiverClient.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - xceiverClient.getPipeline()); - Pipeline pipeline = xceiverClient.getPipeline(); - XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient; - long containerId = container1.getContainerInfo().getContainerID(); - XceiverClientReply reply = xceiverClient.sendCommandAsync( - ContainerTestHelper.getCreateContainerRequest(containerId, - xceiverClient.getPipeline())); - reply.getResponse().get(); - Assert.assertEquals(3, ratisClient.getCommitInfoMap().size()); - List pipelineList = new ArrayList<>(); - pipelineList.add(pipeline); - ContainerTestHelper.waitForPipelineClose(pipelineList, cluster); - try { - // just watch for a log index which in not updated in the commitInfo Map - // as well as there is no logIndex generate in Ratis. - // The basic idea here is just to test if its throws an exception. - xceiverClient - .watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10, - 20000); - Assert.fail("Expected exception not thrown"); - } catch(Exception e) { - Assert.assertTrue(HddsClientUtils - .checkForException(e) instanceof GroupMismatchException); - } - clientManager.releaseClient(xceiverClient, false); - shutdown(); - } - - private OzoneOutputStream createKey(String keyName, ReplicationType type, - long size) throws Exception { - return ContainerTestHelper - .createKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - private void validateData(String keyName, byte[] data) throws Exception { - ContainerTestHelper - .validateData(keyName, data, objectStore, volumeName, bucketName); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java deleted file mode 100644 index 0f48495c614..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -/** - * This package contains test class for Ozone rpc client library. 
- */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java deleted file mode 100644 index 395bda0d5dd..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ /dev/null @@ -1,907 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container; - -import java.io.IOException; -import java.net.ServerSocket; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.ratis.RatisHelper; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry; -import org.apache.hadoop.ozone.client.io.KeyOutputStream; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import 
org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.OzoneChecksumException; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.security.token.Token; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.server.impl.RaftServerImpl; -import org.apache.ratis.server.impl.RaftServerProxy; -import org.apache.ratis.statemachine.StateMachine; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.junit.Assert; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Helpers for container tests. - */ -public final class ContainerTestHelper { - public static final Logger LOG = LoggerFactory.getLogger( - ContainerTestHelper.class); - private static Random r = new Random(); - - public static final long CONTAINER_MAX_SIZE = - (long) StorageUnit.GB.toBytes(1); - - /** - * Never constructed. - */ - private ContainerTestHelper() { - } - - // TODO: mock multi-node pipeline - /** - * Create a pipeline with single node replica. - * - * @return Pipeline with single node in it. - * @throws IOException - */ - public static Pipeline createSingleNodePipeline() throws - IOException { - return createPipeline(1); - } - - public static String createLocalAddress() throws IOException { - try(ServerSocket s = new ServerSocket(0)) { - return "127.0.0.1:" + s.getLocalPort(); - } - } - public static DatanodeDetails createDatanodeDetails() throws IOException { - ServerSocket socket = new ServerSocket(0); - int port = socket.getLocalPort(); - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, port); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, port); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, port); - DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID().toString()) - .setIpAddress(socket.getInetAddress().getHostAddress()) - .setHostName(socket.getInetAddress().getHostName()) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort) - .build(); - - socket.close(); - return datanodeDetails; - } - - /** - * Create a pipeline with single node replica. - * - * @return Pipeline with single node in it. 
- * @throws IOException - */ - public static Pipeline createPipeline(int numNodes) - throws IOException { - Preconditions.checkArgument(numNodes >= 1); - final List ids = new ArrayList<>(numNodes); - for(int i = 0; i < numNodes; i++) { - ids.add(createDatanodeDetails()); - } - return createPipeline(ids); - } - - public static Pipeline createPipeline( - Iterable ids) throws IOException { - Objects.requireNonNull(ids, "ids == null"); - Preconditions.checkArgument(ids.iterator().hasNext()); - List dns = new ArrayList<>(); - ids.forEach(dns::add); - Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(ReplicationFactor.ONE) - .setNodes(dns) - .build(); - return pipeline; - } - - /** - * Creates a ChunkInfo for testing. - * - * @param keyID - ID of the key - * @param seqNo - Chunk number. - * @return ChunkInfo - * @throws IOException - */ - public static ChunkInfo getChunk(long keyID, int seqNo, long offset, - long len) throws IOException { - - ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", keyID, - seqNo), offset, len); - return info; - } - - /** - * Generates some data of the requested len. - * - * @param len - Number of bytes. - * @return byte array with valid data. - */ - public static ByteBuffer getData(int len) { - byte[] data = new byte[len]; - r.nextBytes(data); - return ByteBuffer.wrap(data); - } - - /** - * Computes the hash and sets the value correctly. - * - * @param info - chunk info. - * @param data - data array - * @throws NoSuchAlgorithmException - */ - public static void setDataChecksum(ChunkInfo info, ByteBuffer data) - throws OzoneChecksumException { - Checksum checksum = new Checksum(); - info.setChecksumData(checksum.computeChecksum(data)); - } - - /** - * Returns a writeChunk Request. - * - * @param pipeline - A set of machines where this container lives. - * @param blockID - Block ID of the chunk. - * @param datalen - Length of data. - * @return ContainerCommandRequestProto - * @throws IOException - * @throws NoSuchAlgorithmException - */ - public static ContainerCommandRequestProto getWriteChunkRequest( - Pipeline pipeline, BlockID blockID, int datalen) throws IOException { - LOG.trace("writeChunk {} (blockID={}) to pipeline=", - datalen, blockID, pipeline); - ContainerProtos.WriteChunkRequestProto.Builder writeRequest = - ContainerProtos.WriteChunkRequestProto - .newBuilder(); - - writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf()); - - ByteBuffer data = getData(datalen); - ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen); - setDataChecksum(info, data); - - writeRequest.setChunkData(info.getProtoBufMessage()); - writeRequest.setData(ByteString.copyFrom(data)); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.WriteChunk); - request.setContainerID(blockID.getContainerID()); - request.setWriteChunk(writeRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - - return request.build(); - } - - /** - * Returns PutSmallFile Request that we can send to the container. - * - * @param pipeline - Pipeline - * @param blockID - Block ID of the small file. 
- * @param dataLen - Number of bytes in the data - * @return ContainerCommandRequestProto - */ - public static ContainerCommandRequestProto getWriteSmallFileRequest( - Pipeline pipeline, BlockID blockID, int dataLen) - throws Exception { - ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = - ContainerProtos.PutSmallFileRequestProto.newBuilder(); - ByteBuffer data = getData(dataLen); - ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen); - setDataChecksum(info, data); - - - ContainerProtos.PutBlockRequestProto.Builder putRequest = - ContainerProtos.PutBlockRequestProto.newBuilder(); - - BlockData blockData = new BlockData(blockID); - List newList = new LinkedList<>(); - newList.add(info.getProtoBufMessage()); - blockData.setChunks(newList); - putRequest.setBlockData(blockData.getProtoBufMessage()); - - smallFileRequest.setChunkInfo(info.getProtoBufMessage()); - smallFileRequest.setData(ByteString.copyFrom(data)); - smallFileRequest.setBlock(putRequest); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.PutSmallFile); - request.setContainerID(blockID.getContainerID()); - request.setPutSmallFile(smallFileRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - - public static ContainerCommandRequestProto getReadSmallFileRequest( - Pipeline pipeline, ContainerProtos.PutBlockRequestProto putKey) - throws Exception { - ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest = - ContainerProtos.GetSmallFileRequestProto.newBuilder(); - ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey); - smallFileRequest.setBlock(getKey.getGetBlock()); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.GetSmallFile); - request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID()); - request.setGetSmallFile(smallFileRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - /** - * Returns a read Request. - * - * @param pipeline pipeline. - * @param request writeChunkRequest. - * @return Request. - * @throws IOException - * @throws NoSuchAlgorithmException - */ - public static ContainerCommandRequestProto getReadChunkRequest( - Pipeline pipeline, ContainerProtos.WriteChunkRequestProto request) - throws IOException, NoSuchAlgorithmException { - LOG.trace("readChunk blockID={} from pipeline={}", - request.getBlockID(), pipeline); - - ContainerProtos.ReadChunkRequestProto.Builder readRequest = - ContainerProtos.ReadChunkRequestProto.newBuilder(); - readRequest.setBlockID(request.getBlockID()); - readRequest.setChunkData(request.getChunkData()); - - Builder newRequest = - ContainerCommandRequestProto.newBuilder(); - newRequest.setCmdType(ContainerProtos.Type.ReadChunk); - newRequest.setContainerID(readRequest.getBlockID().getContainerID()); - newRequest.setReadChunk(readRequest); - newRequest.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return newRequest.build(); - } - - /** - * Returns a delete Request. - * - * @param pipeline pipeline. 
- * @param writeRequest - write request - * @return request - * @throws IOException - * @throws NoSuchAlgorithmException - */ - public static ContainerCommandRequestProto getDeleteChunkRequest( - Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) - throws - IOException, NoSuchAlgorithmException { - LOG.trace("deleteChunk blockID={} from pipeline={}", - writeRequest.getBlockID(), pipeline); - - ContainerProtos.DeleteChunkRequestProto.Builder deleteRequest = - ContainerProtos.DeleteChunkRequestProto - .newBuilder(); - - deleteRequest.setChunkData(writeRequest.getChunkData()); - deleteRequest.setBlockID(writeRequest.getBlockID()); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.DeleteChunk); - request.setContainerID(writeRequest.getBlockID().getContainerID()); - request.setDeleteChunk(deleteRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - /** - * Returns a create container command for test purposes. There are a bunch of - * tests where we need to just send a request and get a reply. - * - * @return ContainerCommandRequestProto. - */ - public static ContainerCommandRequestProto getCreateContainerRequest( - long containerID, Pipeline pipeline) throws IOException { - LOG.trace("addContainer: {}", containerID); - return getContainerCommandRequestBuilder(containerID, pipeline).build(); - } - - /** - * Returns a create container command with token. There are a bunch of - * tests where we need to just send a request and get a reply. - * - * @return ContainerCommandRequestProto. - */ - public static ContainerCommandRequestProto getCreateContainerRequest( - long containerID, Pipeline pipeline, Token token) throws IOException { - LOG.trace("addContainer: {}", containerID); - return getContainerCommandRequestBuilder(containerID, pipeline) - .setEncodedToken(token.encodeToUrlString()) - .build(); - } - - private static Builder getContainerCommandRequestBuilder(long containerID, - Pipeline pipeline) throws IOException { - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.CreateContainer); - request.setContainerID(containerID); - request.setCreateContainer( - ContainerProtos.CreateContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - - return request; - } - - /** - * Returns a create container command for test purposes. There are a bunch of - * tests where we need to just send a request and get a reply. - * - * @return ContainerCommandRequestProto. - */ - public static ContainerCommandRequestProto getCreateContainerSecureRequest( - long containerID, Pipeline pipeline, - Token token) throws IOException { - LOG.trace("addContainer: {}", containerID); - - Builder request = getContainerCommandRequestBuilder(containerID, pipeline); - if(token != null){ - request.setEncodedToken(token.encodeToUrlString()); - } - return request.build(); - } - - /** - * Return an update container command for test purposes. - * Creates a container data based on the given meta data, - * and request to update an existing container with it. 
- * - * @param containerID - * @param metaData - * @return - * @throws IOException - */ - public static ContainerCommandRequestProto getUpdateContainerRequest( - long containerID, Map metaData) throws IOException { - ContainerProtos.UpdateContainerRequestProto.Builder updateRequestBuilder = - ContainerProtos.UpdateContainerRequestProto.newBuilder(); - String[] keys = metaData.keySet().toArray(new String[]{}); - for(int i=0; i newList = new LinkedList<>(); - newList.add(writeRequest.getChunkData()); - blockData.setChunks(newList); - blockData.setBlockCommitSequenceId(0); - putRequest.setBlockData(blockData.getProtoBufMessage()); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.PutBlock); - request.setContainerID(blockData.getContainerID()); - request.setPutBlock(putRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - /** - * Gets a GetBlockRequest for test purpose. - * @param pipeline - pipeline - * @param putBlockRequest - putBlockRequest. - * @return - Request - * immediately. - */ - public static ContainerCommandRequestProto getBlockRequest( - Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) - throws IOException { - ContainerProtos.DatanodeBlockID blockID = - putBlockRequest.getBlockData().getBlockID(); - LOG.trace("getKey: blockID={}", blockID); - - ContainerProtos.GetBlockRequestProto.Builder getRequest = - ContainerProtos.GetBlockRequestProto.newBuilder(); - getRequest.setBlockID(blockID); - - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.GetBlock); - request.setContainerID(blockID.getContainerID()); - request.setGetBlock(getRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - /** - * Verify the response against the request. - * - * @param request - Request - * @param response - Response - */ - public static void verifyGetBlock(ContainerCommandRequestProto request, - ContainerCommandResponseProto response, int expectedChunksCount) { - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - Assert.assertEquals(expectedChunksCount, - response.getGetBlock().getBlockData().getChunksCount()); - } - - /** - * @param pipeline - pipeline. - * @param putBlockRequest - putBlockRequest. - * @return - Request - */ - public static ContainerCommandRequestProto getDeleteBlockRequest( - Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) - throws IOException { - ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData() - .getBlockID(); - LOG.trace("deleteBlock: name={}", blockID); - ContainerProtos.DeleteBlockRequestProto.Builder delRequest = - ContainerProtos.DeleteBlockRequestProto.newBuilder(); - delRequest.setBlockID(blockID); - Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.DeleteBlock); - request.setContainerID(blockID.getContainerID()); - request.setDeleteBlock(delRequest); - request.setDatanodeUuid(pipeline.getFirstNode().getUuidString()); - return request.build(); - } - - /** - * Returns a close container request. - * @param pipeline - pipeline - * @param containerID - ID of the container. - * @return ContainerCommandRequestProto. 
- */ - public static ContainerCommandRequestProto getCloseContainer( - Pipeline pipeline, long containerID) throws IOException { - ContainerProtos.ContainerCommandRequestProto cmd = - ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CloseContainer) - .setContainerID(containerID) - .setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()) - .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) - .build(); - - return cmd; - } - - /** - * Returns a simple request without traceId. - * @param pipeline - pipeline - * @param containerID - ID of the container. - * @return ContainerCommandRequestProto without traceId. - */ - public static ContainerCommandRequestProto getRequestWithoutTraceId( - Pipeline pipeline, long containerID) throws IOException { - Preconditions.checkNotNull(pipeline); - ContainerProtos.ContainerCommandRequestProto cmd = - ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.CloseContainer) - .setContainerID(containerID) - .setCloseContainer( - ContainerProtos.CloseContainerRequestProto.getDefaultInstance()) - .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) - .build(); - return cmd; - } - - /** - * Returns a delete container request. - * @param pipeline - pipeline - * @return ContainerCommandRequestProto. - */ - public static ContainerCommandRequestProto getDeleteContainer( - Pipeline pipeline, long containerID, boolean forceDelete) - throws IOException { - Preconditions.checkNotNull(pipeline); - ContainerProtos.DeleteContainerRequestProto deleteRequest = - ContainerProtos.DeleteContainerRequestProto.newBuilder(). - setForceDelete(forceDelete).build(); - return ContainerCommandRequestProto.newBuilder() - .setCmdType(ContainerProtos.Type.DeleteContainer) - .setContainerID(containerID) - .setDeleteContainer( - ContainerProtos.DeleteContainerRequestProto.getDefaultInstance()) - .setDeleteContainer(deleteRequest) - .setDatanodeUuid(pipeline.getFirstNode().getUuidString()) - .build(); - } - - private static void sleep(long milliseconds) { - try { - Thread.sleep(milliseconds); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - public static BlockID getTestBlockID(long containerID) { - // Add 2ms delay so that localID based on UtcTime - // won't collide. 
- sleep(2); - return new BlockID(containerID, HddsUtils.getUtcTime()); - } - - public static long getTestContainerID() { - return HddsUtils.getUtcTime(); - } - - public static boolean isContainerClosed(MiniOzoneCluster cluster, - long containerID, DatanodeDetails datanode) { - ContainerData containerData; - for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) { - if (datanode.equals(datanodeService.getDatanodeDetails())) { - Container container = - datanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - containerData = container.getContainerData(); - return containerData.isClosed(); - } - } - } - return false; - } - - public static boolean isContainerPresent(MiniOzoneCluster cluster, - long containerID, DatanodeDetails datanode) { - for (HddsDatanodeService datanodeService : cluster.getHddsDatanodes()) { - if (datanode.equals(datanodeService.getDatanodeDetails())) { - Container container = - datanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - return true; - } - } - } - return false; - } - - public static OzoneOutputStream createKey(String keyName, - ReplicationType type, long size, ObjectStore objectStore, - String volumeName, String bucketName) throws Exception { - org.apache.hadoop.hdds.client.ReplicationFactor factor = - type == ReplicationType.STAND_ALONE ? - org.apache.hadoop.hdds.client.ReplicationFactor.ONE : - org.apache.hadoop.hdds.client.ReplicationFactor.THREE; - return objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, size, type, factor, new HashMap<>()); - } - - public static OzoneOutputStream createKey(String keyName, - ReplicationType type, - org.apache.hadoop.hdds.client.ReplicationFactor factor, long size, - ObjectStore objectStore, String volumeName, String bucketName) - throws Exception { - return objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, size, type, factor, new HashMap<>()); - } - - public static void validateData(String keyName, byte[] data, - ObjectStore objectStore, String volumeName, String bucketName) - throws Exception { - byte[] readData = new byte[data.length]; - OzoneInputStream is = - objectStore.getVolume(volumeName).getBucket(bucketName) - .readKey(keyName); - is.read(readData); - MessageDigest sha1 = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - sha1.update(data); - MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - sha2.update(readData); - Assert.assertTrue(Arrays.equals(sha1.digest(), sha2.digest())); - is.close(); - } - - public static String getFixedLengthString(String string, int length) { - return String.format("%1$" + length + "s", string); - } - - public static void waitForContainerClose(OzoneOutputStream outputStream, - MiniOzoneCluster cluster) throws Exception { - KeyOutputStream keyOutputStream = - (KeyOutputStream) outputStream.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - List containerIdList = new ArrayList<>(); - for (BlockOutputStreamEntry entry : streamEntryList) { - long id = entry.getBlockID().getContainerID(); - if (!containerIdList.contains(id)) { - containerIdList.add(id); - } - } - Assert.assertTrue(!containerIdList.isEmpty()); - waitForContainerClose(cluster, containerIdList.toArray(new Long[0])); - } - - public static void waitForPipelineClose(OzoneOutputStream outputStream, - MiniOzoneCluster cluster, boolean 
waitForContainerCreation) - throws Exception { - KeyOutputStream keyOutputStream = - (KeyOutputStream) outputStream.getOutputStream(); - List streamEntryList = - keyOutputStream.getStreamEntries(); - List containerIdList = new ArrayList<>(); - for (BlockOutputStreamEntry entry : streamEntryList) { - long id = entry.getBlockID().getContainerID(); - if (!containerIdList.contains(id)) { - containerIdList.add(id); - } - } - Assert.assertTrue(!containerIdList.isEmpty()); - waitForPipelineClose(cluster, waitForContainerCreation, - containerIdList.toArray(new Long[0])); - } - - public static void waitForPipelineClose(MiniOzoneCluster cluster, - boolean waitForContainerCreation, Long... containerIdList) - throws TimeoutException, InterruptedException, IOException { - List pipelineList = new ArrayList<>(); - for (long containerID : containerIdList) { - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - if (!pipelineList.contains(pipeline)) { - pipelineList.add(pipeline); - } - List datanodes = pipeline.getNodes(); - - if (waitForContainerCreation) { - for (DatanodeDetails details : datanodes) { - // Client will issue write chunk and it will create the container on - // datanodes. - // wait for the container to be created - GenericTestUtils - .waitFor(() -> isContainerPresent(cluster, containerID, details), - 500, 100 * 1000); - Assert.assertTrue(isContainerPresent(cluster, containerID, details)); - - // make sure the container gets created first - Assert.assertFalse(ContainerTestHelper - .isContainerClosed(cluster, containerID, details)); - } - } - } - waitForPipelineClose(pipelineList, cluster); - } - - public static void waitForPipelineClose(List pipelineList, - MiniOzoneCluster cluster) - throws TimeoutException, InterruptedException, IOException { - for (Pipeline pipeline1 : pipelineList) { - // issue pipeline destroy command - cluster.getStorageContainerManager().getPipelineManager() - .finalizeAndDestroyPipeline(pipeline1, false); - } - - // wait for the pipeline to get destroyed in the datanodes - for (Pipeline pipeline : pipelineList) { - for (DatanodeDetails dn : pipeline.getNodes()) { - XceiverServerSpi server = - cluster.getHddsDatanodes().get(cluster.getHddsDatanodeIndex(dn)) - .getDatanodeStateMachine().getContainer().getWriteChannel(); - Assert.assertTrue(server instanceof XceiverServerRatis); - XceiverServerRatis raftServer = (XceiverServerRatis) server; - GenericTestUtils.waitFor( - () -> (!raftServer.getPipelineIds().contains(pipeline.getId())), - 500, 100 * 1000); - } - } - } - - public static void waitForContainerClose(MiniOzoneCluster cluster, - Long... containerIdList) - throws ContainerNotFoundException, PipelineNotFoundException, - TimeoutException, InterruptedException { - List pipelineList = new ArrayList<>(); - for (long containerID : containerIdList) { - ContainerInfo container = - cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = - cluster.getStorageContainerManager().getPipelineManager() - .getPipeline(container.getPipelineID()); - pipelineList.add(pipeline); - List datanodes = pipeline.getNodes(); - - for (DatanodeDetails details : datanodes) { - // Client will issue write chunk and it will create the container on - // datanodes. 
- // wait for the container to be created - GenericTestUtils - .waitFor(() -> isContainerPresent(cluster, containerID, details), - 500, 100 * 1000); - Assert.assertTrue(isContainerPresent(cluster, containerID, details)); - - // make sure the container gets created first - Assert.assertFalse(ContainerTestHelper - .isContainerClosed(cluster, containerID, details)); - // send the order to close the container - cluster.getStorageContainerManager().getEventQueue() - .fireEvent(SCMEvents.CLOSE_CONTAINER, - ContainerID.valueof(containerID)); - } - } - int index = 0; - for (long containerID : containerIdList) { - Pipeline pipeline = pipelineList.get(index); - List datanodes = pipeline.getNodes(); - // Below condition avoids the case where container has been allocated - // but not yet been used by the client. In such a case container is never - // created. - for (DatanodeDetails datanodeDetails : datanodes) { - GenericTestUtils.waitFor( - () -> isContainerClosed(cluster, containerID, datanodeDetails), 500, - 15 * 1000); - //double check if it's really closed - // (waitFor also throws an exception) - Assert.assertTrue( - isContainerClosed(cluster, containerID, datanodeDetails)); - } - index++; - } - } - - public static StateMachine getStateMachine(MiniOzoneCluster cluster) - throws Exception { - return getStateMachine(cluster.getHddsDatanodes().get(0), null); - } - - private static RaftServerImpl getRaftServerImpl(HddsDatanodeService dn, - Pipeline pipeline) throws Exception { - XceiverServerSpi server = dn.getDatanodeStateMachine(). - getContainer().getWriteChannel(); - RaftServerProxy proxy = - (RaftServerProxy) (((XceiverServerRatis) server).getServer()); - RaftGroupId groupId = - pipeline == null ? proxy.getGroupIds().iterator().next() : - RatisHelper.newRaftGroup(pipeline).getGroupId(); - return proxy.getImpl(groupId); - } - - public static StateMachine getStateMachine(HddsDatanodeService dn, - Pipeline pipeline) throws Exception { - return getRaftServerImpl(dn, pipeline).getStateMachine(); - } - - public static boolean isRatisLeader(HddsDatanodeService dn, Pipeline pipeline) - throws Exception { - return getRaftServerImpl(dn, pipeline).isLeader(); - } - - public static boolean isRatisFollower(HddsDatanodeService dn, - Pipeline pipeline) throws Exception { - return getRaftServerImpl(dn, pipeline).isFollower(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java deleted file mode 100644 index 524c3bdb4ad..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .DatanodeBlockID; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; -import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer - .writeChunkForContainer; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * Tests ozone containers replication. - */ -public class TestContainerReplication { - /** - * Set the timeout for every test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private OzoneConfiguration conf; - private MiniOzoneCluster cluster; - - @Before - public void setup() throws Exception { - conf = newOzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2) - .setRandomContainerPort(true).build(); - } - - @After - public void teardown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testContainerReplication() throws Exception { - //GIVEN - long containerId = 1L; - - cluster.waitForClusterToBeReady(); - - HddsDatanodeService firstDatanode = cluster.getHddsDatanodes().get(0); - - //copy from the first datanode - List sourceDatanodes = new ArrayList<>(); - sourceDatanodes.add(firstDatanode.getDatanodeDetails()); - - Pipeline sourcePipelines = - ContainerTestHelper.createPipeline(sourceDatanodes); - - //create a new client - XceiverClientSpi client = new XceiverClientGrpc(sourcePipelines, conf); - client.connect(); - - //New container for testing - TestOzoneContainer.createContainerForTesting(client, containerId); - - ContainerCommandRequestProto requestProto = - writeChunkForContainer(client, containerId, 1024); - - DatanodeBlockID blockID = requestProto.getWriteChunk().getBlockID(); - - // Put Block to the test container - ContainerCommandRequestProto putBlockRequest = ContainerTestHelper - .getPutBlockRequest(sourcePipelines, requestProto.getWriteChunk()); - - ContainerCommandResponseProto response = - client.sendCommand(putBlockRequest); - - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - HddsDatanodeService destinationDatanode = - chooseDatanodeWithoutContainer(sourcePipelines, - cluster.getHddsDatanodes()); - - // Close the container - ContainerCommandRequestProto closeContainerRequest = ContainerTestHelper - .getCloseContainer(sourcePipelines, containerId); - response = client.sendCommand(closeContainerRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - //WHEN: send the order to replicate the container - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(destinationDatanode.getDatanodeDetails().getUuid(), - new ReplicateContainerCommand(containerId, - sourcePipelines.getNodes())); - - DatanodeStateMachine destinationDatanodeDatanodeStateMachine = - destinationDatanode.getDatanodeStateMachine(); - - //wait for the replication - GenericTestUtils.waitFor(() - -> destinationDatanodeDatanodeStateMachine.getSupervisor() - .getReplicationCounter() > 0, 1000, 20_000); - - OzoneContainer ozoneContainer = - destinationDatanodeDatanodeStateMachine.getContainer(); - - Container container = - ozoneContainer - .getContainerSet().getContainer(containerId); - - Assert.assertNotNull( - "Container is not replicated to the destination datanode", - container); - - Assert.assertNotNull( - "ContainerData of the replicated container is null", - container.getContainerData()); - - KeyValueHandler handler = (KeyValueHandler) ozoneContainer.getDispatcher() - .getHandler(ContainerType.KeyValueContainer); - - BlockData key = handler.getBlockManager() - .getBlock(container, BlockID.getFromProtobuf(blockID)); - - Assert.assertNotNull(key); - Assert.assertEquals(1, key.getChunks().size()); - Assert.assertEquals(requestProto.getWriteChunk().getChunkData(), - key.getChunks().get(0)); - } - - private HddsDatanodeService chooseDatanodeWithoutContainer(Pipeline pipeline, - List dataNodes) { - 
for (HddsDatanodeService datanode : dataNodes) { - if (!pipeline.getNodes().contains(datanode.getDatanodeDetails())) { - return datanode; - } - } - throw new AssertionError( - "No datanode outside of the pipeline"); - } - - private static OzoneConfiguration newOzoneConfiguration() { - return new OzoneConfiguration(); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java deleted file mode 100644 index e1d1a95f4e2..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ /dev/null @@ -1,465 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common; - -import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl; -import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background - .BlockDeletingService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Test; -import org.junit.BeforeClass; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; - -/** - * Tests to test block deleting service. 
- */ -public class TestBlockDeletingService { - - private static final Logger LOG = - LoggerFactory.getLogger(TestBlockDeletingService.class); - - private static File testRoot; - private static String scmId; - private static String clusterID; - - @BeforeClass - public static void init() throws IOException { - testRoot = GenericTestUtils - .getTestDir(TestBlockDeletingService.class.getSimpleName()); - if (testRoot.exists()) { - FileUtils.cleanDirectory(testRoot); - } - scmId = UUID.randomUUID().toString(); - clusterID = UUID.randomUUID().toString(); - } - - @AfterClass - public static void cleanup() throws IOException { - FileUtils.deleteDirectory(testRoot); - } - - /** - * A helper method to create some blocks and put them under deletion - * state for testing. This method directly updates container.db and - * creates some fake chunk files for testing. - */ - private void createToDeleteBlocks(ContainerSet containerSet, - Configuration conf, int numOfContainers, int numOfBlocksPerContainer, - int numOfChunksPerBlock) throws IOException { - for (int x = 0; x < numOfContainers; x++) { - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); - long containerID = ContainerTestHelper.getTestContainerID(); - KeyValueContainerData data = new KeyValueContainerData(containerID, - ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - data.closeContainer(); - Container container = new KeyValueContainer(data, conf); - container.create(new VolumeSet(scmId, clusterID, conf), - new RoundRobinVolumeChoosingPolicy(), scmId); - containerSet.addContainer(container); - data = (KeyValueContainerData) containerSet.getContainer( - containerID).getContainerData(); - try(ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) { - for (int j = 0; j < numOfBlocksPerContainer; j++) { - BlockID blockID = - ContainerTestHelper.getTestBlockID(containerID); - String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX + - blockID.getLocalID(); - BlockData kd = new BlockData(blockID); - List chunks = Lists.newArrayList(); - for (int k = 0; k < numOfChunksPerBlock; k++) { - // offset doesn't matter here - String chunkName = blockID.getLocalID() + "_chunk_" + k; - File chunk = new File(data.getChunksPath(), chunkName); - FileUtils.writeStringToFile(chunk, "a chunk", - Charset.defaultCharset()); - LOG.info("Creating file {}", chunk.getAbsolutePath()); - // make sure file exists - Assert.assertTrue(chunk.isFile() && chunk.exists()); - ContainerProtos.ChunkInfo info = - ContainerProtos.ChunkInfo.newBuilder() - .setChunkName(chunk.getAbsolutePath()) - .setLen(0) - .setOffset(0) - .setChecksumData(Checksum.getNoChecksumDataProto()) - .build(); - chunks.add(info); - } - kd.setChunks(chunks); - metadata.getStore().put(DFSUtil.string2Bytes(deleteStateName), - kd.getProtoBufMessage().toByteArray()); - } - } - } - } - - /** - * Run service runDeletingTasks and wait for it's been processed. - */ - private void deleteAndWait(BlockDeletingServiceTestImpl service, - int timesOfProcessed) throws TimeoutException, InterruptedException { - service.runDeletingTasks(); - GenericTestUtils.waitFor(() - -> service.getTimesOfProcessed() == timesOfProcessed, 100, 3000); - } - - /** - * Get under deletion blocks count from DB, - * note this info is parsed from container.db. 
- */ - private int getUnderDeletionBlocksCount(ReferenceCountedDB meta) - throws IOException { - List<Map.Entry<byte[], byte[]>> underDeletionBlocks = - meta.getStore().getRangeKVs(null, 100, - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETING_KEY_PREFIX)); - return underDeletionBlocks.size(); - } - - private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException { - List<Map.Entry<byte[], byte[]>> underDeletionBlocks = - db.getStore().getRangeKVs(null, 100, - new MetadataKeyFilters.KeyPrefixFilter() - .addFilter(OzoneConsts.DELETED_KEY_PREFIX)); - return underDeletionBlocks.size(); - } - - @Test - public void testBlockDeletion() throws Exception { - Configuration conf = new OzoneConfiguration(); - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); - ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 1, 3, 1); - - BlockDeletingServiceTestImpl svc = - getBlockDeletinService(containerSet, conf, 1000); - svc.start(); - GenericTestUtils.waitFor(svc::isStarted, 100, 3000); - - // Ensure 1 container was created - List<ContainerData> containerData = Lists.newArrayList(); - containerSet.listContainer(0L, 1, containerData); - Assert.assertEquals(1, containerData.size()); - - try(ReferenceCountedDB meta = BlockUtils.getDB( - (KeyValueContainerData) containerData.get(0), conf)) { - Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy(); - // NOTE: this test assumes that all the containers are KeyValueContainers and - // have DeleteTransactionId in KeyValueContainerData. If other - // types are going to be added, this test should be checked. 
- long transactionId = ((KeyValueContainerData) containerMap - .get(containerData.get(0).getContainerID()).getContainerData()) - .getDeleteTransactionId(); - - - // Number of deleted blocks in container should be equal to 0 before - // block delete - Assert.assertEquals(0, transactionId); - - // Ensure there are 3 blocks under deletion and 0 deleted blocks - Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(0, getDeletedBlocksCount(meta)); - - // An interval will delete 1 * 2 blocks - deleteAndWait(svc, 1); - Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(2, getDeletedBlocksCount(meta)); - - deleteAndWait(svc, 2); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); - - deleteAndWait(svc, 3); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); - } - - svc.shutdown(); - } - - @Test - public void testShutdownService() throws Exception { - Configuration conf = new OzoneConfiguration(); - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, - TimeUnit.MILLISECONDS); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10); - ContainerSet containerSet = new ContainerSet(); - // Create 1 container with 100 blocks - createToDeleteBlocks(containerSet, conf, 1, 100, 1); - - BlockDeletingServiceTestImpl service = - getBlockDeletinService(containerSet, conf, 1000); - service.start(); - GenericTestUtils.waitFor(service::isStarted, 100, 3000); - - // Run some deleting tasks and verify there are threads running - service.runDeletingTasks(); - GenericTestUtils.waitFor(() -> service.getThreadCount() > 0, 100, 1000); - - // Wait for 1 or 2 intervals - Thread.sleep(1000); - - // Shutdown service and verify all threads are stopped - service.shutdown(); - GenericTestUtils.waitFor(() -> service.getThreadCount() == 0, 100, 1000); - } - - @Test - public void testBlockDeletionTimeout() throws Exception { - Configuration conf = new OzoneConfiguration(); - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); - ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 1, 3, 1); - - // set timeout value as 1ns to trigger timeout behavior - long timeout = 1; - OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class); - Mockito.when(ozoneContainer.getContainerSet()) - .thenReturn(containerSet); - Mockito.when(ozoneContainer.getWriteChannel()) - .thenReturn(null); - BlockDeletingService svc = new BlockDeletingService(ozoneContainer, - TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, - conf); - svc.start(); - - LogCapturer log = LogCapturer.captureLogs(BackgroundService.LOG); - GenericTestUtils.waitFor(() -> { - if(log.getOutput().contains( - "Background task executes timed out, retrying in next interval")) { - log.stopCapturing(); - return true; - } - - return false; - }, 1000, 100000); - - log.stopCapturing(); - svc.shutdown(); - - // test for normal case that doesn't have timeout limitation - timeout = 0; - 
createToDeleteBlocks(containerSet, conf, 1, 3, 1); - svc = new BlockDeletingService(ozoneContainer, - TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS, - conf); - svc.start(); - - // get container meta data - List containerData = Lists.newArrayList(); - containerSet.listContainer(0L, 1, containerData); - try(ReferenceCountedDB meta = BlockUtils.getDB( - (KeyValueContainerData) containerData.get(0), conf)) { - - LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG); - GenericTestUtils.waitFor(() -> { - try { - if (getUnderDeletionBlocksCount(meta) == 0) { - return true; - } - } catch (IOException ignored) { - } - return false; - }, 1000, 100000); - newLog.stopCapturing(); - - // The block deleting successfully and shouldn't catch timed - // out warning log. - Assert.assertFalse(newLog.getOutput().contains( - "Background task executes timed out, retrying in next interval")); - } - svc.shutdown(); - } - - private BlockDeletingServiceTestImpl getBlockDeletinService( - ContainerSet containerSet, Configuration conf, int timeout) { - OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class); - Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet); - Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null); - return new BlockDeletingServiceTestImpl(ozoneContainer, timeout, conf); - } - - @Test(timeout = 30000) - public void testContainerThrottle() throws Exception { - // Properties : - // - Number of containers : 2 - // - Number of blocks per container : 1 - // - Number of chunks per block : 10 - // - Container limit per interval : 1 - // - Block limit per container : 1 - // - // Each time only 1 container can be processed, so each time - // 1 block from 1 container can be deleted. - Configuration conf = new OzoneConfiguration(); - // Process 1 container per interval - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1); - ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 2, 1, 10); - - BlockDeletingServiceTestImpl service = - getBlockDeletinService(containerSet, conf, 1000); - service.start(); - - try { - GenericTestUtils.waitFor(service::isStarted, 100, 3000); - // 1st interval processes 1 container 1 block and 10 chunks - deleteAndWait(service, 1); - Assert.assertEquals(10, getNumberOfChunksInContainers(containerSet)); - - AtomicInteger timesToProcess = new AtomicInteger(1); - GenericTestUtils.waitFor(() -> { - try { - timesToProcess.incrementAndGet(); - deleteAndWait(service, timesToProcess.get()); - if (getNumberOfChunksInContainers(containerSet) == 0) { - return true; - } - } catch (Exception ignored) {} - return false; - }, 100, 100000); - Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet)); - } finally { - service.shutdown(); - } - } - - - @Test(timeout = 30000) - public void testBlockThrottle() throws Exception { - // Properties : - // - Number of containers : 5 - // - Number of blocks per container : 3 - // - Number of chunks per block : 1 - // - Container limit per interval : 10 - // - Block limit per container : 2 - // - // Each time containers can be all scanned, but only 2 blocks - // per container can be actually deleted. So it requires 2 waves - // to cleanup all blocks. 
- Configuration conf = new OzoneConfiguration(); - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); - ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 5, 3, 1); - - // Make sure chunks are created - Assert.assertEquals(15, getNumberOfChunksInContainers(containerSet)); - OzoneContainer ozoneContainer = Mockito.mock(OzoneContainer.class); - Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet); - Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null); - BlockDeletingServiceTestImpl service = - getBlockDeletinService(containerSet, conf, 1000); - service.start(); - - try { - GenericTestUtils.waitFor(service::isStarted, 100, 3000); - // Total blocks = 3 * 5 = 15 - // block per task = 2 - // number of containers = 5 - // each interval will at most runDeletingTasks 5 * 2 = 10 blocks - deleteAndWait(service, 1); - Assert.assertEquals(5, getNumberOfChunksInContainers(containerSet)); - - // There is only 5 blocks left to runDeletingTasks - deleteAndWait(service, 2); - Assert.assertEquals(0, getNumberOfChunksInContainers(containerSet)); - } finally { - service.shutdown(); - } - } - - private int getNumberOfChunksInContainers(ContainerSet containerSet) { - Iterator> iterator = containerSet.getContainerIterator(); - int numChunks = 0; - while (iterator.hasNext()) { - Container container = iterator.next(); - File chunkDir = FileUtils.getFile( - ((KeyValueContainerData) container.getContainerData()) - .getChunksPath()); - numChunks += chunkDir.listFiles().length; - } - return numChunks; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java deleted file mode 100644 index 2973a763bb6..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.helpers; - -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.common.Checksum; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestRule; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ThreadLocalRandom; - -/** - * Tests to test block deleting service. - */ -public class TestBlockData { - static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class); - @Rule - public TestRule timeout = new Timeout(10000); - - static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset, - long len) { - return ContainerProtos.ChunkInfo.newBuilder() - .setChunkName(name) - .setOffset(offset) - .setLen(len) - .setChecksumData(Checksum.getNoChecksumDataProto()) - .build(); - } - - @Test - public void testAddAndRemove() { - final BlockData computed = new BlockData(null); - final List expected = new ArrayList<>(); - - assertChunks(expected, computed); - long offset = 0; - int n = 5; - for(int i = 0; i < n; i++) { - offset += assertAddChunk(expected, computed, offset); - } - - for(; !expected.isEmpty();) { - removeChunk(expected, computed); - } - } - - private static int chunkCount = 0; - static ContainerProtos.ChunkInfo addChunk( - List expected, long offset) { - final long length = ThreadLocalRandom.current().nextLong(1000); - final ContainerProtos.ChunkInfo info = - buildChunkInfo("c" + ++chunkCount, offset, length); - expected.add(info); - return info; - } - - static long assertAddChunk(List expected, - BlockData computed, long offset) { - final ContainerProtos.ChunkInfo info = addChunk(expected, offset); - LOG.info("addChunk: " + toString(info)); - computed.addChunk(info); - assertChunks(expected, computed); - return info.getLen(); - } - - - static void removeChunk(List expected, - BlockData computed) { - final int i = ThreadLocalRandom.current().nextInt(expected.size()); - final ContainerProtos.ChunkInfo info = expected.remove(i); - LOG.info("removeChunk: " + toString(info)); - computed.removeChunk(info); - assertChunks(expected, computed); - } - - static void assertChunks(List expected, - BlockData computed) { - final List computedChunks = computed.getChunks(); - Assert.assertEquals("expected=" + expected + "\ncomputed=" + - computedChunks, expected, computedChunks); - Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(), - computed.getSize()); - } - - static String toString(ContainerProtos.ChunkInfo info) { - return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen(); - } - - static String toString(List infos) { - return infos.stream().map(TestBlockData::toString) - .reduce((left, right) -> left + ", " + right) - .orElse(""); - } - - @Test - public void testSetChunks() { - final BlockData computed = new BlockData(null); - final List expected = new ArrayList<>(); - - assertChunks(expected, computed); - long offset = 0; - int n = 5; - for(int i = 0; i < n; i++) { - offset += addChunk(expected, offset).getLen(); - LOG.info("setChunk: " + 
toString(expected)); - computed.setChunks(expected); - assertChunks(expected, computed); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java deleted file mode 100644 index b872516474f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.impl; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * The class for testing container deletion choosing policy. 
- */ -public class TestContainerDeletionChoosingPolicy { - private static String path; - private OzoneContainer ozoneContainer; - private ContainerSet containerSet; - private OzoneConfiguration conf; - private BlockDeletingService blockDeletingService; - // the service timeout - private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0; - private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000; - - @Before - public void init() throws Throwable { - conf = new OzoneConfiguration(); - path = GenericTestUtils - .getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName()); - } - - @Test - public void testRandomChoosingPolicy() throws IOException { - File containerDir = new File(path); - if (containerDir.exists()) { - FileUtils.deleteDirectory(new File(path)); - } - Assert.assertTrue(containerDir.mkdirs()); - - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - RandomContainerDeletionChoosingPolicy.class.getName()); - List pathLists = new LinkedList<>(); - pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); - containerSet = new ContainerSet(); - - int numContainers = 10; - for (int i = 0; i < numContainers; i++) { - KeyValueContainerData data = new KeyValueContainerData(i, - ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - data.closeContainer(); - KeyValueContainer container = new KeyValueContainer(data, conf); - containerSet.addContainer(container); - Assert.assertTrue( - containerSet.getContainerMapCopy() - .containsKey(data.getContainerID())); - } - blockDeletingService = getBlockDeletingService(); - - ContainerDeletionChoosingPolicy deletionPolicy = - new RandomContainerDeletionChoosingPolicy(); - List result0 = - blockDeletingService.chooseContainerForBlockDeletion(5, deletionPolicy); - Assert.assertEquals(5, result0.size()); - - // test random choosing - List result1 = blockDeletingService - .chooseContainerForBlockDeletion(numContainers, deletionPolicy); - List result2 = blockDeletingService - .chooseContainerForBlockDeletion(numContainers, deletionPolicy); - - boolean hasShuffled = false; - for (int i = 0; i < numContainers; i++) { - if (result1.get(i).getContainerID() - != result2.get(i).getContainerID()) { - hasShuffled = true; - break; - } - } - Assert.assertTrue("Chosen container results were same", hasShuffled); - } - - @Test - public void testTopNOrderedChoosingPolicy() throws IOException { - File containerDir = new File(path); - if (containerDir.exists()) { - FileUtils.deleteDirectory(new File(path)); - } - Assert.assertTrue(containerDir.mkdirs()); - - conf.set( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - TopNOrderedContainerDeletionChoosingPolicy.class.getName()); - List pathLists = new LinkedList<>(); - pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath())); - containerSet = new ContainerSet(); - - int numContainers = 10; - Random random = new Random(); - Map name2Count = new HashMap<>(); - // create [numContainers + 1] containers - for (int i = 0; i <= numContainers; i++) { - long containerId = RandomUtils.nextLong(); - KeyValueContainerData data = - new KeyValueContainerData(containerId, - ContainerTestHelper.CONTAINER_MAX_SIZE, - UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - if (i != numContainers) { - int deletionBlocks = random.nextInt(numContainers) + 1; - data.incrPendingDeletionBlocks(deletionBlocks); - name2Count.put(containerId, deletionBlocks); - } - KeyValueContainer container 
= new KeyValueContainer(data, conf); - data.closeContainer(); - containerSet.addContainer(container); - Assert.assertTrue( - containerSet.getContainerMapCopy().containsKey(containerId)); - } - - blockDeletingService = getBlockDeletingService(); - ContainerDeletionChoosingPolicy deletionPolicy = - new TopNOrderedContainerDeletionChoosingPolicy(); - List result0 = - blockDeletingService.chooseContainerForBlockDeletion(5, deletionPolicy); - Assert.assertEquals(5, result0.size()); - - List result1 = blockDeletingService - .chooseContainerForBlockDeletion(numContainers + 1, deletionPolicy); - // the empty deletion blocks container should not be chosen - Assert.assertEquals(numContainers, result1.size()); - - // verify the order of return list - int lastCount = Integer.MAX_VALUE; - for (ContainerData data : result1) { - int currentCount = name2Count.remove(data.getContainerID()); - // previous count should not smaller than next one - Assert.assertTrue(currentCount > 0 && currentCount <= lastCount); - lastCount = currentCount; - } - // ensure all the container data are compared - Assert.assertEquals(0, name2Count.size()); - } - - private BlockDeletingService getBlockDeletingService() { - ozoneContainer = Mockito.mock(OzoneContainer.class); - Mockito.when(ozoneContainer.getContainerSet()).thenReturn(containerSet); - Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(null); - blockDeletingService = new BlockDeletingService(ozoneContainer, - SERVICE_INTERVAL_IN_MILLISECONDS, SERVICE_TIMEOUT_IN_MILLISECONDS, - TimeUnit.MILLISECONDS, conf); - return blockDeletingService; - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java deleted file mode 100644 index ed482093dfb..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ /dev/null @@ -1,897 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

<p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.collect.Maps; -import org.apache.commons.codec.binary.Hex; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Checksum; -import org.apache.hadoop.ozone.common.ChecksumData; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl; -import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.DirectoryStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk; -import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getData; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Simple tests to verify that container persistence works as expected. Some of - * these tests are specific to {@link KeyValueContainer}. If a new {@link - * ContainerProtos.ContainerType} is added, the tests need to be modified. - */ -public class TestContainerPersistence { - private static final String DATANODE_UUID = UUID.randomUUID().toString(); - private static final String SCM_ID = UUID.randomUUID().toString(); - private static Logger log = - LoggerFactory.getLogger(TestContainerPersistence.class); - private static String hddsPath; - private static OzoneConfiguration conf; - private static ContainerSet containerSet; - private static VolumeSet volumeSet; - private static VolumeChoosingPolicy volumeChoosingPolicy; - private static BlockManager blockManager; - private static ChunkManager chunkManager; - @Rule - public ExpectedException exception = ExpectedException.none(); - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - private Long containerID = 8888L; - - @BeforeClass - public static void init() throws Throwable { - conf = new OzoneConfiguration(); - hddsPath = GenericTestUtils - .getTempPath(TestContainerPersistence.class.getSimpleName()); - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath); - volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy(); - } - - @AfterClass - public static void shutdown() throws IOException { - FileUtils.deleteDirectory(new File(hddsPath)); - } - - @Before - public void setupPaths() throws IOException { - containerSet = new ContainerSet(); - volumeSet = new VolumeSet(DATANODE_UUID, conf); - blockManager = new BlockManagerImpl(conf); - chunkManager = new ChunkManagerImpl(true); - - for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) { - StorageLocation location = StorageLocation.parse(dir); - FileUtils.forceMkdir(new File(location.getNormalizedUri())); - } - } - - @After - public void cleanupDir() throws IOException { - // Clean up SCM metadata - log.info("Deleting {}", hddsPath); - FileUtils.deleteDirectory(new File(hddsPath)); - - // Clean up SCM datanode container metadata/data - for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) { - StorageLocation location = StorageLocation.parse(dir); - FileUtils.deleteDirectory(new File(location.getNormalizedUri())); - } - } - - private long getTestContainerID() { - return ContainerTestHelper.getTestContainerID(); - } - - private DispatcherContext getDispatcherContext() { - return new DispatcherContext.Builder().build(); - } - - private Container addContainer(ContainerSet cSet, long cID) - throws IOException { - long commitBytesBefore = 0; - long commitBytesAfter = 0; - long commitIncrement = 0; - KeyValueContainerData data = new KeyValueContainerData(cID, - ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - data.addMetadata("VOLUME", "shire"); - data.addMetadata("owner)", "bilbo"); - KeyValueContainer container = new KeyValueContainer(data, conf); - container.create(volumeSet, volumeChoosingPolicy, SCM_ID); - commitBytesBefore = container.getContainerData() - .getVolume().getCommittedBytes(); - cSet.addContainer(container); - commitBytesAfter = 
container.getContainerData() - .getVolume().getCommittedBytes(); - commitIncrement = commitBytesAfter - commitBytesBefore; - // did we commit space for the new container? - Assert.assertTrue(commitIncrement == - ContainerTestHelper.CONTAINER_MAX_SIZE); - return container; - } - - @Test - public void testCreateContainer() throws Exception { - long testContainerID = getTestContainerID(); - addContainer(containerSet, testContainerID); - Assert.assertTrue(containerSet.getContainerMapCopy() - .containsKey(testContainerID)); - KeyValueContainerData kvData = - (KeyValueContainerData) containerSet.getContainer(testContainerID) - .getContainerData(); - - Assert.assertNotNull(kvData); - Assert.assertTrue(new File(kvData.getMetadataPath()).exists()); - Assert.assertTrue(new File(kvData.getChunksPath()).exists()); - Assert.assertTrue(kvData.getDbFile().exists()); - - Path meta = kvData.getDbFile().toPath().getParent(); - Assert.assertTrue(meta != null && Files.exists(meta)); - - ReferenceCountedDB store = null; - try { - store = BlockUtils.getDB(kvData, conf); - Assert.assertNotNull(store); - } finally { - if (store != null) { - store.close(); - } - } - } - - @Test - public void testCreateDuplicateContainer() throws Exception { - long testContainerID = getTestContainerID(); - - Container container = addContainer(containerSet, testContainerID); - try { - containerSet.addContainer(container); - fail("Expected Exception not thrown."); - } catch (IOException ex) { - Assert.assertNotNull(ex); - } - } - - @Test - public void testDeleteContainer() throws Exception { - long testContainerID1 = getTestContainerID(); - Thread.sleep(100); - long testContainerID2 = getTestContainerID(); - - Container container1 = addContainer(containerSet, testContainerID1); - container1.close(); - - Container container2 = addContainer(containerSet, testContainerID2); - - Assert.assertTrue(containerSet.getContainerMapCopy() - .containsKey(testContainerID1)); - Assert.assertTrue(containerSet.getContainerMapCopy() - .containsKey(testContainerID2)); - - container1.delete(); - containerSet.removeContainer(testContainerID1); - Assert.assertFalse(containerSet.getContainerMapCopy() - .containsKey(testContainerID1)); - - // Adding block to a deleted container should fail. - exception.expect(StorageContainerException.class); - exception.expectMessage("Error opening DB."); - BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID1); - BlockData someKey1 = new BlockData(blockID1); - someKey1.setChunks(new LinkedList()); - blockManager.putBlock(container1, someKey1); - - // Deleting a non-empty container should fail. - BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID2); - BlockData someKey2 = new BlockData(blockID2); - someKey2.setChunks(new LinkedList()); - blockManager.putBlock(container2, someKey2); - - exception.expect(StorageContainerException.class); - exception.expectMessage( - "Container cannot be deleted because it is not empty."); - container2.delete(); - Assert.assertTrue(containerSet.getContainerMapCopy() - .containsKey(testContainerID2)); - } - - @Test - public void testGetContainerReports() throws Exception { - final int count = 10; - List containerIDs = new ArrayList<>(); - - for (int i = 0; i < count; i++) { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - // Close a bunch of containers. 
- if (i % 3 == 0) { - container.close(); - } - containerIDs.add(testContainerID); - } - - // ContainerSet#getContainerReport currently returns all containers (open - // and closed) reports. - List reports = - containerSet.getContainerReport().getReportsList(); - Assert.assertEquals(10, reports.size()); - for (StorageContainerDatanodeProtocolProtos.ContainerReplicaProto report : - reports) { - long actualContainerID = report.getContainerID(); - Assert.assertTrue(containerIDs.remove(actualContainerID)); - } - Assert.assertTrue(containerIDs.isEmpty()); - } - - /** - * This test creates 50 containers and reads them back 5 containers at a time - * and verifies that we did get back all containers. - * - * @throws IOException - */ - @Test - public void testListContainer() throws IOException { - final int count = 10; - final int step = 5; - - Map testMap = new HashMap<>(); - for (int x = 0; x < count; x++) { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - testMap.put(testContainerID, container.getContainerData()); - } - - int counter = 0; - long prevKey = 0; - List results = new LinkedList<>(); - while (counter < count) { - containerSet.listContainer(prevKey, step, results); - for (int y = 0; y < results.size(); y++) { - testMap.remove(results.get(y).getContainerID()); - } - counter += step; - long nextKey = results.get(results.size() - 1).getContainerID(); - - //Assert that container is returning results in a sorted fashion. - Assert.assertTrue(prevKey < nextKey); - prevKey = nextKey + 1; - results.clear(); - } - // Assert that we listed all the keys that we had put into - // container. - Assert.assertTrue(testMap.isEmpty()); - } - - private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException { - final int datalen = 1024; - long commitBytesBefore = 0; - long commitBytesAfter = 0; - long commitDecrement = 0; - long testContainerID = blockID.getContainerID(); - Container container = containerSet.getContainer(testContainerID); - if (container == null) { - container = addContainer(containerSet, testContainerID); - } - ChunkInfo info = getChunk( - blockID.getLocalID(), 0, 0, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - commitBytesBefore = container.getContainerData() - .getVolume().getCommittedBytes(); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - commitBytesAfter = container.getContainerData() - .getVolume().getCommittedBytes(); - commitDecrement = commitBytesBefore - commitBytesAfter; - // did we decrement commit bytes by the amount of data we wrote? - Assert.assertTrue(commitDecrement == info.getLen()); - return info; - - } - - /** - * Writes a single chunk. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testWriteChunk() throws IOException, - NoSuchAlgorithmException { - BlockID blockID = ContainerTestHelper. - getTestBlockID(getTestContainerID()); - writeChunkHelper(blockID); - } - - /** - * Writes many chunks of the same block into different chunk files and - * verifies that we have that data in many files. 
- * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testWritReadManyChunks() throws IOException { - final int datalen = 1024; - final int chunkCount = 1024; - - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - Map fileHashMap = new HashMap<>(); - for (int x = 0; x < chunkCount; x++) { - ChunkInfo info = getChunk(blockID.getLocalID(), x, 0, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - String fileName = String.format("%s.data.%d", blockID.getLocalID(), x); - fileHashMap.put(fileName, info); - } - - KeyValueContainerData cNewData = - (KeyValueContainerData) container.getContainerData(); - Assert.assertNotNull(cNewData); - Path dataDir = Paths.get(cNewData.getChunksPath()); - - String globFormat = String.format("%s.data.*", blockID.getLocalID()); - - // Read chunk via file system and verify. - int count = 0; - try (DirectoryStream stream = - Files.newDirectoryStream(dataDir, globFormat)) { - Checksum checksum = new Checksum(); - - for (Path fname : stream) { - ChecksumData checksumData = checksum - .computeChecksum(FileUtils.readFileToByteArray(fname.toFile())); - Assert.assertEquals(fileHashMap.get(fname.getFileName().toString()) - .getChecksumData(), checksumData); - count++; - } - Assert.assertEquals(chunkCount, count); - - // Read chunk via ReadChunk call. - for (int x = 0; x < chunkCount; x++) { - String fileName = String.format("%s.data.%d", blockID.getLocalID(), x); - ChunkInfo info = fileHashMap.get(fileName); - ByteBuffer data = chunkManager - .readChunk(container, blockID, info, getDispatcherContext()); - ChecksumData checksumData = checksum.computeChecksum(data); - Assert.assertEquals(info.getChecksumData(), checksumData); - } - } - } - - /** - * Test partial within a single chunk. - * - * @throws IOException - */ - @Test - public void testPartialRead() throws Exception { - final int datalen = 1024; - final int start = datalen / 4; - final int length = datalen / 2; - - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = getChunk( - blockID.getLocalID(), 0, 0, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - - ByteBuffer readData = chunkManager - .readChunk(container, blockID, info, getDispatcherContext()); - assertTrue(data.rewind().equals(readData.rewind())); - - ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length); - ByteBuffer readData2 = chunkManager - .readChunk(container, blockID, info2, getDispatcherContext()); - assertEquals(length, info2.getLen()); - boolean equals = - data.position(start).limit(start+length).equals(readData2.rewind()); - assertTrue(equals); - } - - /** - * Writes a single chunk and tries to overwrite that chunk without over write - * flag then re-tries with overwrite flag. 
- * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testOverWrite() throws IOException, - NoSuchAlgorithmException { - final int datalen = 1024; - - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = getChunk( - blockID.getLocalID(), 0, 0, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - data.rewind(); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - data.rewind(); - // With the overwrite flag it should work now. - info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true"); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - long bytesUsed = container.getContainerData().getBytesUsed(); - Assert.assertEquals(datalen, bytesUsed); - - long bytesWrite = container.getContainerData().getWriteBytes(); - Assert.assertEquals(datalen * 3, bytesWrite); - } - - /** - * This test writes data as many small writes and tries to read back the data - * in a single large read. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testMultipleWriteSingleRead() throws IOException, - NoSuchAlgorithmException { - final int datalen = 1024; - final int chunkCount = 1024; - - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - for (int x = 0; x < chunkCount; x++) { - // we are writing to the same chunk file but at different offsets. - long offset = x * datalen; - ChunkInfo info = getChunk( - blockID.getLocalID(), 0, offset, datalen); - ByteBuffer data = getData(datalen); - oldSha.update(data); - data.rewind(); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - } - - // Request to read the whole data in a single go. - ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, - datalen * chunkCount); - ByteBuffer newdata = - chunkManager.readChunk(container, blockID, largeChunk, - getDispatcherContext()); - MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); - newSha.update(newdata); - Assert.assertEquals(Hex.encodeHexString(oldSha.digest()), - Hex.encodeHexString(newSha.digest())); - } - - /** - * Writes a chunk and deletes it, re-reads to make sure it is gone. 
- * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testDeleteChunk() throws IOException, - NoSuchAlgorithmException { - final int datalen = 1024; - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = getChunk( - blockID.getLocalID(), 0, 0, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - chunkManager.deleteChunk(container, blockID, info); - exception.expect(StorageContainerException.class); - exception.expectMessage("Unable to find the chunk file."); - chunkManager.readChunk(container, blockID, info, getDispatcherContext()); - } - - /** - * Tests a put block and read block. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testPutBlock() throws IOException, NoSuchAlgorithmException { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = writeChunkHelper(blockID); - BlockData blockData = new BlockData(blockID); - List chunkList = new LinkedList<>(); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(container, blockData); - BlockData readBlockData = blockManager. - getBlock(container, blockData.getBlockID()); - ChunkInfo readChunk = - ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0)); - Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData()); - } - - /** - * Tests a put block and read block with invalid bcsId. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testPutBlockWithInvalidBCSId() - throws IOException, NoSuchAlgorithmException { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - - BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = writeChunkHelper(blockID1); - BlockData blockData = new BlockData(blockID1); - List chunkList = new LinkedList<>(); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockData.setBlockCommitSequenceId(3); - blockManager.putBlock(container, blockData); - chunkList.clear(); - - // write a 2nd block - BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID); - info = writeChunkHelper(blockID2); - blockData = new BlockData(blockID2); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockData.setBlockCommitSequenceId(4); - blockManager.putBlock(container, blockData); - BlockData readBlockData; - try { - blockID1.setBlockCommitSequenceId(5); - // read with bcsId higher than container bcsId - blockManager. - getBlock(container, blockID1); - Assert.fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - Assert.assertTrue(sce.getResult() == UNKNOWN_BCSID); - } - - try { - blockID1.setBlockCommitSequenceId(4); - // read with bcsId lower than container bcsId but greater than committed - // bcsId. - blockManager. - getBlock(container, blockID1); - Assert.fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - Assert.assertTrue(sce.getResult() == BCSID_MISMATCH); - } - readBlockData = blockManager. 
- getBlock(container, blockData.getBlockID()); - ChunkInfo readChunk = - ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0)); - Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData()); - } - - /** - * Tests a put block and read block. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testPutBlockWithLotsOfChunks() throws IOException, - NoSuchAlgorithmException { - final int chunkCount = 2; - final int datalen = 1024; - long totalSize = 0L; - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - List chunkList = new LinkedList<>(); - ChunkInfo info = writeChunkHelper(blockID); - totalSize += datalen; - chunkList.add(info); - for (int x = 1; x < chunkCount; x++) { - // with holes in the front (before x * datalen) - info = getChunk(blockID.getLocalID(), x, x * datalen, datalen); - ByteBuffer data = getData(datalen); - setDataChecksum(info, data); - chunkManager.writeChunk(container, blockID, info, data, - getDispatcherContext()); - totalSize += datalen; - chunkList.add(info); - } - - long bytesUsed = container.getContainerData().getBytesUsed(); - Assert.assertEquals(totalSize, bytesUsed); - long writeBytes = container.getContainerData().getWriteBytes(); - Assert.assertEquals(chunkCount * datalen, writeBytes); - long readCount = container.getContainerData().getReadCount(); - Assert.assertEquals(0, readCount); - long writeCount = container.getContainerData().getWriteCount(); - Assert.assertEquals(chunkCount, writeCount); - - BlockData blockData = new BlockData(blockID); - List chunkProtoList = new LinkedList<>(); - for (ChunkInfo i : chunkList) { - chunkProtoList.add(i.getProtoBufMessage()); - } - blockData.setChunks(chunkProtoList); - blockManager.putBlock(container, blockData); - BlockData readBlockData = blockManager. - getBlock(container, blockData.getBlockID()); - ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1); - ChunkInfo readChunk = - ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData - .getChunks().size() - 1)); - Assert.assertEquals( - lastChunk.getChecksumData(), readChunk.getChecksumData()); - } - - /** - * Deletes a block and tries to read it back. - * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testDeleteBlock() throws IOException, NoSuchAlgorithmException { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = writeChunkHelper(blockID); - BlockData blockData = new BlockData(blockID); - List chunkList = new LinkedList<>(); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(container, blockData); - blockManager.deleteBlock(container, blockID); - exception.expect(StorageContainerException.class); - exception.expectMessage("Unable to find the block."); - blockManager.getBlock(container, blockData.getBlockID()); - } - - /** - * Tries to Deletes a block twice. 
- * - * @throws IOException - * @throws NoSuchAlgorithmException - */ - @Test - public void testDeleteBlockTwice() throws IOException, - NoSuchAlgorithmException { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID); - ChunkInfo info = writeChunkHelper(blockID); - BlockData blockData = new BlockData(blockID); - List chunkList = new LinkedList<>(); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - blockManager.putBlock(container, blockData); - blockManager.deleteBlock(container, blockID); - exception.expect(StorageContainerException.class); - exception.expectMessage("Unable to find the block."); - blockManager.deleteBlock(container, blockID); - } - - /** - * Tries to update an existing and non-existing container. Verifies container - * map and persistent data both updated. - * - * @throws IOException - */ - @Test - public void testUpdateContainer() throws IOException { - long testContainerID = ContainerTestHelper.getTestContainerID(); - KeyValueContainer container = - (KeyValueContainer) addContainer(containerSet, testContainerID); - - File orgContainerFile = container.getContainerFile(); - Assert.assertTrue(orgContainerFile.exists()); - - Map newMetadata = Maps.newHashMap(); - newMetadata.put("VOLUME", "shire_new"); - newMetadata.put("owner", "bilbo_new"); - - container.update(newMetadata, false); - - Assert.assertEquals(1, containerSet.getContainerMapCopy().size()); - Assert.assertTrue(containerSet.getContainerMapCopy() - .containsKey(testContainerID)); - - // Verify in-memory map - KeyValueContainerData actualNewData = (KeyValueContainerData) - containerSet.getContainer(testContainerID).getContainerData(); - Assert.assertEquals("shire_new", - actualNewData.getMetadata().get("VOLUME")); - Assert.assertEquals("bilbo_new", - actualNewData.getMetadata().get("owner")); - - // Verify container data on disk - File containerBaseDir = new File(actualNewData.getMetadataPath()) - .getParentFile(); - File newContainerFile = ContainerUtils.getContainerFile(containerBaseDir); - Assert.assertTrue("Container file should exist.", - newContainerFile.exists()); - Assert.assertEquals("Container file should be in same location.", - orgContainerFile.getAbsolutePath(), - newContainerFile.getAbsolutePath()); - - ContainerData actualContainerData = ContainerDataYaml.readContainerFile( - newContainerFile); - Assert.assertEquals("shire_new", - actualContainerData.getMetadata().get("VOLUME")); - Assert.assertEquals("bilbo_new", - actualContainerData.getMetadata().get("owner")); - - - // Test force update flag. - // Close the container and then try to update without force update flag. - container.close(); - try { - container.update(newMetadata, false); - } catch (StorageContainerException ex) { - Assert.assertEquals("Updating a closed container without " + - "force option is not allowed. ContainerID: " + - testContainerID, ex.getMessage()); - } - - // Update with force flag, it should be success. 
- newMetadata.put("VOLUME", "shire_new_1"); - newMetadata.put("owner", "bilbo_new_1"); - container.update(newMetadata, true); - - // Verify in-memory map - actualNewData = (KeyValueContainerData) - containerSet.getContainer(testContainerID).getContainerData(); - Assert.assertEquals("shire_new_1", - actualNewData.getMetadata().get("VOLUME")); - Assert.assertEquals("bilbo_new_1", - actualNewData.getMetadata().get("owner")); - - } - - private BlockData writeBlockHelper(BlockID blockID, int i) - throws IOException, NoSuchAlgorithmException { - ChunkInfo info = writeChunkHelper(blockID); - BlockData blockData = new BlockData(blockID); - blockData.setBlockCommitSequenceId((long) i); - List chunkList = new LinkedList<>(); - chunkList.add(info.getProtoBufMessage()); - blockData.setChunks(chunkList); - return blockData; - } - - @Test - public void testListBlock() throws Exception { - long testContainerID = getTestContainerID(); - Container container = addContainer(containerSet, testContainerID); - List expectedBlocks = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - BlockID blockID = new BlockID(testContainerID, i); - expectedBlocks.add(blockID); - BlockData kd = writeBlockHelper(blockID, i); - blockManager.putBlock(container, kd); - } - - // List all blocks - List result = blockManager.listBlock( - container, 0, 100); - Assert.assertEquals(10, result.size()); - - int index = 0; - for (int i = index; i < result.size(); i++) { - BlockData data = result.get(i); - Assert.assertEquals(testContainerID, data.getContainerID()); - Assert.assertEquals(expectedBlocks.get(i).getLocalID(), - data.getLocalID()); - index++; - } - - // List block with startBlock filter - long k6 = expectedBlocks.get(6).getLocalID(); - result = blockManager.listBlock(container, k6, 100); - - Assert.assertEquals(4, result.size()); - for (int i = 6; i < 10; i++) { - Assert.assertEquals(expectedBlocks.get(i).getLocalID(), - result.get(i - 6).getLocalID()); - } - - // Count must be >0 - exception.expect(IllegalArgumentException.class); - exception.expectMessage("Count must be a positive number."); - blockManager.listBlock(container, 0, -1); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java deleted file mode 100644 index c7b799264be..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import com.google.common.primitives.Longs; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; -import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.event.Level; - -import java.io.File; -import java.io.IOException; -import java.util.HashMap; -import java.util.Set; -import java.util.List; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.lang.Math.max; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds - .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.ozone - .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; - -/** - * Tests for Block deletion. 
- */ -public class TestBlockDeletion { - private static OzoneConfiguration conf = null; - private static ObjectStore store; - private static MiniOzoneCluster cluster = null; - private static StorageContainerManager scm = null; - private static OzoneManager om = null; - private static Set containerIdsWithDeletedBlocks; - private static long maxTransactionId = 0; - - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); - - String path = - GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName()); - File baseDir = new File(path); - baseDir.mkdirs(); - - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, - 3, TimeUnit.SECONDS); - conf.setQuietMode(false); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .setHbInterval(200) - .build(); - cluster.waitForClusterToBeReady(); - store = OzoneClientFactory.getRpcClient(conf).getObjectStore(); - om = cluster.getOzoneManager(); - scm = cluster.getStorageContainerManager(); - containerIdsWithDeletedBlocks = new HashSet<>(); - } - - @AfterClass - public static void cleanup() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testBlockDeletion() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = RandomStringUtils.random(10000000); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, - ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - for (int i = 0; i < 100; i++) { - out.write(value.getBytes()); - } - out.close(); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName).setDataSize(0) - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setRefreshPipeline(true) - .build(); - List omKeyLocationInfoGroupList = - om.lookupKey(keyArgs).getKeyLocationVersions(); - - // verify key blocks were created in DN. - verifyBlocksCreated(omKeyLocationInfoGroupList); - // No containers with deleted blocks - Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty()); - // Delete transactionIds for the containers should be 0. - // NOTE: this test assumes that all the container is KetValueContainer. If - // other container types is going to be added, this test should be checked. 
- matchContainerTransactionIds(); - om.deleteKey(keyArgs); - Thread.sleep(5000); - // The blocks should not be deleted in the DN as the container is open - try { - verifyBlocksDeleted(omKeyLocationInfoGroupList); - Assert.fail("Blocks should not have been deleted"); - } catch (Throwable e) { - Assert.assertTrue(e.getMessage().contains("expected null, but was")); - Assert.assertEquals(e.getClass(), AssertionError.class); - } - - // close the containers which hold the blocks for the key - OzoneTestUtils.closeContainers(omKeyLocationInfoGroupList, scm); - - waitForDatanodeCommandRetry(); - - // make sure the containers are closed on the dn - omKeyLocationInfoGroupList.forEach((group) -> { - List locationInfo = group.getLocationList(); - locationInfo.forEach( - (info) -> cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet() - .getContainer(info.getContainerID()).getContainerData() - .setState(ContainerProtos.ContainerDataProto.State.CLOSED)); - }); - waitForDatanodeBlockDeletionStart(); - // The blocks should be deleted in the DN. - verifyBlocksDeleted(omKeyLocationInfoGroupList); - - // Few containers with deleted blocks - Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty()); - // Containers in the DN and SCM should have same delete transactionIds - matchContainerTransactionIds(); - // Containers in the DN and SCM should have same delete transactionIds - // after DN restart. The assertion is just to verify that the state of - // containerInfos in dn and scm is consistent after dn restart. - cluster.restartHddsDatanode(0, true); - matchContainerTransactionIds(); - - // verify PENDING_DELETE_STATUS event is fired - verifyPendingDeleteEvent(); - - // Verify transactions committed - verifyTransactionsCommitted(); - } - - private void waitForDatanodeBlockDeletionStart() - throws TimeoutException, InterruptedException { - LogCapturer logCapturer = - LogCapturer.captureLogs(DeleteBlocksCommandHandler.LOG); - logCapturer.clearOutput(); - GenericTestUtils.waitFor(() -> logCapturer.getOutput() - .contains("Start to delete container block"), - 500, 10000); - Thread.sleep(1000); - } - - /** - * Waits for datanode command to be retried when datanode is dead. 
- */ - private void waitForDatanodeCommandRetry() - throws TimeoutException, InterruptedException { - cluster.shutdownHddsDatanode(0); - LogCapturer logCapturer = - LogCapturer.captureLogs(RetriableDatanodeEventWatcher.LOG); - logCapturer.clearOutput(); - GenericTestUtils.waitFor(() -> logCapturer.getOutput() - .contains("RetriableDatanodeCommand type=deleteBlocksCommand"), - 500, 5000); - cluster.restartHddsDatanode(0, true); - } - - private void verifyTransactionsCommitted() throws IOException { - DeletedBlockLogImpl deletedBlockLog = - (DeletedBlockLogImpl) scm.getScmBlockManager().getDeletedBlockLog(); - for (long txnID = 1; txnID <= maxTransactionId; txnID++) { - Assert.assertNull( - scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID)); - } - } - - private void verifyPendingDeleteEvent() - throws IOException, InterruptedException { - ContainerSet dnContainerSet = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet(); - LogCapturer logCapturer = - LogCapturer.captureLogs(SCMBlockDeletingService.LOG); - // Create dummy container reports with deleteTransactionId set as 0 - ContainerReportsProto containerReport = dnContainerSet.getContainerReport(); - ContainerReportsProto.Builder dummyReportsBuilder = - ContainerReportsProto.newBuilder(); - for (ContainerReplicaProto containerInfo : - containerReport.getReportsList()) { - dummyReportsBuilder.addReports( - ContainerReplicaProto.newBuilder(containerInfo) - .setDeleteTransactionId(0) - .build()); - } - ContainerReportsProto dummyReport = dummyReportsBuilder.build(); - - logCapturer.clearOutput(); - cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContext().addReport(dummyReport); - cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().triggerHeartbeat(); - // wait for event to be handled by event handler - Thread.sleep(1000); - String output = logCapturer.getOutput(); - for (ContainerReplicaProto containerInfo : dummyReport.getReportsList()) { - long containerId = containerInfo.getContainerID(); - // Event should be triggered only for containers which have deleted blocks - if (containerIdsWithDeletedBlocks.contains(containerId)) { - Assert.assertTrue(output.contains( - "for containerID " + containerId + ". Datanode delete txnID")); - } else { - Assert.assertTrue(!output.contains( - "for containerID " + containerId + ". 
Datanode delete txnID")); - } - } - logCapturer.clearOutput(); - } - - private void matchContainerTransactionIds() throws IOException { - ContainerSet dnContainerSet = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet(); - List containerDataList = new ArrayList<>(); - dnContainerSet.listContainer(0, 10000, containerDataList); - for (ContainerData containerData : containerDataList) { - long containerId = containerData.getContainerID(); - if (containerIdsWithDeletedBlocks.contains(containerId)) { - Assert.assertTrue( - scm.getContainerInfo(containerId).getDeleteTransactionId() > 0); - maxTransactionId = max(maxTransactionId, - scm.getContainerInfo(containerId).getDeleteTransactionId()); - } else { - Assert.assertEquals( - scm.getContainerInfo(containerId).getDeleteTransactionId(), 0); - } - Assert.assertEquals(((KeyValueContainerData)dnContainerSet - .getContainer(containerId).getContainerData()) - .getDeleteTransactionId(), - scm.getContainerInfo(containerId).getDeleteTransactionId()); - } - } - - private void verifyBlocksCreated( - List omKeyLocationInfoGroups) throws Exception { - ContainerSet dnContainerSet = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet(); - OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { - try(ReferenceCountedDB db = - BlockUtils.getDB((KeyValueContainerData) dnContainerSet - .getContainer(blockID.getContainerID()).getContainerData(), conf)) { - Assert.assertNotNull(db.getStore().get( - Longs.toByteArray(blockID.getLocalID()))); - } - }, omKeyLocationInfoGroups); - } - - private void verifyBlocksDeleted( - List omKeyLocationInfoGroups) throws Exception { - ContainerSet dnContainerSet = - cluster.getHddsDatanodes().get(0).getDatanodeStateMachine() - .getContainer().getContainerSet(); - OzoneTestUtils.performOperationOnKeyContainers((blockID) -> { - try(ReferenceCountedDB db = - BlockUtils.getDB((KeyValueContainerData) dnContainerSet - .getContainer(blockID.getContainerID()).getContainerData(), conf)) { - Assert.assertNull(db.getStore().get( - Longs.toByteArray(blockID.getLocalID()))); - Assert.assertNull(db.getStore().get(DFSUtil.string2Bytes( - OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()))); - Assert.assertNotNull(DFSUtil.string2Bytes( - OzoneConsts.DELETED_KEY_PREFIX + blockID.getLocalID())); - } - containerIdsWithDeletedBlocks.add(blockID.getContainerID()); - }, omKeyLocationInfoGroups); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java deleted file mode 100644 index b676e1c9676..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.concurrent.TimeoutException; - -/** - * Test container closing. - */ -public class TestCloseContainerByPipeline { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - objectStore.createVolume("test"); - objectStore.getVolume("test").createBucket("test"); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { - String keyName = "testIfCloseContainerCommandHandlerIsInvoked"; - OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, - new HashMap<>()); - key.write(keyName.getBytes()); - key.close(); - - //get the name of a valid container - OmKeyArgs keyArgs = - new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) - .setKeyName(keyName).setRefreshPipeline(true).build(); - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - long containerID = omKeyLocationInfo.getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); - - DatanodeDetails datanodeDetails = datanodes.get(0); - HddsDatanodeService datanodeService = null; - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); - for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) { - if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) { - datanodeService = datanodeServiceItr; - break; - } - } - CommandHandler closeContainerHandler = - datanodeService.getDatanodeStateMachine().getCommandDispatcher() - .getCloseContainerHandler(); - int lastInvocationCount = closeContainerHandler.getInvocationCount(); - //send the order to close the container - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(datanodeDetails.getUuid(), - new CloseContainerCommand(containerID, pipeline.getId())); - GenericTestUtils - .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails), - 500, 5 * 1000); - // Make sure the closeContainerCommandHandler is Invoked - Assert.assertTrue( - closeContainerHandler.getInvocationCount() > lastInvocationCount); - } - - @Test - public void testCloseContainerViaStandAlone() - throws IOException, TimeoutException, InterruptedException { - - OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey("standalone", 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - key.write("standalone".getBytes()); - key.close(); - - //get the name of a valid container - OmKeyArgs keyArgs = - new 
OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) - .setKeyName("standalone") - .setRefreshPipeline(true) - .build(); - - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - long containerID = omKeyLocationInfo.getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); - - DatanodeDetails datanodeDetails = datanodes.get(0); - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); - - // Send the order to close the container, give random pipeline id so that - // the container will not be closed via RATIS - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(datanodeDetails.getUuid(), - new CloseContainerCommand(containerID, pipeline.getId())); - - //double check if it's really closed (waitFor also throws an exception) - // TODO: change the below line after implementing QUASI_CLOSED to CLOSED - // logic. The container will be QUASI closed as of now - GenericTestUtils - .waitFor(() -> isContainerClosed(cluster, containerID, datanodeDetails), - 500, 5 * 1000); - Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); - - cluster.getStorageContainerManager().getPipelineManager() - .finalizeAndDestroyPipeline(pipeline, false); - Thread.sleep(5000); - // Pipeline close should not affect a container in CLOSED state - Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); - } - - @Test - public void testCloseContainerViaRatis() throws IOException, - TimeoutException, InterruptedException { - - OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey("ratis", 1024, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>()); - key.write("ratis".getBytes()); - key.close(); - - //get the name of a valid container - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName("test"). 
- setBucketName("test").setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.THREE).setDataSize(1024) - .setKeyName("ratis").setRefreshPipeline(true).build(); - - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - long containerID = omKeyLocationInfo.getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(3, datanodes.size()); - - List metadataStores = new ArrayList<>(datanodes.size()); - for (DatanodeDetails details : datanodes) { - Assert.assertFalse(isContainerClosed(cluster, containerID, details)); - //send the order to close the container - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(details.getUuid(), - new CloseContainerCommand(containerID, pipeline.getId())); - int index = cluster.getHddsDatanodeIndex(details); - Container dnContainer = cluster.getHddsDatanodes().get(index) - .getDatanodeStateMachine().getContainer().getContainerSet() - .getContainer(containerID); - try(ReferenceCountedDB store = BlockUtils.getDB( - (KeyValueContainerData) dnContainer.getContainerData(), conf)) { - metadataStores.add(store); - } - } - - // There should be as many rocks db as the number of datanodes in pipeline. - Assert.assertEquals(datanodes.size(), - metadataStores.stream().distinct().count()); - - // Make sure that it is CLOSED - for (DatanodeDetails datanodeDetails : datanodes) { - GenericTestUtils.waitFor( - () -> isContainerClosed(cluster, containerID, datanodeDetails), 500, - 15 * 1000); - //double check if it's really closed (waitFor also throws an exception) - Assert.assertTrue(isContainerClosed(cluster, - containerID, datanodeDetails)); - } - } - - @Test - public void testQuasiCloseTransitionViaRatis() - throws IOException, TimeoutException, InterruptedException { - - String keyName = "testQuasiCloseTransitionViaRatis"; - OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey(keyName, 1024, ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - key.write(keyName.getBytes()); - key.close(); - - OmKeyArgs keyArgs = - new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - long containerID = omKeyLocationInfo.getContainerID(); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - List datanodes = pipeline.getNodes(); - Assert.assertEquals(datanodes.size(), 1); - - DatanodeDetails datanodeDetails = datanodes.get(0); - Assert - .assertFalse(isContainerClosed(cluster, containerID, datanodeDetails)); - - // close the pipeline - cluster.getStorageContainerManager() - .getPipelineManager().finalizeAndDestroyPipeline(pipeline, false); - - // All the 
containers in OPEN or CLOSING state should transition to - // QUASI-CLOSED after pipeline close - GenericTestUtils.waitFor( - () -> isContainerQuasiClosed(cluster, containerID, datanodeDetails), - 500, 5 * 1000); - Assert.assertTrue( - isContainerQuasiClosed(cluster, containerID, datanodeDetails)); - - // Send close container command from SCM to datanode with forced flag as - // true - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(datanodeDetails.getUuid(), - new CloseContainerCommand(containerID, pipeline.getId(), true)); - GenericTestUtils - .waitFor(() -> isContainerClosed( - cluster, containerID, datanodeDetails), 500, 5 * 1000); - Assert.assertTrue( - isContainerClosed(cluster, containerID, datanodeDetails)); - } - - private Boolean isContainerClosed(MiniOzoneCluster ozoneCluster, - long containerID, - DatanodeDetails datanode) { - ContainerData containerData; - for (HddsDatanodeService datanodeService : ozoneCluster - .getHddsDatanodes()) { - if (datanode.equals(datanodeService.getDatanodeDetails())) { - containerData = - datanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID).getContainerData(); - return containerData.isClosed(); - } - } - return false; - } - - private Boolean isContainerQuasiClosed(MiniOzoneCluster miniCluster, - long containerID, DatanodeDetails datanode) { - ContainerData containerData; - for (HddsDatanodeService datanodeService : miniCluster.getHddsDatanodes()) { - if (datanode.equals(datanodeService.getDatanodeDetails())) { - containerData = - datanodeService.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID).getContainerData(); - return containerData.isQuasiClosed(); - } - } - return false; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java deleted file mode 100644 index 5c7f2c1a9fe..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import java.util.HashMap; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test to behaviour of the datanode when receive close container command. - */ -public class TestCloseContainerHandler { - - private MiniOzoneCluster cluster; - private OzoneConfiguration conf; - - @Before - public void setup() throws Exception { - //setup a cluster (1G free space is enough for a unit test) - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); - } - - @After - public void teardown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void test() throws Exception { - cluster.waitForClusterToBeReady(); - - //the easiest way to create an open container is creating a key - OzoneClient client = OzoneClientFactory.getClient(conf); - ObjectStore objectStore = client.getObjectStore(); - objectStore.createVolume("test"); - objectStore.getVolume("test").createBucket("test"); - OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey("test", 1024, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - key.write("test".getBytes()); - key.close(); - - //get the name of a valid container - OmKeyArgs keyArgs = - new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) - .setKeyName("test") - .setRefreshPipeline(true) - .build(); - - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - ContainerID containerId = ContainerID.valueof( - omKeyLocationInfo.getContainerID()); - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(containerId); - Pipeline pipeline = cluster.getStorageContainerManager() - 
.getPipelineManager().getPipeline(container.getPipelineID()); - - Assert.assertFalse(isContainerClosed(cluster, containerId.getId())); - - DatanodeDetails datanodeDetails = - cluster.getHddsDatanodes().get(0).getDatanodeDetails(); - //send the order to close the container - cluster.getStorageContainerManager().getScmNodeManager() - .addDatanodeCommand(datanodeDetails.getUuid(), - new CloseContainerCommand(containerId.getId(), pipeline.getId())); - - GenericTestUtils.waitFor(() -> - isContainerClosed(cluster, containerId.getId()), - 500, - 5 * 1000); - - //double check if it's really closed (waitFor also throws an exception) - Assert.assertTrue(isContainerClosed(cluster, containerId.getId())); - } - - private static Boolean isContainerClosed(MiniOzoneCluster cluster, - long containerID) { - ContainerData containerData; - containerData = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - return !containerData.isOpen(); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java deleted file mode 100644 index 1cbf69ef6d1..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ /dev/null @@ -1,279 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.HashMap; -import java.util.UUID; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; - -/** - * Tests DeleteContainerCommand Handler. - */ -public class TestDeleteContainerHandler { - - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static ObjectStore objectStore; - private static String volumeName = UUID.randomUUID().toString(); - private static String bucketName = UUID.randomUUID().toString(); - - @BeforeClass - public static void setup() throws Exception { - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - - OzoneClient client = OzoneClientFactory.getClient(conf); - objectStore = client.getObjectStore(); - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - @AfterClass - public static void shutdown() { - if (cluster != null) { - try { - cluster.shutdown(); - } catch (Exception e) { - // do nothing. 
- } - } - } - - @Test(timeout = 60000) - public void testDeleteContainerRequestHandlerOnClosedContainer() - throws Exception { - - //the easiest way to create an open container is creating a key - - String keyName = UUID.randomUUID().toString(); - - // create key - createKey(keyName); - - // get containerID of the key - ContainerID containerId = getContainerID(keyName); - - ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(containerId); - - Pipeline pipeline = cluster.getStorageContainerManager() - .getPipelineManager().getPipeline(container.getPipelineID()); - - // We need to close the container because delete container only happens - // on closed containers with force flag set to false. - - HddsDatanodeService hddsDatanodeService = - cluster.getHddsDatanodes().get(0); - - Assert.assertFalse(isContainerClosed(hddsDatanodeService, - containerId.getId())); - - DatanodeDetails datanodeDetails = hddsDatanodeService.getDatanodeDetails(); - - NodeManager nodeManager = - cluster.getStorageContainerManager().getScmNodeManager(); - - //send the order to close the container - nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), - new CloseContainerCommand(containerId.getId(), pipeline.getId())); - - GenericTestUtils.waitFor(() -> - isContainerClosed(hddsDatanodeService, containerId.getId()), - 500, 5 * 1000); - - //double check if it's really closed (waitFor also throws an exception) - Assert.assertTrue(isContainerClosed(hddsDatanodeService, - containerId.getId())); - - // Check container exists before sending delete container command - Assert.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - - // send delete container to the datanode - nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), - new DeleteContainerCommand(containerId.getId(), false)); - - GenericTestUtils.waitFor(() -> - isContainerDeleted(hddsDatanodeService, containerId.getId()), - 500, 5 * 1000); - - Assert.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - - } - - - @Test - public void testDeleteContainerRequestHandlerOnOpenContainer() - throws Exception { - - //the easiest way to create an open container is creating a key - String keyName = UUID.randomUUID().toString(); - - // create key - createKey(keyName); - - // get containerID of the key - ContainerID containerId = getContainerID(keyName); - - HddsDatanodeService hddsDatanodeService = - cluster.getHddsDatanodes().get(0); - DatanodeDetails datanodeDetails = - hddsDatanodeService.getDatanodeDetails(); - - NodeManager nodeManager = - cluster.getStorageContainerManager().getScmNodeManager(); - - // Send delete container command with force flag set to false. - nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), - new DeleteContainerCommand(containerId.getId(), false)); - - // Here it should not delete it, and the container should exist in the - // containerset - int count = 1; - // Checking for 5 seconds, whether it is containerSet, as after command - // is issued, giving some time for it to process. - while (!isContainerDeleted(hddsDatanodeService, containerId.getId())) { - Thread.sleep(1000); - count++; - if (count == 5) { - break; - } - } - - Assert.assertFalse(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - - - // Now delete container with force flag set to true. 
now it should delete - // container - - nodeManager.addDatanodeCommand(datanodeDetails.getUuid(), - new DeleteContainerCommand(containerId.getId(), true)); - - GenericTestUtils.waitFor(() -> - isContainerDeleted(hddsDatanodeService, containerId.getId()), - 500, 5 * 1000); - - Assert.assertTrue(isContainerDeleted(hddsDatanodeService, - containerId.getId())); - - } - - /** - * create a key with specified name. - * @param keyName - * @throws IOException - */ - private void createKey(String keyName) throws IOException { - OzoneOutputStream key = objectStore.getVolume(volumeName) - .getBucket(bucketName) - .createKey(keyName, 1024, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - key.write("test".getBytes()); - key.close(); - } - - /** - * Return containerID of the key. - * @param keyName - * @return ContainerID - * @throws IOException - */ - private ContainerID getContainerID(String keyName) throws IOException { - OmKeyArgs keyArgs = - new OmKeyArgs.Builder().setVolumeName(volumeName) - .setBucketName(bucketName) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - - OmKeyLocationInfo omKeyLocationInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - return ContainerID.valueof( - omKeyLocationInfo.getContainerID()); - } - - /** - * Checks whether is closed or not on a datanode. - * @param hddsDatanodeService - * @param containerID - * @return true - if container is closes, else returns false. - */ - private Boolean isContainerClosed(HddsDatanodeService hddsDatanodeService, - long containerID) { - ContainerData containerData; - containerData =hddsDatanodeService - .getDatanodeStateMachine().getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - return !containerData.isOpen(); - } - - /** - * Checks whether container is deleted from the datanode or not. - * @param hddsDatanodeService - * @param containerID - * @return true - if container is deleted, else returns false - */ - private Boolean isContainerDeleted(HddsDatanodeService hddsDatanodeService, - long containerID) { - Container container; - // if container is not in container set, it means container got deleted. - container = hddsDatanodeService - .getDatanodeStateMachine().getContainer().getContainerSet() - .getContainer(containerID); - return container == null; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java deleted file mode 100644 index 67bdc177b17..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Integration tests for the command handler's. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java deleted file mode 100644 index 3967c0ccf72..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License - */ - -package org.apache.hadoop.ozone.container.common.transport.server.ratis; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.ArrayList; - -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.*; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.transport.server - .XceiverServerSpi; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import static org.apache.ratis.rpc.SupportedRpcType.GRPC; -import static org.junit.Assert.assertTrue; - -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.util.function.CheckedBiConsumer; - -import java.util.Map; -import java.util.function.BiConsumer; - -import org.junit.Test; -import org.junit.Assert; - -/** - * This class tests the metrics of ContainerStateMachine. 
- */ -public class TestCSMMetrics { - static final String TEST_DIR = - GenericTestUtils.getTestDir("dfs").getAbsolutePath() - + File.separator; - @FunctionalInterface - interface CheckedBiFunction<LEFT, RIGHT, OUT, THROWABLE extends Throwable> { - OUT apply(LEFT left, RIGHT right) throws THROWABLE; - } - - @Test - public void testContainerStateMachineMetrics() throws Exception { - runContainerStateMachineMetrics(1, - (pipeline, conf) -> RatisTestHelper.initRatisConf(GRPC, conf), - XceiverClientRatis::newXceiverClientRatis, - TestCSMMetrics::newXceiverServerRatis, - (dn, p) -> RatisTestHelper.initXceiverServerRatis(GRPC, dn, p)); - } - - static void runContainerStateMachineMetrics( - int numDatanodes, - BiConsumer<Pipeline, OzoneConfiguration> initConf, - TestCSMMetrics.CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi, IOException> createClient, - TestCSMMetrics.CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi, IOException> createServer, - CheckedBiConsumer<DatanodeDetails, Pipeline, IOException> initServer) - throws Exception { - final List<XceiverServerSpi> servers = new ArrayList<>(); - XceiverClientSpi client = null; - String containerName = OzoneUtils.getRequestID(); - try { - final Pipeline pipeline = ContainerTestHelper.createPipeline( - numDatanodes); - final OzoneConfiguration conf = new OzoneConfiguration(); - initConf.accept(pipeline, conf); - - for (DatanodeDetails dn : pipeline.getNodes()) { - final XceiverServerSpi s = createServer.apply(dn, conf); - servers.add(s); - s.start(); - initServer.accept(dn, pipeline); - } - - client = createClient.apply(pipeline, conf); - client.connect(); - - // Before Read Chunk/Write Chunk - MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME + - RaftGroupId.valueOf(pipeline.getId().getId()).toString()); - assertCounter("NumWriteStateMachineOps", 0L, metric); - assertCounter("NumReadStateMachineOps", 0L, metric); - assertCounter("NumApplyTransactionOps", 0L, metric); - assertCounter("NumBytesWrittenCount", 0L, metric); - assertCounter("NumBytesCommittedCount", 0L, metric); - assertCounter("NumStartTransactionVerifyFailures", 0L, metric); - assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric); - assertCounter("WriteChunkNumOps", 0L, metric); - double applyTransactionLatency = getDoubleGauge( - "ApplyTransactionAvgTime", metric); - assertTrue(applyTransactionLatency == 0.0); - double writeStateMachineLatency = getDoubleGauge( - "WriteStateMachineDataAvgTime", metric); - assertTrue(writeStateMachineLatency == 0.0); - - // Write Chunk - BlockID blockID = ContainerTestHelper.getTestBlockID(ContainerTestHelper.
- getTestContainerID()); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper.getWriteChunkRequest( - pipeline, blockID, 1024); - ContainerCommandResponseProto response = - client.sendCommand(writeChunkRequest); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - metric = getMetrics(CSMMetrics.SOURCE_NAME + - RaftGroupId.valueOf(pipeline.getId().getId()).toString()); - assertCounter("NumWriteStateMachineOps", 1L, metric); - assertCounter("NumBytesWrittenCount", 1024L, metric); - assertCounter("NumApplyTransactionOps", 1L, metric); - assertCounter("NumBytesCommittedCount", 1024L, metric); - assertCounter("NumStartTransactionVerifyFailures", 0L, metric); - assertCounter("NumContainerNotOpenVerifyFailures", 0L, metric); - assertCounter("WriteChunkNumOps", 1L, metric); - - //Read Chunk - ContainerProtos.ContainerCommandRequestProto readChunkRequest = - ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest - .getWriteChunk()); - response = client.sendCommand(readChunkRequest); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - metric = getMetrics(CSMMetrics.SOURCE_NAME + - RaftGroupId.valueOf(pipeline.getId().getId()).toString()); - assertCounter("NumQueryStateMachineOps", 1L, metric); - assertCounter("NumApplyTransactionOps", 1L, metric); - applyTransactionLatency = getDoubleGauge( - "ApplyTransactionAvgTime", metric); - assertTrue(applyTransactionLatency > 0.0); - writeStateMachineLatency = getDoubleGauge( - "WriteStateMachineDataAvgTime", metric); - assertTrue(writeStateMachineLatency > 0.0); - - } finally { - if (client != null) { - client.close(); - } - servers.stream().forEach(XceiverServerSpi::stop); - } - } - - static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); - - final ContainerDispatcher dispatcher = new TestContainerDispatcher(); - return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, - new ContainerController(new ContainerSet(), Maps.newHashMap()), - null, null); - } - - private static class TestContainerDispatcher implements ContainerDispatcher { - /** - * Dispatches commands to container layer. 
- * - * @param msg - Command Request - * @return Command Response - */ - @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, - DispatcherContext context) { - return ContainerTestHelper.getCreateContainerResponse(msg); - } - - @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { - } - - @Override - public void init() { - } - - @Override - public void shutdown() { - } - - @Override - public Handler getHandler(ContainerProtos.ContainerType containerType) { - return null; - } - - @Override - public void setScmId(String scmId) { - - } - - @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java deleted file mode 100644 index 43c354c146e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.container.metrics; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -import com.google.common.collect.Maps; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.replication.GrpcReplicationService; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.util.Map; -import java.util.UUID; - -/** - * Test for metrics published by storage containers. 
- */ -public class TestContainerMetrics { - - private GrpcReplicationService createReplicationService( - ContainerController controller) { - return new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller)); - } - - @Test - public void testContainerMetrics() throws Exception { - XceiverServerGrpc server = null; - XceiverClientGrpc client = null; - long containerID = ContainerTestHelper.getTestContainerID(); - String path = GenericTestUtils.getRandomizedTempPath(); - - try { - final int interval = 1; - Pipeline pipeline = ContainerTestHelper - .createSingleNodePipeline(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, - interval); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path); - VolumeSet volumeSet = new VolumeSet( - datanodeDetails.getUuidString(), conf); - ContainerSet containerSet = new ContainerSet(); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()) - .thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, - volumeSet, handlers, context, metrics); - dispatcher.setScmId(UUID.randomUUID().toString()); - - server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null, - createReplicationService(new ContainerController( - containerSet, handlers))); - client = new XceiverClientGrpc(pipeline, conf); - - server.start(); - client.connect(); - - // Create container - ContainerCommandRequestProto request = ContainerTestHelper - .getCreateContainerRequest(containerID, pipeline); - ContainerCommandResponseProto response = client.sendCommand(request); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - // Write Chunk - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - ContainerTestHelper.getWriteChunkRequest( - pipeline, blockID, 1024); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper.getWriteChunkRequest( - pipeline, blockID, 1024); - response = client.sendCommand(writeChunkRequest); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - //Read Chunk - ContainerProtos.ContainerCommandRequestProto readChunkRequest = - ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest - .getWriteChunk()); - response = client.sendCommand(readChunkRequest); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - MetricsRecordBuilder containerMetrics = getMetrics( - "StorageContainerMetrics"); - assertCounter("NumOps", 3L, containerMetrics); - assertCounter("numCreateContainer", 1L, containerMetrics); - assertCounter("numWriteChunk", 1L, containerMetrics); - assertCounter("numReadChunk", 1L, containerMetrics); - assertCounter("bytesWriteChunk", 1024L, containerMetrics); 
- assertCounter("bytesReadChunk", 1024L, containerMetrics); - - String sec = interval + "s"; - Thread.sleep((interval + 1) * 1000); - assertQuantileGauges("WriteChunkNanos" + sec, containerMetrics); - } finally { - if (client != null) { - client.close(); - } - if (server != null) { - server.stop(); - } - // clean up volume dir - File file = new File(path); - if(file.exists()) { - FileUtil.fullyDelete(file); - } - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java deleted file mode 100644 index 70a88af645a..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ /dev/null @@ -1,574 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.mockito.Mockito; - -import java.util.*; -import java.util.concurrent.CompletableFuture; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; - -/** - * Tests ozone containers. - */ -public class TestOzoneContainer { - /** - * Set the timeout for every test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - - @Test - public void testCreateOzoneContainer() throws Exception { - long containerID = ContainerTestHelper.getTestContainerID(); - OzoneConfiguration conf = newOzoneConfiguration(); - OzoneContainer container = null; - MiniOzoneCluster cluster = null; - try { - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - // We don't start Ozone Container via data node, we will do it - // independently in our test path. - Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - StateContext context = Mockito.mock(StateContext.class); - DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class); - Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(dsm); - container = new OzoneContainer(datanodeDetails, conf, context, null); - //Set scmId and manually start ozone container. - container.start(UUID.randomUUID().toString()); - - XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf); - client.connect(); - createContainerForTesting(client, containerID); - } finally { - if (container != null) { - container.stop(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - } - - @Test - public void testOzoneContainerStart() throws Exception { - OzoneConfiguration conf = newOzoneConfiguration(); - MiniOzoneCluster cluster = null; - OzoneContainer container = null; - - try { - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - - Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - StateContext context = Mockito.mock(StateContext.class); - DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class); - Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(dsm); - container = new OzoneContainer(datanodeDetails, conf, - context, null); - - String scmId = UUID.randomUUID().toString(); - container.start(scmId); - try { - container.start(scmId); - } catch (Exception e) { - Assert.fail(); - } - - container.stop(); - try { - container.stop(); - } catch (Exception e) { - Assert.fail(); - } - - } finally { - if (container != null) { - container.stop(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - } - - - static OzoneConfiguration newOzoneConfiguration() { - final OzoneConfiguration conf = new OzoneConfiguration(); - return conf; - } - - @Test - public void testOzoneContainerViaDataNode() throws Exception { - MiniOzoneCluster cluster = null; - try { - long containerID = - ContainerTestHelper.getTestContainerID(); - OzoneConfiguration conf = newOzoneConfiguration(); - - // Start ozone container Via 
Datanode create. - - Pipeline pipeline = - ContainerTestHelper.createSingleNodePipeline(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setRandomContainerPort(false) - .build(); - cluster.waitForClusterToBeReady(); - - // This client talks to ozone container via datanode. - XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf); - - runTestOzoneContainerViaDataNode(containerID, client); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - static void runTestOzoneContainerViaDataNode( - long testContainerID, XceiverClientSpi client) throws Exception { - ContainerProtos.ContainerCommandRequestProto - request, writeChunkRequest, putBlockRequest, - updateRequest1, updateRequest2; - ContainerProtos.ContainerCommandResponseProto response, - updateResponse1, updateResponse2; - try { - client.connect(); - - Pipeline pipeline = client.getPipeline(); - createContainerForTesting(client, testContainerID); - writeChunkRequest = writeChunkForContainer(client, testContainerID, - 1024); - - // Read Chunk - request = ContainerTestHelper.getReadChunkRequest( - pipeline, writeChunkRequest.getWriteChunk()); - - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - // Put Block - putBlockRequest = ContainerTestHelper.getPutBlockRequest( - pipeline, writeChunkRequest.getWriteChunk()); - - - response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - // Get Block - request = ContainerTestHelper. - getBlockRequest(pipeline, putBlockRequest.getPutBlock()); - response = client.sendCommand(request); - int chunksCount = putBlockRequest.getPutBlock().getBlockData(). 
- getChunksCount(); - ContainerTestHelper.verifyGetBlock(request, response, chunksCount); - - - // Delete Block - request = - ContainerTestHelper.getDeleteBlockRequest( - pipeline, putBlockRequest.getPutBlock()); - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - //Delete Chunk - request = ContainerTestHelper.getDeleteChunkRequest( - pipeline, writeChunkRequest.getWriteChunk()); - - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - //Update an existing container - Map containerUpdate = new HashMap(); - containerUpdate.put("container_updated_key", "container_updated_value"); - updateRequest1 = ContainerTestHelper.getUpdateContainerRequest( - testContainerID, containerUpdate); - updateResponse1 = client.sendCommand(updateRequest1); - Assert.assertNotNull(updateResponse1); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - //Update an non-existing container - long nonExistingContinerID = - ContainerTestHelper.getTestContainerID(); - updateRequest2 = ContainerTestHelper.getUpdateContainerRequest( - nonExistingContinerID, containerUpdate); - updateResponse2 = client.sendCommand(updateRequest2); - Assert.assertEquals(ContainerProtos.Result.CONTAINER_NOT_FOUND, - updateResponse2.getResult()); - } finally { - if (client != null) { - client.close(); - } - } - } - - @Test - public void testBothGetandPutSmallFile() throws Exception { - MiniOzoneCluster cluster = null; - XceiverClientGrpc client = null; - try { - OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempFolder.getRoot().getPath()); - client = createClientForTesting(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setRandomContainerPort(false) - .build(); - cluster.waitForClusterToBeReady(); - long containerID = ContainerTestHelper.getTestContainerID(); - runTestBothGetandPutSmallFile(containerID, client); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - static void runTestBothGetandPutSmallFile( - long containerID, XceiverClientSpi client) throws Exception { - try { - client.connect(); - - createContainerForTesting(client, containerID); - - BlockID blockId = ContainerTestHelper.getTestBlockID(containerID); - final ContainerProtos.ContainerCommandRequestProto smallFileRequest - = ContainerTestHelper.getWriteSmallFileRequest( - client.getPipeline(), blockId, 1024); - ContainerProtos.ContainerCommandResponseProto response - = client.sendCommand(smallFileRequest); - Assert.assertNotNull(response); - - final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest - = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(), - smallFileRequest.getPutSmallFile().getBlock()); - response = client.sendCommand(getSmallFileRequest); - Assert.assertArrayEquals( - smallFileRequest.getPutSmallFile().getData().toByteArray(), - response.getGetSmallFile().getData().getData().toByteArray()); - } finally { - if (client != null) { - client.close(); - } - } - } - - - - @Test - public void testCloseContainer() throws Exception { - MiniOzoneCluster cluster = null; - XceiverClientGrpc client = null; - ContainerProtos.ContainerCommandResponseProto response; - ContainerProtos.ContainerCommandRequestProto - writeChunkRequest, putBlockRequest, request; - try { - - OzoneConfiguration conf = newOzoneConfiguration(); - 
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempFolder.getRoot().getPath()); - client = createClientForTesting(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setRandomContainerPort(false) - .build(); - cluster.waitForClusterToBeReady(); - client.connect(); - - long containerID = ContainerTestHelper.getTestContainerID(); - createContainerForTesting(client, containerID); - writeChunkRequest = writeChunkForContainer(client, containerID, - 1024); - - - putBlockRequest = ContainerTestHelper.getPutBlockRequest( - client.getPipeline(), writeChunkRequest.getWriteChunk()); - // Put block before closing. - response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - // Close the contianer. - request = ContainerTestHelper.getCloseContainer( - client.getPipeline(), containerID); - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - - // Assert that none of the write operations are working after close. - - // Write chunks should fail now. - - response = client.sendCommand(writeChunkRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, - response.getResult()); - - // Read chunk must work on a closed container. - request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(), - writeChunkRequest.getWriteChunk()); - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - - // Put block will fail on a closed container. - response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, - response.getResult()); - - // Get block must work on the closed container. - request = ContainerTestHelper.getBlockRequest(client.getPipeline(), - putBlockRequest.getPutBlock()); - response = client.sendCommand(request); - int chunksCount = putBlockRequest.getPutBlock().getBlockData() - .getChunksCount(); - ContainerTestHelper.verifyGetBlock(request, response, chunksCount); - - // Delete block must fail on a closed container. 
- request = - ContainerTestHelper.getDeleteBlockRequest(client.getPipeline(), - putBlockRequest.getPutBlock()); - response = client.sendCommand(request); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO, - response.getResult()); - } finally { - if (client != null) { - client.close(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - } - - @Test - public void testDeleteContainer() throws Exception { - MiniOzoneCluster cluster = null; - XceiverClientGrpc client = null; - ContainerProtos.ContainerCommandResponseProto response; - ContainerProtos.ContainerCommandRequestProto request, - writeChunkRequest, putBlockRequest; - try { - OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempFolder.getRoot().getPath()); - client = createClientForTesting(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setRandomContainerPort(false) - .build(); - cluster.waitForClusterToBeReady(); - client.connect(); - - long containerID = ContainerTestHelper.getTestContainerID(); - createContainerForTesting(client, containerID); - writeChunkRequest = writeChunkForContainer( - client, containerID, 1024); - - putBlockRequest = ContainerTestHelper.getPutBlockRequest( - client.getPipeline(), writeChunkRequest.getWriteChunk()); - // Put key before deleting. - response = client.sendCommand(putBlockRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - // Container cannot be deleted because force flag is set to false and - // the container is still open - request = ContainerTestHelper.getDeleteContainer( - client.getPipeline(), containerID, false); - response = client.sendCommand(request); - - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER, - response.getResult()); - - // Container can be deleted, by setting force flag, even with out closing - request = ContainerTestHelper.getDeleteContainer( - client.getPipeline(), containerID, true); - response = client.sendCommand(request); - - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, - response.getResult()); - - } finally { - if (client != null) { - client.close(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - } - - - // Runs a set of commands as Async calls and verifies that calls indeed worked - // as expected. - static void runAsyncTests( - long containerID, XceiverClientSpi client) throws Exception { - try { - client.connect(); - - createContainerForTesting(client, containerID); - final List computeResults = new LinkedList<>(); - int requestCount = 1000; - // Create a bunch of Async calls from this test. - for(int x = 0; x - response = client.sendCommandAsync(smallFileRequest).getResponse(); - computeResults.add(response); - } - - CompletableFuture combinedFuture = - CompletableFuture.allOf(computeResults.toArray( - new CompletableFuture[computeResults.size()])); - // Wait for all futures to complete. - combinedFuture.get(); - // Assert that all futures are indeed done. 
- for (CompletableFuture future : computeResults) { - Assert.assertTrue(future.isDone()); - } - } finally { - if (client != null) { - client.close(); - } - } - } - - @Test - public void testXcieverClientAsync() throws Exception { - MiniOzoneCluster cluster = null; - XceiverClientGrpc client = null; - try { - OzoneConfiguration conf = newOzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, - tempFolder.getRoot().getPath()); - client = createClientForTesting(conf); - cluster = MiniOzoneCluster.newBuilder(conf) - .setRandomContainerPort(false) - .build(); - cluster.waitForClusterToBeReady(); - long containerID = ContainerTestHelper.getTestContainerID(); - runAsyncTests(containerID, client); - } finally { - if (cluster != null) { - cluster.shutdown(); - } - } - } - - private static XceiverClientGrpc createClientForTesting( - OzoneConfiguration conf) throws Exception { - // Start ozone container Via Datanode create. - Pipeline pipeline = - ContainerTestHelper.createSingleNodePipeline(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - - // This client talks to ozone container via datanode. - return new XceiverClientGrpc(pipeline, conf); - } - - public static void createContainerForTesting(XceiverClientSpi client, - long containerID) throws Exception { - // Create container - ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getCreateContainerRequest( - containerID, client.getPipeline()); - ContainerProtos.ContainerCommandResponseProto response = - client.sendCommand(request); - Assert.assertNotNull(response); - } - - public static ContainerProtos.ContainerCommandRequestProto - writeChunkForContainer(XceiverClientSpi client, - long containerID, int dataLen) throws Exception { - // Write Chunk - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper.getWriteChunkRequest(client.getPipeline(), - blockID, dataLen); - ContainerProtos.ContainerCommandResponseProto response = - client.sendCommand(writeChunkRequest); - Assert.assertNotNull(response); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - return writeChunkRequest; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java deleted file mode 100644 index 8577156a40e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.util.function.CheckedBiConsumer; -import org.apache.ratis.util.CollectionUtils; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Tests ozone containers with Apache Ratis. - */ -@Ignore("Disabling Ratis tests for pipeline work.") -public class TestOzoneContainerRatis { - private static final Logger LOG = LoggerFactory.getLogger( - TestOzoneContainerRatis.class); - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - static OzoneConfiguration newOzoneConfiguration() { - return new OzoneConfiguration(); - } - - private static void runTestOzoneContainerViaDataNodeRatis( - RpcType rpc, int numNodes) throws Exception { - runTest("runTestOzoneContainerViaDataNodeRatis", rpc, numNodes, - TestOzoneContainer::runTestOzoneContainerViaDataNode); - } - - private static void runTest( - String testName, RpcType rpc, int numNodes, - CheckedBiConsumer test) - throws Exception { - LOG.info(testName + "(rpc=" + rpc + ", numNodes=" + numNodes); - - // create Ozone clusters - final OzoneConfiguration conf = newOzoneConfiguration(); - RatisTestHelper.initRatisConf(rpc, conf); - final MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numNodes) - .build(); - try { - cluster.waitForClusterToBeReady(); - - final String containerName = OzoneUtils.getRequestID(); - final List datanodes = cluster.getHddsDatanodes(); - final Pipeline pipeline = ContainerTestHelper.createPipeline( - CollectionUtils.as(datanodes, - HddsDatanodeService::getDatanodeDetails)); - LOG.info("pipeline=" + pipeline); - - // Create Ratis cluster -// final String ratisId = "ratis1"; -// final PipelineManager manager = RatisManagerImpl.newRatisManager(conf); -// manager.createPipeline(ratisId, pipeline.getNodes()); -// LOG.info("Created RatisCluster " + ratisId); -// -// // check Ratis cluster members -// final List dns = manager.getMembers(ratisId); -// Assert.assertEquals(pipeline.getNodes(), dns); -// -// // run test -// final XceiverClientSpi client = XceiverClientRatis -// .newXceiverClientRatis( -// pipeline, conf); -// test.accept(containerName, client); - } finally { - cluster.shutdown(); - } - } - - private static void runTestBothGetandPutSmallFileRatis( - RpcType rpc, int numNodes) throws Exception { - runTest("runTestBothGetandPutSmallFileRatis", rpc, numNodes, - TestOzoneContainer::runTestBothGetandPutSmallFile); - } - - @Test - public void testOzoneContainerViaDataNodeRatisGrpc() throws Exception { - runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.GRPC, 1); - runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.GRPC, 3); - } - - @Test - public void testOzoneContainerViaDataNodeRatisNetty() 
throws Exception { - runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.NETTY, 1); - runTestOzoneContainerViaDataNodeRatis(SupportedRpcType.NETTY, 3); - } - - @Test - public void testBothGetandPutSmallFileRatisNetty() throws Exception { - runTestBothGetandPutSmallFileRatis(SupportedRpcType.NETTY, 1); - runTestBothGetandPutSmallFileRatis(SupportedRpcType.NETTY, 3); - } - - @Test - public void testBothGetandPutSmallFileRatisGrpc() throws Exception { - runTestBothGetandPutSmallFileRatis(SupportedRpcType.GRPC, 1); - runTestBothGetandPutSmallFileRatis(SupportedRpcType.GRPC, 3); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java deleted file mode 100644 index 30a2593bc4c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.*; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT; -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * Tests ozone containers via secure grpc/netty. - */ -@RunWith(Parameterized.class) -@Ignore("TODO:HDDS-1157") -public class TestOzoneContainerWithTLS { - private final static Logger LOG = LoggerFactory.getLogger( - TestOzoneContainerWithTLS.class); - /** - * Set the timeout for every test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private OzoneBlockTokenSecretManager secretManager; - private CertificateClientTestImpl caClient; - private boolean blockTokenEnabled; - - public TestOzoneContainerWithTLS(boolean blockTokenEnabled) { - this.blockTokenEnabled = blockTokenEnabled; - } - - @Parameterized.Parameters - public static Collection enableBlockToken() { - return Arrays.asList(new Object[][] { - {false}, - {true} - }); - } - - @Before - public void setup() throws Exception { - conf = new OzoneConfiguration(); - String ozoneMetaPath = - GenericTestUtils.getTempPath("ozoneMeta"); - File ozoneMetaFile = new File(ozoneMetaPath); - conf.set(OZONE_METADATA_DIRS, ozoneMetaPath); - - FileUtil.fullyDelete(ozoneMetaFile); - String keyDirName = conf.get(HDDS_KEY_DIR_NAME, - HDDS_KEY_DIR_NAME_DEFAULT); - - File ozoneKeyDir = new File(ozoneMetaFile, keyDirName); - ozoneKeyDir.mkdirs(); - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.setBoolean(HddsConfigKeys.HDDS_GRPC_TLS_ENABLED, true); - - conf.setBoolean(HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT, true); - - long expiryTime = conf.getTimeDuration( - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME, - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT, - TimeUnit.MILLISECONDS); - - caClient = new CertificateClientTestImpl(conf); - secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - expiryTime, caClient.getCertificate(). - getSerialNumber().toString()); - } - - @Test - public void testCreateOzoneContainer() throws Exception { - LOG.info("testCreateOzoneContainer with TLS and blockToken enabled: {}", - blockTokenEnabled); - conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, - blockTokenEnabled); - - long containerID = ContainerTestHelper.getTestContainerID(); - OzoneContainer container = null; - System.out.println(System.getProperties().getProperty("java.library.path")); - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - try { - Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); - conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - - container = new OzoneContainer(dn, conf, getContext(dn), caClient); - //Set scmId and manually start ozone container. 
- container.start(UUID.randomUUID().toString()); - - XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf, - caClient.getCACertificate()); - - if (blockTokenEnabled) { - secretManager.start(caClient); - Token token = secretManager.generateToken( - "123", EnumSet.allOf( - HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - RandomUtils.nextLong()); - client.connect(token.encodeToUrlString()); - createSecureContainerForTesting(client, containerID, token); - } else { - createContainerForTesting(client, containerID); - client.connect(); - } - } finally { - if (container != null) { - container.stop(); - } - } - } - - public static void createContainerForTesting(XceiverClientSpi client, - long containerID) throws Exception { - ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getCreateContainerRequest( - containerID, client.getPipeline()); - ContainerProtos.ContainerCommandResponseProto response = - client.sendCommand(request); - Assert.assertNotNull(response); - } - - public static void createSecureContainerForTesting(XceiverClientSpi client, - long containerID, Token token) - throws Exception { - ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getCreateContainerSecureRequest( - containerID, client.getPipeline(), token); - ContainerProtos.ContainerCommandResponseProto response = - client.sendCommand(request); - Assert.assertNotNull(response); - } - - - private StateContext getContext(DatanodeDetails datanodeDetails) { - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - return context; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java deleted file mode 100644 index 1e78ec64977..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; -import java.util.concurrent.ThreadLocalRandom; -import java.util.stream.Collectors; - -/** - * Tests ozone containers with Apache Ratis. - */ -@Ignore("Disabling Ratis tests for pipeline work.") -public class TestRatisManager { - private static final Logger LOG = LoggerFactory.getLogger( - TestRatisManager.class); - - static OzoneConfiguration newOzoneConfiguration() { - return new OzoneConfiguration(); - } - - - /** Set the timeout for every test. */ - @Rule - public Timeout testTimeout = new Timeout(200_000); - - @Test - public void testTestRatisManagerGrpc() throws Exception { - runTestRatisManager(SupportedRpcType.GRPC); - } - - @Test - public void testTestRatisManagerNetty() throws Exception { - runTestRatisManager(SupportedRpcType.NETTY); - } - - private static void runTestRatisManager(RpcType rpc) throws Exception { - LOG.info("runTestRatisManager, rpc=" + rpc); - - // create Ozone clusters - final OzoneConfiguration conf = newOzoneConfiguration(); - RatisTestHelper.initRatisConf(rpc, conf); - final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) - .build(); - try { - cluster.waitForClusterToBeReady(); - - final List datanodes = cluster.getHddsDatanodes(); - final List datanodeDetailsSet = datanodes.stream() - .map(HddsDatanodeService::getDatanodeDetails).collect( - Collectors.toList()); - - //final RatisManager manager = RatisManager.newRatisManager(conf); - - final int[] idIndex = {3, 4, 5}; - for (int i = 0; i < idIndex.length; i++) { - final int previous = i == 0 ? 
0 : idIndex[i - 1]; - final List subIds = datanodeDetailsSet.subList( - previous, idIndex[i]); - - // Create Ratis cluster - final String ratisId = "ratis" + i; - //manager.createRatisCluster(ratisId, subIds); - LOG.info("Created RatisCluster " + ratisId); - - // check Ratis cluster members - //final List dns = manager.getMembers(ratisId); - //Assert.assertEquals(subIds, dns); - } - - // randomly close two of the clusters - final int chosen = ThreadLocalRandom.current().nextInt(idIndex.length); - LOG.info("chosen = " + chosen); - - for (int i = 0; i < idIndex.length; i++) { - if (i != chosen) { - final String ratisId = "ratis" + i; - //manager.closeRatisCluster(ratisId); - } - } - - // update datanodes - final String ratisId = "ratis" + chosen; - //manager.updatePipeline(ratisId, allIds); - - // check Ratis cluster members - //final List dns = manager.getMembers(ratisId); - //Assert.assertEquals(allIds, dns); - } finally { - cluster.shutdown(); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java deleted file mode 100644 index fca449bd75a..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.ozoneimpl; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.mockito.Mockito; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.security.PrivilegedAction; -import java.util.Arrays; -import java.util.Collection; -import java.util.EnumSet; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Tests ozone containers via secure grpc/netty. - */ -@RunWith(Parameterized.class) -public class TestSecureOzoneContainer { - private static final Logger LOG = LoggerFactory.getLogger( - TestSecureOzoneContainer.class); - /** - * Set the timeout for every test. 
- */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private SecurityConfig secConfig; - private Boolean requireBlockToken; - private Boolean hasBlockToken; - private Boolean blockTokeExpired; - private CertificateClientTestImpl caClient; - private OzoneBlockTokenSecretManager secretManager; - - - public TestSecureOzoneContainer(Boolean requireBlockToken, - Boolean hasBlockToken, Boolean blockTokenExpired) { - this.requireBlockToken = requireBlockToken; - this.hasBlockToken = hasBlockToken; - this.blockTokeExpired = blockTokenExpired; - } - - @Parameterized.Parameters - public static Collection blockTokenOptions() { - return Arrays.asList(new Object[][] { - {true, true, false}, - {true, true, true}, - {true, false, false}, - {false, true, false}, - {false, false, false}}); - } - - @Before - public void setup() throws Exception { - DefaultMetricsSystem.setMiniClusterMode(true); - conf = new OzoneConfiguration(); - String ozoneMetaPath = - GenericTestUtils.getTempPath("ozoneMeta"); - conf.set(OZONE_METADATA_DIRS, ozoneMetaPath); - secConfig = new SecurityConfig(conf); - caClient = new CertificateClientTestImpl(conf); - secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - 60 * 60 * 24, caClient.getCertificate(). - getSerialNumber().toString()); - } - - @Test - public void testCreateOzoneContainer() throws Exception { - LOG.info("Test case: requireBlockToken: {} hasBlockToken: {} " + - "blockTokenExpired: {}.", requireBlockToken, hasBlockToken, - blockTokeExpired); - conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, - requireBlockToken); - - long containerID = ContainerTestHelper.getTestContainerID(); - OzoneContainer container = null; - System.out.println(System.getProperties().getProperty("java.library.path")); - try { - Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); - conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline - .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) - .getValue()); - conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - container = new OzoneContainer(dn, conf, getContext(dn), caClient); - //Set scmId and manually start ozone container. - container.start(UUID.randomUUID().toString()); - - UserGroupInformation ugi = UserGroupInformation.createUserForTesting( - "user1", new String[] {"usergroup"}); - long expiryDate = (blockTokeExpired) ? 
- Time.now() - 60 * 60 * 2 : Time.now() + 60 * 60 * 24; - - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "cid:lud:bcsid", - EnumSet.allOf(AccessModeProto.class), - expiryDate, "1234", 128L); - - int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); - if (port == 0) { - port = secConfig.getConfiguration().getInt(OzoneConfigKeys - .DFS_CONTAINER_IPC_PORT, DFS_CONTAINER_IPC_PORT_DEFAULT); - } - secretManager.start(caClient); - Token token = secretManager.generateToken( - "123", EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong()); - if (hasBlockToken) { - ugi.addToken(token); - } - - ugi.doAs((PrivilegedAction) () -> { - try { - XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf); - client.connect(token.encodeToUrlString()); - if (hasBlockToken) { - createContainerForTesting(client, containerID, token); - } else { - createContainerForTesting(client, containerID, null); - } - - } catch (Exception e) { - if (requireBlockToken && hasBlockToken && !blockTokeExpired) { - LOG.error("Unexpected error. ", e); - fail("Client with BlockToken should succeed when block token is" + - " required."); - } - if (requireBlockToken && hasBlockToken && blockTokeExpired) { - assertTrue("Receive expected exception", - e instanceof SCMSecurityException); - } - if (requireBlockToken && !hasBlockToken) { - assertTrue("Receive expected exception", e instanceof - IOException); - } - } - return null; - }); - } finally { - if (container != null) { - container.stop(); - } - } - } - - public static void createContainerForTesting(XceiverClientSpi client, - long containerID, Token token) throws Exception { - // Create container - ContainerProtos.ContainerCommandRequestProto request = - ContainerTestHelper.getCreateContainerSecureRequest( - containerID, client.getPipeline(), token); - ContainerProtos.ContainerCommandResponseProto response = - client.sendCommand(request); - Assert.assertNotNull(response); - } - - private StateContext getContext(DatanodeDetails datanodeDetails) { - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - return context; - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java deleted file mode 100644 index 59d741d16ad..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.server; - -import com.google.common.collect.Maps; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.replication.GrpcReplicationService; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; - -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.util.function.CheckedBiConsumer; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static org.apache.ratis.rpc.SupportedRpcType.GRPC; -import static org.apache.ratis.rpc.SupportedRpcType.NETTY; -import static org.mockito.Mockito.mock; - -/** - * Test Containers. - */ -@Ignore("Takes too long to run this test. 
Ignoring for time being.") -public class TestContainerServer { - static final String TEST_DIR = GenericTestUtils.getTestDir("dfs") - .getAbsolutePath() + File.separator; - private static final OzoneConfiguration CONF = new OzoneConfiguration(); - private static CertificateClient caClient; - - private GrpcReplicationService createReplicationService( - ContainerController containerController) { - return new GrpcReplicationService( - new OnDemandContainerReplicationSource(containerController)); - } - - @BeforeClass - static public void setup() { - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - caClient = new DNCertificateClient(new SecurityConfig(CONF)); - } - - @Test - public void testClientServer() throws Exception { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - ContainerSet containerSet = new ContainerSet(); - ContainerController controller = new ContainerController( - containerSet, null); - runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), - XceiverClientGrpc::new, - (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, - new TestContainerDispatcher(), caClient, - createReplicationService(controller)), (dn, p) -> { - }); - } - - @FunctionalInterface - interface CheckedBiFunction { - OUT apply(LEFT left, RIGHT right) throws THROWABLE; - } - - @Test - public void testClientServerRatisNetty() throws Exception { - runTestClientServerRatis(NETTY, 1); - runTestClientServerRatis(NETTY, 3); - } - - @Test - public void testClientServerRatisGrpc() throws Exception { - runTestClientServerRatis(GRPC, 1); - runTestClientServerRatis(GRPC, 3); - } - - static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); - - final ContainerDispatcher dispatcher = new TestContainerDispatcher(); - return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, - new ContainerController(new ContainerSet(), Maps.newHashMap()), - caClient, null); - } - - static void runTestClientServerRatis(RpcType rpc, int numNodes) - throws Exception { - runTestClientServer(numNodes, - (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), - XceiverClientRatis::newXceiverClientRatis, - TestContainerServer::newXceiverServerRatis, - (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p)); - } - - static void runTestClientServer( - int numDatanodes, - CheckedBiConsumer initConf, - CheckedBiFunction createClient, - CheckedBiFunction createServer, - CheckedBiConsumer initServer) - throws Exception { - final List servers = new ArrayList<>(); - XceiverClientSpi client = null; - String containerName = OzoneUtils.getRequestID(); - try { - final Pipeline pipeline = - ContainerTestHelper.createPipeline(numDatanodes); - initConf.accept(pipeline, CONF); - - for (DatanodeDetails dn : pipeline.getNodes()) { - final XceiverServerSpi s = createServer.apply(dn, CONF); - servers.add(s); - s.start(); - initServer.accept(dn, pipeline); - } - - client = createClient.apply(pipeline, CONF); - client.connect(); - - final ContainerCommandRequestProto request = - ContainerTestHelper - .getCreateContainerRequest( - ContainerTestHelper.getTestContainerID(), pipeline); - 
Assert.assertNotNull(request.getTraceID()); - - ContainerCommandResponseProto response = client.sendCommand(request); - } finally { - if (client != null) { - client.close(); - } - servers.stream().forEach(XceiverServerSpi::stop); - } - } - - @Test - public void testClientServerWithContainerDispatcher() throws Exception { - XceiverServerGrpc server = null; - XceiverClientGrpc client = null; - UUID scmId = UUID.randomUUID(); - try { - Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); - - ContainerSet containerSet = new ContainerSet(); - VolumeSet volumeSet = mock(VolumeSet.class); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - DatanodeStateMachine stateMachine = Mockito.mock( - DatanodeStateMachine.class); - StateContext context = Mockito.mock(StateContext.class); - Mockito.when(stateMachine.getDatanodeDetails()) - .thenReturn(datanodeDetails); - Mockito.when(context.getParent()).thenReturn(stateMachine); - - - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { - handlers.put(containerType, - Handler.getHandlerForContainerType(containerType, conf, context, - containerSet, volumeSet, metrics)); - } - HddsDispatcher dispatcher = new HddsDispatcher( - conf, containerSet, volumeSet, handlers, context, metrics); - dispatcher.setScmId(scmId.toString()); - dispatcher.init(); - - server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, - caClient, createReplicationService( - new ContainerController(containerSet, null))); - client = new XceiverClientGrpc(pipeline, conf); - - server.start(); - client.connect(); - - ContainerCommandRequestProto request = - ContainerTestHelper.getCreateContainerRequest( - ContainerTestHelper.getTestContainerID(), pipeline); - ContainerCommandResponseProto response = client.sendCommand(request); - Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); - } finally { - if (client != null) { - client.close(); - } - if (server != null) { - server.stop(); - } - } - } - - private static class TestContainerDispatcher implements ContainerDispatcher { - /** - * Dispatches commands to container layer. 
- * - * @param msg - Command Request - * @return Command Response - */ - @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, - DispatcherContext context) { - return ContainerTestHelper.getCreateContainerResponse(msg); - } - - @Override - public void init() { - } - - @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { - } - - @Override - public void shutdown() { - } - @Override - public Handler getHandler(ContainerProtos.ContainerType containerType) { - return null; - } - - @Override - public void setScmId(String scmId) { - - } - - @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java deleted file mode 100644 index cfee1a63788..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ /dev/null @@ -1,291 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.container.server; - -import com.google.common.collect.Maps; -import org.apache.commons.lang3.RandomUtils; -import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.XceiverClientRatis; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.RatisTestHelper; -import org.apache.hadoop.ozone.client.CertificateClientTestImpl; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.container.replication.GrpcReplicationService; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; - -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.util.function.CheckedBiConsumer; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.SUCCESS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.getCreateContainerRequest; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.getTestContainerID; -import static 
org.apache.ratis.rpc.SupportedRpcType.GRPC; -import static org.apache.ratis.rpc.SupportedRpcType.NETTY; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -/** - * Test Container servers when security is enabled. - */ -public class TestSecureContainerServer { - static final String TEST_DIR - = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; - private static final OzoneConfiguration CONF = new OzoneConfiguration(); - private static CertificateClientTestImpl caClient; - - private GrpcReplicationService createReplicationService( - ContainerController containerController) { - return new GrpcReplicationService( - new OnDemandContainerReplicationSource(containerController)); - } - - @BeforeClass - static public void setup() throws Exception { - DefaultMetricsSystem.setMiniClusterMode(true); - CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); - caClient = new CertificateClientTestImpl(CONF); - } - - @Test - public void testClientServer() throws Exception { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - ContainerSet containerSet = new ContainerSet(); - ContainerController controller = new ContainerController( - containerSet, null); - runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - pipeline.getFirstNode() - .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), - XceiverClientGrpc::new, - (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf, - new TestContainerDispatcher(), caClient, - createReplicationService(controller)), (dn, p) -> { - }); - } - - @FunctionalInterface - interface CheckedBiFunction { - OUT apply(LEFT left, RIGHT right) throws THROWABLE; - } - - @Test - public void testClientServerRatisGrpc() throws Exception { - runTestClientServerRatis(GRPC, 1); - runTestClientServerRatis(GRPC, 3); - } - - @Test - @Ignore - public void testClientServerRatisNetty() throws Exception { - runTestClientServerRatis(NETTY, 1); - runTestClientServerRatis(NETTY, 3); - } - - static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); - - final ContainerDispatcher dispatcher = new TestContainerDispatcher(); - return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, - new ContainerController(new ContainerSet(), Maps.newHashMap()), - caClient, null); - } - - static void runTestClientServerRatis(RpcType rpc, int numNodes) - throws Exception { - runTestClientServer(numNodes, - (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), - XceiverClientRatis::newXceiverClientRatis, - TestSecureContainerServer::newXceiverServerRatis, - (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p)); - } - - static void runTestClientServer( - int numDatanodes, - CheckedBiConsumer initConf, - CheckedBiFunction createClient, - CheckedBiFunction createServer, - CheckedBiConsumer initServer) - throws Exception { - final List servers = new ArrayList<>(); - XceiverClientSpi client = null; - String containerName = OzoneUtils.getRequestID(); - try { - final Pipeline pipeline = - ContainerTestHelper.createPipeline(numDatanodes); - - initConf.accept(pipeline, 
CONF); - - for (DatanodeDetails dn : pipeline.getNodes()) { - final XceiverServerSpi s = createServer.apply(dn, CONF); - servers.add(s); - s.start(); - initServer.accept(dn, pipeline); - } - - client = createClient.apply(pipeline, CONF); - client.connect(); - - // Test 1: Test failure in request without block token. - final ContainerCommandRequestProto request = - getCreateContainerRequest( - getTestContainerID(), pipeline); - Assert.assertNotNull(request.getTraceID()); - - XceiverClientSpi finalClient = client; - // Validation is different for grpc and ratis client. - if(client instanceof XceiverClientGrpc) { - LambdaTestUtils.intercept(SCMSecurityException.class, "Failed to" + - " authenticate with GRPC XceiverServer with Ozone block token", - () -> finalClient.sendCommand(request)); - } else { - IOException e = LambdaTestUtils.intercept(IOException.class, - () -> finalClient.sendCommand(request)); - Throwable rootCause = ExceptionUtils.getRootCause(e); - String msg = rootCause.getMessage(); - assertTrue(msg, msg.contains("Block token verification failed")); - } - - // Test 2: Test success in request with valid block token. - long expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - String omCertSerialId = - caClient.getCertificate().getSerialNumber().toString(); - OzoneBlockTokenSecretManager secretManager = - new OzoneBlockTokenSecretManager(new SecurityConfig(CONF), - expiryTime, omCertSerialId); - secretManager.start(caClient); - Token token = secretManager.generateToken("1", - EnumSet.allOf(AccessModeProto.class), RandomUtils.nextLong()); - final ContainerCommandRequestProto request2 = - ContainerTestHelper - .getCreateContainerSecureRequest( - getTestContainerID(), pipeline, - token); - Assert.assertNotNull(request2.getTraceID()); - XceiverClientSpi finalClient2 = createClient.apply(pipeline, CONF); - if(finalClient2 instanceof XceiverClientGrpc) { - finalClient2.connect(token.encodeToUrlString()); - } else { - finalClient2.connect(); - } - - ContainerCommandRequestProto request3 = getCreateContainerRequest( - getTestContainerID(), pipeline, token); - ContainerCommandResponseProto resp = finalClient2.sendCommand(request3); - assertEquals(SUCCESS, resp.getResult()); - } finally { - if (client != null) { - client.close(); - } - servers.stream().forEach(XceiverServerSpi::stop); - } - } - - private static class TestContainerDispatcher implements ContainerDispatcher { - /** - * Dispatches commands to container layer. 
- * - * @param msg - Command Request - * @return Command Response - */ - @Override - public ContainerCommandResponseProto dispatch( - ContainerCommandRequestProto msg, - DispatcherContext context) { - return ContainerTestHelper.getCreateContainerResponse(msg); - } - - @Override - public void init() { - } - - @Override - public void validateContainerCommand( - ContainerCommandRequestProto msg) throws StorageContainerException { - } - - @Override - public void shutdown() { - } - @Override - public Handler getHandler(ContainerProtos.ContainerType containerType) { - return null; - } - - @Override - public void setScmId(String scmId) { - } - - @Override - public void buildMissingContainerSetAndValidate( - Map container2BCSIDMap) { - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java deleted file mode 100644 index 7fb9825f801..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.dn.scrubber; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.interfaces.Container; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerMetadataScanner; -import org.apache.hadoop.ozone.container.ozoneimpl.ContainerScrubberConfiguration; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Set; -import java.util.UUID; -import java.io.File; - -import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; - -/** - * This class tests the data scrubber functionality. 
- */ -public class TestDataScrubber { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - private static OzoneManager ozoneManager; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - - @BeforeClass - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.set(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, "1s"); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - ozClient = OzoneClientFactory.getRpcClient(ozoneConfig); - store = ozClient.getObjectStore(); - ozoneManager = cluster.getOzoneManager(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - } - - @AfterClass - public static void shutdown() throws IOException { - if (ozClient != null) { - ozClient.close(); - } - if (storageContainerLocationClient != null) { - storageContainerLocationClient.close(); - } - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testOpenContainerIntegrity() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - long currentTime = Time.now(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - - OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, - ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - OzoneKey key = bucket.getKey(keyName); - Assert.assertEquals(keyName, key.getName()); - OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; - is.read(fileContent); - Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, - ONE)); - Assert.assertEquals(value, new String(fileContent)); - Assert.assertTrue(key.getCreationTime() >= currentTime); - Assert.assertTrue(key.getModificationTime() >= currentTime); - } - - // wait for the container report to propagate to SCM - Thread.sleep(5000); - - - Assert.assertEquals(1, cluster.getHddsDatanodes().size()); - - HddsDatanodeService dn = cluster.getHddsDatanodes().get(0); - OzoneContainer oc = dn.getDatanodeStateMachine().getContainer(); - ContainerSet cs = oc.getContainerSet(); - Container c = cs.getContainerIterator().next(); - - Assert.assertTrue(cs.containerCount() > 0); - - // delete the chunks directory. 
- File chunksDir = new File(c.getContainerData().getContainerPath(), - "chunks"); - deleteDirectory(chunksDir); - Assert.assertFalse(chunksDir.exists()); - - ContainerScrubberConfiguration conf = ozoneConfig.getObject( - ContainerScrubberConfiguration.class); - ContainerMetadataScanner sb = new ContainerMetadataScanner(conf, - oc.getController()); - sb.scrub(c); - - // wait for the incremental container report to propagate to SCM - Thread.sleep(5000); - - ContainerManager cm = cluster.getStorageContainerManager() - .getContainerManager(); - Set replicas = cm.getContainerReplicas( - ContainerID.valueof(c.getContainerData().getContainerID())); - Assert.assertEquals(1, replicas.size()); - ContainerReplica r = replicas.iterator().next(); - Assert.assertEquals(StorageContainerDatanodeProtocolProtos. - ContainerReplicaProto.State.UNHEALTHY, r.getState()); - } - - boolean deleteDirectory(File directoryToBeDeleted) { - File[] allContents = directoryToBeDeleted.listFiles(); - if (allContents != null) { - for (File file : allContents) { - deleteDirectory(file); - } - } - return directoryToBeDeleted.delete(); - } - - private boolean verifyRatisReplication(String volumeName, String bucketName, - String keyName, ReplicationType type, - ReplicationFactor factor) - throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setRefreshPipeline(true) - .build(); - HddsProtos.ReplicationType replicationType = - HddsProtos.ReplicationType.valueOf(type.toString()); - HddsProtos.ReplicationFactor replicationFactor = - HddsProtos.ReplicationFactor.valueOf(factor.getValue()); - OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); - for (OmKeyLocationInfo info : - keyInfo.getLatestVersionLocations().getLocationList()) { - ContainerInfo container = - storageContainerLocationClient.getContainer(info.getContainerID()); - if (!container.getReplicationFactor().equals(replicationFactor) || ( - container.getReplicationType() != replicationType)) { - return false; - } - } - return true; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java deleted file mode 100644 index 13d86ababc0..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.freon; -/** - * Classes related to Ozone tools tests. 
- */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java deleted file mode 100644 index ef49931822e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - * <p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.RandomStringUtils; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; - -/** - * This class tests container report with DN container state info. - */ -public class TestContainerReportWithKeys { - private static final Logger LOG = LoggerFactory.getLogger( - TestContainerReportWithKeys.class); - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Create a MiniDFSCluster for testing. - *
<p>
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testContainerReportKeyWrite() throws Exception { - final String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - final String keyName = "key" + RandomStringUtils.randomNumeric(5); - final int keySize = 100; - - OzoneClient client = OzoneClientFactory.getClient(conf); - ObjectStore objectStore = client.getObjectStore(); - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - OzoneOutputStream key = - objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, keySize, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - String dataString = RandomStringUtils.randomAlphabetic(keySize); - key.write(dataString.getBytes()); - key.close(); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize) - .setRefreshPipeline(true) - .build(); - - - OmKeyLocationInfo keyInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() - .get(0).getBlocksLatestVersionOnly().get(0); - - - ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID()); - - LOG.info("SCM Container Info keyCount: {} usedBytes: {}", - cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); - } - - - private static ContainerData getContainerData(long containerID) { - ContainerData containerData; - ContainerSet containerManager = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer().getContainerSet(); - containerData = - containerManager.getContainer(containerID).getContainerData(); - return containerData; - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java deleted file mode 100644 index 44a386a1f4f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ /dev/null @@ -1,975 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.om; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.net.NodeSchema; -import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; - -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; - -import static 
org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; - -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; - -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; - -/** - * Test class for @{@link KeyManagerImpl}. - */ -public class TestKeyManagerImpl { - - private static PrefixManager prefixManager; - private static KeyManagerImpl keyManager; - private static NodeManager nodeManager; - private static StorageContainerManager scm; - private static ScmBlockLocationProtocol mockScmBlockLocationProtocol; - private static OzoneConfiguration conf; - private static OMMetadataManager metadataManager; - private static File dir; - private static long scmBlockSize; - private static final String KEY_NAME = "key1"; - private static final String BUCKET_NAME = "bucket1"; - private static final String VOLUME_NAME = "vol1"; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @BeforeClass - public static void setUp() throws Exception { - conf = new OzoneConfiguration(); - dir = GenericTestUtils.getRandomizedTestDir(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); - conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); - mockScmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class); - metadataManager = new OmMetadataManagerImpl(conf); - nodeManager = new MockNodeManager(true, 10); - NodeSchema[] schemas = new NodeSchema[] - {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; - NodeSchemaManager schemaManager = NodeSchemaManager.getInstance(); - schemaManager.init(schemas, false); - NetworkTopology clusterMap = new NetworkTopologyImpl(schemaManager); - nodeManager.getAllNodes().stream().forEach(node -> { - node.setNetworkName(node.getUuidString()); - clusterMap.add(node); - }); - ((MockNodeManager)nodeManager).setNetworkTopology(clusterMap); - SCMConfigurator configurator = new SCMConfigurator(); - configurator.setScmNodeManager(nodeManager); - configurator.setNetworkTopology(clusterMap); - scm = TestUtils.getScm(conf, configurator); - scm.start(); - scm.exitSafeMode(); - scmBlockSize = (long) conf - .getStorageSize(OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, - StorageUnit.BYTES); - conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10); - - keyManager = - new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, conf, - "om1", null); - prefixManager = new PrefixManagerImpl(metadataManager, false); - - Mockito.when(mockScmBlockLocationProtocol - .allocateBlock(Mockito.anyLong(), Mockito.anyInt(), - Mockito.any(ReplicationType.class), - Mockito.any(ReplicationFactor.class), Mockito.anyString(), - Mockito.any(ExcludeList.class))).thenThrow( - new SCMException("SafeModePrecheck failed for allocateBlock", - ResultCodes.SAFE_MODE_EXCEPTION)); - createVolume(VOLUME_NAME); - createBucket(VOLUME_NAME, BUCKET_NAME); - } - - @AfterClass - public static void cleanup() throws Exception { - scm.stop(); - scm.join(); - metadataManager.stop(); - keyManager.stop(); - FileUtils.deleteDirectory(dir); - } - - @After - public void cleanupTest() throws IOException { - List fileStatuses = keyManager - 
.listStatus(createBuilder().setKeyName("").build(), true, "", 100000); - for (OzoneFileStatus fileStatus : fileStatuses) { - if (fileStatus.isFile()) { - keyManager.deleteKey( - createKeyArgs(fileStatus.getPath().toString().substring(1))); - } else { - keyManager.deleteKey(createKeyArgs(OzoneFSUtils - .addTrailingSlashIfNeeded( - fileStatus.getPath().toString().substring(1)))); - } - } - } - - private static void createBucket(String volumeName, String bucketName) - throws IOException { - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build(); - - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); - } - - private static void createVolume(String volumeName) throws IOException { - OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("bilbo") - .setOwnerName("bilbo") - .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); - } - - @Test - public void allocateBlockFailureInSafeMode() throws Exception { - KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, - metadataManager, conf, "om1", null); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(KEY_NAME) - .build(); - - // As now openKey will allocate at least one block, even if the size - // passed is 0. So adding an entry to openKeyTable manually to test - // allocateBlock failure. - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationType(keyArgs.getType()) - .setReplicationFactor(keyArgs.getFactor()) - .setFileEncryptionInfo(null).build(); - metadataManager.getOpenKeyTable().put( - metadataManager.getOpenKey(VOLUME_NAME, BUCKET_NAME, KEY_NAME, 1L), - omKeyInfo); - LambdaTestUtils.intercept(OMException.class, - "SafeModePrecheck failed for allocateBlock", () -> { - keyManager1 - .allocateBlock(keyArgs, 1L, new ExcludeList()); - }); - } - - @Test - public void openKeyFailureInSafeMode() throws Exception { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, - metadataManager, conf, "om1", null); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(KEY_NAME) - .setDataSize(1000) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), - ALL, ALL)) - .build(); - LambdaTestUtils.intercept(OMException.class, - "SafeModePrecheck failed for allocateBlock", () -> { - keyManager1.openKey(keyArgs); - }); - } - - @Test - public void openKeyWithMultipleBlocks() throws IOException { - OmKeyArgs keyArgs = createBuilder() - .setKeyName(UUID.randomUUID().toString()) - .setDataSize(scmBlockSize * 10) - .build(); - OpenKeySession keySession = keyManager.openKey(keyArgs); - OmKeyInfo keyInfo = keySession.getKeyInfo(); - Assert.assertEquals(10, - keyInfo.getLatestVersionLocations().getLocationList().size()); - } - - @Test - public void testCreateDirectory() throws IOException { - // Create directory where the parent directory does not exist - String keyName = RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - 
keyManager.createDirectory(keyArgs); - Path path = Paths.get(keyName); - while (path != null) { - // verify parent directories are created - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); - path = path.getParent(); - } - - // make sure create directory fails where parent is a file - keyName = RandomStringUtils.randomAlphabetic(5); - keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - OpenKeySession keySession = keyManager.openKey(keyArgs); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - try { - keyManager.createDirectory(keyArgs); - Assert.fail("Creation should fail for directory."); - } catch (OMException e) { - Assert.assertEquals(e.getResult(), - OMException.ResultCodes.FILE_ALREADY_EXISTS); - } - - // create directory for root directory - keyName = ""; - keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - keyManager.createDirectory(keyArgs); - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); - - // create directory where parent is root - keyName = RandomStringUtils.randomAlphabetic(5); - keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - keyManager.createDirectory(keyArgs); - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); - } - - @Test - public void testOpenFile() throws IOException { - // create key - String keyName = RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - - // try to open created key with overWrite flag set to false - try { - keyManager.createFile(keyArgs, false, false); - Assert.fail("Open key should fail for non overwrite create"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) { - throw ex; - } - } - - // create file should pass with overwrite flag set to true - keyManager.createFile(keyArgs, true, false); - - // try to create a file where parent directories do not exist and - // recursive flag is set to false - keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - try { - keyManager.createFile(keyArgs, false, false); - Assert.fail("Open file should fail for non recursive write"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) { - throw ex; - } - } - - // file create should pass when recursive flag is set to true - keySession = keyManager.createFile(keyArgs, false, true); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - Assert.assertTrue(keyManager - .getFileStatus(keyArgs).isFile()); - - // try creating a file over a directory - keyArgs = createBuilder() - .setKeyName("") - .build(); - try { - keyManager.createFile(keyArgs, true, true); - Assert.fail("Open file should fail for non recursive write"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { - throw ex; - } - } - } - - @Test - 
public void testCheckAccessForFileKey() throws Exception { - OmKeyArgs keyArgs = createBuilder() - .setKeyName("testdir/deep/NOTICE.txt") - .build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, true); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - - OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - RequestContext context = currentUserReads(); - Assert.assertTrue(keyManager.checkAccess(fileKey, context)); - - OzoneObj parentDirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) - .setStoreType(OzoneObj.StoreType.OZONE) - .setKeyName("testdir") - .build(); - Assert.assertTrue(keyManager.checkAccess(parentDirKey, context)); - } - - @Test - public void testCheckAccessForNonExistentKey() throws Exception { - OmKeyArgs keyArgs = createBuilder() - .setKeyName("testdir/deep/NO_SUCH_FILE.txt") - .build(); - OzoneObj nonExistentKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - OzoneTestUtils.expectOmException(OMException.ResultCodes.KEY_NOT_FOUND, - () -> keyManager.checkAccess(nonExistentKey, currentUserReads())); - } - - @Test - public void testCheckAccessForDirectoryKey() throws Exception { - OmKeyArgs keyArgs = createBuilder() - .setKeyName("some/dir") - .build(); - keyManager.createDirectory(keyArgs); - - OzoneObj dirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - Assert.assertTrue(keyManager.checkAccess(dirKey, currentUserReads())); - } - - @Test - public void testPrefixAclOps() throws IOException { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String prefix1 = "pf1/"; - - OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(prefix1) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); - prefixManager.addAcl(ozPrefix1, ozAcl1); - - List ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(1, ozAclGet.size()); - Assert.assertEquals(ozAcl1, ozAclGet.get(0)); - - List acls = new ArrayList<>(); - OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", - ACLType.ALL, ACCESS); - - BitSet rwRights = new BitSet(); - rwRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - rwRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - rwRights, ACCESS); - - BitSet wRights = new BitSet(); - wRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - wRights, ACCESS); - - BitSet rRights = new BitSet(); - rRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - rRights, ACCESS); - - acls.add(ozAcl2); - acls.add(ozAcl3); - - prefixManager.setAcl(ozPrefix1, acls); - ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(2, ozAclGet.size()); - - int matchEntries = 0; - for (OzoneAcl acl : ozAclGet) { - if (acl.getType() == ACLIdentityType.GROUP) { - Assert.assertEquals(ozAcl3, acl); - matchEntries++; - } - if (acl.getType() == ACLIdentityType.USER) { - Assert.assertEquals(ozAcl2, acl); - matchEntries++; - } - } - Assert.assertEquals(2, matchEntries); - - boolean result = 
prefixManager.removeAcl(ozPrefix1, ozAcl4); - Assert.assertEquals(true, result); - - ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(2, ozAclGet.size()); - - result = prefixManager.removeAcl(ozPrefix1, ozAcl3); - Assert.assertEquals(true, result); - ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(1, ozAclGet.size()); - - Assert.assertEquals(ozAcl2, ozAclGet.get(0)); - - // add dev:w - prefixManager.addAcl(ozPrefix1, ozAcl4); - ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(2, ozAclGet.size()); - - // add dev:r and validate the acl bitset combined - prefixManager.addAcl(ozPrefix1, ozAcl5); - ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(2, ozAclGet.size()); - - matchEntries = 0; - for (OzoneAcl acl : ozAclGet) { - if (acl.getType() == ACLIdentityType.GROUP) { - Assert.assertEquals(ozAcl3, acl); - matchEntries++; - } - if (acl.getType() == ACLIdentityType.USER) { - Assert.assertEquals(ozAcl2, acl); - matchEntries++; - } - } - Assert.assertEquals(2, matchEntries); - } - - @Test - public void testInvalidPrefixAcl() throws IOException { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String prefix1 = "pf1/"; - - // Invalid prefix not ending with "/" - String invalidPrefix = "invalid/pf"; - OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); - - OzoneObj ozInvalidPrefix = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(invalidPrefix) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - // add acl with invalid prefix name - exception.expect(OMException.class); - exception.expectMessage("Invalid prefix name"); - prefixManager.addAcl(ozInvalidPrefix, ozAcl1); - - OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(prefix1) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - - List ozAclGet = prefixManager.getAcl(ozPrefix1); - Assert.assertEquals(1, ozAclGet.size()); - Assert.assertEquals(ozAcl1, ozAclGet.get(0)); - - // get acl with invalid prefix name - exception.expect(OMException.class); - exception.expectMessage("Invalid prefix name"); - ozAclGet = prefixManager.getAcl(ozInvalidPrefix); - Assert.assertEquals(null, ozAcl1); - - // set acl with invalid prefix name - List ozoneAcls = new ArrayList(); - ozoneAcls.add(ozAcl1); - exception.expect(OMException.class); - exception.expectMessage("Invalid prefix name"); - prefixManager.setAcl(ozInvalidPrefix, ozoneAcls); - - // remove acl with invalid prefix name - exception.expect(OMException.class); - exception.expectMessage("Invalid prefix name"); - prefixManager.removeAcl(ozInvalidPrefix, ozAcl1); - } - - @Test - public void testLongestPrefixPath() throws IOException { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String prefix1 = "pf1/pf11/pf111/pf1111/"; - String file1 = "pf1/pf11/file1"; - String file2 = "pf1/pf11/pf111/pf1111/file2"; - - OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(prefix1) - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); - prefixManager.addAcl(ozPrefix1, ozAcl1); - - OzoneObj ozFile1 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - 
.setBucketName(bucketName) - .setKeyName(file1) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - List prefixInfos = - prefixManager.getLongestPrefixPath(ozFile1.getPath()); - Assert.assertEquals(5, prefixInfos.size()); - - OzoneObj ozFile2 = new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setPrefixName(file2) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .build(); - - prefixInfos = - prefixManager.getLongestPrefixPath(ozFile2.getPath()); - Assert.assertEquals(7, prefixInfos.size()); - // Only the last node has acl on it - Assert.assertEquals(ozAcl1, prefixInfos.get(6).getAcls().get(0)); - // All other nodes don't have acl value associate with it - for (int i = 0; i < 6; i++) { - Assert.assertEquals(null, prefixInfos.get(i)); - } - } - - @Test - public void testLookupFile() throws IOException { - String keyName = RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - - // lookup for a non-existent file - try { - keyManager.lookupFile(keyArgs, null); - Assert.fail("Lookup file should fail for non existent file"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) { - throw ex; - } - } - - // create a file - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - Assert.assertEquals(keyManager.lookupFile(keyArgs, null).getKeyName(), - keyName); - - // lookup for created file - keyArgs = createBuilder() - .setKeyName("") - .build(); - try { - keyManager.lookupFile(keyArgs, null); - Assert.fail("Lookup file should fail for a directory"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { - throw ex; - } - } - } - - private OmKeyArgs createKeyArgs(String toKeyName) throws IOException { - return createBuilder().setKeyName(toKeyName).build(); - } - - @Test - public void testLookupKeyWithLocation() throws IOException { - String keyName = RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder() - .setKeyName(keyName) - .setSortDatanodesInPipeline(true) - .build(); - - // lookup for a non-existent key - try { - keyManager.lookupKey(keyArgs, null); - Assert.fail("Lookup key should fail for non existent key"); - } catch (OMException ex) { - if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) { - throw ex; - } - } - - // create a key - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); - // randomly select 3 datanodes - List nodeList = new ArrayList<>(); - nodeList.add((DatanodeDetails)scm.getClusterMap().getNode( - 0, null, null, null, null, 0)); - nodeList.add((DatanodeDetails)scm.getClusterMap().getNode( - 1, null, null, null, null, 0)); - nodeList.add((DatanodeDetails)scm.getClusterMap().getNode( - 2, null, null, null, null, 0)); - Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(1))); - Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(2))); - // create a pipeline using 3 datanodes - Pipeline pipeline = scm.getPipelineManager().createPipeline( - ReplicationType.RATIS, ReplicationFactor.THREE, nodeList); - List locationInfoList = new ArrayList<>(); - locationInfoList.add( - new OmKeyLocationInfo.Builder().setPipeline(pipeline) - .setBlockID(new BlockID(1L, 1L)).build()); - 
keyArgs.setLocationInfoList(locationInfoList); - - keyManager.commitKey(keyArgs, keySession.getId()); - - OmKeyInfo key = keyManager.lookupKey(keyArgs, null); - Assert.assertEquals(key.getKeyName(), keyName); - List keyLocations = - key.getLatestVersionLocations().getLocationList(); - DatanodeDetails leader = - keyLocations.get(0).getPipeline().getFirstNode(); - DatanodeDetails follower1 = - keyLocations.get(0).getPipeline().getNodes().get(1); - DatanodeDetails follower2 = - keyLocations.get(0).getPipeline().getNodes().get(2); - Assert.assertNotEquals(leader, follower1); - Assert.assertNotEquals(follower1, follower2); - - // lookup key, leader as client - OmKeyInfo key1 = keyManager.lookupKey(keyArgs, leader.getIpAddress()); - Assert.assertEquals(leader, key1.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getClosestNode()); - - // lookup key, follower1 as client - OmKeyInfo key2 = keyManager.lookupKey(keyArgs, follower1.getIpAddress()); - Assert.assertEquals(follower1, key2.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getClosestNode()); - - // lookup key, follower2 as client - OmKeyInfo key3 = keyManager.lookupKey(keyArgs, follower2.getIpAddress()); - Assert.assertEquals(follower2, key3.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getClosestNode()); - - // lookup key, random node as client - OmKeyInfo key4 = keyManager.lookupKey(keyArgs, - "/d=default-drack/127.0.0.1"); - Assert.assertEquals(leader, key4.getLatestVersionLocations() - .getLocationList().get(0).getPipeline().getClosestNode()); - } - - @Test - public void testListStatus() throws IOException { - String superDir = RandomStringUtils.randomAlphabetic(5); - - int numDirectories = 5; - int numFiles = 5; - // set of directory descendants of root - Set directorySet = new TreeSet<>(); - // set of file descendants of root - Set fileSet = new TreeSet<>(); - createDepthTwoDirectory(superDir, numDirectories, numFiles, directorySet, - fileSet); - // set of all descendants of root - Set children = new TreeSet<>(directorySet); - children.addAll(fileSet); - // number of entries in the filesystem - int numEntries = directorySet.size() + fileSet.size(); - - OmKeyArgs rootDirArgs = createKeyArgs(""); - List fileStatuses = - keyManager.listStatus(rootDirArgs, true, "", 100); - // verify the number of status returned is same as number of entries - Assert.assertEquals(numEntries, fileStatuses.size()); - - fileStatuses = keyManager.listStatus(rootDirArgs, false, "", 100); - // the number of immediate children of root is 1 - Assert.assertEquals(1, fileStatuses.size()); - - // if startKey is the first descendant of the root then listStatus should - // return all the entries. 
- String startKey = children.iterator().next(); - fileStatuses = keyManager.listStatus(rootDirArgs, true, - startKey.substring(0, startKey.length() - 1), 100); - Assert.assertEquals(numEntries, fileStatuses.size()); - - for (String directory : directorySet) { - // verify status list received for each directory with recursive flag set - // to false - OmKeyArgs dirArgs = createKeyArgs(directory); - fileStatuses = keyManager.listStatus(dirArgs, false, "", 100); - verifyFileStatus(directory, fileStatuses, directorySet, fileSet, false); - - // verify status list received for each directory with recursive flag set - // to true - fileStatuses = keyManager.listStatus(dirArgs, true, "", 100); - verifyFileStatus(directory, fileStatuses, directorySet, fileSet, true); - - // verify list status call with using the startKey parameter and - // recursive flag set to false. After every call to listStatus use the - // latest received file status as the startKey until no more entries are - // left to list. - List tempFileStatus = null; - Set tmpStatusSet = new HashSet<>(); - do { - tempFileStatus = keyManager.listStatus(dirArgs, false, - tempFileStatus != null ? OzoneFSUtils.pathToKey( - tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null, - 2); - tmpStatusSet.addAll(tempFileStatus); - } while (tempFileStatus.size() == 2); - verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, - fileSet, false); - - // verify list status call with using the startKey parameter and - // recursive flag set to true. After every call to listStatus use the - // latest received file status as the startKey until no more entries are - // left to list. - tempFileStatus = null; - tmpStatusSet = new HashSet<>(); - do { - tempFileStatus = keyManager.listStatus(dirArgs, true, - tempFileStatus != null ? OzoneFSUtils.pathToKey( - tempFileStatus.get(tempFileStatus.size() - 1).getPath()) : null, - 2); - tmpStatusSet.addAll(tempFileStatus); - } while (tempFileStatus.size() == 2); - verifyFileStatus(directory, new ArrayList<>(tmpStatusSet), directorySet, - fileSet, true); - } - } - - /** - * Creates a depth two directory. 
- * - * @param superDir Super directory to create - * @param numDirectories number of directory children - * @param numFiles number of file children - * @param directorySet set of descendant directories for the super directory - * @param fileSet set of descendant files for the super directory - */ - private void createDepthTwoDirectory(String superDir, int numDirectories, - int numFiles, Set directorySet, Set fileSet) - throws IOException { - // create super directory - OmKeyArgs superDirArgs = createKeyArgs(superDir); - keyManager.createDirectory(superDirArgs); - directorySet.add(superDir); - - // add directory children to super directory - Set childDirectories = - createDirectories(superDir, new HashMap<>(), numDirectories); - directorySet.addAll(childDirectories); - // add file to super directory - fileSet.addAll(createFiles(superDir, new HashMap<>(), numFiles)); - - // for each child directory create files and directories - for (String child : childDirectories) { - fileSet.addAll(createFiles(child, new HashMap<>(), numFiles)); - directorySet - .addAll(createDirectories(child, new HashMap<>(), numDirectories)); - } - } - - private void verifyFileStatus(String directory, - List fileStatuses, Set directorySet, - Set fileSet, boolean recursive) { - - for (OzoneFileStatus fileStatus : fileStatuses) { - String keyName = OzoneFSUtils.pathToKey(fileStatus.getPath()); - String parent = Paths.get(keyName).getParent().toString(); - if (!recursive) { - // if recursive is false, verify all the statuses have the input - // directory as parent - Assert.assertEquals(parent, directory); - } - // verify filestatus is present in directory or file set accordingly - if (fileStatus.isDirectory()) { - Assert.assertTrue(directorySet.contains(keyName)); - } else { - Assert.assertTrue(fileSet.contains(keyName)); - } - } - - // count the number of entries which should be present in the directory - int numEntries = 0; - Set entrySet = new TreeSet<>(directorySet); - entrySet.addAll(fileSet); - for (String entry : entrySet) { - if (OzoneFSUtils.getParent(entry) - .startsWith(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) { - if (recursive) { - numEntries++; - } else if (OzoneFSUtils.getParent(entry) - .equals(OzoneFSUtils.addTrailingSlashIfNeeded(directory))) { - numEntries++; - } - } - } - // verify the number of entries match the status list size - Assert.assertEquals(fileStatuses.size(), numEntries); - } - - private Set createDirectories(String parent, - Map> directoryMap, int numDirectories) - throws IOException { - Set keyNames = new TreeSet<>(); - for (int i = 0; i < numDirectories; i++) { - String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); - keyManager.createDirectory(keyArgs); - keyNames.add(keyName); - } - directoryMap.put(parent, new ArrayList<>(keyNames)); - return keyNames; - } - - private List createFiles(String parent, - Map> fileMap, int numFiles) throws IOException { - List keyNames = new ArrayList<>(); - for (int i = 0; i < numFiles; i++) { - String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations() - .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - keyNames.add(keyName); - } - fileMap.put(parent, keyNames); - return keyNames; - } - - private 
OmKeyArgs.Builder createBuilder() throws IOException { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - return new OmKeyArgs.Builder() - .setBucketName(BUCKET_NAME) - .setFactor(ReplicationFactor.ONE) - .setDataSize(0) - .setType(ReplicationType.STAND_ALONE) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), - ALL, ALL)) - .setVolumeName(VOLUME_NAME); - } - - private RequestContext currentUserReads() throws IOException { - return RequestContext.newBuilder() - .setClientUgi(UserGroupInformation.getCurrentUser()) - .setAclRights(ACLType.READ_ACL) - .setAclType(ACLIdentityType.USER) - .build(); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java deleted file mode 100644 index 732fb3445ad..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; - -/** - * Test OM's {@link KeyDeletingService}. - */ -public class TestKeyPurging { - - private static MiniOzoneCluster cluster; - private static ObjectStore store; - private static OzoneManager om; - - private static final int NUM_KEYS = 10; - private static final int KEY_SIZE = 100; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setQuietMode(false); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .setHbInterval(200) - .build(); - cluster.waitForClusterToBeReady(); - store = OzoneClientFactory.getRpcClient(conf).getObjectStore(); - om = cluster.getOzoneManager(); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test(timeout = 30000) - public void testKeysPurgingByKeyDeletingService() throws Exception { - // Create Volume and Bucket - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - - // Create some keys and write data into them - String keyBase = UUID.randomUUID().toString(); - String keyString = UUID.randomUUID().toString(); - byte[] data = ContainerTestHelper.getFixedLengthString( - keyString, KEY_SIZE).getBytes(UTF_8); - List keys = new ArrayList<>(NUM_KEYS); - for (int i = 1; i <= NUM_KEYS; i++) { - String keyName = keyBase + "-" + i; - keys.add(keyName); - OzoneOutputStream keyStream = ContainerTestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, - KEY_SIZE, store, volumeName, bucketName); - keyStream.write(data); - keyStream.close(); - } - - // Delete created keys - for (String key : keys) { - 
bucket.deleteKey(key); - } - - // Verify that KeyDeletingService picks up deleted keys and purges them - // from DB. - KeyManager keyManager = om.getKeyManager(); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - - GenericTestUtils.waitFor( - () -> keyDeletingService.getDeletedKeyCount().get() >= NUM_KEYS, - 1000, 10000); - - Assert.assertTrue(keyDeletingService.getRunCount().get() > 1); - - GenericTestUtils.waitFor( - () -> { - try { - return keyManager.getPendingDeletionKeys(Integer.MAX_VALUE) - .size() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 10000); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java deleted file mode 100644 index 3cba9b3effa..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys. - OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.util.UUID; - -import javax.servlet.ServletContext; -import javax.servlet.ServletException; -import javax.servlet.ServletOutputStream; -import javax.servlet.WriteListener; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.mockito.Matchers; - -import static org.apache.hadoop.ozone.OzoneConsts. - OZONE_DB_CHECKPOINT_REQUEST_FLUSH; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Class used for testing the OM DB Checkpoint provider servlet. - */ -public class TestOMDbCheckpointServlet { - private MiniOzoneCluster cluster = null; - private OMMetrics omMetrics; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; - - @Rule - public Timeout timeout = new Timeout(60000); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - omMetrics = cluster.getOzoneManager().getMetrics(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testDoGet() throws ServletException, IOException { - - File tempFile = null; - try { - OMDBCheckpointServlet omDbCheckpointServletMock = - mock(OMDBCheckpointServlet.class); - - doCallRealMethod().when(omDbCheckpointServletMock).init(); - - HttpServletRequest requestMock = mock(HttpServletRequest.class); - HttpServletResponse responseMock = mock(HttpServletResponse.class); - - ServletContext servletContextMock = mock(ServletContext.class); - when(omDbCheckpointServletMock.getServletContext()) - .thenReturn(servletContextMock); - - when(servletContextMock.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE)) - .thenReturn(cluster.getOzoneManager()); - when(requestMock.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH)) - .thenReturn("true"); - doNothing().when(responseMock).setContentType("application/x-tgz"); - doNothing().when(responseMock).setHeader(Matchers.anyString(), - Matchers.anyString()); - - tempFile = File.createTempFile("testDoGet_" + System - .currentTimeMillis(), ".tar.gz"); - - FileOutputStream fileOutputStream = new FileOutputStream(tempFile); - when(responseMock.getOutputStream()).thenReturn( - new ServletOutputStream() { - @Override - public boolean isReady() { - return true; - } - - @Override - public void setWriteListener(WriteListener writeListener) { - } - - @Override - public void write(int b) throws IOException { - fileOutputStream.write(b); - } - }); - - doCallRealMethod().when(omDbCheckpointServletMock).doGet(requestMock, - responseMock); - - omDbCheckpointServletMock.init(); - - Assert.assertTrue( - omMetrics.getLastCheckpointCreationTimeTaken() == 0); - Assert.assertTrue( - omMetrics.getLastCheckpointStreamingTimeTaken() == 0); - - omDbCheckpointServletMock.doGet(requestMock, responseMock); - - Assert.assertTrue(tempFile.length() > 0); - Assert.assertTrue( - omMetrics.getLastCheckpointCreationTimeTaken() > 0); - Assert.assertTrue( - omMetrics.getLastCheckpointStreamingTimeTaken() > 0); - } finally { - FileUtils.deleteQuietly(tempFile); - } - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java deleted file mode 100644 index 901dbe9493e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey; - -/** - * Tests the Ratis snaphsots feature in OM. - */ -public class TestOMRatisSnapshots { - - private MiniOzoneHAClusterImpl cluster = null; - private ObjectStore objectStore; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omServiceId; - private int numOfOMs = 3; - private static final long SNAPSHOT_THRESHOLD = 50; - private static final int LOG_PURGE_GAP = 50; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Rule - public Timeout timeout = new Timeout(500_000); - - /** - * Create a MiniOzoneCluster for testing. The cluster initially has one - * inactive OM. So at the start of the cluster, there will be 2 active and 1 - * inactive OM. - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omServiceId = "om-service-test1"; - conf.setLong( - OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, - SNAPSHOT_THRESHOLD); - conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOMServiceId("om-service-test1") - .setNumOfOzoneManagers(numOfOMs) - .setNumOfActiveOMs(2) - .build(); - cluster.waitForClusterToBeReady(); - objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) - .getObjectStore(); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testInstallSnapshot() throws Exception { - // Get the leader OM - String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider() - .getCurrentProxyOMNodeId(); - OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); - OzoneManagerRatisServer leaderRatisServer = leaderOM.getOmRatisServer(); - - // Find the inactive OM - String followerNodeId = leaderOM.getPeerNodes().get(0).getOMNodeId(); - if (cluster.isOMActive(followerNodeId)) { - followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); - } - OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); - - // Do some transactions so that the log index increases - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - long leaderOMappliedLogIndex = - leaderRatisServer.getStateMachineLastAppliedIndex(); - - List keys = new ArrayList<>(); - while (leaderOMappliedLogIndex < 2000) { - keys.add(createKey(ozoneBucket)); - leaderOMappliedLogIndex = - leaderRatisServer.getStateMachineLastAppliedIndex(); - } - - // Get the latest db checkpoint from the leader OM. - long leaderOMSnaphsotIndex = leaderOM.saveRatisSnapshot(); - DBCheckpoint leaderDbCheckpoint = - leaderOM.getMetadataManager().getStore().getCheckpoint(false); - - // Start the inactive OM - cluster.startInactiveOM(followerNodeId); - - // The recently started OM should be lagging behind the leader OM. - long followerOMLastAppliedIndex = - followerOM.getOmRatisServer().getStateMachineLastAppliedIndex(); - Assert.assertTrue( - followerOMLastAppliedIndex < leaderOMSnaphsotIndex); - - // Install leader OM's db checkpoint on the lagging OM. - followerOM.getOmRatisServer().getOmStateMachine().pause(); - followerOM.getMetadataManager().getStore().close(); - followerOM.replaceOMDBWithCheckpoint( - leaderOMSnaphsotIndex, leaderDbCheckpoint.getCheckpointLocation()); - - // Reload the follower OM with new DB checkpoint from the leader OM. - followerOM.reloadOMState(leaderOMSnaphsotIndex); - followerOM.getOmRatisServer().getOmStateMachine().unpause( - leaderOMSnaphsotIndex); - - // After the new checkpoint is loaded and state machine is unpaused, the - // follower OM lastAppliedIndex must match the snapshot index of the - // checkpoint. - followerOMLastAppliedIndex = followerOM.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - Assert.assertEquals(leaderOMSnaphsotIndex, followerOMLastAppliedIndex); - - // Verify that the follower OM's DB contains the transactions which were - // made while it was inactive. 
- OMMetadataManager followerOMMetaMngr = followerOM.getMetadataManager(); - Assert.assertNotNull(followerOMMetaMngr.getVolumeTable().get( - followerOMMetaMngr.getVolumeKey(volumeName))); - Assert.assertNotNull(followerOMMetaMngr.getBucketTable().get( - followerOMMetaMngr.getBucketKey(volumeName, bucketName))); - for (String key : keys) { - Assert.assertNotNull(followerOMMetaMngr.getKeyTable().get( - followerOMMetaMngr.getOzoneKey(volumeName, bucketName, key))); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java deleted file mode 100644 index c75e365f415..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java +++ /dev/null @@ -1,151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.util.UUID; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.IOzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.test.GenericTestUtils; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import org.junit.AfterClass; -import static org.junit.Assert.assertTrue; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test for Ozone Manager ACLs. - */ -public class TestOmAcls { - - private static boolean aclAllow = true; - private static MiniOzoneCluster cluster = null; - private static OMMetrics omMetrics; - private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; - private static GenericTestUtils.LogCapturer logCapturer; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class, - IAccessAuthorizer.class); - conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - omMetrics = cluster.getOzoneManager().getMetrics(); - logCapturer = - GenericTestUtils.LogCapturer.captureLogs(OzoneManager.getLogger()); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * Tests the OM Initialization. - */ - - @Test - public void testBucketCreationPermissionDenied() throws Exception { - - TestOmAcls.aclAllow = true; - - String volumeName = RandomStringUtils.randomAlphabetic(5).toLowerCase(); - String bucketName = RandomStringUtils.randomAlphabetic(5).toLowerCase(); - cluster.getClient().getObjectStore().createVolume(volumeName); - OzoneVolume volume = - cluster.getClient().getObjectStore().getVolume(volumeName); - - TestOmAcls.aclAllow = false; - OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED, - () -> volume.createBucket(bucketName)); - - assertTrue(logCapturer.getOutput() - .contains("doesn't have CREATE permission to access volume")); - } - - @Test - public void testFailureInKeyOp() throws Exception { - final VolumeArgs createVolumeArgs; - - TestOmAcls.aclAllow = true; - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - logCapturer.clearOutput(); - - TestOmAcls.aclAllow = false; - - OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED, - () -> TestDataUtil.createKey(bucket, "testKey", "testcontent")); - assertTrue(logCapturer.getOutput().contains("doesn't have WRITE " + - "permission to access bucket")); - } - - /** - * Test implementation to negative case. - */ - static class OzoneAccessAuthorizerTest implements IAccessAuthorizer { - - @Override - public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) { - return TestOmAcls.aclAllow; - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java deleted file mode 100644 index ff1cf039e0c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * This class tests the versioning of blocks from OM side. - */ -public class TestOmBlockVersioning { - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static OzoneManager ozoneManager; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - ozoneManager = cluster.getOzoneManager(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testAllocateCommit() throws Exception { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - String userName = ugi.getUserName(); - String adminName = ugi.getUserName(); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(1000) - .setRefreshPipeline(true) - .setAcls(new ArrayList<>()) - .build(); - - // 1st update, version 0 - OpenKeySession openKey = ozoneManager.openKey(keyArgs); - // explicitly set the keyLocation list before committing the key. - keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations() - .getBlocksLatestVersionOnly()); - ozoneManager.commitKey(keyArgs, openKey.getId()); - - OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); - OmKeyLocationInfoGroup highestVersion = - checkVersions(keyInfo.getKeyLocationVersions()); - assertEquals(0, highestVersion.getVersion()); - assertEquals(1, highestVersion.getLocationList().size()); - - // 2nd update, version 1 - openKey = ozoneManager.openKey(keyArgs); - //OmKeyLocationInfo locationInfo = - // ozoneManager.allocateBlock(keyArgs, openKey.getId()); - // explicitly set the keyLocation list before committing the key. - keyArgs.setLocationInfoList(openKey.getKeyInfo().getLatestVersionLocations() - .getBlocksLatestVersionOnly()); - ozoneManager.commitKey(keyArgs, openKey.getId()); - - keyInfo = ozoneManager.lookupKey(keyArgs); - highestVersion = checkVersions(keyInfo.getKeyLocationVersions()); - assertEquals(1, highestVersion.getVersion()); - assertEquals(2, highestVersion.getLocationList().size()); - - // 3rd update, version 2 - openKey = ozoneManager.openKey(keyArgs); - - // this block will be appended to the latest version of version 2. 
- OmKeyLocationInfo locationInfo = - ozoneManager.allocateBlock(keyArgs, openKey.getId(), - new ExcludeList()); - List locationInfoList = - openKey.getKeyInfo().getLatestVersionLocations() - .getBlocksLatestVersionOnly(); - Assert.assertTrue(locationInfoList.size() == 1); - locationInfoList.add(locationInfo); - keyArgs.setLocationInfoList(locationInfoList); - ozoneManager.commitKey(keyArgs, openKey.getId()); - - keyInfo = ozoneManager.lookupKey(keyArgs); - highestVersion = checkVersions(keyInfo.getKeyLocationVersions()); - assertEquals(2, highestVersion.getVersion()); - assertEquals(4, highestVersion.getLocationList().size()); - } - - private OmKeyLocationInfoGroup checkVersions( - List versions) { - OmKeyLocationInfoGroup currentVersion = null; - for (OmKeyLocationInfoGroup version : versions) { - if (currentVersion != null) { - assertEquals(currentVersion.getVersion() + 1, version.getVersion()); - for (OmKeyLocationInfo info : currentVersion.getLocationList()) { - boolean found = false; - // all the blocks from the previous version must present in the next - // version - for (OmKeyLocationInfo info2 : version.getLocationList()) { - if (info.getLocalID() == info2.getLocalID()) { - found = true; - break; - } - } - assertTrue(found); - } - } - currentVersion = version; - } - return currentVersion; - } - - @Test - public void testReadLatestVersion() throws Exception { - - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - OzoneBucket bucket = - TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); - - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(1000) - .setRefreshPipeline(true) - .build(); - - String dataString = RandomStringUtils.randomAlphabetic(100); - - TestDataUtil.createKey(bucket, keyName, dataString); - assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); - OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(0, keyInfo.getLatestVersionLocations().getVersion()); - assertEquals(1, - keyInfo.getLatestVersionLocations().getLocationList().size()); - - // this write will create 2nd version, 2nd version will contain block from - // version 1, and add a new block - TestDataUtil.createKey(bucket, keyName, dataString); - - - keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); - assertEquals(1, keyInfo.getLatestVersionLocations().getVersion()); - assertEquals(2, - keyInfo.getLatestVersionLocations().getLocationList().size()); - - dataString = RandomStringUtils.randomAlphabetic(200); - TestDataUtil.createKey(bucket, keyName, dataString); - - keyInfo = ozoneManager.lookupKey(omKeyArgs); - assertEquals(dataString, TestDataUtil.getKey(bucket, keyName)); - assertEquals(2, keyInfo.getLatestVersionLocations().getVersion()); - assertEquals(3, - keyInfo.getLatestVersionLocations().getLocationList().size()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java deleted file mode 100644 index de42fdccc03..00000000000 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.UUID; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.security.authentication.client.AuthenticationException; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test Ozone Manager Init. - */ -public class TestOmInit { - private static MiniOzoneCluster cluster = null; - private static OMMetrics omMetrics; - private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - omMetrics = cluster.getOzoneManager().getMetrics(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - - /** - * Tests the OM Initialization. - * @throws IOException, AuthenticationException - */ - @Test - public void testOmInitAgain() throws IOException, - AuthenticationException { - // Stop the Ozone Manager - cluster.getOzoneManager().stop(); - // Now try to init the OM again. It should succeed - Assert.assertTrue(OzoneManager.omInit(conf)); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java deleted file mode 100644 index e079974ada8..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ /dev/null @@ -1,443 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.test.MetricsAsserts; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test for OM metrics. - */ -@SuppressWarnings("deprecation") -public class TestOmMetrics { - private MiniOzoneCluster cluster; - private OzoneManager ozoneManager; - - /** - * The exception used for testing failure metrics. - */ - private IOException exception = new IOException(); - - /** - * Create a MiniDFSCluster for testing. - * - * @throws IOException - */ - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL, - 1000, TimeUnit.MILLISECONDS); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - ozoneManager = cluster.getOzoneManager(); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - - - @Test - public void testVolumeOps() throws IOException { - VolumeManager volumeManager = - (VolumeManager) HddsWhiteboxTestUtils.getInternalState( - ozoneManager, "volumeManager"); - VolumeManager mockVm = Mockito.spy(volumeManager); - - Mockito.doNothing().when(mockVm).createVolume(null); - Mockito.doNothing().when(mockVm).deleteVolume(null); - Mockito.doReturn(null).when(mockVm).getVolumeInfo(null); - Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null); - Mockito.doNothing().when(mockVm).setOwner(null, null); - Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0); - - HddsWhiteboxTestUtils.setInternalState( - ozoneManager, "volumeManager", mockVm); - doVolumeOps(); - - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 6L, omMetrics); - assertCounter("NumVolumeCreates", 1L, omMetrics); - assertCounter("NumVolumeUpdates", 1L, omMetrics); - assertCounter("NumVolumeInfos", 1L, omMetrics); - assertCounter("NumVolumeCheckAccesses", 1L, omMetrics); - assertCounter("NumVolumeDeletes", 1L, omMetrics); - assertCounter("NumVolumeLists", 1L, omMetrics); - assertCounter("NumVolumes", 0L, omMetrics); - - ozoneManager.createVolume(null); - ozoneManager.createVolume(null); - ozoneManager.createVolume(null); - ozoneManager.deleteVolume(null); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumes", 2L, omMetrics); - - - // inject exception to test for Failure Metrics - Mockito.doThrow(exception).when(mockVm).createVolume(null); - Mockito.doThrow(exception).when(mockVm).deleteVolume(null); - Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null); - Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null); - Mockito.doThrow(exception).when(mockVm).setOwner(null, null); - Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0); - - HddsWhiteboxTestUtils.setInternalState(ozoneManager, - "volumeManager", mockVm); - doVolumeOps(); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 16L, omMetrics); - assertCounter("NumVolumeCreates", 5L, omMetrics); - assertCounter("NumVolumeUpdates", 2L, omMetrics); - assertCounter("NumVolumeInfos", 2L, omMetrics); - assertCounter("NumVolumeCheckAccesses", 2L, omMetrics); - assertCounter("NumVolumeDeletes", 3L, omMetrics); - assertCounter("NumVolumeLists", 2L, omMetrics); - - assertCounter("NumVolumeCreateFails", 1L, omMetrics); - assertCounter("NumVolumeUpdateFails", 1L, omMetrics); - assertCounter("NumVolumeInfoFails", 1L, omMetrics); - assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics); - assertCounter("NumVolumeDeleteFails", 1L, omMetrics); - assertCounter("NumVolumeListFails", 1L, omMetrics); - - // As last call for volumesOps does not increment numVolumes as those are - // failed. - assertCounter("NumVolumes", 2L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumVolumes", 2L, omMetrics); - - - } - - @Test - @Ignore("Test failing because of table cache. 
Revisit later.") - public void testBucketOps() throws IOException { - BucketManager bucketManager = - (BucketManager) HddsWhiteboxTestUtils.getInternalState( - ozoneManager, "bucketManager"); - BucketManager mockBm = Mockito.spy(bucketManager); - - S3BucketManager s3BucketManager = - (S3BucketManager) HddsWhiteboxTestUtils.getInternalState( - ozoneManager, "s3BucketManager"); - S3BucketManager mockS3Bm = Mockito.spy(s3BucketManager); - - Mockito.doNothing().when(mockS3Bm).createS3Bucket("random", "random"); - Mockito.doNothing().when(mockS3Bm).deleteS3Bucket("random"); - Mockito.doReturn(true).when(mockS3Bm).createOzoneVolumeIfNeeded(null); - - Mockito.doNothing().when(mockBm).createBucket(null); - Mockito.doNothing().when(mockBm).createBucket(null); - Mockito.doNothing().when(mockBm).deleteBucket(null, null); - Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null); - Mockito.doNothing().when(mockBm).setBucketProperty(null); - Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0); - - HddsWhiteboxTestUtils.setInternalState( - ozoneManager, "bucketManager", mockBm); - doBucketOps(); - - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 5L, omMetrics); - assertCounter("NumBucketCreates", 1L, omMetrics); - assertCounter("NumBucketUpdates", 1L, omMetrics); - assertCounter("NumBucketInfos", 1L, omMetrics); - assertCounter("NumBucketDeletes", 1L, omMetrics); - assertCounter("NumBucketLists", 1L, omMetrics); - assertCounter("NumBuckets", 0L, omMetrics); - - ozoneManager.createBucket(null); - ozoneManager.createBucket(null); - ozoneManager.createBucket(null); - ozoneManager.deleteBucket(null, null); - - //Taking already existing value, as the same metrics is used over all the - // test cases. - long numVolumesOps = MetricsAsserts.getLongCounter("NumVolumeOps", - omMetrics); - long numVolumes = MetricsAsserts.getLongCounter("NumVolumes", - omMetrics); - long numVolumeCreates = MetricsAsserts.getLongCounter("NumVolumeCreates", - omMetrics); - - ozoneManager.createS3Bucket("random", "random"); - ozoneManager.createS3Bucket("random1", "random1"); - ozoneManager.createS3Bucket("random2", "random2"); - ozoneManager.deleteS3Bucket("random"); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBuckets", 4L, omMetrics); - - assertCounter("NumVolumeOps", numVolumesOps + 3, omMetrics); - assertCounter("NumVolumeCreates", numVolumeCreates + 3, omMetrics); - assertCounter("NumVolumes", numVolumes + 3, omMetrics); - - - - // inject exception to test for Failure Metrics - Mockito.doThrow(exception).when(mockBm).createBucket(null); - Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null); - Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null); - Mockito.doThrow(exception).when(mockBm).setBucketProperty(null); - Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0); - - HddsWhiteboxTestUtils.setInternalState( - ozoneManager, "bucketManager", mockBm); - doBucketOps(); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 18L, omMetrics); - assertCounter("NumBucketCreates", 8L, omMetrics); - assertCounter("NumBucketUpdates", 2L, omMetrics); - assertCounter("NumBucketInfos", 2L, omMetrics); - assertCounter("NumBucketDeletes", 4L, omMetrics); - assertCounter("NumBucketLists", 2L, omMetrics); - - assertCounter("NumBucketCreateFails", 1L, omMetrics); - assertCounter("NumBucketUpdateFails", 1L, omMetrics); - assertCounter("NumBucketInfoFails", 1L, omMetrics); - 
assertCounter("NumBucketDeleteFails", 1L, omMetrics); - assertCounter("NumBucketListFails", 1L, omMetrics); - - assertCounter("NumBuckets", 4L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumBuckets", 4L, omMetrics); - } - - @Test - public void testKeyOps() throws IOException { - KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils - .getInternalState(ozoneManager, "keyManager"); - KeyManager mockKm = Mockito.spy(keyManager); - - Mockito.doReturn(null).when(mockKm).openKey(null); - Mockito.doNothing().when(mockKm).deleteKey(null); - Mockito.doReturn(null).when(mockKm).lookupKey(null, ""); - Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0); - Mockito.doNothing().when(mockKm).commitKey(any(OmKeyArgs.class), anyLong()); - Mockito.doReturn(null).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); - - HddsWhiteboxTestUtils.setInternalState( - ozoneManager, "keyManager", mockKm); - doKeyOps(); - - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 6L, omMetrics); - assertCounter("NumKeyAllocate", 1L, omMetrics); - assertCounter("NumKeyLookup", 1L, omMetrics); - assertCounter("NumKeyDeletes", 1L, omMetrics); - assertCounter("NumKeyLists", 1L, omMetrics); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 1L, omMetrics); - - - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.openKey(null); - ozoneManager.commitKey(createKeyArgs(), 0); - ozoneManager.deleteKey(null); - - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 2L, omMetrics); - - // inject exception to test for Failure Metrics - Mockito.doThrow(exception).when(mockKm).openKey(null); - Mockito.doThrow(exception).when(mockKm).deleteKey(null); - Mockito.doThrow(exception).when(mockKm).lookupKey(null, ""); - Mockito.doThrow(exception).when(mockKm).listKeys( - null, null, null, null, 0); - Mockito.doThrow(exception).when(mockKm).commitKey(any(OmKeyArgs.class), - anyLong()); - Mockito.doThrow(exception).when(mockKm).initiateMultipartUpload( - any(OmKeyArgs.class)); - - HddsWhiteboxTestUtils.setInternalState( - ozoneManager, "keyManager", mockKm); - doKeyOps(); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 19L, omMetrics); - assertCounter("NumKeyAllocate", 5L, omMetrics); - assertCounter("NumKeyLookup", 2L, omMetrics); - assertCounter("NumKeyDeletes", 3L, omMetrics); - assertCounter("NumKeyLists", 2L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 2L, omMetrics); - - assertCounter("NumKeyAllocateFails", 1L, omMetrics); - assertCounter("NumKeyLookupFails", 1L, omMetrics); - assertCounter("NumKeyDeleteFails", 1L, omMetrics); - assertCounter("NumKeyListFails", 1L, omMetrics); - assertCounter("NumInitiateMultipartUploadFails", 1L, omMetrics); - - - assertCounter("NumKeys", 2L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumKeys", 2L, omMetrics); - - } - - /** - * Test volume operations with ignoring thrown exception. 
- */ - private void doVolumeOps() { - try { - ozoneManager.createVolume(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.deleteVolume(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.getVolumeInfo(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.checkVolumeAccess(null, null); - } catch (IOException ignored) { - } - - try { - ozoneManager.setOwner(null, null); - } catch (IOException ignored) { - } - - try { - ozoneManager.listAllVolumes(null, null, 0); - } catch (IOException ignored) { - } - } - - /** - * Test bucket operations with ignoring thrown exception. - */ - private void doBucketOps() { - try { - ozoneManager.createBucket(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.deleteBucket(null, null); - } catch (IOException ignored) { - } - - try { - ozoneManager.getBucketInfo(null, null); - } catch (IOException ignored) { - } - - try { - ozoneManager.setBucketProperty(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.listBuckets(null, null, null, 0); - } catch (IOException ignored) { - } - } - - /** - * Test key operations with ignoring thrown exception. - */ - private void doKeyOps() { - try { - ozoneManager.openKey(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.deleteKey(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.lookupKey(null); - } catch (IOException ignored) { - } - - try { - ozoneManager.listKeys(null, null, null, null, 0); - } catch (IOException ignored) { - } - - try { - ozoneManager.commitKey(createKeyArgs(), 0); - } catch (IOException ignored) { - } - - try { - ozoneManager.initiateMultipartUpload(null); - } catch (IOException ignored) { - } - - } - - private OmKeyArgs createKeyArgs() { - OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(new ContainerBlockID(1, 1))).build(); - keyLocationInfo.setCreateVersion(0); - List omKeyLocationInfoList = new ArrayList<>(); - omKeyLocationInfoList.add(keyLocationInfo); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setLocationInfoList( - omKeyLocationInfoList).build(); - return keyArgs; - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java deleted file mode 100644 index 2716d51f07e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ /dev/null @@ -1,346 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneIllegalArgumentException; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.util.LifeCycle; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collection; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -/** - * Tests OM related configurations. - */ -public class TestOzoneManagerConfiguration { - - private OzoneConfiguration conf; - private MiniOzoneCluster cluster; - private String omId; - private String clusterId; - private String scmId; - private OzoneManager om; - private OzoneManagerRatisServer omRatisServer; - - private static final long LEADER_ELECTION_TIMEOUT = 500L; - - @Before - public void init() throws IOException { - conf = new OzoneConfiguration(); - omId = UUID.randomUUID().toString(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omId); - Path metaDirPath = Paths.get(path, "om-meta"); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - - OMStorage omStore = new OMStorage(conf); - omStore.setClusterId("testClusterId"); - omStore.setScmId("testScmId"); - // writes the version file properties - omStore.initialize(); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - private void startCluster() throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Test that if no OM address is specified, then the OM rpc server - * is started on localhost. - */ - @Test - public void testNoConfiguredOMAddress() throws Exception { - startCluster(); - om = cluster.getOzoneManager(); - - Assert.assertTrue(NetUtils.isLocalAddress( - om.getOmRpcServerAddr().getAddress())); - } - - /** - * Test that if only the hostname is specified for om address, then the - * default port is used. 
- */ - @Test - public void testDefaultPortIfNotSpecified() throws Exception { - - String omNode1Id = "omNode1"; - String omNode2Id = "omNode2"; - String omNodesKeyValue = omNode1Id + "," + omNode2Id; - String serviceID = "service1"; - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID); - conf.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID, - omNodesKeyValue); - - String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode1Id); - String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(serviceID, omNode2Id); - - conf.set(omNode1RpcAddrKey, "0.0.0.0"); - conf.set(omNode2RpcAddrKey, "122.0.0.122"); - - // Set omNode1 as the current node. omNode1 address does not have a port - // number specified. So the default port should be taken. - conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, omNode1Id); - - startCluster(); - om = cluster.getOzoneManager(); - Assert.assertEquals("0.0.0.0", - om.getOmRpcServerAddr().getHostName()); - Assert.assertEquals(OMConfigKeys.OZONE_OM_PORT_DEFAULT, - om.getOmRpcServerAddr().getPort()); - - // Verify that the 2nd OMs address stored in the current OM also has the - // default port as the port is not specified - InetSocketAddress omNode2Addr = om.getPeerNodes().get(0).getRpcAddress(); - Assert.assertEquals("122.0.0.122", omNode2Addr.getHostString()); - Assert.assertEquals(OMConfigKeys.OZONE_OM_PORT_DEFAULT, - omNode2Addr.getPort()); - - } - - /** - * Test a single node OM service (default setting for MiniOzoneCluster). - * @throws Exception - */ - @Test - public void testSingleNodeOMservice() throws Exception { - // Default settings of MiniOzoneCluster start a sinle node OM service. - startCluster(); - om = cluster.getOzoneManager(); - omRatisServer = om.getOmRatisServer(); - - Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState()); - // OM's Ratis server should have only 1 peer (itself) in its RaftGroup - Collection peers = omRatisServer.getRaftGroup().getPeers(); - Assert.assertEquals(1, peers.size()); - - // The RaftPeer id should match the configured omId - RaftPeer raftPeer = peers.toArray(new RaftPeer[1])[0]; - Assert.assertEquals(omId, raftPeer.getId().toString()); - } - - /** - * Test configurating an OM service with three OM nodes. - * @throws Exception - */ - @Test - public void testThreeNodeOMservice() throws Exception { - // Set the configuration for 3 node OM service. Set one node's rpc - // address to localhost. 
OM will parse all configurations and find the - // nodeId representing the localhost - - final String omServiceId = "om-service-test1"; - final String omNode1Id = "omNode1"; - final String omNode2Id = "omNode2"; - final String omNode3Id = "omNode3"; - - String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id; - String omNodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); - - String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id); - String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id); - String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id); - - String omNode3RatisPortKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNode3Id); - - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId); - conf.set(omNodesKey, omNodesKeyValue); - - // Set node2 to localhost and the other two nodes to dummy addresses - conf.set(omNode1RpcAddrKey, "123.0.0.123:9862"); - conf.set(omNode2RpcAddrKey, "0.0.0.0:9862"); - conf.set(omNode3RpcAddrKey, "124.0.0.124:9862"); - - conf.setInt(omNode3RatisPortKey, 9898); - - startCluster(); - om = cluster.getOzoneManager(); - omRatisServer = om.getOmRatisServer(); - - Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState()); - - // OM's Ratis server should have 3 peers in its RaftGroup - Collection peers = omRatisServer.getRaftGroup().getPeers(); - Assert.assertEquals(3, peers.size()); - - // Ratis server RaftPeerId should match with omNode2 ID as node2 is the - // localhost - Assert.assertEquals(omNode2Id, omRatisServer.getRaftPeerId().toString()); - - // Verify peer details - for (RaftPeer peer : peers) { - String expectedPeerAddress = null; - switch (peer.getId().toString()) { - case omNode1Id : - // Ratis port is not set for node1. So it should take the default port - expectedPeerAddress = "123.0.0.123:" + - OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; - break; - case omNode2Id : - expectedPeerAddress = "0.0.0.0:"+ - OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; - break; - case omNode3Id : - // Ratis port is not set for node3. So it should take the default port - expectedPeerAddress = "124.0.0.124:9898"; - break; - default : Assert.fail("Unrecognized RaftPeerId"); - } - Assert.assertEquals(expectedPeerAddress, peer.getAddress()); - } - } - - /** - * Test a wrong configuration for OM HA. A configuration with none of the - * OM addresses matching the local address should throw an error. - * @throws Exception - */ - @Test - public void testWrongConfiguration() throws Exception { - String omServiceId = "om-service-test1"; - - String omNode1Id = "omNode1"; - String omNode2Id = "omNode2"; - String omNode3Id = "omNode3"; - String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id; - String omNodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); - - String omNode1RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode1Id); - String omNode2RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode2Id); - String omNode3RpcAddrKey = getOMAddrKeyWithSuffix(omServiceId, omNode3Id); - - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId); - conf.set(omNodesKey, omNodesKeyValue); - - // Set node2 to localhost and the other two nodes to dummy addresses - conf.set(omNode1RpcAddrKey, "123.0.0.123:9862"); - conf.set(omNode2RpcAddrKey, "125.0.0.2:9862"); - conf.set(omNode3RpcAddrKey, "124.0.0.124:9862"); - - try { - startCluster(); - Assert.fail("Wrong Configuration. 
OM initialization should have failed."); - } catch (OzoneIllegalArgumentException e) { - GenericTestUtils.assertExceptionContains("Configuration has no " + - OMConfigKeys.OZONE_OM_ADDRESS_KEY + " address that matches local " + - "node's address.", e); - } - } - - /** - * Test multiple OM service configuration. - */ - @Test - public void testMultipleOMServiceIds() throws Exception { - // Set up OZONE_OM_SERVICES_KEY with 2 service Ids. - String om1ServiceId = "om-service-test1"; - String om2ServiceId = "om-service-test2"; - String omServices = om1ServiceId + "," + om2ServiceId; - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServices); - - String omNode1Id = "omNode1"; - String omNode2Id = "omNode2"; - String omNode3Id = "omNode3"; - String omNodesKeyValue = omNode1Id + "," + omNode2Id + "," + omNode3Id; - - // Set the node Ids for the 2 services. The nodeIds need to be - // distinch within one service. The ids can overlap between - // different services. - String om1NodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, om1ServiceId); - String om2NodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, om2ServiceId); - conf.set(om1NodesKey, omNodesKeyValue); - conf.set(om2NodesKey, omNodesKeyValue); - - // Set the RPC addresses for all 6 OMs (3 for each service). Only one - // node out of these must have the localhost address. - conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode1Id), - "122.0.0.123:9862"); - conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode2Id), - "123.0.0.124:9862"); - conf.set(getOMAddrKeyWithSuffix(om1ServiceId, omNode3Id), - "124.0.0.125:9862"); - conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode1Id), - "125.0.0.126:9862"); - conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode2Id), - "0.0.0.0:9862"); - conf.set(getOMAddrKeyWithSuffix(om2ServiceId, omNode3Id), - "126.0.0.127:9862"); - - startCluster(); - om = cluster.getOzoneManager(); - omRatisServer = om.getOmRatisServer(); - - Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState()); - - // OM's Ratis server should have 3 peers in its RaftGroup - Collection peers = omRatisServer.getRaftGroup().getPeers(); - Assert.assertEquals(3, peers.size()); - - // Verify that the serviceId and nodeId match the node with the localhost - // address - om-service-test2 and omNode2 - Assert.assertEquals(om2ServiceId, om.getOMServiceId()); - Assert.assertEquals(omNode2Id, omRatisServer.getRaftPeerId().toString()); - } - - private String getOMAddrKeyWithSuffix(String serviceId, String nodeId) { - return OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceId, nodeId); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java deleted file mode 100644 index 62658dc769d..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ /dev/null @@ -1,1248 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.BitSet; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; -import org.apache.log4j.Logger; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdfs.LogVerificationAppender; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.OzoneTestUtils; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.ha.OMProxyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.util.Time; - - -import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl - .NODE_FAILURE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; -import static org.junit.Assert.fail; - -/** - * Test Ozone Manager operation in distributed handler scenario. - */ -public class TestOzoneManagerHA { - - private MiniOzoneHAClusterImpl cluster = null; - private ObjectStore objectStore; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omServiceId; - private int numOfOMs = 3; - private static final long SNAPSHOT_THRESHOLD = 50; - private static final int LOG_PURGE_GAP = 50; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Rule - public Timeout timeout = new Timeout(300_000); - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omServiceId = "om-service-test1"; - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, - OZONE_ADMINISTRATORS_WILDCARD); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - conf.setInt(OZONE_CLIENT_RETRY_MAX_ATTEMPTS_KEY, 10); - conf.setInt(OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 10); - conf.setLong( - OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, - SNAPSHOT_THRESHOLD); - conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, LOG_PURGE_GAP); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) - .getObjectStore(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - - private OzoneVolume createAndCheckVolume(String volumeName) - throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - - OzoneVolume retVolume = objectStore.getVolume(volumeName); - - Assert.assertTrue(retVolume.getName().equals(volumeName)); - Assert.assertTrue(retVolume.getOwner().equals(userName)); - Assert.assertTrue(retVolume.getAdmin().equals(adminName)); - - return retVolume; - } - @Test - public void testAllVolumeOperations() throws Exception { - - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - createAndCheckVolume(volumeName); - - objectStore.deleteVolume(volumeName); - - OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND, - () -> objectStore.getVolume(volumeName)); - - OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND, - () -> objectStore.deleteVolume(volumeName)); - } - - - @Test - public void testAllBucketOperations() throws Exception { - - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "volume" + RandomStringUtils.randomNumeric(5); - - OzoneVolume retVolume = createAndCheckVolume(volumeName); - - BucketArgs bucketArgs = - BucketArgs.newBuilder().setStorageType(StorageType.DISK) - .setVersioning(true).build(); - - - retVolume.createBucket(bucketName, bucketArgs); - - - OzoneBucket ozoneBucket = retVolume.getBucket(bucketName); - - Assert.assertEquals(volumeName, ozoneBucket.getVolumeName()); - Assert.assertEquals(bucketName, ozoneBucket.getName()); - Assert.assertTrue(ozoneBucket.getVersioning()); - Assert.assertEquals(StorageType.DISK, ozoneBucket.getStorageType()); - Assert.assertTrue(ozoneBucket.getCreationTime() <= Time.now()); - - - // Change versioning to false - ozoneBucket.setVersioning(false); - - ozoneBucket = retVolume.getBucket(bucketName); - Assert.assertFalse(ozoneBucket.getVersioning()); - - retVolume.deleteBucket(bucketName); - - OzoneTestUtils.expectOmException(OMException.ResultCodes.BUCKET_NOT_FOUND, - () -> retVolume.deleteBucket(bucketName)); - - - - } - - /** - * 
Test a client request when all OM nodes are running. The request should - * succeed. - * @throws Exception - */ - @Test - public void testAllOMNodesRunning() throws Exception { - createVolumeTest(true); - createKeyTest(true); - } - - /** - * Test client request succeeds even if one OM is down. - */ - @Test - public void testOneOMNodeDown() throws Exception { - cluster.stopOzoneManager(1); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); - - createVolumeTest(true); - - createKeyTest(true); - } - - /** - * Test client request fails when 2 OMs are down. - */ - @Test - public void testTwoOMNodesDown() throws Exception { - cluster.stopOzoneManager(1); - cluster.stopOzoneManager(2); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); - - createVolumeTest(false); - - createKeyTest(false); - - } - - private OzoneBucket setupBucket() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - Assert.assertTrue(retVolumeinfo.getName().equals(volumeName)); - Assert.assertTrue(retVolumeinfo.getOwner().equals(userName)); - Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName)); - - String bucketName = UUID.randomUUID().toString(); - retVolumeinfo.createBucket(bucketName); - - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - Assert.assertTrue(ozoneBucket.getName().equals(bucketName)); - Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName)); - - return ozoneBucket; - } - - @Test - public void testMultipartUpload() throws Exception { - - // Happy scenario when all OM's are up. - OzoneBucket ozoneBucket = setupBucket(); - - String keyName = UUID.randomUUID().toString(); - String uploadID = initiateMultipartUpload(ozoneBucket, keyName); - - createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID); - - } - - - @Test - public void testFileOperationsWithRecursive() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - - String data = "random data"; - - // one level key name - String keyName = UUID.randomUUID().toString(); - testCreateFile(ozoneBucket, keyName, data, true, false); - - // multi level key name - keyName = "dir1/dir2/dir3/file1"; - testCreateFile(ozoneBucket, keyName, data, true, false); - - - data = "random data random data"; - - // multi level key name with over write set. - testCreateFile(ozoneBucket, keyName, data, true, true); - - - try { - testCreateFile(ozoneBucket, keyName, data, true, false); - fail("testFileOperationsWithRecursive"); - } catch (OMException ex) { - Assert.assertEquals(FILE_ALREADY_EXISTS, ex.getResult()); - } - - // Try now with a file name which is same as a directory. 
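  /*
   * Why testOneOMNodeDown (earlier in this class) is expected to succeed while
   * testTwoOMNodesDown must fail: the OM Ratis ring only accepts writes while a
   * strict majority of managers is reachable. A minimal standalone sketch of that
   * arithmetic, illustrative only and not part of the deleted test code:
   */
  static boolean hasWriteQuorum(int totalOms, int stoppedOms) {
    int running = totalOms - stoppedOms;
    int majority = totalOms / 2 + 1;   // 3 OMs -> 2, 5 OMs -> 3
    return running >= majority;        // a 3-OM ring survives 1 failure, not 2
  }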
- try { - keyName = "folder/folder2"; - ozoneBucket.createDirectory(keyName); - testCreateFile(ozoneBucket, keyName, data, true, false); - fail("testFileOperationsWithNonRecursive"); - } catch (OMException ex) { - Assert.assertEquals(NOT_A_FILE, ex.getResult()); - } - - } - - - @Test - public void testFileOperationsWithNonRecursive() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - - String data = "random data"; - - // one level key name - String keyName = UUID.randomUUID().toString(); - testCreateFile(ozoneBucket, keyName, data, false, false); - - // multi level key name - keyName = "dir1/dir2/dir3/file1"; - - // Should fail, as this is non-recursive and no parent directories exist - try { - testCreateFile(ozoneBucket, keyName, data, false, false); - } catch (OMException ex) { - Assert.assertEquals(NOT_A_FILE, ex.getResult()); - } - - // create directory, now this should pass. - ozoneBucket.createDirectory("dir1/dir2/dir3"); - testCreateFile(ozoneBucket, keyName, data, false, false); - data = "random data random data"; - - // multi level key name with over write set. - testCreateFile(ozoneBucket, keyName, data, false, true); - - try { - testCreateFile(ozoneBucket, keyName, data, false, false); - fail("testFileOperationsWithRecursive"); - } catch (OMException ex) { - Assert.assertEquals(FILE_ALREADY_EXISTS, ex.getResult()); - } - - - // Try now with a file which already exists under the path - ozoneBucket.createDirectory("folder1/folder2/folder3/folder4"); - - keyName = "folder1/folder2/folder3/folder4/file1"; - testCreateFile(ozoneBucket, keyName, data, false, false); - - keyName = "folder1/folder2/folder3/file1"; - testCreateFile(ozoneBucket, keyName, data, false, false); - - // Try now with a file under path already. This should fail. - try { - keyName = "folder/folder2"; - ozoneBucket.createDirectory(keyName); - testCreateFile(ozoneBucket, keyName, data, false, false); - fail("testFileOperationsWithNonRecursive"); - } catch (OMException ex) { - Assert.assertEquals(NOT_A_FILE, ex.getResult()); - } - - } - - /** - * This method createFile and verifies the file is successfully created or - * not. 
- * @param ozoneBucket - * @param keyName - * @param data - * @param recursive - * @param overwrite - * @throws Exception - */ - public void testCreateFile(OzoneBucket ozoneBucket, String keyName, - String data, boolean recursive, boolean overwrite) - throws Exception { - - OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName, - data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, - overwrite, recursive); - - ozoneOutputStream.write(data.getBytes(), 0, data.length()); - ozoneOutputStream.close(); - - OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName); - - Assert.assertEquals(keyName, ozoneKeyDetails.getName()); - Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName()); - Assert.assertEquals(ozoneBucket.getVolumeName(), - ozoneKeyDetails.getVolumeName()); - Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize()); - - OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - - byte[] fileContent = new byte[data.getBytes().length]; - ozoneInputStream.read(fileContent); - Assert.assertEquals(data, new String(fileContent)); - } - - @Test - public void testMultipartUploadWithOneOmNodeDown() throws Exception { - - OzoneBucket ozoneBucket = setupBucket(); - - String keyName = UUID.randomUUID().toString(); - String uploadID = initiateMultipartUpload(ozoneBucket, keyName); - - // After initiate multipartupload, shutdown leader OM. - // Stop leader OM, to see when the OM leader changes - // multipart upload is happening successfully or not. - - OMFailoverProxyProvider omFailoverProxyProvider = - objectStore.getClientProxy().getOMProxyProvider(); - - // The OMFailoverProxyProvider will point to the current leader OM node. - String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - // Stop one of the ozone manager, to see when the OM leader changes - // multipart upload is happening successfully or not. 
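  /*
   * The three-step multipart flow this test drives across a leader failover,
   * pulled out as a sketch. Method names and argument order follow the deleted
   * initiateMultipartUpload/createMultipartKeyAndReadKey helpers below; treat the
   * exact signatures as indicative, and note the generic types on the parts map
   * are an assumption here.
   */
  private String uploadSinglePartKey(OzoneBucket bucket, String keyName,
      byte[] data) throws Exception {
    // 1. initiate: OM returns an upload id for this key
    String uploadID = bucket.initiateMultipartUpload(keyName,
        ReplicationType.RATIS, ReplicationFactor.ONE).getUploadID();

    // 2. write part 1 and remember the part name assigned on commit
    OzoneOutputStream part =
        bucket.createMultipartKey(keyName, data.length, 1, uploadID);
    part.write(data, 0, data.length);
    part.close();
    String partName = part.getCommitUploadPartInfo().getPartName();

    // 3. complete: hand OM the map of part number -> part name
    java.util.Map<Integer, String> partsMap = new java.util.HashMap<>();
    partsMap.put(1, partName);
    bucket.completeMultipartUpload(keyName, uploadID, partsMap);
    return uploadID;
  }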
- cluster.stopOzoneManager(leaderOMNodeId); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); - - createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID); - - String newLeaderOMNodeId = - omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - Assert.assertTrue(leaderOMNodeId != newLeaderOMNodeId); - } - - - private String initiateMultipartUpload(OzoneBucket ozoneBucket, - String keyName) throws Exception { - - OmMultipartInfo omMultipartInfo = - ozoneBucket.initiateMultipartUpload(keyName, - ReplicationType.RATIS, - ReplicationFactor.ONE); - - String uploadID = omMultipartInfo.getUploadID(); - Assert.assertTrue(uploadID != null); - return uploadID; - } - - private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, - String keyName, String uploadID) throws Exception { - - String value = "random data"; - OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( - keyName, value.length(), 1, uploadID); - ozoneOutputStream.write(value.getBytes(), 0, value.length()); - ozoneOutputStream.close(); - - - Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = - ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); - - Assert.assertTrue(omMultipartUploadCompleteInfo != null); - Assert.assertTrue(omMultipartUploadCompleteInfo.getHash() != null); - - - OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - - byte[] fileContent = new byte[value.getBytes().length]; - ozoneInputStream.read(fileContent); - Assert.assertEquals(value, new String(fileContent)); - } - - - private void createKeyTest(boolean checkSuccess) throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - try { - objectStore.createVolume(volumeName, createVolumeArgs); - - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - Assert.assertTrue(retVolumeinfo.getName().equals(volumeName)); - Assert.assertTrue(retVolumeinfo.getOwner().equals(userName)); - Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName)); - - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - retVolumeinfo.createBucket(bucketName); - - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - Assert.assertTrue(ozoneBucket.getName().equals(bucketName)); - Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName)); - - String value = "random data"; - OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - value.length(), ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(value.getBytes(), 0, value.length()); - ozoneOutputStream.close(); - - OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - - byte[] fileContent = new byte[value.getBytes().length]; - ozoneInputStream.read(fileContent); - Assert.assertEquals(value, new String(fileContent)); - - } catch (ConnectException | RemoteException e) { - if (!checkSuccess) { - // If the last OM to be tried by the RetryProxy is down, we would get - // ConnectException. Otherwise, we would get a RemoteException from the - // last running OM as it would fail to get a quorum. 
- if (e instanceof RemoteException) { - GenericTestUtils.assertExceptionContains( - "NotLeaderException", e); - } - } else { - throw e; - } - } - } - - /** - * Create a volume and test its attribute. - */ - private void createVolumeTest(boolean checkSuccess) throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - try { - objectStore.createVolume(volumeName, createVolumeArgs); - - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - if (checkSuccess) { - Assert.assertTrue(retVolumeinfo.getName().equals(volumeName)); - Assert.assertTrue(retVolumeinfo.getOwner().equals(userName)); - Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName)); - } else { - // Verify that the request failed - fail("There is no quorum. Request should have failed"); - } - } catch (ConnectException | RemoteException e) { - if (!checkSuccess) { - // If the last OM to be tried by the RetryProxy is down, we would get - // ConnectException. Otherwise, we would get a RemoteException from the - // last running OM as it would fail to get a quorum. - if (e instanceof RemoteException) { - GenericTestUtils.assertExceptionContains( - "NotLeaderException", e); - } - } else { - throw e; - } - } - } - - /** - * Test that OMFailoverProxyProvider creates an OM proxy for each OM in the - * cluster. - */ - @Test - public void testOMProxyProviderInitialization() throws Exception { - OzoneClient rpcClient = cluster.getRpcClient(); - OMFailoverProxyProvider omFailoverProxyProvider = - rpcClient.getObjectStore().getClientProxy().getOMProxyProvider(); - List omProxies = - omFailoverProxyProvider.getOMProxyInfos(); - - Assert.assertEquals(numOfOMs, omProxies.size()); - - for (int i = 0; i < numOfOMs; i++) { - InetSocketAddress omRpcServerAddr = - cluster.getOzoneManager(i).getOmRpcServerAddr(); - boolean omClientProxyExists = false; - for (OMProxyInfo omProxyInfo : omProxies) { - if (omProxyInfo.getAddress().equals(omRpcServerAddr)) { - omClientProxyExists = true; - break; - } - } - Assert.assertTrue("There is no OM Client Proxy corresponding to OM " + - "node" + cluster.getOzoneManager(i).getOMNodeId(), - omClientProxyExists); - } - } - - /** - * Test OMFailoverProxyProvider failover on connection exception to OM client. - */ - @Test - public void testOMProxyProviderFailoverOnConnectionFailure() - throws Exception { - OMFailoverProxyProvider omFailoverProxyProvider = - objectStore.getClientProxy().getOMProxyProvider(); - String firstProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - createVolumeTest(true); - - // On stopping the current OM Proxy, the next connection attempt should - // failover to a another OM proxy. - cluster.stopOzoneManager(firstProxyNodeId); - Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT * 4); - - // Next request to the proxy provider should result in a failover - createVolumeTest(true); - Thread.sleep(OZONE_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT); - - // Get the new OM Proxy NodeId - String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - // Verify that a failover occured. the new proxy nodeId should be - // different from the old proxy nodeId. 
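  /*
   * The proxy-rotation behaviour the surrounding assertions rely on, reduced to a
   * standalone sketch: keep an ordered list of OM node ids and advance to the next
   * one when the current proxy stops answering, so the "current" node id changes
   * after a failover. Names below are illustrative only; the real
   * OMFailoverProxyProvider additionally handles retries and leader hints.
   */
  static final class FailoverSketch {
    private final java.util.List<String> omNodeIds;
    private int current;

    FailoverSketch(java.util.List<String> omNodeIds) {
      this.omNodeIds = omNodeIds;
    }

    String getCurrentProxyOMNodeId() {
      return omNodeIds.get(current);
    }

    void performFailover() {
      // rotate to the next configured OM node
      current = (current + 1) % omNodeIds.size();
    }
  }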
- Assert.assertNotEquals("Failover did not occur as expected", - firstProxyNodeId, newProxyNodeId); - } - - /** - * Test OMFailoverProxyProvider failover when current OM proxy is not - * the current OM Leader. - */ - @Test - public void testOMProxyProviderFailoverToCurrentLeader() throws Exception { - OMFailoverProxyProvider omFailoverProxyProvider = - objectStore.getClientProxy().getOMProxyProvider(); - - // Run couple of createVolume tests to discover the current Leader OM - createVolumeTest(true); - createVolumeTest(true); - - // The OMFailoverProxyProvider will point to the current leader OM node. - String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - // Perform a manual failover of the proxy provider to move the - // currentProxyIndex to a node other than the leader OM. - omFailoverProxyProvider.performFailover( - (OzoneManagerProtocolPB) omFailoverProxyProvider.getProxy().proxy); - - String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); - Assert.assertNotEquals(leaderOMNodeId, newProxyNodeId); - - // Once another request is sent to this new proxy node, the leader - // information must be returned via the response and a failover must - // happen to the leader proxy node. - createVolumeTest(true); - Thread.sleep(2000); - - String newLeaderOMNodeId = - omFailoverProxyProvider.getCurrentProxyOMNodeId(); - - // The old and new Leader OM NodeId must match since there was no new - // election in the Ratis ring. - Assert.assertEquals(leaderOMNodeId, newLeaderOMNodeId); - } - - @Test - public void testOMRetryProxy() throws Exception { - // Stop all the OMs. After making 5 (set maxRetries value) attempts at - // connection, the RpcClient should give up. - for (int i = 0; i < numOfOMs; i++) { - cluster.stopOzoneManager(i); - } - - final LogVerificationAppender appender = new LogVerificationAppender(); - final org.apache.log4j.Logger logger = Logger.getRootLogger(); - logger.addAppender(appender); - - try { - createVolumeTest(true); - fail("TestOMRetryProxy should fail when there are no OMs running"); - } catch (ConnectException e) { - // Each retry attempt tries upto 10 times to connect. So there should be - // 10*10 "Retrying connect to server" messages - Assert.assertEquals(100, - appender.countLinesWithMessage("Retrying connect to server:")); - - Assert.assertEquals(1, - appender.countLinesWithMessage("Failed to connect to OM. 
Attempted " + - "10 retries and 10 failovers")); - } - } - - @Test - public void testReadRequest() throws Exception { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - objectStore.createVolume(volumeName); - - OMFailoverProxyProvider omFailoverProxyProvider = - objectStore.getClientProxy().getOMProxyProvider(); - String currentLeaderNodeId = omFailoverProxyProvider - .getCurrentProxyOMNodeId(); - - // A read request from any proxy should failover to the current leader OM - for (int i = 0; i < numOfOMs; i++) { - // Failover OMFailoverProxyProvider to OM at index i - OzoneManager ozoneManager = cluster.getOzoneManager(i); - String omHostName = ozoneManager.getOmRpcServerAddr().getHostName(); - int rpcPort = ozoneManager.getOmRpcServerAddr().getPort(); - - // Get the ObjectStore and FailoverProxyProvider for OM at index i - final ObjectStore store = OzoneClientFactory.getRpcClient( - omHostName, rpcPort, omServiceId, conf).getObjectStore(); - final OMFailoverProxyProvider proxyProvider = - store.getClientProxy().getOMProxyProvider(); - - // Failover to the OM node that the objectStore points to - omFailoverProxyProvider.performFailoverIfRequired( - ozoneManager.getOMNodeId()); - - // A read request should result in the proxyProvider failing over to - // leader node. - OzoneVolume volume = store.getVolume(volumeName); - Assert.assertEquals(volumeName, volume.getName()); - - Assert.assertEquals(currentLeaderNodeId, - proxyProvider.getCurrentProxyOMNodeId()); - } - } - - @Test - public void testAddBucketAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()).build(); - - testAddAcl(remoteUserName, ozoneObj, defaultUserAcl); - } - @Test - public void testRemoveBucketAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()).build(); - - testRemoveAcl(remoteUserName, ozoneObj, defaultUserAcl); - - } - - @Test - public void testSetBucketAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()).build(); - - testSetAcl(remoteUserName, ozoneObj, defaultUserAcl); - } - - private boolean containsAcl(OzoneAcl ozoneAcl, List ozoneAcls) { - for (OzoneAcl acl : ozoneAcls) { - boolean result = compareAcls(ozoneAcl, acl); - if (result) { - // We found a match, return. 
- return result; - } - } - return false; - } - - private boolean compareAcls(OzoneAcl givenAcl, OzoneAcl existingAcl) { - if (givenAcl.getType().equals(existingAcl.getType()) - && givenAcl.getName().equals(existingAcl.getName()) - && givenAcl.getAclScope().equals(existingAcl.getAclScope())) { - BitSet bitSet = (BitSet) givenAcl.getAclBitSet().clone(); - bitSet.and(existingAcl.getAclBitSet()); - if (bitSet.equals(existingAcl.getAclBitSet())) { - return true; - } - } - return false; - } - - @Test - public void testAddKeyAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - String key = createKey(ozoneBucket); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setKeyName(key).build(); - - testAddAcl(remoteUserName, ozoneObj, userAcl); - } - - @Test - public void testRemoveKeyAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - String key = createKey(ozoneBucket); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setKeyName(key).build(); - - testRemoveAcl(remoteUserName, ozoneObj, userAcl); - - } - - @Test - public void testSetKeyAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - String key = createKey(ozoneBucket); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setKeyName(key).build(); - - testSetAcl(remoteUserName, ozoneObj, userAcl); - - } - - @Test - public void testAddPrefixAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; - OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setPrefixName(prefixName).build(); - - testAddAcl(remoteUserName, ozoneObj, defaultUserAcl); - } - @Test - public void testRemovePrefixAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; - OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); - OzoneAcl userAcl1 = new OzoneAcl(USER, "remote", - READ, ACCESS); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setPrefixName(prefixName).build(); - - boolean result = objectStore.addAcl(ozoneObj, userAcl); - Assert.assertTrue(result); - - result = 
objectStore.addAcl(ozoneObj, userAcl1); - Assert.assertTrue(result); - - result = objectStore.removeAcl(ozoneObj, userAcl); - Assert.assertTrue(result); - - // try removing already removed acl. - result = objectStore.removeAcl(ozoneObj, userAcl); - Assert.assertFalse(result); - - result = objectStore.removeAcl(ozoneObj, userAcl1); - Assert.assertTrue(result); - - } - - @Test - public void testSetPrefixAcl() throws Exception { - OzoneBucket ozoneBucket = setupBucket(); - String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; - OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); - - OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.PREFIX) - .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName(ozoneBucket.getVolumeName()) - .setBucketName(ozoneBucket.getName()) - .setPrefixName(prefixName).build(); - - testSetAcl(remoteUserName, ozoneObj, defaultUserAcl); - } - - - private void testSetAcl(String remoteUserName, OzoneObj ozoneObj, - OzoneAcl userAcl) throws Exception { - // As by default create will add some default acls in RpcClient. - - if (!ozoneObj.getResourceType().name().equals( - OzoneObj.ResourceType.PREFIX.name())) { - List acls = objectStore.getAcl(ozoneObj); - - Assert.assertTrue(acls.size() > 0); - } - - OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); - - List newAcls = Collections.singletonList(modifiedUserAcl); - boolean setAcl = objectStore.setAcl(ozoneObj, newAcls); - Assert.assertTrue(setAcl); - - // Get acls and check whether they are reset or not. - List getAcls = objectStore.getAcl(ozoneObj); - - Assert.assertTrue(newAcls.size() == getAcls.size()); - int i = 0; - for (OzoneAcl ozoneAcl : newAcls) { - Assert.assertTrue(compareAcls(getAcls.get(i++), ozoneAcl)); - } - - } - - private void testAddAcl(String remoteUserName, OzoneObj ozoneObj, - OzoneAcl userAcl) throws Exception { - boolean addAcl = objectStore.addAcl(ozoneObj, userAcl); - Assert.assertTrue(addAcl); - - List acls = objectStore.getAcl(ozoneObj); - - Assert.assertTrue(containsAcl(userAcl, acls)); - - // Add an already existing acl. - addAcl = objectStore.addAcl(ozoneObj, userAcl); - Assert.assertFalse(addAcl); - - // Add an acl by changing acl type with same type, name and scope. - userAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); - addAcl = objectStore.addAcl(ozoneObj, userAcl); - Assert.assertTrue(addAcl); - } - - private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, - OzoneAcl userAcl) - throws Exception{ - // As by default create will add some default acls in RpcClient. - List acls = objectStore.getAcl(ozoneObj); - - Assert.assertTrue(acls.size() > 0); - - // Remove an existing acl. - boolean removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0)); - Assert.assertTrue(removeAcl); - - // Trying to remove an already removed acl. - removeAcl = objectStore.removeAcl(ozoneObj, acls.get(0)); - Assert.assertFalse(removeAcl); - - boolean addAcl = objectStore.addAcl(ozoneObj, userAcl); - Assert.assertTrue(addAcl); - - // Just changed acl type here to write, rest all is same as defaultUserAcl. 
- OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); - addAcl = objectStore.addAcl(ozoneObj, modifiedUserAcl); - Assert.assertTrue(addAcl); - - removeAcl = objectStore.removeAcl(ozoneObj, modifiedUserAcl); - Assert.assertTrue(removeAcl); - - removeAcl = objectStore.removeAcl(ozoneObj, userAcl); - Assert.assertTrue(removeAcl); - } - - - - @Test - public void testOMRatisSnapshot() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider() - .getCurrentProxyOMNodeId(); - OzoneManager ozoneManager = cluster.getOzoneManager(leaderOMNodeId); - - // Send commands to ratis to increase the log index so that ratis - // triggers a snapshot on the state machine. - - long appliedLogIndex = 0; - while (appliedLogIndex <= SNAPSHOT_THRESHOLD) { - createKey(ozoneBucket); - appliedLogIndex = ozoneManager.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - } - - GenericTestUtils.waitFor(() -> { - if (ozoneManager.getRatisSnapshotIndex() > 0) { - return true; - } - return false; - }, 1000, 100000); - - // The current lastAppliedLogIndex on the state machine should be greater - // than or equal to the saved snapshot index. - long smLastAppliedIndex = - ozoneManager.getOmRatisServer().getStateMachineLastAppliedIndex(); - long ratisSnapshotIndex = ozoneManager.getRatisSnapshotIndex(); - Assert.assertTrue("LastAppliedIndex on OM State Machine (" - + smLastAppliedIndex + ") is less than the saved snapshot index(" - + ratisSnapshotIndex + ").", - smLastAppliedIndex >= ratisSnapshotIndex); - - // Add more transactions to Ratis to trigger another snapshot - while (appliedLogIndex <= (smLastAppliedIndex + SNAPSHOT_THRESHOLD)) { - createKey(ozoneBucket); - appliedLogIndex = ozoneManager.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - } - - GenericTestUtils.waitFor(() -> { - if (ozoneManager.getRatisSnapshotIndex() > 0) { - return true; - } - return false; - }, 1000, 100000); - - // The new snapshot index must be greater than the previous snapshot index - long ratisSnapshotIndexNew = ozoneManager.getRatisSnapshotIndex(); - Assert.assertTrue("Latest snapshot index must be greater than previous " + - "snapshot indices", ratisSnapshotIndexNew > ratisSnapshotIndex); - - } - - /** - * Create a key in the bucket. - * @return the key name. 
- */ - static String createKey(OzoneBucket ozoneBucket) throws IOException { - String keyName = "key" + RandomStringUtils.randomNumeric(5); - String data = "data" + RandomStringUtils.randomNumeric(5); - OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - data.length(), ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(data.getBytes(), 0, data.length()); - ozoneOutputStream.close(); - return keyName; - } - - @Test - public void testOMRestart() throws Exception { - // Get the leader OM - String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider() - .getCurrentProxyOMNodeId(); - OzoneManager leaderOM = cluster.getOzoneManager(leaderOMNodeId); - - // Get follower OMs - OzoneManager followerOM1 = cluster.getOzoneManager( - leaderOM.getPeerNodes().get(0).getOMNodeId()); - OzoneManager followerOM2 = cluster.getOzoneManager( - leaderOM.getPeerNodes().get(1).getOMNodeId()); - - // Do some transactions so that the log index increases - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - for (int i = 0; i < 10; i++) { - createKey(ozoneBucket); - } - - long lastAppliedTxOnFollowerOM = - followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex(); - - // Stop one follower OM - followerOM1.stop(); - - // Do more transactions. Stopped OM should miss these transactions and - // the logs corresponding to atleast some of the missed transactions - // should be purged. This will force the OM to install snapshot when - // restarted. - long minNewTxIndex = lastAppliedTxOnFollowerOM + (LOG_PURGE_GAP * 10); - long leaderOMappliedLogIndex = leaderOM.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - - List missedKeys = new ArrayList<>(); - while (leaderOMappliedLogIndex < minNewTxIndex) { - missedKeys.add(createKey(ozoneBucket)); - leaderOMappliedLogIndex = leaderOM.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - } - - // Restart the stopped OM. - followerOM1.restart(); - - // Get the latest snapshotIndex from the leader OM. - long leaderOMSnaphsotIndex = leaderOM.saveRatisSnapshot(); - - // The recently started OM should be lagging behind the leader OM. - long followerOMLastAppliedIndex = - followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex(); - Assert.assertTrue( - followerOMLastAppliedIndex < leaderOMSnaphsotIndex); - - // Wait for the follower OM to catch up - GenericTestUtils.waitFor(() -> { - long lastAppliedIndex = - followerOM1.getOmRatisServer().getStateMachineLastAppliedIndex(); - if (lastAppliedIndex >= leaderOMSnaphsotIndex) { - return true; - } - return false; - }, 100, 200000); - - // Do more transactions. The restarted OM should receive the - // new transactions. It's last applied tx index should increase from the - // last snapshot index after more transactions are applied. 
- for (int i = 0; i < 10; i++) { - createKey(ozoneBucket); - } - long followerOM1lastAppliedIndex = followerOM1.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - Assert.assertTrue(followerOM1lastAppliedIndex > - leaderOMSnaphsotIndex); - - // The follower OMs should be in sync. There can be a small lag between - // leader OM and follower OMs as txns are applied first on leader OM. - long followerOM2lastAppliedIndex = followerOM1.getOmRatisServer() - .getStateMachineLastAppliedIndex(); - Assert.assertEquals(followerOM1lastAppliedIndex, - followerOM2lastAppliedIndex); - - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java deleted file mode 100644 index 8168d27a5d1..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.core.type.TypeReference; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.util.EntityUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.net.InetSocketAddress; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients; - -/** - * This class is to test the REST interface exposed by OzoneManager. - */ -public class TestOzoneManagerRestInterface { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - - @BeforeClass - public static void setUp() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - } - - @AfterClass - public static void tearDown() throws Exception { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testGetServiceList() throws Exception { - OzoneManagerHttpServer server = - cluster.getOzoneManager().getHttpServer(); - HttpClient client = HttpClients.createDefault(); - String connectionUri = "http://" + - NetUtils.getHostPortString(server.getHttpAddress()); - HttpGet httpGet = new HttpGet(connectionUri + "/serviceList"); - HttpResponse response = client.execute(httpGet); - String serviceListJson = EntityUtils.toString(response.getEntity()); - - ObjectMapper objectMapper = new ObjectMapper(); - TypeReference> serviceInfoReference = - new TypeReference>() {}; - List serviceInfos = objectMapper.readValue( - serviceListJson, serviceInfoReference); - Map serviceMap = new HashMap<>(); - for (ServiceInfo serviceInfo : serviceInfos) { - serviceMap.put(serviceInfo.getNodeType(), serviceInfo); - } - - InetSocketAddress omAddress = - getOmAddressForClients(conf); - ServiceInfo omInfo = serviceMap.get(HddsProtos.NodeType.OM); - - Assert.assertEquals(omAddress.getHostName(), omInfo.getHostname()); - Assert.assertEquals(omAddress.getPort(), - omInfo.getPort(ServicePort.Type.RPC)); - Assert.assertEquals(server.getHttpAddress().getPort(), - omInfo.getPort(ServicePort.Type.HTTP)); - - InetSocketAddress scmAddress = - getScmAddressForClients(conf); - ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM); - - Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname()); - Assert.assertEquals(scmAddress.getPort(), - scmInfo.getPort(ServicePort.Type.RPC)); - - ServiceInfo datanodeInfo = 
serviceMap.get(HddsProtos.NodeType.DATANODE); - DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails(); - Assert.assertEquals(datanodeDetails.getHostName(), - datanodeInfo.getHostname()); - - Map ports = datanodeInfo.getPorts(); - for(ServicePort.Type type : ports.keySet()) { - switch (type) { - case HTTP: - case HTTPS: - Assert.assertEquals( - datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(), - ports.get(type)); - break; - default: - // OM only sends Datanode's info port details - // i.e. HTTP or HTTPS - // Other ports are not expected as of now. - Assert.fail(); - break; - } - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java deleted file mode 100644 index 443f3059fd1..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.HashMap; -import java.util.UUID; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.test.GenericTestUtils; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import org.junit.After; -import org.junit.Assert; -import static org.junit.Assert.fail; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * Test some client operations after cluster starts. And perform restart and - * then performs client operations and check the behavior is expected or not. - */ -public class TestOzoneManagerRestart { - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; - - @Rule - public Timeout timeout = new Timeout(60000); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testRestartOMWithVolumeOperation() throws Exception { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - - OzoneClient client = cluster.getClient(); - - ObjectStore objectStore = client.getObjectStore(); - - objectStore.createVolume(volumeName); - - OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); - Assert.assertTrue(ozoneVolume.getName().equals(volumeName)); - - cluster.restartOzoneManager(); - cluster.restartStorageContainerManager(true); - - // After restart, try to create same volume again, it should fail. - try { - objectStore.createVolume(volumeName); - fail("testRestartOM failed"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("VOLUME_ALREADY_EXISTS", ex); - } - - // Get Volume. - ozoneVolume = objectStore.getVolume(volumeName); - Assert.assertTrue(ozoneVolume.getName().equals(volumeName)); - - } - - - @Test - public void testRestartOMWithBucketOperation() throws Exception { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - OzoneClient client = cluster.getClient(); - - ObjectStore objectStore = client.getObjectStore(); - - objectStore.createVolume(volumeName); - - OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); - Assert.assertTrue(ozoneVolume.getName().equals(volumeName)); - - ozoneVolume.createBucket(bucketName); - - OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); - Assert.assertTrue(ozoneBucket.getName().equals(bucketName)); - - cluster.restartOzoneManager(); - cluster.restartStorageContainerManager(true); - - // After restart, try to create same bucket again, it should fail. - try { - ozoneVolume.createBucket(bucketName); - fail("testRestartOMWithBucketOperation failed"); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("BUCKET_ALREADY_EXISTS", ex); - } - - // Get bucket. 
- ozoneBucket = ozoneVolume.getBucket(bucketName); - Assert.assertTrue(ozoneBucket.getName().equals(bucketName)); - - } - - - @Test - public void testRestartOMWithKeyOperation() throws Exception { - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String key = "key" + RandomStringUtils.randomNumeric(5); - - OzoneClient client = cluster.getClient(); - - ObjectStore objectStore = client.getObjectStore(); - - objectStore.createVolume(volumeName); - - OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); - Assert.assertTrue(ozoneVolume.getName().equals(volumeName)); - - ozoneVolume.createBucket(bucketName); - - OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName); - Assert.assertTrue(ozoneBucket.getName().equals(bucketName)); - - String data = "random data"; - OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, - data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, - new HashMap<>()); - - ozoneOutputStream.write(data.getBytes(), 0, data.length()); - ozoneOutputStream.close(); - - cluster.restartOzoneManager(); - cluster.restartStorageContainerManager(true); - - - // As we allow override of keys, not testing re-create key. We shall see - // after restart key exists or not. - - // Get key. - OzoneKey ozoneKey = ozoneBucket.getKey(key); - Assert.assertTrue(ozoneKey.getName().equals(key)); - Assert.assertTrue(ozoneKey.getReplicationType().equals( - ReplicationType.RATIS)); - } - - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java deleted file mode 100644 index 5ca2eea5705..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om; - -import java.util.UUID; -import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * Test RocksDB logging for Ozone Manager. 
- */ -public class TestOzoneManagerRocksDBLogging { - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; - - @Rule - public Timeout timeout = new Timeout(60000); - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set("hadoop.hdds.db.rocksdb.logging.enabled", "true"); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOmId(omId) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testOMRocksDBLoggingEnabled() throws Exception { - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(DBStoreBuilder.ROCKS_DB_LOGGER); - cluster.restartOzoneManager(); - GenericTestUtils.waitFor(() -> logCapturer.getOutput() - .contains("db_impl.cc"), - 1000, 10000); - - cluster.getConf().set("hadoop.hdds.db.rocksdb.logging.enabled", "false"); - cluster.restartOzoneManager(); - logCapturer.clearOutput(); - try { - GenericTestUtils.waitFor(() -> logCapturer.getOutput() - .contains("db_impl.cc"), - 1000, 10000); - Assert.fail(); - } catch (TimeoutException ex) { - Assert.assertTrue(ex.getMessage().contains("Timed out")); - } - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java deleted file mode 100644 index 3614a05a8eb..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.TestStorageContainerManagerHelper; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; -import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test Ozone Manager operation in distributed handler scenario. - */ -public class TestScmSafeMode { - - private final static Logger LOG = LoggerFactory - .getLogger(TestScmSafeMode.class); - private static MiniOzoneCluster cluster = null; - private static MiniOzoneCluster.Builder builder = null; - private static OzoneConfiguration conf; - private static OzoneManager om; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - - - @Rule - public Timeout timeout = new Timeout(1000 * 200); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true and - * OZONE_HANDLER_TYPE_KEY = "distributed" - * - * @throws IOException - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s"); - conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s"); - builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) - .setStartDataNodes(false); - cluster = builder.build(); - cluster.startHddsDatanodes(); - cluster.waitForClusterToBeReady(); - om = cluster.getOzoneManager(); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - try { - cluster.shutdown(); - } catch (Exception e) { - // do nothing. - } - } - } - - @Test(timeout = 300_000) - public void testSafeModeOperations() throws Exception { - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100, 4096); - final List containers = cluster - .getStorageContainerManager().getContainerManager().getContainers(); - GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000); - - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - ObjectStore store = cluster.getRpcClient().getObjectStore(); - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - bucket.createKey(keyName, 1000, RATIS, ONE, new HashMap<>()); - - cluster.stop(); - - try { - cluster = builder.build(); - } catch (IOException e) { - fail("failed"); - } - - - StorageContainerManager scm; - - scm = cluster.getStorageContainerManager(); - Assert.assertTrue(scm.isInSafeMode()); - - om = cluster.getOzoneManager(); - - - final OzoneBucket bucket1 = - cluster.getRpcClient().getObjectStore().getVolume(volumeName) - .getBucket(bucketName); - -// As cluster is restarted with out datanodes restart - LambdaTestUtils.intercept(IOException.class, - "SafeModePrecheck failed for allocateBlock", - () -> bucket1.createKey(keyName, 1000, RATIS, ONE, - new HashMap<>())); - } - - /** - * Tests inSafeMode & forceExitSafeMode api calls. - */ - @Test(timeout = 300_000) - public void testIsScmInSafeModeAndForceExit() throws Exception { - // Test 1: SCM should be out of safe mode. - Assert.assertFalse(storageContainerLocationClient.inSafeMode()); - cluster.stop(); - // Restart the cluster with same metadata dir. - - try { - cluster = builder.build(); - } catch (IOException e) { - Assert.fail("Cluster startup failed."); - } - - // Test 2: Scm should be in safe mode as datanodes are not started yet. - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - Assert.assertTrue(storageContainerLocationClient.inSafeMode()); - // Force scm out of safe mode. - cluster.getStorageContainerManager().getClientProtocolServer() - .forceExitSafeMode(); - // Test 3: SCM should be out of safe mode. 
- GenericTestUtils.waitFor(() -> { - try { - return !cluster.getStorageContainerManager().getClientProtocolServer() - .inSafeMode(); - } catch (IOException e) { - Assert.fail("Cluster"); - return false; - } - }, 10, 1000 * 5); - - } - - @Test(timeout = 300_000) - public void testSCMSafeMode() throws Exception { - // Test1: Test safe mode when there are no containers in system. - cluster.stop(); - - try { - cluster = builder.build(); - } catch (IOException e) { - Assert.fail("Cluster startup failed."); - } - assertTrue(cluster.getStorageContainerManager().isInSafeMode()); - cluster.startHddsDatanodes(); - cluster.waitForClusterToBeReady(); - cluster.waitTobeOutOfSafeMode(); - assertFalse(cluster.getStorageContainerManager().isInSafeMode()); - - // Test2: Test safe mode when containers are there in system. - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100 * 2, 4096); - final List containers = cluster - .getStorageContainerManager().getContainerManager().getContainers(); - GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000 * 30); - - // Removing some container to keep them open. - containers.remove(0); - containers.remove(0); - - // Close remaining containers - SCMContainerManager mapping = (SCMContainerManager) cluster - .getStorageContainerManager().getContainerManager(); - containers.forEach(c -> { - try { - mapping.updateContainerState(c.containerID(), - HddsProtos.LifeCycleEvent.FINALIZE); - mapping.updateContainerState(c.containerID(), - LifeCycleEvent.CLOSE); - } catch (IOException e) { - LOG.info("Failed to change state of open containers.", e); - } - }); - cluster.stop(); - - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(SCMSafeModeManager.getLogger()); - logCapturer.clearOutput(); - - try { - cluster = builder.build(); - } catch (IOException ex) { - fail("failed"); - } - - StorageContainerManager scm; - - scm = cluster.getStorageContainerManager(); - assertTrue(scm.isInSafeMode()); - assertFalse(logCapturer.getOutput().contains("SCM exiting safe mode.")); - assertTrue(scm.getCurrentContainerThreshold() == 0); - for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { - dn.start(); - } - GenericTestUtils - .waitFor(() -> scm.getCurrentContainerThreshold() == 1.0, 100, 20000); - - EventQueue eventQueue = - (EventQueue) cluster.getStorageContainerManager().getEventQueue(); - eventQueue.processAll(5000L); - - double safeModeCutoff = conf - .getDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); - assertTrue(scm.getCurrentContainerThreshold() >= safeModeCutoff); - assertTrue(logCapturer.getOutput().contains("SCM exiting safe mode.")); - assertFalse(scm.isInSafeMode()); - } - - @Test(timeout = 300_000) - public void testSCMSafeModeRestrictedOp() throws Exception { - conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, - OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB); - cluster.stop(); - cluster = builder.build(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - assertTrue(scm.isInSafeMode()); - - LambdaTestUtils.intercept(SCMException.class, - "SafeModePrecheck failed for allocateContainer", () -> { - scm.getClientProtocolServer() - .allocateContainer(ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, ""); - }); - - cluster.startHddsDatanodes(); - cluster.waitForClusterToBeReady(); - 
cluster.waitTobeOutOfSafeMode(); - assertFalse(scm.isInSafeMode()); - - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - helper.createKeys(10, 4096); - SCMClientProtocolServer clientProtocolServer = cluster - .getStorageContainerManager().getClientProtocolServer(); - assertFalse((scm.getClientProtocolServer()).getSafeModeStatus()); - final List containers = scm.getContainerManager() - .getContainers(); - scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, - new SCMSafeModeManager.SafeModeStatus(true)); - GenericTestUtils.waitFor(() -> { - return clientProtocolServer.getSafeModeStatus(); - }, 50, 1000 * 30); - assertTrue(clientProtocolServer.getSafeModeStatus()); - - cluster.shutdownHddsDatanodes(); - Thread.sleep(30000); - LambdaTestUtils.intercept(SCMException.class, - "Open container " + containers.get(0).getContainerID() + " " - + "doesn't have enough replicas to service this operation in Safe" - + " mode.", () -> clientProtocolServer - .getContainerWithPipeline(containers.get(0).getContainerID())); - } - - @Test(timeout = 300_000) - public void testSCMSafeModeDisabled() throws Exception { - cluster.shutdown(); - - // If safe mode is disabled, cluster should not be in safe mode even if - // min number of datanodes are not started. - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); - conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3); - builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) - .setNumDatanodes(1); - cluster = builder.build(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - assertFalse(scm.isInSafeMode()); - - // Even on SCM restart, cluster should be out of safe mode immediately. - cluster.restartStorageContainerManager(true); - assertFalse(scm.isInSafeMode()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java deleted file mode 100644 index 48a9c6a9ef3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java +++ /dev/null @@ -1,217 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.LambdaTestUtils; -import org.bouncycastle.cert.X509CertificateHolder; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.cert.X509Certificate; -import java.util.UUID; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; -import static org.apache.hadoop.test.GenericTestUtils.*; - -/** - * Test secure Ozone Manager operation in distributed handler scenario. - */ -public class TestSecureOzoneManager { - - private static final String COMPONENT = "om"; - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omId; - private Path metaDir; - - @Rule - public Timeout timeout = new Timeout(1000 * 25); - - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); - conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2); - conf.set(OZONE_SCM_NAMES, "localhost"); - final String path = getTempPath(UUID.randomUUID().toString()); - metaDir = Paths.get(path, "om-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - OzoneManager.setTestSecureOmFlag(true); - - } - - /** - * Shutdown MiniDFSCluster. - */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - FileUtils.deleteQuietly(metaDir.toFile()); - } - - /** - * Test failure cases for secure OM initialization. - */ - @Test - public void testSecureOmInitFailures() throws Exception { - PrivateKey privateKey; - PublicKey publicKey; - LogCapturer omLogs = - LogCapturer.captureLogs(OzoneManager.getLogger()); - OMStorage omStorage = new OMStorage(conf); - omStorage.setClusterId(clusterId); - omStorage.setScmId(scmId); - omStorage.setOmId(omId); - omLogs.clearOutput(); - - // Case 1: When keypair as well as certificate is missing. Initial keypair - // boot-up. Get certificate will fail when SCM is not running. - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateClient client = new OMCertificateClient(securityConfig, - omStorage.getOmCertSerialId()); - Assert.assertEquals(CertificateClient.InitResponse.GETCERT, client.init()); - privateKey = client.getPrivateKey(); - publicKey = client.getPublicKey(); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - - // Case 2: If key pair already exist than response should be RECOVER. - client = new OMCertificateClient(securityConfig, - omStorage.getOmCertSerialId()); - Assert.assertEquals(CertificateClient.InitResponse.RECOVER, client.init()); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - - // Case 3: When public key as well as certificate is missing. - client = new OMCertificateClient(securityConfig); - FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) - .toString(), securityConfig.getPublicKeyFileName()).toFile()); - Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init()); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - - // Case 4: When private key and certificate is missing. - client = new OMCertificateClient(securityConfig); - KeyCodec keyCodec = new KeyCodec(securityConfig, COMPONENT); - keyCodec.writePublicKey(publicKey); - FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) - .toString(), securityConfig.getPrivateKeyFileName()).toFile()); - Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init()); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNull(client.getCertificate()); - - // Case 5: When only certificate is present. 
- FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) - .toString(), securityConfig.getPublicKeyFileName()).toFile()); - CertificateCodec certCodec = - new CertificateCodec(securityConfig, COMPONENT); - X509Certificate x509Certificate = KeyStoreTestUtil.generateCertificate( - "CN=Test", new KeyPair(publicKey, privateKey), 10, - securityConfig.getSignatureAlgo()); - certCodec.writeCertificate(new X509CertificateHolder( - x509Certificate.getEncoded())); - client = new OMCertificateClient(securityConfig, - x509Certificate.getSerialNumber().toString()); - omStorage.setOmCertSerialId(x509Certificate.getSerialNumber().toString()); - Assert.assertEquals(CertificateClient.InitResponse.FAILURE, client.init()); - Assert.assertNull(client.getPrivateKey()); - Assert.assertNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - - // Case 6: When private key and certificate is present. - client = new OMCertificateClient(securityConfig, - x509Certificate.getSerialNumber().toString()); - FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) - .toString(), securityConfig.getPublicKeyFileName()).toFile()); - keyCodec.writePrivateKey(privateKey); - Assert.assertEquals(CertificateClient.InitResponse.SUCCESS, client.init()); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - - // Case 7 When keypair and certificate is present. - client = new OMCertificateClient(securityConfig, - x509Certificate.getSerialNumber().toString()); - Assert.assertEquals(CertificateClient.InitResponse.SUCCESS, client.init()); - Assert.assertNotNull(client.getPrivateKey()); - Assert.assertNotNull(client.getPublicKey()); - Assert.assertNotNull(client.getCertificate()); - } - - /** - * Test om bind socket address. - */ - @Test - public void testSecureOmInitFailure() throws Exception { - OzoneConfiguration config = new OzoneConfiguration(conf); - OMStorage omStorage = new OMStorage(config); - omStorage.setClusterId(clusterId); - omStorage.setScmId(scmId); - omStorage.setOmId(omId); - config.set(OZONE_OM_ADDRESS_KEY, "om-unknown"); - LambdaTestUtils.intercept(RuntimeException.class, "Can't get SCM signed" + - " certificate", - () -> OzoneManager.initializeSecurity(config, omStorage)); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java deleted file mode 100644 index 5ad67703279..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -/** - * Ozone Manager Tests. - */ -package org.apache.hadoop.ozone.om; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java deleted file mode 100644 index 56fef1a8434..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOMRatisSnapshotInfo.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.snapshot; - -import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.util.Random; - -/** - * Tests {@link org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo}. - */ -public class TestOMRatisSnapshotInfo { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Test - public void testSaveAndLoadSnapshotInfo() throws Exception { - File rootDir = folder.newFolder(); - OMRatisSnapshotInfo omRatisSnapshotInfo = new OMRatisSnapshotInfo(rootDir); - - // Initially term and index should be 0 and -1 - Assert.assertEquals(0, omRatisSnapshotInfo.getTerm()); - Assert.assertEquals(-1, omRatisSnapshotInfo.getIndex()); - - Random random = new Random(); - int snapshotIndex = random.nextInt(50); - int termIndex = random.nextInt(10); - - // Save snapshotInfo to disk - omRatisSnapshotInfo.updateTerm(termIndex); - omRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex); - - Assert.assertEquals(termIndex, omRatisSnapshotInfo.getTerm()); - Assert.assertEquals(snapshotIndex, omRatisSnapshotInfo.getIndex()); - - // Load the snapshot file into new SnapshotInfo - OMRatisSnapshotInfo newSnapshotInfo = new OMRatisSnapshotInfo(rootDir); - - // Verify that the snapshot file loaded properly - Assert.assertEquals(termIndex, newSnapshotInfo.getTerm()); - Assert.assertEquals(snapshotIndex, newSnapshotInfo.getIndex()); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java deleted file mode 100644 index 92a4a34da76..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.om.snapshot; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import java.util.UUID; - -/** - * Test OM's snapshot provider service. - */ -public class TestOzoneManagerSnapshotProvider { - - private MiniOzoneHAClusterImpl cluster = null; - private ObjectStore objectStore; - private OzoneConfiguration conf; - private String clusterId; - private String scmId; - private String omServiceId; - private int numOfOMs = 3; - - @Rule - public Timeout timeout = new Timeout(300_000); - - /** - * Create a MiniDFSCluster for testing. - */ - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omServiceId = "om-service-test1"; - conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) - .getObjectStore(); - } - - /** - * Shutdown MiniDFSCluster. 
- */ - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testDownloadCheckpoint() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - - VolumeArgs createVolumeArgs = VolumeArgs.newBuilder() - .setOwner(userName) - .setAdmin(adminName) - .build(); - - objectStore.createVolume(volumeName, createVolumeArgs); - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - - retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); - - String leaderOMNodeId = objectStore.getClientProxy().getOMProxyProvider() - .getCurrentProxyOMNodeId(); - OzoneManager ozoneManager = cluster.getOzoneManager(leaderOMNodeId); - - // Get a follower OM - String followerNodeId = ozoneManager.getPeerNodes().get(0).getOMNodeId(); - OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); - - // Download latest checkpoint from leader OM to follower OM - DBCheckpoint omSnapshot = followerOM.getOmSnapshotProvider() - .getOzoneManagerDBSnapshot(leaderOMNodeId); - - long leaderSnapshotIndex = ozoneManager.getRatisSnapshotIndex(); - long downloadedSnapshotIndex = omSnapshot.getRatisSnapshotIndex(); - - // The snapshot index downloaded from leader OM should match the ratis - // snapshot index on the leader OM - Assert.assertEquals("The snapshot index downloaded from leader OM does " + - "not match its ratis snapshot index", - leaderSnapshotIndex, downloadedSnapshotIndex); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java deleted file mode 100644 index 65bc2751423..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.ozShell; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.PrintStream; -import java.util.Arrays; -import java.util.List; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.test.GenericTestUtils; - -import com.google.common.base.Strings; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import static org.junit.Assert.fail; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.ExecutionException; -import picocli.CommandLine.IExceptionHandler2; -import picocli.CommandLine.ParameterException; -import picocli.CommandLine.ParseResult; -import picocli.CommandLine.RunLast; - -/** - * This test class specified for testing Ozone datanode shell command. - */ -public class TestOzoneDatanodeShell { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneDatanodeShell.class); - - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static File baseDir; - private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static HddsDatanodeService datanode = null; - - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - - /** - * Create a MiniDFSCluster for testing with using distributed Ozone - * handler type. - * - * @throws Exception - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - - String path = GenericTestUtils.getTempPath( - TestOzoneDatanodeShell.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - - datanode = HddsDatanodeService.createHddsDatanodeService(null); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()); - conf.setQuietMode(false); - cluster.waitForClusterToBeReady(); - } - - /** - * shutdown MiniDFSCluster. 
- */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - } - - @Before - public void setup() { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); - } - - @After - public void reset() { - // reset stream after each unit test - out.reset(); - err.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); - } - - - private void executeDatanode(HddsDatanodeService hdds, String[] args) { - LOG.info("Executing datanode command with args {}", Arrays.asList(args)); - CommandLine cmd = hdds.getCmd(); - - IExceptionHandler2> exceptionHandler = - new IExceptionHandler2>() { - @Override - public List handleParseException(ParameterException ex, - String[] args) { - throw ex; - } - - @Override - public List handleExecutionException(ExecutionException ex, - ParseResult parseResult) { - throw ex; - } - }; - cmd.parseWithHandlers(new RunLast(), - exceptionHandler, args); - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown and contains the specified usage string. - */ - private void executeDatanodeWithError(HddsDatanodeService hdds, String[] args, - String expectedError) { - if (Strings.isNullOrEmpty(expectedError)) { - executeDatanode(hdds, args); - } else { - try { - executeDatanode(hdds, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - Assert.assertTrue( - String.format( - "Error of shell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); - } - } - } - } - - @Test - public void testDatanodeCommand() { - LOG.info("Running testDatanodeIncompleteCommand"); - String[] args = new String[]{}; //executing 'ozone datanode' - - //'ozone datanode' command should not result in error - executeDatanodeWithError(datanode, args, null); - } - - @Test - public void testDatanodeInvalidParamCommand() { - LOG.info("Running testDatanodeIncompleteCommand"); - String expectedError = "Unknown option: -invalidParam"; - //executing 'ozone datanode -invalidParam' - String[] args = new String[]{"-invalidParam"}; - - executeDatanodeWithError(datanode, args, expectedError); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java deleted file mode 100644 index 4e04b4ce265..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShellHA.java +++ /dev/null @@ -1,343 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.ozShell; - -import com.google.common.base.Strings; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.web.ozShell.OzoneShell; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.ExecutionException; -import picocli.CommandLine.IExceptionHandler2; -import picocli.CommandLine.ParameterException; -import picocli.CommandLine.ParseResult; -import picocli.CommandLine.RunLast; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.PrintStream; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * This class tests Ozone sh shell command. - * Inspired by TestS3Shell - */ -public class TestOzoneShellHA { - - private static final Logger LOG = - LoggerFactory.getLogger(TestOzoneShellHA.class); - - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static File baseDir; - private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static OzoneShell ozoneShell = null; - - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - - private static String omServiceId; - private static String clusterId; - private static String scmId; - private static int numOfOMs; - - /** - * Create a MiniOzoneCluster for testing with using distributed Ozone - * handler type. - * - * @throws Exception - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - - String path = GenericTestUtils.getTempPath( - TestOzoneShellHA.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - ozoneShell = new OzoneShell(); - - // Init HA cluster - omServiceId = "om-service-test1"; - numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - conf.setQuietMode(false); - cluster.waitForClusterToBeReady(); - } - - /** - * shutdown MiniOzoneCluster. 
- */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - } - - @Before - public void setup() { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); - } - - @After - public void reset() { - // reset stream after each unit test - out.reset(); - err.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); - } - - private void execute(OzoneShell shell, String[] args) { - LOG.info("Executing OzoneShell command with args {}", Arrays.asList(args)); - CommandLine cmd = shell.getCmd(); - - IExceptionHandler2> exceptionHandler = - new IExceptionHandler2>() { - @Override - public List handleParseException(ParameterException ex, - String[] args) { - throw ex; - } - - @Override - public List handleExecutionException(ExecutionException ex, - ParseResult parseRes) { - throw ex; - } - }; - - // Since there is no elegant way to pass Ozone config to the shell, - // the idea is to use 'set' to place those OM HA configs. - String[] argsWithHAConf = getHASetConfStrings(args); - - cmd.parseWithHandlers(new RunLast(), exceptionHandler, argsWithHAConf); - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown. - */ - private void executeWithError(OzoneShell shell, String[] args, - String expectedError) { - if (Strings.isNullOrEmpty(expectedError)) { - execute(shell, args); - } else { - try { - execute(shell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - Assert.assertTrue( - String.format( - "Error of OzoneShell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); - } - } - } - } - - /** - * @return the leader OM's Node ID in the MiniOzoneHACluster. - * - * TODO: This should be put into MiniOzoneHAClusterImpl in the future. - * This helper function is similar to the one in TestOzoneFsHAURLs. - */ - private String getLeaderOMNodeId() { - Collection omNodeIds = OmUtils.getOMNodeIds(conf, omServiceId); - assert(omNodeIds.size() == numOfOMs); - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - // Note: this loop may be implemented inside MiniOzoneHAClusterImpl - for (String omNodeId : omNodeIds) { - // Find the leader OM - if (!haCluster.getOzoneManager(omNodeId).isLeader()) { - continue; - } - return omNodeId; - } - return null; - } - - private String getSetConfStringFromConf(String key) { - return String.format("--set=%s=%s", key, conf.get(key)); - } - - private String generateSetConfString(String key, String value) { - return String.format("--set=%s=%s", key, value); - } - - /** - * Helper function to get a String array to be fed into OzoneShell. - * @param numOfArgs Additional number of arguments after the HA conf string, - * this translates into the number of empty array elements - * after the HA conf string. - * @return String array. 
- */ - private String[] getHASetConfStrings(int numOfArgs) { - assert(numOfArgs >= 0); - String[] res = new String[1 + 1 + numOfOMs + numOfArgs]; - final int indexOmServiceIds = 0; - final int indexOmNodes = 1; - final int indexOmAddressStart = 2; - - res[indexOmServiceIds] = getSetConfStringFromConf( - OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY); - - String omNodesKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); - String omNodesVal = conf.get(omNodesKey); - res[indexOmNodes] = generateSetConfString(omNodesKey, omNodesVal); - - String[] omNodesArr = omNodesVal.split(","); - // Sanity check - assert(omNodesArr.length == numOfOMs); - for (int i = 0; i < numOfOMs; i++) { - res[indexOmAddressStart + i] = - getSetConfStringFromConf(OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodesArr[i])); - } - - return res; - } - - /** - * Helper function to create a new set of arguments that contains HA configs. - * @param existingArgs Existing arguments to be fed into OzoneShell command. - * @return String array. - */ - private String[] getHASetConfStrings(String[] existingArgs) { - // Get a String array populated with HA configs first - String[] res = getHASetConfStrings(existingArgs.length); - - int indexCopyStart = res.length - existingArgs.length; - // Then copy the existing args to the returned String array - for (int i = 0; i < existingArgs.length; i++) { - res[indexCopyStart + i] = existingArgs[i]; - } - return res; - } - - /** - * Tests ozone sh command URI parsing with volume and bucket create commands. - */ - @Test - public void testOzoneShCmdURIs() { - // Test case 1: ozone sh volume create /volume - // Expectation: Failure. - String[] args = new String[] {"volume", "create", "/volume"}; - executeWithError(ozoneShell, args, - "Service ID or host name must not be omitted"); - - // Get leader OM node RPC address from ozone.om.address.omServiceId.omNode - String omLeaderNodeId = getLeaderOMNodeId(); - String omLeaderNodeAddrKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omLeaderNodeId); - String omLeaderNodeAddr = conf.get(omLeaderNodeAddrKey); - String omLeaderNodeAddrWithoutPort = omLeaderNodeAddr.split(":")[0]; - - // Test case 2: ozone sh volume create o3://om1/volume2 - // Expectation: Success. - // Note: For now it seems OzoneShell is only trying the default port 9862 - // instead of using the port defined in ozone.om.address (as ozone fs does). - // So the test will fail before this behavior is fixed. - // TODO: Fix this behavior, then uncomment the execute() below. - String setOmAddress = "--set=" + OMConfigKeys.OZONE_OM_ADDRESS_KEY + "=" - + omLeaderNodeAddr; - args = new String[] {setOmAddress, - "volume", "create", "o3://" + omLeaderNodeAddrWithoutPort + "/volume2"}; - //execute(ozoneShell, args); - - // Test case 3: ozone sh volume create o3://om1:port/volume3 - // Expectation: Success. - args = new String[] { - "volume", "create", "o3://" + omLeaderNodeAddr + "/volume3"}; - execute(ozoneShell, args); - - // Test case 4: ozone sh volume create o3://id1/volume - // Expectation: Success. - args = new String[] {"volume", "create", "o3://" + omServiceId + "/volume"}; - execute(ozoneShell, args); - - // Test case 5: ozone sh volume create o3://id1:port/volume - // Expectation: Failure. 
- args = new String[] {"volume", "create", - "o3://" + omServiceId + ":9862" + "/volume"}; - executeWithError(ozoneShell, args, "does not use port information"); - - // Test case 6: ozone sh bucket create /volume/bucket - // Expectation: Failure. - args = new String[] {"bucket", "create", "/volume/bucket"}; - executeWithError(ozoneShell, args, - "Service ID or host name must not be omitted"); - - // Test case 7: ozone sh bucket create o3://om1/volume/bucket - // Expectation: Success. - args = new String[] { - "bucket", "create", "o3://" + omServiceId + "/volume/bucket"}; - execute(ozoneShell, args); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java deleted file mode 100644 index c55de0b4dba..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestS3Shell.java +++ /dev/null @@ -1,292 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.ozShell; - -import com.google.common.base.Strings; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.client.rpc.RpcClient; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.web.ozShell.s3.S3Shell; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.ExecutionException; -import picocli.CommandLine.IExceptionHandler2; -import picocli.CommandLine.ParameterException; -import picocli.CommandLine.ParseResult; -import picocli.CommandLine.RunLast; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; - - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.PrintStream; -import java.util.Arrays; -import java.util.List; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.web.ozShell.s3.GetS3SecretHandler.OZONE_GETS3SECRET_ERROR; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This test class specified for testing Ozone s3Shell command. - */ -public class TestS3Shell { - - private static final Logger LOG = - LoggerFactory.getLogger(TestS3Shell.class); - - /** - * Set the timeout for every test. - */ - @Rule - public Timeout testTimeout = new Timeout(300000); - - private static String url; - private static File baseDir; - private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static ClientProtocol client = null; - private static S3Shell s3Shell = null; - - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - - /** - * Create a MiniOzoneCluster for testing with using distributed Ozone - * handler type. - * - * @throws Exception - */ - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - - String path = GenericTestUtils.getTempPath( - TestS3Shell.class.getSimpleName()); - baseDir = new File(path); - baseDir.mkdirs(); - - s3Shell = new S3Shell(); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue()); - conf.setQuietMode(false); - client = new RpcClient(conf, null); - cluster.waitForClusterToBeReady(); - } - - /** - * shutdown MiniOzoneCluster. 
- */ - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - - if (baseDir != null) { - FileUtil.fullyDelete(baseDir, true); - } - } - - @Before - public void setup() { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); - url = "o3://" + getOmAddress(); - } - - @After - public void reset() { - // reset stream after each unit test - out.reset(); - err.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); - } - - @Test - public void testS3BucketMapping() throws IOException { - String setOmAddress = - "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress(); - - String s3Bucket = "bucket1"; - String commandOutput; - createS3Bucket("ozone", s3Bucket); - - // WHEN - String[] args = - new String[] {setOmAddress, "path", s3Bucket}; - execute(s3Shell, args); - - // THEN - commandOutput = out.toString(); - String volumeName = client.getOzoneVolumeName(s3Bucket); - assertTrue(commandOutput.contains("Volume name for S3Bucket is : " + - volumeName)); - assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" + - s3Bucket + "." + volumeName)); - out.reset(); - - // Trying to get map for an unknown bucket - args = new String[] {setOmAddress, "path", "unknownbucket"}; - executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND); - - // No bucket name - args = new String[] {setOmAddress, "path"}; - executeWithError(s3Shell, args, "Missing required parameter"); - - // Invalid bucket name - args = new String[] {setOmAddress, "path", "/asd/multipleslash"}; - executeWithError(s3Shell, args, S3_BUCKET_NOT_FOUND); - } - - @Test - public void testS3SecretUnsecuredCluster() throws Exception { - String setOmAddress = - "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress(); - - String output; - - String[] args = new String[] {setOmAddress, "getsecret"}; - execute(s3Shell, args); - // Get the first line of output - output = out.toString().split("\n")[0]; - - assertTrue(output.equals(OZONE_GETS3SECRET_ERROR)); - } - - private void createS3Bucket(String userName, String s3Bucket) { - try { - client.createS3Bucket("ozone", s3Bucket); - } catch (IOException ex) { - GenericTestUtils.assertExceptionContains("S3_BUCKET_ALREADY_EXISTS", ex); - } - } - - private void execute(S3Shell shell, String[] args) { - LOG.info("Executing s3Shell command with args {}", Arrays.asList(args)); - CommandLine cmd = shell.getCmd(); - - IExceptionHandler2> exceptionHandler = - new IExceptionHandler2>() { - @Override - public List handleParseException(ParameterException ex, - String[] args) { - throw ex; - } - - @Override - public List handleExecutionException(ExecutionException ex, - ParseResult parseRes) { - throw ex; - } - }; - cmd.parseWithHandlers(new RunLast(), - exceptionHandler, args); - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown. - */ - private void executeWithError(S3Shell shell, String[] args, - OMException.ResultCodes code) { - try { - execute(shell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - Assert.assertEquals(OMException.class, ex.getCause().getClass()); - Assert.assertEquals(code, ((OMException) ex.getCause()).getResult()); - } - } - - /** - * Execute command, assert exception message and returns true if error - * was thrown. 
- */ - private void executeWithError(S3Shell shell, String[] args, - String expectedError) { - if (Strings.isNullOrEmpty(expectedError)) { - execute(shell, args); - } else { - try { - execute(shell, args); - fail("Exception is expected from command execution " + Arrays - .asList(args)); - } catch (Exception ex) { - if (!Strings.isNullOrEmpty(expectedError)) { - Throwable exceptionToCheck = ex; - if (exceptionToCheck.getCause() != null) { - exceptionToCheck = exceptionToCheck.getCause(); - } - Assert.assertTrue( - String.format( - "Error of s3Shell code doesn't contain the " + - "exception [%s] in [%s]", - expectedError, exceptionToCheck.getMessage()), - exceptionToCheck.getMessage().contains(expectedError)); - } - } - } - } - - private String getOmAddress() { - List services; - try { - services = cluster.getOzoneManager().getServiceList(); - } catch (IOException e) { - fail("Could not get service list from OM"); - return null; - } - - return services.stream() - .filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType())) - .findFirst() - .map(s -> s.getServiceAddress(ServicePort.Type.RPC)) - .orElseThrow(IllegalStateException::new); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java deleted file mode 100644 index 84eb8dd25b4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Test utils for Ozone. - */ -package org.apache.hadoop.ozone; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java deleted file mode 100644 index 88b7c043203..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm; - -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test allocate container calls. 
- */ -public class TestAllocateContainer { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - xceiverClientManager = new XceiverClientManager(conf); - } - - @AfterClass - public static void shutdown() throws InterruptedException { - if(cluster != null) { - cluster.shutdown(); - } - IOUtils.cleanupWithLogger(null, storageContainerLocationClient); - } - - @Test - public void testAllocate() throws Exception { - ContainerWithPipeline container = - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Assert.assertNotNull(container); - Assert.assertNotNull(container.getPipeline().getFirstNode()); - - } - - @Test - public void testAllocateNull() throws Exception { - thrown.expect(NullPointerException.class); - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - xceiverClientManager.getFactor(), null); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java deleted file mode 100644 index 4c62c70db7f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.scm; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -/** - * Test Container calls. - */ -public class TestContainerSmallFile { - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - - @BeforeClass - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - xceiverClientManager = new XceiverClientManager(ozoneConfig); - } - - @AfterClass - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.cleanupWithLogger(null, storageContainerLocationClient); - } - - @Test - public void testAllocateWrite() throws Exception { - ContainerWithPipeline container = - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - XceiverClientSpi client = xceiverClientManager - .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), null); - - BlockID blockID = ContainerTestHelper.getTestBlockID( - container.getContainerInfo().getContainerID()); - ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes()); - ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID); - String readData = response.getData().getData().toStringUtf8(); - Assert.assertEquals("data123", readData); - xceiverClientManager.releaseClient(client, false); - } - - @Test - public void testInvalidBlockRead() throws Exception { - ContainerWithPipeline container = - storageContainerLocationClient.allocateContainer( - 
xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - XceiverClientSpi client = xceiverClientManager - .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), null); - - thrown.expect(StorageContainerException.class); - thrown.expectMessage("Unable to find the block"); - - BlockID blockID = ContainerTestHelper.getTestBlockID( - container.getContainerInfo().getContainerID()); - // Try to read a Key Container Name - ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID); - xceiverClientManager.releaseClient(client, false); - } - - @Test - public void testInvalidContainerRead() throws Exception { - long nonExistContainerID = 8888L; - ContainerWithPipeline container = - storageContainerLocationClient.allocateContainer( - xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - XceiverClientSpi client = xceiverClientManager - .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), null); - BlockID blockID = ContainerTestHelper.getTestBlockID( - container.getContainerInfo().getContainerID()); - ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes()); - - thrown.expect(StorageContainerException.class); - thrown.expectMessage("ContainerID 8888 does not exist"); - - // Try to read a invalid key - ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, - ContainerTestHelper.getTestBlockID( - nonExistContainerID)); - xceiverClientManager.releaseClient(client, false); - } - - @Test - public void testReadWriteWithBCSId() throws Exception { - ContainerWithPipeline container = - storageContainerLocationClient.allocateContainer( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, containerOwner); - XceiverClientSpi client = xceiverClientManager - .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, - container.getContainerInfo().getContainerID(), null); - - BlockID blockID1 = ContainerTestHelper.getTestBlockID( - container.getContainerInfo().getContainerID()); - ContainerProtos.PutSmallFileResponseProto responseProto = - ContainerProtocolCalls - .writeSmallFile(client, blockID1, "data123".getBytes()); - long bcsId = responseProto.getCommittedBlockLength().getBlockID() - .getBlockCommitSequenceId(); - try { - blockID1.setBlockCommitSequenceId(bcsId + 1); - //read a file with higher bcsId than the container bcsId - ContainerProtocolCalls - .readSmallFile(client, blockID1); - Assert.fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - Assert - .assertTrue(sce.getResult() == ContainerProtos.Result.UNKNOWN_BCSID); - } - - // write a new block again to bump up the container bcsId - BlockID blockID2 = ContainerTestHelper - .getTestBlockID(container.getContainerInfo().getContainerID()); - ContainerProtocolCalls - .writeSmallFile(client, blockID2, "data123".getBytes()); - - try { - blockID1.setBlockCommitSequenceId(bcsId + 1); - //read a file with higher bcsId than the committed bcsId for the block - ContainerProtocolCalls.readSmallFile(client, blockID1); - Assert.fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - Assert - .assertTrue(sce.getResult() == ContainerProtos.Result.BCSID_MISMATCH); - } - blockID1.setBlockCommitSequenceId(bcsId); - 
ContainerProtos.GetSmallFileResponseProto response = - ContainerProtocolCalls.readSmallFile(client, blockID1); - String readData = response.getData().getData().toStringUtf8(); - Assert.assertEquals("data123", readData); - xceiverClientManager.releaseClient(client, false); - } -} - - diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java deleted file mode 100644 index 8e4645f01af..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.container.common.helpers. - ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers. - StorageContainerException; -import org.apache.hadoop.hdds.scm.container.placement.algorithms. - ContainerPlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms. - SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.protocolPB. - StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * Test Container calls. 
- */ -public class TestGetCommittedBlockLengthAndPutKey { - - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; - private static String containerOwner = "OZONE"; - - @BeforeClass - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); - cluster = - MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); - xceiverClientManager = new XceiverClientManager(ozoneConfig); - } - - @AfterClass - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.cleanupWithLogger(null, storageContainerLocationClient); - } - - @Test - public void tesGetCommittedBlockLength() throws Exception { - ContainerProtos.GetCommittedBlockLengthResponseProto response; - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - long containerID = container.getContainerInfo().getContainerID(); - Pipeline pipeline = container.getPipeline(); - XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); - //create the container - ContainerProtocolCalls.createContainer(client, containerID, null); - - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper - .getWriteChunkRequest(container.getPipeline(), blockID, - data.length); - client.sendCommand(writeChunkRequest); - // Now, explicitly make a putKey request for the block. - ContainerProtos.ContainerCommandRequestProto putKeyRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - client.sendCommand(putKeyRequest); - response = ContainerProtocolCalls - .getCommittedBlockLength(client, blockID); - // make sure the block ids in the request and response are same. - Assert.assertTrue( - BlockID.getFromProtobuf(response.getBlockID()).equals(blockID)); - Assert.assertTrue(response.getBlockLength() == data.length); - xceiverClientManager.releaseClient(client, false); - } - - @Test - public void testGetCommittedBlockLengthForInvalidBlock() throws Exception { - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(xceiverClientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - long containerID = container.getContainerInfo().getContainerID(); - XceiverClientSpi client = xceiverClientManager - .acquireClient(container.getPipeline()); - ContainerProtocolCalls.createContainer(client, containerID, null); - - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - // move the container to closed state - ContainerProtocolCalls.closeContainer(client, containerID, null); - try { - // There is no block written inside the container. The request should - // fail. 
- ContainerProtocolCalls.getCommittedBlockLength(client, blockID); - Assert.fail("Expected exception not thrown"); - } catch (StorageContainerException sce) { - Assert.assertTrue(sce.getMessage().contains("Unable to find the block")); - } - xceiverClientManager.releaseClient(client, false); - } - - @Test - public void tesPutKeyResposne() throws Exception { - ContainerProtos.PutBlockResponseProto response; - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, containerOwner); - long containerID = container.getContainerInfo().getContainerID(); - Pipeline pipeline = container.getPipeline(); - XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); - //create the container - ContainerProtocolCalls.createContainer(client, containerID, null); - - BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); - byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); - ContainerProtos.ContainerCommandRequestProto writeChunkRequest = - ContainerTestHelper - .getWriteChunkRequest(container.getPipeline(), blockID, - data.length); - client.sendCommand(writeChunkRequest); - // Now, explicitly make a putKey request for the block. - ContainerProtos.ContainerCommandRequestProto putKeyRequest = - ContainerTestHelper - .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk()); - response = client.sendCommand(putKeyRequest).getPutBlock(); - Assert.assertEquals( - response.getCommittedBlockLength().getBlockLength(), data.length); - Assert.assertTrue(response.getCommittedBlockLength().getBlockID() - .getBlockCommitSequenceId() > 0); - BlockID responseBlockID = BlockID - .getFromProtobuf(response.getCommittedBlockLength().getBlockID()); - blockID - .setBlockCommitSequenceId(responseBlockID.getBlockCommitSequenceId()); - // make sure the block ids in the request and response are same. - // This will also ensure that closing the container committed the block - // on the Datanodes. - Assert.assertEquals(responseBlockID, blockID); - xceiverClientManager.releaseClient(client, false); - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java deleted file mode 100644 index 536d807aedb..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.scm; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementMetrics; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.StaticMapping; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic - .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; -import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -/** - * Test cases to verify the metrics exposed by SCMPipelineManager. - */ -public class TestSCMContainerPlacementPolicyMetrics { - - private MiniOzoneCluster cluster; - private MetricsRecordBuilder metrics; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - "org.apache.hadoop.hdds.scm.container.placement.algorithms." + - "SCMContainerPlacementRackAware"); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, - StaticMapping.class, DNSToSwitchMapping.class); - StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), - "/rack1"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .build(); - cluster.waitForClusterToBeReady(); - metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName()); - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - } - - /** - * Verifies container placement metric. 
- */ - @Test(timeout = 60000) - public void test() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - String value = "sample value"; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - String keyName = UUID.randomUUID().toString(); - - // Write data into a key - try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, - THREE, new HashMap<>())) { - out.write(value.getBytes()); - } - - // close container - PipelineManager manager = - cluster.getStorageContainerManager().getPipelineManager(); - List pipelines = manager.getPipelines().stream().filter(p -> - p.getType() == HddsProtos.ReplicationType.RATIS && - p.getFactor() == HddsProtos.ReplicationFactor.THREE) - .collect(Collectors.toList()); - Pipeline targetPipeline = pipelines.get(0); - List nodes = targetPipeline.getNodes(); - manager.finalizeAndDestroyPipeline(pipelines.get(0), true); - - // kill datanode to trigger under-replicated container replication - cluster.shutdownHddsDatanode(nodes.get(0)); - try { - Thread.sleep(5 * 1000); - } catch (InterruptedException e) { - } - cluster.getStorageContainerManager().getReplicationManager() - .processContainersNow(); - try { - Thread.sleep(30 * 1000); - } catch (InterruptedException e) { - } - - long totalRequest = getLongCounter("DatanodeRequestCount", metrics); - long tryCount = getLongCounter("DatanodeChooseAttemptCount", metrics); - long sucessCount = - getLongCounter("DatanodeChooseSuccessCount", metrics); - long compromiseCount = - getLongCounter("DatanodeChooseFallbackCount", metrics); - - // Seems no under-replicated closed containers get replicated - Assert.assertTrue(totalRequest == 0); - Assert.assertTrue(tryCount == 0); - Assert.assertTrue(sucessCount == 0); - Assert.assertTrue(compromiseCount == 0); - } - - @After - public void teardown() { - cluster.shutdown(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java deleted file mode 100644 index e700a0e5975..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.scm; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import javax.management.MBeanServer; -import javax.management.ObjectName; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Iterator; -import java.util.UUID; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeoutException; - -import javax.management.openmbean.CompositeData; -import javax.management.openmbean.TabularData; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * - * This class is to test JMX management interface for scm information. - */ -public class TestSCMMXBean { - - public static final Log LOG = LogFactory.getLog(TestSCMMXBean.class); - private static int numOfDatanodes = 1; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static MBeanServer mbs; - - @BeforeClass - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - mbs = ManagementFactory.getPlatformMBeanServer(); - } - - @AfterClass - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testSCMMXBean() throws Exception { - ObjectName bean = new ObjectName( - "Hadoop:service=StorageContainerManager," - + "name=StorageContainerManagerInfo," - + "component=ServerRuntime"); - - String dnRpcPort = (String)mbs.getAttribute(bean, - "DatanodeRpcPort"); - assertEquals(scm.getDatanodeRpcPort(), dnRpcPort); - - - String clientRpcPort = (String)mbs.getAttribute(bean, - "ClientRpcPort"); - assertEquals(scm.getClientRpcPort(), clientRpcPort); - - ConcurrentMap map = scm.getContainerReportCache(); - ContainerStat stat = new ContainerStat(1, 2, 3, 4, 5, 6, 7); - map.put("nodeID", stat); - TabularData data = (TabularData) mbs.getAttribute( - bean, "ContainerReport"); - - // verify report info - assertEquals(1, data.values().size()); - for (Object obj : data.values()) { - assertTrue(obj instanceof CompositeData); - CompositeData d = (CompositeData) obj; - Iterator it = d.values().iterator(); - String key = it.next().toString(); - String value = it.next().toString(); - assertEquals("nodeID", key); - assertEquals(stat.toJsonString(), value); - } - - boolean inSafeMode = (boolean) mbs.getAttribute(bean, - "InSafeMode"); - assertEquals(scm.isInSafeMode(), inSafeMode); - - double containerThreshold = (double) mbs.getAttribute(bean, - "SafeModeCurrentContainerThreshold"); - assertEquals(scm.getCurrentContainerThreshold(), 
containerThreshold, 0); - } - - @Test - public void testSCMContainerStateCount() throws Exception { - - ObjectName bean = new ObjectName( - "Hadoop:service=StorageContainerManager," - + "name=StorageContainerManagerInfo," - + "component=ServerRuntime"); - TabularData data = (TabularData) mbs.getAttribute( - bean, "ContainerStateCount"); - Map containerStateCount = scm.getContainerStateCount(); - verifyEquals(data, containerStateCount); - - // Do some changes like allocate containers and change the container states - ContainerManager scmContainerManager = scm.getContainerManager(); - - List containerInfoList = new ArrayList<>(); - for (int i=0; i < 10; i++) { - containerInfoList.add(scmContainerManager.allocateContainer(HddsProtos - .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, - UUID.randomUUID().toString())); - } - long containerID; - for (int i=0; i < 10; i++) { - if (i % 2 == 0) { - containerID = containerInfoList.get(i).getContainerID(); - scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); - assertEquals(scmContainerManager.getContainer(new ContainerID( - containerID)).getState(), HddsProtos.LifeCycleState.CLOSING); - } else { - containerID = containerInfoList.get(i).getContainerID(); - scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); - scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE); - assertEquals(scmContainerManager.getContainer(new ContainerID( - containerID)).getState(), HddsProtos.LifeCycleState.CLOSED); - } - - } - - data = (TabularData) mbs.getAttribute( - bean, "ContainerStateCount"); - containerStateCount = scm.getContainerStateCount(); - - containerStateCount.forEach((k, v) -> { - if(k == HddsProtos.LifeCycleState.CLOSING.toString()) { - assertEquals((int)v, 5); - } else if (k == HddsProtos.LifeCycleState.CLOSED.toString()) { - assertEquals((int)v, 5); - } else { - // Remaining all container state count should be zero. - assertEquals((int)v, 0); - } - }); - - verifyEquals(data, containerStateCount); - - } - - - /** - * An internal function used to compare a TabularData returned - * by JMX with the expected data in a Map. - */ - private void verifyEquals(TabularData actualData, - Map expectedData) { - if (actualData == null || expectedData == null) { - fail("Data should not be null."); - } - for (Object obj : actualData.values()) { - // Each TabularData is a set of CompositeData - assertTrue(obj instanceof CompositeData); - CompositeData cds = (CompositeData) obj; - assertEquals(2, cds.values().size()); - Iterator it = cds.values().iterator(); - String key = it.next().toString(); - String value = it.next().toString(); - int num = Integer.parseInt(value); - assertTrue(expectedData.containsKey(key)); - assertEquals(expectedData.remove(key).intValue(), num); - } - assertTrue(expectedData.isEmpty()); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java deleted file mode 100644 index 43b9bf03a6e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMNodeManagerMXBean.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.scm; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.management.MBeanServer; -import javax.management.ObjectName; -import javax.management.openmbean.CompositeData; -import javax.management.openmbean.TabularData; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Class which tests the SCMNodeManagerInfo Bean. - */ -public class TestSCMNodeManagerMXBean { - public static final Log LOG = LogFactory.getLog(TestSCMMXBean.class); - private static int numOfDatanodes = 3; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static MBeanServer mbs; - - @BeforeClass - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_STALENODE_INTERVAL, "60000ms"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - mbs = ManagementFactory.getPlatformMBeanServer(); - } - - @AfterClass - public static void cleanup() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testDiskUsage() throws Exception { - ObjectName bean = new ObjectName( - "Hadoop:service=SCMNodeManager," - + "name=SCMNodeManagerInfo"); - - TabularData data = (TabularData) mbs.getAttribute(bean, "NodeInfo"); - Map datanodeInfo = scm.getScmNodeManager().getNodeInfo(); - verifyEquals(data, datanodeInfo); - } - - @Test - public void testNodeCount() throws Exception { - ObjectName bean = new ObjectName( - "Hadoop:service=SCMNodeManager," - + "name=SCMNodeManagerInfo"); - - TabularData data = (TabularData) mbs.getAttribute(bean, "NodeCount"); - Map nodeCount = scm.getScmNodeManager().getNodeCount(); - Map nodeCountLong = new HashMap<>(); - nodeCount.forEach((k, v) -> nodeCountLong.put(k, new Long(v))); - verifyEquals(data, nodeCountLong); - } - - private void verifyEquals(TabularData actualData, Map - expectedData) { - if (actualData == null || expectedData == null) { - fail("Data should not be null."); - } - for 
(Object obj : actualData.values()) { - assertTrue(obj instanceof CompositeData); - CompositeData cds = (CompositeData) obj; - assertEquals(2, cds.values().size()); - Iterator it = cds.values().iterator(); - String key = it.next().toString(); - String value = it.next().toString(); - long num = Long.parseLong(value); - assertTrue(expectedData.containsKey(key)); - assertEquals(expectedData.remove(key).longValue(), num); - } - assertTrue(expectedData.isEmpty()); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java deleted file mode 100644 index 4c25b0c587e..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm; - -import com.google.common.cache.Cache; -import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import java.io.IOException; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; - -/** - * Test for XceiverClientManager caching and eviction. 
- */ -public class TestXceiverClientManager { - private static OzoneConfiguration config; - private static MiniOzoneCluster cluster; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String containerOwner = "OZONE"; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Before - public void init() throws Exception { - config = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(config) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.cleanupWithLogger(null, storageContainerLocationClient); - } - - @Test - public void testCaching() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); - - XceiverClientManager clientManager = new XceiverClientManager(conf); - - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(clientManager.getType(), clientManager.getFactor(), - containerOwner); - XceiverClientSpi client1 = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, client1.getRefcount()); - - ContainerWithPipeline container2 = storageContainerLocationClient - .allocateContainer(clientManager.getType(), clientManager.getFactor(), - containerOwner); - XceiverClientSpi client2 = clientManager - .acquireClient(container2.getPipeline()); - Assert.assertEquals(1, client2.getRefcount()); - - XceiverClientSpi client3 = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(2, client3.getRefcount()); - Assert.assertEquals(2, client1.getRefcount()); - Assert.assertEquals(client1, client3); - clientManager.releaseClient(client1, false); - clientManager.releaseClient(client2, false); - clientManager.releaseClient(client3, false); - } - - @Test - public void testFreeByReference() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); - clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); - XceiverClientManager clientManager = - new XceiverClientManager(conf, clientConfig, null); - Cache cache = - clientManager.getClientCache(); - - ContainerWithPipeline container1 = - storageContainerLocationClient.allocateContainer( - clientManager.getType(), HddsProtos.ReplicationFactor.ONE, - containerOwner); - XceiverClientSpi client1 = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, client1.getRefcount()); - Assert.assertEquals(container1.getPipeline(), - client1.getPipeline()); - - ContainerWithPipeline container2 = - storageContainerLocationClient.allocateContainer( - clientManager.getType(), - HddsProtos.ReplicationFactor.ONE, containerOwner); - XceiverClientSpi client2 = clientManager - .acquireClient(container2.getPipeline()); - Assert.assertEquals(1, client2.getRefcount()); - Assert.assertNotEquals(client1, client2); - - // least recent container (i.e containerName1) is evicted - XceiverClientSpi nonExistent1 = cache.getIfPresent( - 
container1.getContainerInfo().getPipelineID().getId().toString() - + container1.getContainerInfo().getReplicationType()); - Assert.assertEquals(null, nonExistent1); - // However container call should succeed because of refcount on the client. - ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), null); - - // After releasing the client, this connection should be closed - // and any container operations should fail - clientManager.releaseClient(client1, false); - - String expectedMessage = "This channel is not connected."; - try { - ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), null); - Assert.fail("Create container should throw exception on closed" - + "client"); - } catch (Exception e) { - Assert.assertEquals(e.getClass(), IOException.class); - Assert.assertTrue(e.getMessage().contains(expectedMessage)); - } - clientManager.releaseClient(client2, false); - } - - @Test - public void testFreeByEviction() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); - clientConfig.setMaxSize(1); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); - XceiverClientManager clientManager = - new XceiverClientManager(conf, clientConfig, null); - Cache cache = - clientManager.getClientCache(); - - ContainerWithPipeline container1 = - storageContainerLocationClient.allocateContainer( - clientManager.getType(), - clientManager.getFactor(), containerOwner); - XceiverClientSpi client1 = clientManager - .acquireClient(container1.getPipeline()); - Assert.assertEquals(1, client1.getRefcount()); - - clientManager.releaseClient(client1, false); - Assert.assertEquals(0, client1.getRefcount()); - - ContainerWithPipeline container2 = storageContainerLocationClient - .allocateContainer(clientManager.getType(), clientManager.getFactor(), - containerOwner); - XceiverClientSpi client2 = clientManager - .acquireClient(container2.getPipeline()); - Assert.assertEquals(1, client2.getRefcount()); - Assert.assertNotEquals(client1, client2); - - // now client 1 should be evicted - XceiverClientSpi nonExistent = cache.getIfPresent( - container1.getContainerInfo().getPipelineID().getId().toString() - + container1.getContainerInfo().getReplicationType()); - Assert.assertEquals(null, nonExistent); - - // Any container operation should now fail - String expectedMessage = "This channel is not connected."; - try { - ContainerProtocolCalls.createContainer(client1, - container1.getContainerInfo().getContainerID(), null); - Assert.fail("Create container should throw exception on closed" - + "client"); - } catch (Exception e) { - Assert.assertEquals(e.getClass(), IOException.class); - Assert.assertTrue(e.getMessage().contains(expectedMessage)); - } - clientManager.releaseClient(client2, false); - } - - @Test - public void testFreeByRetryFailure() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - ScmClientConfig clientConfig = conf.getObject(ScmClientConfig.class); - clientConfig.setMaxSize(1); - XceiverClientManager clientManager = - new XceiverClientManager(conf, clientConfig, null); - Cache cache = - clientManager.getClientCache(); - - // client is added in cache - ContainerWithPipeline container1 = storageContainerLocationClient - .allocateContainer(clientManager.getType(), clientManager.getFactor(), - 
containerOwner); - XceiverClientSpi client1 = - clientManager.acquireClient(container1.getPipeline()); - clientManager.acquireClient(container1.getPipeline()); - Assert.assertEquals(2, client1.getRefcount()); - - // client should be invalidated in the cache - clientManager.releaseClient(client1, true); - Assert.assertEquals(1, client1.getRefcount()); - Assert.assertNull(cache.getIfPresent( - container1.getContainerInfo().getPipelineID().getId().toString() - + container1.getContainerInfo().getReplicationType())); - - // new client should be added in cache - XceiverClientSpi client2 = - clientManager.acquireClient(container1.getPipeline()); - Assert.assertNotEquals(client1, client2); - Assert.assertEquals(1, client2.getRefcount()); - - // on releasing the old client the entry in cache should not be invalidated - clientManager.releaseClient(client1, true); - Assert.assertEquals(0, client1.getRefcount()); - Assert.assertNotNull(cache.getIfPresent( - container1.getContainerInfo().getPipelineID().getId().toString() - + container1.getContainerInfo().getReplicationType())); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java deleted file mode 100644 index 5285fb31219..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.scm; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -import java.util.List; -import java.util.ArrayList; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.XceiverClientMetrics; -import org.apache.hadoop.hdds.scm.XceiverClientSpi; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * This class tests the metrics of XceiverClient. - */ -public class TestXceiverClientMetrics { - // only for testing - private volatile boolean breakFlag; - private CountDownLatch latch; - - private static OzoneConfiguration config; - private static MiniOzoneCluster cluster; - private static StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient; - private static String containerOwner = "OZONE"; - - @BeforeClass - public static void init() throws Exception { - config = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(config).build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = cluster - .getStorageContainerLocationClient(); - } - - @AfterClass - public static void shutdown() { - cluster.shutdown(); - } - - @Test - public void testMetrics() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - String metaDir = GenericTestUtils.getTempPath( - TestXceiverClientManager.class.getName() + UUID.randomUUID()); - conf.set(HDDS_METADATA_DIR_NAME, metaDir); - - XceiverClientManager clientManager = new XceiverClientManager(conf); - - ContainerWithPipeline container = storageContainerLocationClient - .allocateContainer(clientManager.getType(), clientManager.getFactor(), - containerOwner); - XceiverClientSpi client = clientManager - .acquireClient(container.getPipeline()); - - ContainerCommandRequestProto request = ContainerTestHelper - .getCreateContainerRequest( - container.getContainerInfo().getContainerID(), - container.getPipeline()); - client.sendCommand(request); - - MetricsRecordBuilder containerMetrics = getMetrics( - XceiverClientMetrics.SOURCE_NAME); - // Above request command is in a synchronous way, so there will be no - // pending requests. 
-    assertCounter("PendingOps", 0L, containerMetrics);
-    assertCounter("numPendingCreateContainer", 0L, containerMetrics);
-    // the counter value of average latency metric should be increased
-    assertCounter("CreateContainerLatencyNumOps", 1L, containerMetrics);
-
-    breakFlag = false;
-    latch = new CountDownLatch(1);
-
-    int numRequest = 10;
-    List<CompletableFuture<ContainerCommandResponseProto>> computeResults
-        = new ArrayList<>();
-    // start new thread to send async requests
-    Thread sendThread = new Thread(() -> {
-      while (!breakFlag) {
-        try {
-          // use async interface for testing pending metrics
-          for (int i = 0; i < numRequest; i++) {
-            BlockID blockID = ContainerTestHelper.
-                getTestBlockID(container.getContainerInfo().getContainerID());
-            ContainerProtos.ContainerCommandRequestProto smallFileRequest;
-
-            smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(
-                client.getPipeline(), blockID, 1024);
-            CompletableFuture<ContainerCommandResponseProto>
-                response =
-                client.sendCommandAsync(smallFileRequest).getResponse();
-            computeResults.add(response);
-          }
-
-          Thread.sleep(1000);
-        } catch (Exception ignored) {
-        }
-      }
-
-      latch.countDown();
-    });
-    sendThread.start();
-
-    GenericTestUtils.waitFor(() -> {
-      // check if pending metric count is increased
-      MetricsRecordBuilder metric =
-          getMetrics(XceiverClientMetrics.SOURCE_NAME);
-      long pendingOps = getLongCounter("PendingOps", metric);
-      long pendingPutSmallFileOps =
-          getLongCounter("numPendingPutSmallFile", metric);
-
-      if (pendingOps > 0 && pendingPutSmallFileOps > 0) {
-        // reset break flag
-        breakFlag = true;
-        return true;
-      } else {
-        return false;
-      }
-    }, 100, 60000);
-
-    // blocking until we stop sending async requests
-    latch.await();
-    // Wait for all futures being done.
-    GenericTestUtils.waitFor(() -> {
-      for (CompletableFuture<ContainerCommandResponseProto> future
-          : computeResults) {
-        if (!future.isDone()) {
-          return false;
-        }
-      }
-
-      return true;
-    }, 100, 60000);
-
-    // the counter value of pending metrics should be decreased to 0
-    containerMetrics = getMetrics(XceiverClientMetrics.SOURCE_NAME);
-    assertCounter("PendingOps", 0L, containerMetrics);
-    assertCounter("numPendingPutSmallFile", 0L, containerMetrics);
-
-    clientManager.close();
-  }
-}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
deleted file mode 100644
index c9b8c89e04d..00000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.scm.node; - -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.XceiverClientManager; -import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static java.util.concurrent.TimeUnit.SECONDS; - -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_PIPELINE_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.HddsConfigKeys - .HDDS_NODE_REPORT_INTERVAL; - -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertEquals; - -/** - * Test Query Node Operation. 
- */ -public class TestQueryNode { - private static int numOfDatanodes = 5; - private MiniOzoneCluster cluster; - - private ContainerOperationClient scmClient; - - @Before - public void setUp() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final int interval = 100; - - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - interval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scmClient = new ContainerOperationClient(cluster - .getStorageContainerLocationClient(), - new XceiverClientManager(conf)); - } - - @After - public void tearDown() throws Exception { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testHealthyNodesCount() throws Exception { - List nodes = scmClient.queryNode(HEALTHY, - HddsProtos.QueryScope.CLUSTER, ""); - assertEquals("Expected live nodes", numOfDatanodes, - nodes.size()); - } - - @Test(timeout = 10 * 1000L) - public void testStaleNodesCount() throws Exception { - cluster.shutdownHddsDatanode(0); - cluster.shutdownHddsDatanode(1); - - GenericTestUtils.waitFor(() -> - cluster.getStorageContainerManager().getNodeCount(STALE) == 2, - 100, 4 * 1000); - - int nodeCount = scmClient.queryNode(STALE, - HddsProtos.QueryScope.CLUSTER, "").size(); - assertEquals("Mismatch of expected nodes count", 2, nodeCount); - - GenericTestUtils.waitFor(() -> - cluster.getStorageContainerManager().getNodeCount(DEAD) == 2, - 100, 4 * 1000); - - // Assert that we don't find any stale nodes. - nodeCount = scmClient.queryNode(STALE, - HddsProtos.QueryScope.CLUSTER, "").size(); - assertEquals("Mismatch of expected nodes count", 0, nodeCount); - - // Assert that we find the expected number of dead nodes. - nodeCount = scmClient.queryNode(DEAD, - HddsProtos.QueryScope.CLUSTER, "").size(); - assertEquals("Mismatch of expected nodes count", 2, nodeCount); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java deleted file mode 100644 index 65a6357de9d..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.scm.node; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.assertGauge; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -/** - * Test cases to verify the metrics exposed by SCMNodeManager. - */ -public class TestSCMNodeMetrics { - - private MiniOzoneCluster cluster; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Verifies heartbeat processing count. - * - * @throws InterruptedException - */ - @Test - public void testHBProcessing() throws InterruptedException { - MetricsRecordBuilder metrics = getMetrics( - SCMNodeMetrics.class.getSimpleName()); - long hbProcessed = getLongCounter("NumHBProcessed", metrics); - cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().triggerHeartbeat(); - // Give some time so that SCM receives and processes the heartbeat. - Thread.sleep(100L); - assertCounter("NumHBProcessed", hbProcessed + 1, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - } - - /** - * Verifies heartbeat processing failure count. - */ - @Test - public void testHBProcessingFailure() { - MetricsRecordBuilder metrics = getMetrics( - SCMNodeMetrics.class.getSimpleName()); - long hbProcessedFailed = getLongCounter("NumHBProcessingFailed", metrics); - cluster.getStorageContainerManager().getScmNodeManager() - .processHeartbeat(TestUtils.randomDatanodeDetails()); - assertCounter("NumHBProcessingFailed", hbProcessedFailed + 1, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - } - - /** - * Verifies node report processing count. 
- * - * @throws InterruptedException - */ - @Test - public void testNodeReportProcessing() throws InterruptedException { - MetricsRecordBuilder metrics = getMetrics( - SCMNodeMetrics.class.getSimpleName()); - long nrProcessed = getLongCounter("NumNodeReportProcessed", metrics); - HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); - StorageReportProto storageReport = TestUtils.createStorageReport( - datanode.getDatanodeDetails().getUuid(), "/tmp", 100, 10, 90, null); - NodeReportProto nodeReport = NodeReportProto.newBuilder() - .addStorageReport(storageReport).build(); - datanode.getDatanodeStateMachine().getContext().addReport(nodeReport); - cluster.getStorageContainerManager().getScmNodeManager() - .processNodeReport(datanode.getDatanodeDetails(), nodeReport); - - assertCounter("NumNodeReportProcessed", nrProcessed + 1, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - } - - /** - * Verifies node report processing failure count. - */ - @Test - public void testNodeReportProcessingFailure() { - MetricsRecordBuilder metrics = getMetrics( - SCMNodeMetrics.class.getSimpleName()); - long nrProcessed = getLongCounter("NumNodeReportProcessingFailed", - metrics); - DatanodeDetails datanode = TestUtils.randomDatanodeDetails(); - StorageReportProto storageReport = TestUtils.createStorageReport( - datanode.getUuid(), "/tmp", 100, 10, 90, null); - NodeReportProto nodeReport = NodeReportProto.newBuilder() - .addStorageReport(storageReport).build(); - - cluster.getStorageContainerManager().getScmNodeManager() - .processNodeReport(datanode, nodeReport); - assertCounter("NumNodeReportProcessingFailed", nrProcessed + 1, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - } - - /** - * Verify that datanode aggregated state and capacity metrics are reported. 
- */ - @Test - public void testNodeCountAndInfoMetricsReported() throws Exception { - HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); - StorageReportProto storageReport = TestUtils.createStorageReport( - datanode.getDatanodeDetails().getUuid(), "/tmp", 100, 10, 90, null); - NodeReportProto nodeReport = NodeReportProto.newBuilder() - .addStorageReport(storageReport).build(); - datanode.getDatanodeStateMachine().getContext().addReport(nodeReport); - cluster.getStorageContainerManager().getScmNodeManager() - .processNodeReport(datanode.getDatanodeDetails(), nodeReport); - - assertGauge("HealthyNodes", 1, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("StaleNodes", 0, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DeadNodes", 0, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DecommissioningNodes", 0, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DecommissionedNodes", 0, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DiskCapacity", 100L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DiskUsed", 10L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("DiskRemaining", 90L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("SSDCapacity", 0L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("SSDUsed", 0L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - assertGauge("SSDRemaining", 0L, - getMetrics(SCMNodeMetrics.class.getSimpleName())); - } - - @After - public void teardown() { - cluster.shutdown(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java deleted file mode 100644 index 7ac6d189582..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * Utility classes to encode/decode DTO objects to/from byte array. - */ - -/** - * Unit tests for Node related functions in SCM. - */ -package org.apache.hadoop.ozone.scm.node; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java deleted file mode 100644 index cdc9f0f6cd4..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import javax.management.MBeanServer; -import javax.management.ObjectName; -import javax.management.openmbean.CompositeData; -import javax.management.openmbean.TabularData; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.TimeoutException; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * Test cases to verify the metrics exposed by SCMPipelineManager via MXBean. - */ -public class TestPipelineManagerMXBean { - - private MiniOzoneCluster cluster; - private static MBeanServer mbs; - - @Before - public void init() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - mbs = ManagementFactory.getPlatformMBeanServer(); - } - - /** - * Verifies SCMPipelineManagerInfo metrics. 
- * - * @throws Exception - */ - @Test - public void testPipelineInfo() throws Exception { - ObjectName bean = new ObjectName( - "Hadoop:service=SCMPipelineManager,name=SCMPipelineManagerInfo"); - - TabularData data = (TabularData) mbs.getAttribute(bean, "PipelineInfo"); - Map datanodeInfo = cluster.getStorageContainerManager() - .getPipelineManager().getPipelineInfo(); - verifyEquals(data, datanodeInfo); - } - - private void verifyEquals(TabularData actualData, Map - expectedData) { - if (actualData == null || expectedData == null) { - fail("Data should not be null."); - } - for (Object obj : actualData.values()) { - assertTrue(obj instanceof CompositeData); - CompositeData cds = (CompositeData) obj; - assertEquals(2, cds.values().size()); - Iterator it = cds.values().iterator(); - String key = it.next().toString(); - String value = it.next().toString(); - long num = Long.parseLong(value); - assertTrue(expectedData.containsKey(key)); - assertEquals(expectedData.remove(key).longValue(), num); - } - assertTrue(expectedData.isEmpty()); - } - - @After - public void teardown() { - cluster.shutdown(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java deleted file mode 100644 index 2f1ec66d694..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.scm.pipeline; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineMetrics; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; -import org.apache.hadoop.ozone.MiniOzoneCluster; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.Optional; - -import static org.apache.hadoop.test.MetricsAsserts.assertCounter; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; - -/** - * Test cases to verify the metrics exposed by SCMPipelineManager. 
- */ -public class TestSCMPipelineMetrics { - - private MiniOzoneCluster cluster; - - @Before - public void setup() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Verifies pipeline creation metric. - */ - @Test - public void testPipelineCreation() { - MetricsRecordBuilder metrics = getMetrics( - SCMPipelineMetrics.class.getSimpleName()); - long numPipelineCreated = getLongCounter("NumPipelineCreated", metrics); - // Pipelines are created in background when the cluster starts. - Assert.assertTrue(numPipelineCreated > 0); - } - - /** - * Verifies pipeline destroy metric. - */ - @Test - public void testPipelineDestroy() { - PipelineManager pipelineManager = cluster - .getStorageContainerManager().getPipelineManager(); - Optional pipeline = pipelineManager - .getPipelines().stream().findFirst(); - Assert.assertTrue(pipeline.isPresent()); - pipeline.ifPresent(pipeline1 -> { - try { - cluster.getStorageContainerManager() - .getClientProtocolServer().closePipeline( - pipeline.get().getId().getProtobuf()); - } catch (IOException e) { - e.printStackTrace(); - Assert.fail(); - } - }); - MetricsRecordBuilder metrics = getMetrics( - SCMPipelineMetrics.class.getSimpleName()); - assertCounter("NumPipelineDestroyed", 1L, metrics); - } - - @Test - public void testNumBlocksAllocated() throws IOException { - AllocatedBlock block = - cluster.getStorageContainerManager().getScmBlockManager() - .allocateBlock(5, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, "Test", new ExcludeList()); - MetricsRecordBuilder metrics = - getMetrics(SCMPipelineMetrics.class.getSimpleName()); - Pipeline pipeline = block.getPipeline(); - long numBlocksAllocated = getLongCounter( - SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), metrics); - Assert.assertEquals(numBlocksAllocated, 1); - - // destroy the pipeline - try { - cluster.getStorageContainerManager().getClientProtocolServer() - .closePipeline(pipeline.getId().getProtobuf()); - } catch (IOException e) { - e.printStackTrace(); - Assert.fail(); - } - metrics = getMetrics(SCMPipelineMetrics.class.getSimpleName()); - try { - getLongCounter(SCMPipelineMetrics.getBlockAllocationMetricName(pipeline), - metrics); - Assert.fail("Metric should not be present for closed pipeline."); - } catch (AssertionError e) { - Assert.assertTrue(e.getMessage().contains( - "Expected exactly one metric for name " + SCMPipelineMetrics - .getBlockAllocationMetricName(block.getPipeline()))); - } - } - - @After - public void teardown() { - cluster.shutdown(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java deleted file mode 100644 index ea6734a6da1..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *

- * Utility classes to encode/decode DTO objects to/from byte array. - */ - -/** - * Unit tests for Pipeline related functions in SCM. - */ -package org.apache.hadoop.ozone.scm.pipeline; \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java deleted file mode 100644 index 43ce6793294..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ /dev/null @@ -1,470 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.BucketManagerImpl; -import org.apache.hadoop.ozone.om.IOzoneAcl; -import org.apache.hadoop.ozone.om.KeyManagerImpl; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.VolumeManagerImpl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static 
org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.ANONYMOUS; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.WORLD; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.junit.Assert.*; -import static org.junit.Assert.assertTrue; - -/** - * Test class for {@link OzoneNativeAuthorizer}. - */ -@RunWith(Parameterized.class) -public class TestOzoneNativeAuthorizer { - - private static OzoneConfiguration ozConfig; - private String vol; - private String buck; - private String key; - private String prefix; - private ACLType parentDirUserAcl; - private ACLType parentDirGroupAcl; - private boolean expectedAclResult; - - private static KeyManagerImpl keyManager; - private static VolumeManagerImpl volumeManager; - private static BucketManagerImpl bucketManager; - private static PrefixManager prefixManager; - private static OMMetadataManager metadataManager; - private static OzoneNativeAuthorizer nativeAuthorizer; - - private static StorageContainerManager scm; - private static UserGroupInformation ugi; - - private static OzoneObj volObj; - private static OzoneObj buckObj; - private static OzoneObj keyObj; - private static OzoneObj prefixObj; - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][]{ - {"key", "dir1/", ALL, ALL, true}, - {"file1", "2019/june/01/", ALL, ALL, true}, - {"file2", "", ALL, ALL, true}, - {"dir1/dir2/dir4/", "", ALL, ALL, true}, - {"key", "dir1/", NONE, NONE, false}, - {"file1", "2019/june/01/", NONE, NONE, false}, - {"file2", "", NONE, NONE, false}, - {"dir1/dir2/dir4/", "", NONE, NONE, false} - }); - } - - public TestOzoneNativeAuthorizer(String keyName, String prefixName, - ACLType userRight, - ACLType groupRight, boolean expectedResult) throws IOException { - int randomInt = RandomUtils.nextInt(); - vol = "vol" + randomInt; - buck = "bucket" + randomInt; - key = keyName + randomInt; - prefix = prefixName + randomInt + OZONE_URI_DELIMITER; - parentDirUserAcl = userRight; - parentDirGroupAcl = groupRight; - expectedAclResult = expectedResult; - - createVolume(vol); - createBucket(vol, buck); - createKey(vol, buck, key); - } - - @BeforeClass - public static void setup() throws Exception { - ozConfig = new OzoneConfiguration(); - ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS, - 
OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - File dir = GenericTestUtils.getRandomizedTestDir(); - ozConfig.set(OZONE_METADATA_DIRS, dir.toString()); - ozConfig.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - - metadataManager = new OmMetadataManagerImpl(ozConfig); - volumeManager = new VolumeManagerImpl(metadataManager, ozConfig); - bucketManager = new BucketManagerImpl(metadataManager); - prefixManager = new PrefixManagerImpl(metadataManager, false); - - NodeManager nodeManager = new MockNodeManager(true, 10); - SCMConfigurator configurator = new SCMConfigurator(); - configurator.setScmNodeManager(nodeManager); - scm = TestUtils.getScm(ozConfig, configurator); - scm.start(); - scm.exitSafeMode(); - keyManager = - new KeyManagerImpl(scm.getBlockProtocolServer(), metadataManager, - ozConfig, - "om1", null); - - nativeAuthorizer = new OzoneNativeAuthorizer(volumeManager, bucketManager, - keyManager, prefixManager); - //keySession. - ugi = UserGroupInformation.getCurrentUser(); - } - - private void createKey(String volume, - String bucket, String keyName) throws IOException { - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volume) - .setBucketName(bucket) - .setKeyName(keyName) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setDataSize(0) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(), - ALL, ALL)) - .build(); - - if (keyName.split(OZONE_URI_DELIMITER).length > 1) { - keyManager.createDirectory(keyArgs); - key = key + OZONE_URI_DELIMITER; - } else { - OpenKeySession keySession = keyManager.createFile(keyArgs, true, false); - keyArgs.setLocationInfoList( - keySession.getKeyInfo().getLatestVersionLocations() - .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); - } - - keyObj = new OzoneObjInfo.Builder() - .setVolumeName(vol) - .setBucketName(buck) - .setKeyName(key) - .setResType(KEY) - .setStoreType(OZONE) - .build(); - } - - private void createBucket(String volumeName, String bucketName) - throws IOException { - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); - buckObj = new OzoneObjInfo.Builder() - .setVolumeName(vol) - .setBucketName(buck) - .setResType(BUCKET) - .setStoreType(OZONE) - .build(); - } - - private void createVolume(String volumeName) throws IOException { - OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("bilbo") - .setOwnerName("bilbo") - .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); - volObj = new OzoneObjInfo.Builder() - .setVolumeName(vol) - .setResType(VOLUME) - .setStoreType(OZONE) - .build(); - } - - @Test - public void testCheckAccessForVolume() throws Exception { - expectedAclResult = true; - resetAclsAndValidateAccess(volObj, USER, volumeManager); - resetAclsAndValidateAccess(volObj, GROUP, volumeManager); - resetAclsAndValidateAccess(volObj, WORLD, volumeManager); - resetAclsAndValidateAccess(volObj, ANONYMOUS, volumeManager); - } - - @Test - public void testCheckAccessForBucket() throws Exception { - - OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl, - ACCESS); - OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ? - ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); - // Set access for volume. 
- volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl)); - - resetAclsAndValidateAccess(buckObj, USER, bucketManager); - resetAclsAndValidateAccess(buckObj, GROUP, bucketManager); - resetAclsAndValidateAccess(buckObj, WORLD, bucketManager); - resetAclsAndValidateAccess(buckObj, ANONYMOUS, bucketManager); - } - - @Test - public void testCheckAccessForKey() throws Exception { - OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl, - ACCESS); - OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ? - ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); - // Set access for volume, bucket & prefix. - volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl)); - bucketManager.setAcl(buckObj, Arrays.asList(userAcl, groupAcl)); - //prefixManager.setAcl(prefixObj, Arrays.asList(userAcl, groupAcl)); - - resetAclsAndValidateAccess(keyObj, USER, keyManager); - resetAclsAndValidateAccess(keyObj, GROUP, keyManager); - resetAclsAndValidateAccess(keyObj, WORLD, keyManager); - resetAclsAndValidateAccess(keyObj, ANONYMOUS, keyManager); - } - - @Test - public void testCheckAccessForPrefix() throws Exception { - prefixObj = new OzoneObjInfo.Builder() - .setVolumeName(vol) - .setBucketName(buck) - .setPrefixName(prefix) - .setResType(PREFIX) - .setStoreType(OZONE) - .build(); - - OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), parentDirUserAcl, - ACCESS); - OzoneAcl groupAcl = new OzoneAcl(GROUP, ugi.getGroups().size() > 0 ? - ugi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); - // Set access for volume & bucket. - volumeManager.setAcl(volObj, Arrays.asList(userAcl, groupAcl)); - bucketManager.setAcl(buckObj, Arrays.asList(userAcl, groupAcl)); - - resetAclsAndValidateAccess(prefixObj, USER, prefixManager); - resetAclsAndValidateAccess(prefixObj, GROUP, prefixManager); - resetAclsAndValidateAccess(prefixObj, WORLD, prefixManager); - resetAclsAndValidateAccess(prefixObj, ANONYMOUS, prefixManager); - } - - private void resetAclsAndValidateAccess(OzoneObj obj, - ACLIdentityType accessType, IOzoneAcl aclImplementor) - throws IOException { - - List acls; - String user = ""; - String group = ""; - - user = ugi.getUserName(); - if (ugi.getGroups().size() > 0) { - group = ugi.getGroups().get(0); - } - - RequestContext.Builder builder = new RequestContext.Builder() - .setClientUgi(ugi) - .setAclType(accessType); - - // Get all acls. - List allAcls = Arrays.stream(ACLType.values()). - collect(Collectors.toList()); - - /** - * 1. Reset default acls to an acl. - * 2. Test if user/group has access only to it. - * 3. Add remaining acls one by one and then test - * if user/group has access to them. - * */ - for (ACLType a1 : allAcls) { - OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), a1, - ACCESS); - - // Reset acls to only one right. - aclImplementor.setAcl(obj, Arrays.asList(newAcl)); - - // Fetch current acls and validate. - acls = aclImplementor.getAcl(obj); - assertTrue(acls.size() == 1); - assertTrue(acls.contains(newAcl)); - - // Special handling for ALL. - if (a1.equals(ALL)) { - validateAll(obj, builder); - continue; - } - - // Special handling for NONE. 
- if (a1.equals(NONE)) { - validateNone(obj, builder); - continue; - } - assertEquals("Acl to check:" + a1 + " accessType:" + - accessType + " path:" + obj.getPath(), - expectedAclResult, nativeAuthorizer.checkAccess(obj, - builder.setAclRights(a1).build())); - - List aclsToBeValidated = - Arrays.stream(ACLType.values()).collect(Collectors.toList()); - List aclsToBeAdded = - Arrays.stream(ACLType.values()).collect(Collectors.toList()); - aclsToBeValidated.remove(NONE); - aclsToBeValidated.remove(a1); - - aclsToBeAdded.remove(NONE); - aclsToBeAdded.remove(ALL); - - // Fetch acls again. - for (ACLType a2 : aclsToBeAdded) { - if (!a2.equals(a1)) { - - acls = aclImplementor.getAcl(obj); - List right = acls.stream().map(a -> a.getAclList()).collect( - Collectors.toList()); - assertFalse("Do not expected client to have " + a2 + " acl. " + - "Current acls found:" + right + ". Type:" + accessType + "," - + " name:" + (accessType == USER ? user : group), - nativeAuthorizer.checkAccess(obj, - builder.setAclRights(a2).build())); - - // Randomize next type. - int type = RandomUtils.nextInt(0, 3); - ACLIdentityType identityType = ACLIdentityType.values()[type]; - // Add remaining acls one by one and then check access. - OzoneAcl addAcl = new OzoneAcl(identityType, - getAclName(identityType), a2, ACCESS); - aclImplementor.addAcl(obj, addAcl); - - // Fetch acls again. - acls = aclImplementor.getAcl(obj); - boolean a2AclFound = false; - boolean a1AclFound = false; - for (OzoneAcl acl : acls) { - if (acl.getAclList().contains(a2)) { - a2AclFound = true; - } - if (acl.getAclList().contains(a1)) { - a1AclFound = true; - } - } - - assertTrue("Current acls :" + acls + ". " + - "Type:" + accessType + ", name:" + (accessType == USER ? user - : group) + " acl:" + a2, a2AclFound); - assertTrue("Expected client to have " + a1 + " acl. Current acls " + - "found:" + acls + ". Type:" + accessType + - ", name:" + (accessType == USER ? user : group), a1AclFound); - assertEquals("Current acls " + acls + ". Expect acl:" + a2 + - " to be set? " + expectedAclResult + " accessType:" - + accessType, expectedAclResult, - nativeAuthorizer.checkAccess(obj, - builder.setAclRights(a2).build())); - aclsToBeValidated.remove(a2); - for (ACLType a3 : aclsToBeValidated) { - if (!a3.equals(a1) && !a3.equals(a2)) { - assertFalse("User shouldn't have right " + a3 + ". " + - "Current acl rights for user:" + a1 + "," + a2, - nativeAuthorizer.checkAccess(obj, - builder.setAclRights(a3).build())); - } - } - } - } - } - - } - - private String getAclName(ACLIdentityType identityType) { - switch (identityType) { - case USER: - return ugi.getUserName(); - case GROUP: - if (ugi.getGroups().size() > 0) { - return ugi.getGroups().get(0); - } - default: - return ""; - } - } - - /** - * Helper function to test acl rights with user/group had ALL acl bit set. - * @param obj - * @param builder - */ - private void validateAll(OzoneObj obj, RequestContext.Builder - builder) throws OMException { - List allAcls = new ArrayList<>(Arrays.asList(ACLType.values())); - allAcls.remove(ALL); - allAcls.remove(NONE); - for (ACLType a : allAcls) { - assertEquals("User should have right " + a + ".", - nativeAuthorizer.checkAccess(obj, - builder.setAclRights(a).build()), expectedAclResult); - } - } - - /** - * Helper function to test acl rights with user/group had NONE acl bit set. 
- * @param obj - * @param builder - */ - private void validateNone(OzoneObj obj, RequestContext.Builder - builder) throws OMException { - List allAcls = new ArrayList<>(Arrays.asList(ACLType.values())); - allAcls.remove(NONE); - for (ACLType a : allAcls) { - assertFalse("User shouldn't have right " + a + ".", - nativeAuthorizer.checkAccess(obj, builder.setAclRights(a).build())); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/resources/auditlog.properties b/hadoop-ozone/integration-test/src/test/resources/auditlog.properties deleted file mode 100644 index 19daa6fe17b..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/auditlog.properties +++ /dev/null @@ -1,76 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with this -# work for additional information regarding copyright ownership. The ASF -# licenses this file to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -#
-# http://www.apache.org/licenses/LICENSE-2.0 -#
-# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations under -# the License. -# -name=PropertiesConfig - -# Checks for config change periodically and reloads -monitorInterval=5 - -filter=read, write -# filter.read.onMatch = DENY avoids logging all READ events -# filter.read.onMatch = ACCEPT permits logging all READ events -# The above two settings ignore the log levels in configuration -# filter.read.onMatch = NEUTRAL permits logging of only those READ events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.read.type = MarkerFilter -filter.read.marker = READ -filter.read.onMatch = NEUTRAL -filter.read.onMismatch = NEUTRAL - -# filter.write.onMatch = DENY avoids logging all WRITE events -# filter.write.onMatch = ACCEPT permits logging all WRITE events -# The above two settings ignore the log levels in configuration -# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events -# which are attempted at log level equal or greater than log level specified -# in the configuration -filter.write.type = MarkerFilter -filter.write.marker = WRITE -filter.write.onMatch = NEUTRAL -filter.write.onMismatch = NEUTRAL - -# Log Levels are organized from most specific to least: -# OFF (most specific, no logging) -# FATAL (most specific, little data) -# ERROR -# WARN -# INFO -# DEBUG -# TRACE (least specific, a lot of data) -# ALL (least specific, all data) - -appenders = console, audit -appender.console.type = Console -appender.console.name = STDOUT -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -appender.audit.type = File -appender.audit.name = AUDITLOG -appender.audit.fileName=audit.log -appender.audit.layout.type=PatternLayout -appender.audit.layout.pattern= %d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n - -loggers=audit -logger.audit.type=AsyncLogger -logger.audit.name=OMAudit -logger.audit.level = INFO -logger.audit.appenderRefs = audit -logger.audit.appenderRef.file.ref = AUDITLOG - -rootLogger.level = INFO -rootLogger.appenderRefs = stdout -rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-ozone/integration-test/src/test/resources/core-site.xml b/hadoop-ozone/integration-test/src/test/resources/core-site.xml deleted file mode 100644 index 77dd7ef9940..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/core-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml b/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml deleted file mode 100644 index 77dd7ef9940..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/hdfs-site.xml +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties deleted file mode 100644 index b8ad21d6c7f..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# log4j configuration used during build and unit tests - -log4j.rootLogger=info,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n - -log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt deleted file mode 100644 index 501be3c6f81..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEpDCCAowCCQDu0m7J8pzvvjANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQDDAls -b2NhbGhvc3QwHhcNMTgxMjA2MDY1ODE1WhcNMTkxMjA2MDY1ODE1WjAUMRIwEAYD -VQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDU -T3bUikPWsSLLYWpeg/S9Zko/X3aijj6INIC1pGD4IX40neKHqWLgkkhGQnM8nFa4 -soUz3Ewv/N+RPnUhLhXaMyD5Ca6h1+6g5839E9QLXcpd5OSwQ2iu/JLsrWb5Belb -7PN60izZIbgRuA4lWmqeI9dEwQDIwbaPJW8HFlnUM5Ci5MM/CxmV/IdCSXocBKQ3 -whLP+vhLnhjOWMgFhhzdL1zObonbHVLONLBROnbFc+1zxI7gt7RD3OqTsnToXotZ -Jy3CfN5TxzwuP6xRDj3xciwIsGwE9tSE4H3a8DbYKfEUzyS/OlWeHboFiumRxcm8 -qkA5tmOA4+AoPLigsrJpxgtQR/0YwjI8yn+Hh79g+rZCckoR0Fs/OYEpXw6xg92o -YUzDA1SrRHd43r4xI0BDP/660fsbYtRk56WVmCQHNTKvJpeDlyg9qYwzWvZZSrPL -vO9qJ0k1SUbnEd4StPUmF/UQfdVfkdcR86j7ZLXJ9ZLhcWJjVlXeXfwnEl2/ctSt -RJROogM4ourc6sNNLOuFboLpnMEd5n8bijtoFG9vEJ0Cb//Zez942OEJa7db8fu1 -TEGPZzJTxnlgMIvaTrRdAE2VoZN2fzyIBF33wFgV4vgvllO61qeBH/SUFlpcOOo4 -LReY6bZxoKPlL9sG8ZHauQeq/uX+hhX50VP4cV1g+wIDAQABMA0GCSqGSIb3DQEB -CwUAA4ICAQApJDDPq2cmn3JWEfabkc3YxX62Q0qNyXDv+hY/O3zrJBbvJ74lEu9k -UPBk/oMIAZQGk/yvU5jBpJ1SndqB8ONnZcnOs7mDoqABcO9C8bB+kTmTXmxeZvcu -ZnF/3wkzuecYndcZwfC4Yt76DDny3gEMKruEbr51aehLkqYQOI5EGrrtc3Q2HE4D -z5H5CfzltaUajAkE8X+Iw6aVnEFrKbP5+VuQunMSi0lmmlBcpiVU6iyULt5LPNY5 -SbsEVgqUVekX0Qnn31ojabXOZJr4qK8/J+h5cGzaOQxGHopYqd34QjvlvZZnGCjd -6MrlO9WF0KWBJyJxPuLI0j3qNyrRF253ZBTOzow9jl4EZ3nsNe0WDgxUsv1qRqlv -CR4wKiCY9+Ti85k1KC1xQt6LEi0PRgE+rTpINGhWHQKOwkXwdZPdmPeXu9MFnDjt -iEEudugRrxGscTWMIOThL7HQhdGHPg6eCgdxLZ+q/pW0t3NKa+oMBuXFrOlBOwwE -iC9dpXPsd2S6wC5V33pj07WnIj+/+L/ViJvGimcudh/wj4KRhatsdFPjUBQI1b+E -tJm8gbVRYrueHhlvSfD09BKkf4aQRJ7RW18SQrLbHhqO3g6jZa8HciQiVxF0YC3x -qZh2A7b1BgOcqpFKEJp7k1U4qeH7H8hFm7vghGOnrd8bLE8viJRlxA== ------END CERTIFICATE----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key b/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key deleted file mode 100644 index e53eef9f3e8..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/ca.key +++ /dev/null @@ -1,54 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,39D6E9EAB7ABCD69 - -NvhRc8RQDMtbbJXEilb8mKEcVCJHhew278cdoJZyep5G2VvifpZJk207+ZZo2YFt -hPlX5xtlOd+zmdj4nVsRVpDA5Vd/pF+PmRIbfoIwbgkcPIuUtRGoJtIhPa0zlUYu -uyc85h1/YOAkxzaTq4fVgUr4pOb+GLlkve6Q1nKFif5q58uMYBzyHRrf/VsWbip7 -dL1CEoY8Alv+mph++ckauY+5zAM6CBXBpFK3bbVw1bj3QNFi8ISsEQjrzpMnm8qa 
-7ZTNWN2gJ8QykbUxkFM5WCwb6iSkLdnV2zE/hUPLfK/ec3FGPJ6Qpyi/nY3urNTU -6ett8g+W+/8ZdxZt7YWlIVfiHE/dQyqlTvGKCjnODEfdwcMvMOsDTAwTH2AkFf6c -9sNSVIYScmnc9mu4MqYJN4uNZz0qcBA+QFBw0WtYRg2J4A6CD0YOINCt9aBuY8hf -p1FxMCiZs8pEju5ujS9J/NRTpWGbCIQxq/9j9TJc5N/Pb9Rp4LOOeGSt+7iRbSnt -BYV54MzS2zP+qwSZSWeyyhw0yagg1GP9b7RhvaZzzRAX9Ey4IHQAyWnjj0H6qq/K -dEes4GRcbSlQyxIfgoY7wH3P6IaZraykVLHHzUcn4iQlmxZehh0ppVuqSBn0Mmql -pK0mtoSETxBJAoyjorIsA1rpr0T8yGmmH5Gkhwn4npJxfJwOOygd3rOhAncC26Bk -f8O/0RNnF9frt2N0XLDg0/HlLdsyiEu/rEWIIrHY3m29Z4BT/i2rPGwtO9aweVHQ -JoORgZIdPTf9qT/PCyJSbna4N3AChgAMLzM1nacqKTQ99jhOmA2Z1iXf8umaOrv0 -6sWHwx+W8Ax5JiG4bWknePm02LVmpGxt3o0aecjzOBqrx+VdtJ4wRboq/LskqRcP -X9sIO2q1r01Lt4nZiwLNO/OFaVUzVW1IddTS9oE6gM9vFZsOjDgyu8jxwZjeYmHy -fWim4Rvc2w86vCmqx6Ff5TCsEJIqQ9QvIT+CvqMk9f09ftiCeuf93LrRDtcBGrir -LS8Dd7nFV7bsdjYAGeDY01NzwDzZ+LV46BeGwjDDuiTANeJCGHBnoZaIsnTYGCzc -U7ZEdlga7zMTGaIDPEHe7e4pyZFP6bubFPy+rxxXQfA/w4YfjgRyR8uXW9Fq3oWX -Utz65aMUV2owxlsWhIBwrKJoJAYxXaST5V7PDAZ+h661bILMhl+m0XrlWofA6dr8 -Yfr67aDbRjXRD8J5poq/+fP8D4NUdoa4GCo3TXl39af7vEXSkE7CIu/UidZlFr4o -2tCCUC7P/ZHtl+6durlQ4gBwpFPB9s1aqAA/8l0wDDUki0e1Pft0AZ/00LLkzlU2 -fwxfyYsA2L4M/mjCviPibi7VoUJjWZd28L8ixyKOopUT82gQ6eCs91kusMbZPI2t -0wxGHhm41ij28xkMI3iK9mF9Of3N5D1XlK5SDN1lJ43dYXSYDay52KL28n/Pu6F5 -UyAkJIbDhHmNTQ2bjYTl2xtxdlKF7SfbJ1LlySrpmnmm9f63vm4jj+xTHbO1pUTM -PPrxnfgdX7+E9/ZuiKNJoY9XXpPvLxA2aCvGWMjsYnuJ1d1TGrhI0BqMOIgq7G8N -chdS9TP/eGihJO2vUyqcQWwKhNTFpwDH9/VomTaLglMB8SjPQHMrV/WrCjGPj1ql -Oc4eVm2oBOkIeae3eaKU8xMKDaUrOEWjT1E7o+mhpK0pfmMg/qpjD75ZuBCMRTNS -Ihgo2KBFzygE+T2lnbPQGtUkwPFEzeZVTzL/fmOrQ72UGovS8e2NmYy2Lqrwwl8Q -xouYMWO85xVJhocd/mstl41y+Xl2v1oULEYLoznJDd3IWm4zkUW78KmZ45unIBAy -zkLoO2OssTsc4n6Qb1/d4KEahgBIE1NyiWl1eh3cZAeBbt4zuMZ/3wOSo2ErK15z -oxjH/eEti6tP0Fe/FCiBnW3fCs7vN4CkAFISrEJo28J9e0UjBsfEacZv89Lf9ued -wH/jkdk4q2o068Uf3piLaBgaugIlFcjS9h2Mzwwdbvcs5HT5pRztZDhm8CFMOjEd -nkAdshTEkJ2UDQPIDWl2LcYfLWY0/dMToMEfefkurd73RaTdkWqDaulBpvzFILzJ -Kh3is/AyOlnEKYmcafvH0S+dAIH+LVI7tkaJQDNS6uftSF30q9faNHNzbvjbPd5N -YMOZDARDILRvHrVPAA1NzSnedJiM1iG4gqKC/sC7CyfxX8hW0tvk65KZ6jVEgthg -nEcrUZxI7YajnNKJJi6LslO8dX4rULEGPMCtwgCA26EANe8uvk0GrH7PLjcaVtOE -1O3WL3HDy6tdfnFCNL5W8IlFP7x7yVgf5xlwurV6AW1kMokuF20UCaQs15c+/+ob -ge3Q9w5RsWs/2iyxZ6QyDcMKPpkeyRJUjGqcOmTcFDGwbiShckkTK1vGIHWMk/8I -oLuAC/yAbNEL3ROmD554AuJDK0PAS2+zND+eB8steJxuBouVaroDzS1QJUg9TFkR -VaFjYCOjMHsIPZ3WjwzofCQsL3waPOfYIeHtWOULqRWtb1GGQZTdxk2Q/rb4U8Eh -x3zngQzIynGegWXi+1ZTJAoDNCEPBB65u3JVU8hLKlLCmjnAh5UW0dVSWkDIERU3 -9sPvpaJherJQeUzwnSdMCQrbyhXR63nlJQUILyr/pvKS6cHIC8U2rCO7NEn36qHD -nYEL/1cFmf+3zb1KauoOHbbTbvIcw6xNGGcGJOhzQL4WF0M3vgvwCQscSmFUXP9i -gCdstl0viQkjRGkMWoynfVC0MYypNdEsVvLpE/IsjWugtwrKK+4s+gx1C0+tlMf9 -XGo+gfz1haHtDoxckfFG1vDXjxOaxsjsS3xMqJqFMzlph1lMI5d0RK5JASZfrim/ -v4B5bhpBLmE/JViCZbm8wD2a5GsfuutpPZj9KF+A9hWGOm1vm9hC7OAJfBzQ8NgC -agSDqGt74mtmHs59ueR/oq/JXlUNBZvxnAGaWd8/n3e4nijwiyQ6uhxRSIq/CJ12 -wguxZNCSDBd/Eec+7bBSlbHUsAHf3XdjQ5Qi8eAq7XNgjw3iATBGsDvwVZ6QXin2 -2WCSzSqecCEwYCA5E4WCfn72SkF6Ls+1VWOPQBLCYpB/bvgjAxgsfkVBqpRlTTs1 ------END RSA PRIVATE KEY----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt deleted file mode 100644 index f093a7075ef..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnDCCAoQCAQEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAwwJbG9jYWxob3N0 -MB4XDTE4MTIwNjA2NTkwM1oXDTE5MTIwNjA2NTkwM1owFDESMBAGA1UEAwwJbG9j -YWxob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAmfmrFVX0mDZE 
-VZ0cI3V0Kzy8PprNcMc8vl8K02v3yAuJaigv+FJxE7jAa31sTHco/SN1kCKf9fiG -Gf7lhLqTutDCkjXxF7LhGBmC+W0KFJG2Ucd88KPLzQKG7tOV5+4eoxOvQcfO41w/ -daAlrKuao2/hj2tUV2T4Sh7e5co11lt2ndP/eegFMq6hnP7sunZKjX+kETx8H7rL -exBB8NP7yZnrzEspxsoMQ/GLfguSgrQOcwz0MkujFTBGfOlzdJSm1qIl5Y+ZnnjG -kHM8bKN3/gO5vov0iqpCNIwTt8L2vFqEy2vuW1yj/A4qP7RLXpmWv+XI0v3Bbyff -BvYNBfCzJCivPKOKIcPYfDQ/Y8DUrWOOevoCB8LkqsLtVmtgNJB5wDDbtQYLPz6i -TSm1M9oxtSypRa/GyYDwZQq1wkRguqj1y5VI24SJ/zYrLr0DzzpUx2CuWV/X9BMq -V87MxLEjcqqT743lvSx/6gAX987EhMvZfQETyHU1qON+P1V1fYUuhVb0kS0itBim -Aa5zKLCthcIMLchJlje2GpHOSd+/hnDdiDZEwWmGaX0OOV9kfd0gQl0kSSvuVZsG -nHL2VIWcIUJEp/sW2IklZHS1W1B0M8WjFKCZGS84MyL7CllT759AS1pHF6HVVuH+ -7sNvTOnf5lVy1XmLigocFiB0sc8Uul0CAwEAATANBgkqhkiG9w0BAQUFAAOCAgEA -uRurFJyKrNVQ/QLKaVDTORukuct7wfw/+FWDKdBEzD6styCVKrXHfSa3ZZS6Wv1f -XR2PrLuW6oJoGonVvt7xj086Vu7Dt+dB8JZIOn1QgNCNlocsVvEptZ6fKPfqcF6J -cZDcgXhFxB4dY/qV+TfcOKpF4sMJhqJXMh6xtJWskc6Saj0O7xQD/XnL0PeJbrk0 -l9ZiLWzxkXaYomM5YHolMdwpZSjpm7hHzr8cbNmWQLPl4NHvNrEvnDgLa7MTuLS/ -Zf3Yi/RtJIbA1ew1Kqs4zdA3jd/eTNCuVTxgj8VM1WR8i5li/kVv69wd20fO0nWq -EWpRIMMTzKGfYSCM4SUTTQXfmvg6o/dzM/p5NCQPyQPnEVGzxxJQ8NetM1dCjidl -F+ZzjW++DppwIIV8Ntah9tZIvATyCbIJSrX6ntsjnz7C1yZWqgkbbc3sTy9tQTJS -7Oa1sub8PdTj8gIlGdrRGDoVJ6fy/XQJkf0LuvadL5h7um2iL093Y5W5MS43hI8i -18qO4udxTXN+Xk+YZHBXvruLhE/QTm2KizPjA+EMU17zSQEybpwqCFshjyGjiJ2i -UFx5Cllg/QSqxKmSc2vTGCOM5T7+SaD5byg2x+f49pt0tXsFFmTphFNvdlKW9NJ2 -GXACHF0k7kh+q0a5ajb8nupIxkbtyvBEY7/y+XCj9zw= ------END CERTIFICATE----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr b/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr deleted file mode 100644 index 38ecdb1b036..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.csr +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEWTCCAkECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0B -AQEFAAOCAg8AMIICCgKCAgEAmfmrFVX0mDZEVZ0cI3V0Kzy8PprNcMc8vl8K02v3 -yAuJaigv+FJxE7jAa31sTHco/SN1kCKf9fiGGf7lhLqTutDCkjXxF7LhGBmC+W0K -FJG2Ucd88KPLzQKG7tOV5+4eoxOvQcfO41w/daAlrKuao2/hj2tUV2T4Sh7e5co1 -1lt2ndP/eegFMq6hnP7sunZKjX+kETx8H7rLexBB8NP7yZnrzEspxsoMQ/GLfguS -grQOcwz0MkujFTBGfOlzdJSm1qIl5Y+ZnnjGkHM8bKN3/gO5vov0iqpCNIwTt8L2 -vFqEy2vuW1yj/A4qP7RLXpmWv+XI0v3BbyffBvYNBfCzJCivPKOKIcPYfDQ/Y8DU -rWOOevoCB8LkqsLtVmtgNJB5wDDbtQYLPz6iTSm1M9oxtSypRa/GyYDwZQq1wkRg -uqj1y5VI24SJ/zYrLr0DzzpUx2CuWV/X9BMqV87MxLEjcqqT743lvSx/6gAX987E -hMvZfQETyHU1qON+P1V1fYUuhVb0kS0itBimAa5zKLCthcIMLchJlje2GpHOSd+/ -hnDdiDZEwWmGaX0OOV9kfd0gQl0kSSvuVZsGnHL2VIWcIUJEp/sW2IklZHS1W1B0 -M8WjFKCZGS84MyL7CllT759AS1pHF6HVVuH+7sNvTOnf5lVy1XmLigocFiB0sc8U -ul0CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQCZCPz6ps4cqB0KPFk7aRtE0Ga8 -MvnEbreFJ7UyVknUDz6cqW9Jsx0OpvCPbh6C/iXqBMx5tD1ZQwVRmqhNTwGzg1zN -27PDtx+7SEa+vc0IM3qNilff2TS0G4LMPpp1K3VOwAb9bQCM2CCqRtEnwmC8rQc3 -ZZYmo5+EEFgzgsZ43k2bOvytEcWhcnviUfYc7PHxiWLxrwEoqQCBT0YWLGqjqR0k -Zm6O8f+y4U+f25e2h/Wjt+qMERoZq2v/chpcvav0l/zHFTClPg8E/BflQnllys8K -Z1nOgb2qpB5FID7ighVLggL/iSVQU91XX6+TAATBtNCuAYBp/89UBmBkwgkHRzhb -eFSSjZtIBpFzDpcx1dKE2RQuySEk9K7aC9BMeh5m2DFVZDUZJi0qXNfex/KuVA5q -jgX88axjQDtn4BqkPTLR5/SLNk1MIZydiVQewTd2zmmHboJKiozjMWdd/+/79xuJ -zxPFfx5yIkGvipk0Tn6AdtW/YgxqhocUl/cpq4gYBFxzqJiHTfODVHZhV+svrFy8 -fm/f4DxMa6Fl5hqnoJHM0KVw/OYoGujSV8ER73gxzYSAHpAW7dWJqD1MBy6OU2a2 -uICQutBInoITDDtyH/9Uqkw4PfWrdrcwPEkPG+LrvgRgc2Gd8cFv1bXyJWNRRpMc -GsAeGqu8EGrQkRmfOQ== ------END CERTIFICATE REQUEST----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.key b/hadoop-ozone/integration-test/src/test/resources/ssl/client.key deleted file mode 100644 index 
0286a3ff865..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEAmfmrFVX0mDZEVZ0cI3V0Kzy8PprNcMc8vl8K02v3yAuJaigv -+FJxE7jAa31sTHco/SN1kCKf9fiGGf7lhLqTutDCkjXxF7LhGBmC+W0KFJG2Ucd8 -8KPLzQKG7tOV5+4eoxOvQcfO41w/daAlrKuao2/hj2tUV2T4Sh7e5co11lt2ndP/ -eegFMq6hnP7sunZKjX+kETx8H7rLexBB8NP7yZnrzEspxsoMQ/GLfguSgrQOcwz0 -MkujFTBGfOlzdJSm1qIl5Y+ZnnjGkHM8bKN3/gO5vov0iqpCNIwTt8L2vFqEy2vu -W1yj/A4qP7RLXpmWv+XI0v3BbyffBvYNBfCzJCivPKOKIcPYfDQ/Y8DUrWOOevoC -B8LkqsLtVmtgNJB5wDDbtQYLPz6iTSm1M9oxtSypRa/GyYDwZQq1wkRguqj1y5VI -24SJ/zYrLr0DzzpUx2CuWV/X9BMqV87MxLEjcqqT743lvSx/6gAX987EhMvZfQET -yHU1qON+P1V1fYUuhVb0kS0itBimAa5zKLCthcIMLchJlje2GpHOSd+/hnDdiDZE -wWmGaX0OOV9kfd0gQl0kSSvuVZsGnHL2VIWcIUJEp/sW2IklZHS1W1B0M8WjFKCZ -GS84MyL7CllT759AS1pHF6HVVuH+7sNvTOnf5lVy1XmLigocFiB0sc8Uul0CAwEA -AQKCAgADpg/wzH2kUbzizntJN9JN5/2J+j8eCgqddEEca3WOrv9NnbAtUT7OudUN -dwZm9XfqL7nsdXWW7ZG38ftcXtN7XNEPh+mzpxCAcrJQ2M2hWSaZ34FNboQ40nOC -G091FIZzVNcVVvfHGXuDfQ0Hf3WFo/QTYva3r3PWxc6AYX9PGhHAgbKPH/lnjw3T -W5MehAkWO00W/3jtg46o1uTJISzZRSV6TNmrlUQfJA0rKnkJUdz5yvfKbVJrAR7a -fOm4fIFLmsINI47/W1tRNvnalTEVut7e7hAYbRpuhlc9Roh0RCzbaS5XyeU05t0H -b21Ny5Pv7jEJFuxLhwVY8+GxH1gPXOJUkpoDS4gE9MOE6lq3oBj1Q+KFX2lGjlxn -fpOjFfuFTAgmsr7dzYP29T7X4rgixEQ6PKba8lhITq7Emaxer2bYlmyD9UlqVZHb -GjW9o7GcD5YRnbvxy5XMTbNVQatlOTOGmB+XkyfuJCwiSKT51HuR+YOLgtZASahS -0vDQduy26s9hWPMc6/+oy3eVBRMBrU+T7M5qkIFsPrDC4nRkZDhpxBpnAJm4yRwo -Bl+SWMD5DIXEwVuQfB8xBsM2sOSlT8/kVoiTze0X/F8ZGLLAFzUOillvNKu//69C -tQURH1RhA6AFlQOBVXkCDP7OSDmAYVXTynJL5FRMRPXz5sDHHQKCAQEAyZh/9Mjx -cFgf4NCKT26iPaoeoPPl6tAjsSEiW+jG8XozbF2uXOLbAgZguLMcrZDxXD+CNE3m -QnXr8Pi7SBMzhBfntVP9knEzCVwXYodk1fJdrZu41WL0/PspHzOw6+Q0MKSgySJ9 -aEbu67EgBkfZjAlAqDNXl5dwcOKX8KN9vKVe0Uj8OI6PWgyE0pbkelfUP6BWWq/R -2Ws6MMcHMHfw1Jku4rR+ybjbR+tnqXzC+M36RqjD+igcbjLwzx8Ab5Zo4sbpcufc -4KvY7S9nYb5h5IffnCsaGMBGeJwHNfzDaFuncwDiAtveryWPBUsdoABrT9MeX6L0 -uzQRlwQPQo1XZwKCAQEAw4c/vlFxWWJHbTwhf/cbqatuLT4PEC88yRvYk3vFDNkY -dnKyR1AxZwMJa0V9SKXLftEzmE3uFzVL8IPx+lRkZmLrab9lJiAc8z9nIFKSDCcc -MP2opo14fPK0ID7AMe96YWomHENhmcZdzaOYMMoRC9J312PQ5+ChodEl8vlUY/PU -WB/vPAtYfJEdZPJwnIhXSdae0uwghyzqlJ963Dxmh4dxJLh24PYLBVdhsgP21z8H -Du4KM4jtkHHlAz7GTdkt1H7fmFodsowFmc6SNu+22iiZ1XQrP4V++Umu24Gttw7I -f2rvZHDsQE9Qk6K84g7a/LAtvhO3U9H+uYmMgjJZmwKCAQBDL/IlUPs2qAgn0xjl -lEe6KYJ/vgm4kpnypMpgu1nijQmqaiZ8ipbXO+zsYbWDGzV1uyzX5caCC+8QprU0 -NkILGjR9OHrgXZ3W1rxseBdhPp9+BtI5O/vOfJ6d6Ypjc/D47UUxA6+sG0fxgVzc -+wFELKlB5aqhuTUeSka9Sp/TSYIqWhrFdq3MIzP5Q5TuOWthsTxWiRZ1UclZDFwX -CUJYeJ0prWI8NMHQXGJ2GECaz3tEJWb7bnbbO1sKjJiGmChovEZ9p0z0DBIGKrBX -4S2bDrW1xJ+z9BEIjWfR1GYD19gc+gRZU5IJ6YibCQfclYcuWXxb/2F1KstZ+15i -ndytAoIBAGvqWtEkzCWkK33roSWqcfccKcwIo3GwUKFCoC8OMbycmXbOaP0ZEpsj -PvCYwsP01bKhrhNSd6URgl81w7kBGQS1de7Adwgq0y+h/74ENJ1GfLXBWnLKRATa -Q3ZEi/lDjkzztCMHQXgI1r7nmtjavbvDpucXLTa9cRgJgiNvXxdnfPxCa9y8+lKO -GSYc9PBAA8U6EiChuHZC4Rm0R7AEGiaVJ2o38UzKH10MVFxW+cbk/3VLBhBZc5y0 -b8xxuis/QZ81gxzoJ9nilDjGnUZ62XXg0L7RxgjiGilmdH6sPP96xkgk8gmClbIM -1JEXUZ6GynCKoER3R0iY7zjh5M37Eh8CggEBAKIWY+cRumpBZAlWRIvp0DGToWbM -2GhuFi3Pd83DiifBGsKDNbqqnPQzxy0uqmGp8ollHFXDDt9ZlhWE0jcIa4Pb8ymv -toR36hGtGq1g0TggTy+OJneuHp27pzqSd/8VvIrxQEoag5pzLMPCoJniRTi78Nhg -60OkMJz0ycnrP79LyCK0OjJetoDMZLSvEy9XE7oV3L45l1rbFlcha6RFaGKa9ApW -Hl7E0pcbSWrUstTw8ywH3Dj3qgViDam+DuiDe3BaewCQlElVBHQyxbRVWID6m5jI -eR4RgzIebd9g5Pa7Q/GAt2qAREWCYjLvUmNEIGbXtKY/w0WCCqlgOyUPPC8= ------END RSA PRIVATE KEY----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem 
b/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem deleted file mode 100644 index 508e465e699..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/client.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCZ+asVVfSYNkRV -nRwjdXQrPLw+ms1wxzy+XwrTa/fIC4lqKC/4UnETuMBrfWxMdyj9I3WQIp/1+IYZ -/uWEupO60MKSNfEXsuEYGYL5bQoUkbZRx3zwo8vNAobu05Xn7h6jE69Bx87jXD91 -oCWsq5qjb+GPa1RXZPhKHt7lyjXWW3ad0/956AUyrqGc/uy6dkqNf6QRPHwfust7 -EEHw0/vJmevMSynGygxD8Yt+C5KCtA5zDPQyS6MVMEZ86XN0lKbWoiXlj5meeMaQ -czxso3f+A7m+i/SKqkI0jBO3wva8WoTLa+5bXKP8Dio/tEtemZa/5cjS/cFvJ98G -9g0F8LMkKK88o4ohw9h8ND9jwNStY456+gIHwuSqwu1Wa2A0kHnAMNu1Bgs/PqJN -KbUz2jG1LKlFr8bJgPBlCrXCRGC6qPXLlUjbhIn/NisuvQPPOlTHYK5ZX9f0EypX -zszEsSNyqpPvjeW9LH/qABf3zsSEy9l9ARPIdTWo434/VXV9hS6FVvSRLSK0GKYB -rnMosK2FwgwtyEmWN7Yakc5J37+GcN2INkTBaYZpfQ45X2R93SBCXSRJK+5Vmwac -cvZUhZwhQkSn+xbYiSVkdLVbUHQzxaMUoJkZLzgzIvsKWVPvn0BLWkcXodVW4f7u -w29M6d/mVXLVeYuKChwWIHSxzxS6XQIDAQABAoICAAOmD/DMfaRRvOLOe0k30k3n -/Yn6Px4KCp10QRxrdY6u/02dsC1RPs651Q13Bmb1d+ovuex1dZbtkbfx+1xe03tc -0Q+H6bOnEIByslDYzaFZJpnfgU1uhDjSc4IbT3UUhnNU1xVW98cZe4N9DQd/dYWj -9BNi9revc9bFzoBhf08aEcCBso8f+WePDdNbkx6ECRY7TRb/eO2DjqjW5MkhLNlF -JXpM2auVRB8kDSsqeQlR3PnK98ptUmsBHtp86bh8gUuawg0jjv9bW1E2+dqVMRW6 -3t7uEBhtGm6GVz1GiHRELNtpLlfJ5TTm3QdvbU3Lk+/uMQkW7EuHBVjz4bEfWA9c -4lSSmgNLiAT0w4TqWregGPVD4oVfaUaOXGd+k6MV+4VMCCayvt3Ng/b1PtfiuCLE -RDo8ptryWEhOrsSZrF6vZtiWbIP1SWpVkdsaNb2jsZwPlhGdu/HLlcxNs1VBq2U5 -M4aYH5eTJ+4kLCJIpPnUe5H5g4uC1kBJqFLS8NB27Lbqz2FY8xzr/6jLd5UFEwGt -T5PszmqQgWw+sMLidGRkOGnEGmcAmbjJHCgGX5JYwPkMhcTBW5B8HzEGwzaw5KVP -z+RWiJPN7Rf8XxkYssAXNQ6KWW80q7//r0K1BREfVGEDoAWVA4FVeQIM/s5IOYBh -VdPKckvkVExE9fPmwMcdAoIBAQDJmH/0yPFwWB/g0IpPbqI9qh6g8+Xq0COxISJb -6MbxejNsXa5c4tsCBmC4sxytkPFcP4I0TeZCdevw+LtIEzOEF+e1U/2ScTMJXBdi -h2TV8l2tm7jVYvT8+ykfM7Dr5DQwpKDJIn1oRu7rsSAGR9mMCUCoM1eXl3Bw4pfw -o328pV7RSPw4jo9aDITSluR6V9Q/oFZar9HZazowxwcwd/DUmS7itH7JuNtH62ep -fML4zfpGqMP6KBxuMvDPHwBvlmjixuly59zgq9jtL2dhvmHkh9+cKxoYwEZ4nAc1 -/MNoW6dzAOIC296vJY8FSx2gAGtP0x5fovS7NBGXBA9CjVdnAoIBAQDDhz++UXFZ -YkdtPCF/9xupq24tPg8QLzzJG9iTe8UM2Rh2crJHUDFnAwlrRX1Ipct+0TOYTe4X -NUvwg/H6VGRmYutpv2UmIBzzP2cgUpIMJxww/aimjXh88rQgPsAx73phaiYcQ2GZ -xl3No5gwyhEL0nfXY9Dn4KGh0SXy+VRj89RYH+88C1h8kR1k8nCciFdJ1p7S7CCH -LOqUn3rcPGaHh3EkuHbg9gsFV2GyA/bXPwcO7goziO2QceUDPsZN2S3Uft+YWh2y -jAWZzpI277baKJnVdCs/hX75Sa7bga23Dsh/au9kcOxAT1CTorziDtr8sC2+E7dT -0f65iYyCMlmbAoIBAEMv8iVQ+zaoCCfTGOWUR7opgn++CbiSmfKkymC7WeKNCapq -JnyKltc77OxhtYMbNXW7LNflxoIL7xCmtTQ2QgsaNH04euBdndbWvGx4F2E+n34G -0jk7+858np3pimNz8PjtRTEDr6wbR/GBXNz7AUQsqUHlqqG5NR5KRr1Kn9NJgipa -GsV2rcwjM/lDlO45a2GxPFaJFnVRyVkMXBcJQlh4nSmtYjw0wdBcYnYYQJrPe0Ql -Zvtudts7WwqMmIaYKGi8Rn2nTPQMEgYqsFfhLZsOtbXEn7P0EQiNZ9HUZgPX2Bz6 -BFlTkgnpiJsJB9yVhy5ZfFv/YXUqy1n7XmKd3K0CggEAa+pa0STMJaQrfeuhJapx -9xwpzAijcbBQoUKgLw4xvJyZds5o/RkSmyM+8JjCw/TVsqGuE1J3pRGCXzXDuQEZ -BLV17sB3CCrTL6H/vgQ0nUZ8tcFacspEBNpDdkSL+UOOTPO0IwdBeAjWvuea2Nq9 -u8Om5xctNr1xGAmCI29fF2d8/EJr3Lz6Uo4ZJhz08EADxToSIKG4dkLhGbRHsAQa -JpUnajfxTMofXQxUXFb5xuT/dUsGEFlznLRvzHG6Kz9BnzWDHOgn2eKUOMadRnrZ -deDQvtHGCOIaKWZ0fqw8/3rGSCTyCYKVsgzUkRdRnobKcIqgRHdHSJjvOOHkzfsS -HwKCAQEAohZj5xG6akFkCVZEi+nQMZOhZszYaG4WLc93zcOKJ8EawoM1uqqc9DPH -LS6qYanyiWUcVcMO31mWFYTSNwhrg9vzKa+2hHfqEa0arWDROCBPL44md64enbun -OpJ3/xW8ivFAShqDmnMsw8KgmeJFOLvw2GDrQ6QwnPTJyes/v0vIIrQ6Ml62gMxk -tK8TL1cTuhXcvjmXWtsWVyFrpEVoYpr0ClYeXsTSlxtJatSy1PDzLAfcOPeqBWIN -qb4O6IN7cFp7AJCUSVUEdDLFtFVYgPqbmMh5HhGDMh5t32Dk9rtD8YC3aoBERYJi -Mu9SY0QgZte0pj/DRYIKqWA7JQ88Lw== ------END PRIVATE KEY----- diff 
--git a/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh b/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh deleted file mode 100755 index 5eb5ff23a74..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/generate.sh +++ /dev/null @@ -1,34 +0,0 @@ -# Changes these CN's to match your hosts in your environment if needed. -SERVER_CN=localhost -# Used when doing mutual TLS -CLIENT_CN=localhost - -echo Generate CA key: -openssl genrsa -passout pass:1111 -des3 -out ca.key 4096 -echo Generate CA certificate: -# Generates ca.crt which is the trustCertCollectionFile -openssl req -passin pass:1111 -new -x509 -days 365 -key ca.key -out ca.crt -subj "/CN=${SERVER_CN}" -echo Generate server key: -openssl genrsa -passout pass:1111 -des3 -out server.key 4096 -echo Generate server signing request: -openssl req -passin pass:1111 -new -key server.key -out server.csr -subj "/CN=${SERVER_CN}" -echo Self-signed server certificate: -# Generates server.crt which is the certChainFile for the server -openssl x509 -req -passin pass:1111 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt -echo Remove passphrase from server key: -openssl rsa -passin pass:1111 -in server.key -out server.key -echo Generate client key -openssl genrsa -passout pass:1111 -des3 -out client.key 4096 -echo Generate client signing request: -openssl req -passin pass:1111 -new -key client.key -out client.csr -subj "/CN=${CLIENT_CN}" -echo Self-signed client certificate: -# Generates client.crt which is the clientCertChainFile for the client (need for mutual TLS only) -openssl x509 -passin pass:1111 -req -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out client.crt -echo Remove passphrase from client key: -openssl rsa -passin pass:1111 -in client.key -out client.key -echo Converting the private keys to X.509: -# Generates client.pem which is the clientPrivateKeyFile for the Client (needed for mutual TLS only) -openssl pkcs8 -topk8 -nocrypt -in client.key -out client.pem -# Generates server.pem which is the privateKeyFile for the Server -openssl pkcs8 -topk8 -nocrypt -in server.key -out server.pem - diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt b/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt deleted file mode 100644 index 88757acd612..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.crt +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEnDCCAoQCAQEwDQYJKoZIhvcNAQEFBQAwFDESMBAGA1UEAwwJbG9jYWxob3N0 -MB4XDTE4MTIwNjA2NTgzNVoXDTE5MTIwNjA2NTgzNVowFDESMBAGA1UEAwwJbG9j -YWxob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxXU3AclIWZJm -iqNfUaurfSrgUFbbCSmhce1R0lVIKqscCJUpE1XNTWVXVCY7FWt9S1bJwFIzOYqN -zEqaGDIiemxfHyN87mz1pKZsIS8tL6+TBxcVsDQ5tMBXT8+jnOTvffCYeSDPL4tr -iTqmXvT3Qhc1YbjK9MbMIf+sXRWKzg9OADAVvO7GQCEiLtlfBRUIwXHAyRMtclHR -x8r6VrRZH3tHTr2ruixpFIH5/Ak+s8Wq+nNsqWGiMj15wlDG+pPiRzTcnJNoH4zs -R1D2trv2Qd87dxf/j0e441XyT0PEuqMmrXUKWy3JvF849EO4yNrns1LTYgnHawgu -9ahStUqaPYnE8dcR8GnDZoJHQ8BtQt3/X8F5LnoVxZPn89jdKPBY4foQ/XE7kwlO -U7JE6FATwsdUPwq2WgmPDqlVe2cvvCxyp6ZBQrM3EpLSew57oJZzDU5T8jqLkwqV -7pJyxYtz2sETsbeq7SJhS66pkP64/A4L03/gVh+OYlGJJqbX1GwYLbzexZAv166+ -eVK1IbFDhYCGdqinJfCfgCrAPnnRhuSAWvXLvaYJCHQIiu10umebLoaLjBjg/z2v -tXyl+sX7Lx127JgDXJUsiWSKVKCbGVd+d5e9cxdngWhnq0/cYgUKSwKZcA4eZ4CX -yA+8O4bUdhPZNfbGvuHCSdvMv6cgmT0CAwEAATANBgkqhkiG9w0BAQUFAAOCAgEA -J0VSeWd8nScizyFr74hCFcwRtdtTaPtkHaHTPpBVrGl7Wygsajao5LS3dBZt7h4S 
-uq4fVH2vPjjbPrdWbQZ0wmCzqaiGy75ZAglwIReosazXCBaaxYpWDZxOcgl/CCdr -1A3Ls84QzDGYGsVNlEhvyEkjWOw1urAqC49aKZdSle4Z0pagfHn9Bg0zjLyHTvS2 -BxWDUCEJmaNf7NwO2PFL5lAaA62rQyWK7VQkOsFPKjb9lY2/+R6AZnB/dLyWgFaY -wqbdzjjFkQRdjJPnf2azfh7Td+Z02H/b5h+B7KK1VDHv4R6INSlaci8SoUital8B -UtAhKjzbI+4MCx12zPPf5sp/g9jxKopnpNsKBrTdwe/h6iJ9mpOhAMgpXAxMKftA -EHoI1bnyRVUcbQPUFGQYecT7bRqANhZLB5ysUenk09jNQRFcWXl9MJhLjq8LvO/w -DXvQKVLEDQs9idJpuf9wjAIow0QxLE7zsAY6ZKFXiYas60cKcH6BtLc0eGxvgF5a -42b84B28nmjVoZUJzmeKPSpxMd9o/nTXFud3jbUBdXfaoNvqZIdNmKPvytbcKTil -4QVjcNhQEo76YWEfkFx5ZmyvxGWwwPcOmeT87BhK7ma6s1AMi1m6/rTpsizbPiuK -ZXnEuIZagK3AHUEEAWi3ZeGvAqGPZW/jUPL4xOO296c= ------END CERTIFICATE----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr b/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr deleted file mode 100644 index 3c1c5d619fa..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.csr +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEWTCCAkECAQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MIICIjANBgkqhkiG9w0B -AQEFAAOCAg8AMIICCgKCAgEAxXU3AclIWZJmiqNfUaurfSrgUFbbCSmhce1R0lVI -KqscCJUpE1XNTWVXVCY7FWt9S1bJwFIzOYqNzEqaGDIiemxfHyN87mz1pKZsIS8t -L6+TBxcVsDQ5tMBXT8+jnOTvffCYeSDPL4triTqmXvT3Qhc1YbjK9MbMIf+sXRWK -zg9OADAVvO7GQCEiLtlfBRUIwXHAyRMtclHRx8r6VrRZH3tHTr2ruixpFIH5/Ak+ -s8Wq+nNsqWGiMj15wlDG+pPiRzTcnJNoH4zsR1D2trv2Qd87dxf/j0e441XyT0PE -uqMmrXUKWy3JvF849EO4yNrns1LTYgnHawgu9ahStUqaPYnE8dcR8GnDZoJHQ8Bt -Qt3/X8F5LnoVxZPn89jdKPBY4foQ/XE7kwlOU7JE6FATwsdUPwq2WgmPDqlVe2cv -vCxyp6ZBQrM3EpLSew57oJZzDU5T8jqLkwqV7pJyxYtz2sETsbeq7SJhS66pkP64 -/A4L03/gVh+OYlGJJqbX1GwYLbzexZAv166+eVK1IbFDhYCGdqinJfCfgCrAPnnR -huSAWvXLvaYJCHQIiu10umebLoaLjBjg/z2vtXyl+sX7Lx127JgDXJUsiWSKVKCb -GVd+d5e9cxdngWhnq0/cYgUKSwKZcA4eZ4CXyA+8O4bUdhPZNfbGvuHCSdvMv6cg -mT0CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4ICAQB5EuNt1A6Q+AO80t+08wEeV6/a -sJlLZKkEww4yMajMFo/i8zr70jW/9Garc74pzhF054XpLrTwMTLlPFfMaf3wDtMy -8v6Oh3jaroiYVLn14KbkxM2UCkwY0rh1eu0e9HVpM2763Ycc27Bgt5DQJ0h8tU/P -S+knwmEACIjimQIrqpgB2lPYU68cvCmifLjyzJ93mGdgOllKoXshv1uhGFNACBMj -xYt+bWSP+uZx/aFU0tPcXdo4b6QmlE43iLcDFduf8nSNcSldvXquXUjWvRVFMqUr -7gzmvCV9uekJHSW8ftORB3O9Q8OmBMQ0WLHexE/zcXmXNILHBMIKvYe7K5CCOU7h -6q5aBmsZkdPwVeY8FGtLShj3ljRKyxdCddN3zzouRmKWHId5QSDD4fZhyCtH4DvP -E0GLyJkZnHvQ8/HCCLEltNSjL9tXRj5aO/RqCqNAHUmhc9LcItS+wUJPVZBEo6Np -+4pSMI2Vm97wD9qV1soGz/KwpFpj69sn8klQWVAdTKJ6bCF5028sh+UT9sVynq33 -Cp7Zbg/soNAYWGVNffcz/3vCumMTRJGTDkAap0xcHlhqGo8t4OoDSRVTWoci35or -aV17gDiE5Q0s0IP/lnkoPAp45CB+GIhjuPqXPpBUOZ+4YM/furhDUoYoXaRo15Ru -75qeGYFfwO9cnTZT5Q== ------END CERTIFICATE REQUEST----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.key b/hadoop-ozone/integration-test/src/test/resources/ssl/server.key deleted file mode 100644 index de16c357487..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAxXU3AclIWZJmiqNfUaurfSrgUFbbCSmhce1R0lVIKqscCJUp -E1XNTWVXVCY7FWt9S1bJwFIzOYqNzEqaGDIiemxfHyN87mz1pKZsIS8tL6+TBxcV -sDQ5tMBXT8+jnOTvffCYeSDPL4triTqmXvT3Qhc1YbjK9MbMIf+sXRWKzg9OADAV -vO7GQCEiLtlfBRUIwXHAyRMtclHRx8r6VrRZH3tHTr2ruixpFIH5/Ak+s8Wq+nNs -qWGiMj15wlDG+pPiRzTcnJNoH4zsR1D2trv2Qd87dxf/j0e441XyT0PEuqMmrXUK -Wy3JvF849EO4yNrns1LTYgnHawgu9ahStUqaPYnE8dcR8GnDZoJHQ8BtQt3/X8F5 -LnoVxZPn89jdKPBY4foQ/XE7kwlOU7JE6FATwsdUPwq2WgmPDqlVe2cvvCxyp6ZB -QrM3EpLSew57oJZzDU5T8jqLkwqV7pJyxYtz2sETsbeq7SJhS66pkP64/A4L03/g -Vh+OYlGJJqbX1GwYLbzexZAv166+eVK1IbFDhYCGdqinJfCfgCrAPnnRhuSAWvXL 
-vaYJCHQIiu10umebLoaLjBjg/z2vtXyl+sX7Lx127JgDXJUsiWSKVKCbGVd+d5e9 -cxdngWhnq0/cYgUKSwKZcA4eZ4CXyA+8O4bUdhPZNfbGvuHCSdvMv6cgmT0CAwEA -AQKCAgBx2vWdzQ8vvs/rruo+cGtQoBF5oato7B1QUNQ2IMCdAc8HT+LAaGAZ+Y5S -Uj0NS86SS3fHsl4hFrhOjNGvk/D3gFeU3+Sgoik+CEwfElHOxkFT/EagNGz1wVZX -CdZAmG1TxBBW/8kXlB+soCngZQXRkQpRz7kPTTXVgNRFVC+WQ5LpXtCaAWBFCBXq -x6IXjxpeWJYeGzXATldVCcAxkIo3MeFbENjdX9AzaALaBgamqBq/kSCdxlM8/t+f -YO5q/CykfGGc0w5d6ucu9AteMKF9OBfUwvy0BFoik6NFe0ELkRmzOOKA0rUZLhrs -FcSN5FNnviFuzU60c6KIOcd/C4ZFYvFS8TZu3DX3KnwVeyOhvXXNOnHfYL1ELwmj -8K9BIkixBENkB64mq3lvf84SbflcgVsD1V+ALwF84YY+Zgq9pFrv4PxtS//1pxlD -P7/V+oS+G5RvcvcwPjug0z/EXaeyIehvJs1C/829clMvGc7+WuzxIZX26yFGESOC -z29aRUlNRDibQ0qkgS4+mCNp2xLj3PwgTBDxIxWyynOOCzQKp01vlXU5lxW1IMP9 -8JAZ75jNUJseMiCG9OnPrlRwHv9vomUihUZjwiJXLlH4DIibU3T0fGK4PAykESCM -HyUK2bx93UtJl/uTIMtoekz9pyeM+3JqHPQKfhjCdr1kZoXZAQKCAQEA+QNSx8Y5 -wnbXZSD9vdWA4n5RC2DIHSENNedx7S1ubTg++cR6Fs10dKHrhIPEybh7zmyR1uwF -/Yz8FixNXuzuCf0E3dwSxq242Ja9pKoB5rc0uKC8F+58uJl3EBeMX81KjA1K+7Xj -L2GlJdeCZm0y56JHmufR1nWd7K0J1DVNVHU5MOQt1ZQb1qenXGI67MxwgUXnzYdS -EtxOeinPUMsaZGM2ZwLfCVt+heOCcKJht9WAu+kstm9rD5ArEy8EB0yYBoEp1WSH -KCN0K2sAc0brHrCtfXtfImqAZUzD1+FCuuoukvdsbL03e7PeTlSuSjlKgWjxY/Sc -ND6zy/iRayAMwQKCAQEAyv+RWDhirgODp7ufmBPFlDzF5dPUek/kMGk754hJ9S06 -eLnFAsR23jddZGSzH2nOIaSfJ/5mWRqJMFIXXqlU/juJWhyeBr7DPvx4i0wxJdks -2Z35LQkCeZqmaNwrxIy71lrZ7qlElOquj864mZiZp6WIrgQF68HIIzDqZOM2VGZJ -RzZTmw21aNjzWBl7LdEFKMQhMJGmknH2YN7INk8pgyQz2u5MUoCYeMh7hVm11Kdz -q7L8Ixc/ZRRFTgVD2wXF76tyD2OjZJt9iCHCIvyPh59upt/Ie/vYQHULHjKsPYy6 -ijHKDWyg/oaDBdY/JYYwU+ThLRnvVwsJpO3Jf68ffQKCAQAgTs0TvGVMFM03gsNJ -OQVC3a64MjNkjCBBqSi/5BAavZx2HYbVpIyCgWukQtBqd7QggTee0fqo/fzLB652 -LXlo9FoISwBopKuB9nTeg2xBue1uMvSUik3GSasH/HYrC+CrMSJUbDHwuNOLiF2T -2oErSoPN1lwEXjhCN+U5kjzZQ2hLLp+/wTqnbBMrylbo2FGUhDRiFzeP2OOZuAj8 -640eDz1Eujuj5CoTRwRqhrb0+g980fEKLoSOfV8JWyVDqS1kUqfR1vwuOgNdisGB -M2dYEQZBbJtYRMcp3X7faIuW4sFuMgnwRdCIDTs/oH8IhExlY+9Fz7vgj24Wfcao -Rn1BAoIBAD5kZJjX48SWQe3Y5gmI8i5Iq46jF+hsC7excH8OTaT0vMcEWgAqwFo2 -bBcCOGfMTlXa0iwpre1vEYFvic1HgF8Pj3zJ1Ow/z6TZVneB+I0offd47XAhF8im -dsU9/pnPo6ATlm4bSn/2zaZXpDdZRsjXQPYzOFqo2cmvLCvMBhPUyGsB0JqUkRBj -tg967Xg8iThpZ8YUzjyumEpXzvOaSykKhIGiwoSND8/31rc6xn9Q5GV+gq6KY6q+ -mzqKtbtov9iVOl5ugnbWr7OapJ+6PqcxooHZwDYTRvkwwDUM4BGe4mq9ONv9alIw -p66wlgIDh3ERpQAGu6BmPRWbHFaJTcUCggEASrbK3C2se9fB8WGxfQJ6BLKrpuQF -GQTWmNnj/Ie6Y2LZsM6cE9tNxVXJO2RZMmJSrSBOTmCf8CuUkZsI1aH1YncAvpWv -C5aelEEGfX5cuRTVGHgMsyMxseghES+eKbUqgEbTYYv7363aSNsAkd/iPScGKVX1 -NQXe3yxXHuCiDfbwasZoCWn6fP9wPtQITVC3scYk5OMne6NwNXePlnBufg69UdTC -2ygG93nOAgJ0AI2Q0Nx4bagIpEOGzOGEGwoYuSmq1LSx/Bno4vO2BlaLJVH6Zwhg -m7aD2YwJSIotcF0zzfT7bbBIYxZflQYaYfE8b2sEwy3rYQLKD4wdZ0Qr3w== ------END RSA PRIVATE KEY----- diff --git a/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem b/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem deleted file mode 100644 index e3ca684c895..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/ssl/server.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDFdTcByUhZkmaK -o19Rq6t9KuBQVtsJKaFx7VHSVUgqqxwIlSkTVc1NZVdUJjsVa31LVsnAUjM5io3M -SpoYMiJ6bF8fI3zubPWkpmwhLy0vr5MHFxWwNDm0wFdPz6Oc5O998Jh5IM8vi2uJ -OqZe9PdCFzVhuMr0xswh/6xdFYrOD04AMBW87sZAISIu2V8FFQjBccDJEy1yUdHH -yvpWtFkfe0dOvau6LGkUgfn8CT6zxar6c2ypYaIyPXnCUMb6k+JHNNyck2gfjOxH -UPa2u/ZB3zt3F/+PR7jjVfJPQ8S6oyatdQpbLcm8Xzj0Q7jI2uezUtNiCcdrCC71 -qFK1Spo9icTx1xHwacNmgkdDwG1C3f9fwXkuehXFk+fz2N0o8Fjh+hD9cTuTCU5T -skToUBPCx1Q/CrZaCY8OqVV7Zy+8LHKnpkFCszcSktJ7DnuglnMNTlPyOouTCpXu 
-knLFi3PawROxt6rtImFLrqmQ/rj8DgvTf+BWH45iUYkmptfUbBgtvN7FkC/Xrr55 -UrUhsUOFgIZ2qKcl8J+AKsA+edGG5IBa9cu9pgkIdAiK7XS6Z5suhouMGOD/Pa+1 -fKX6xfsvHXbsmANclSyJZIpUoJsZV353l71zF2eBaGerT9xiBQpLAplwDh5ngJfI -D7w7htR2E9k19sa+4cJJ28y/pyCZPQIDAQABAoICAHHa9Z3NDy++z+uu6j5wa1Cg -EXmhq2jsHVBQ1DYgwJ0BzwdP4sBoYBn5jlJSPQ1LzpJLd8eyXiEWuE6M0a+T8PeA -V5Tf5KCiKT4ITB8SUc7GQVP8RqA0bPXBVlcJ1kCYbVPEEFb/yReUH6ygKeBlBdGR -ClHPuQ9NNdWA1EVUL5ZDkule0JoBYEUIFerHohePGl5Ylh4bNcBOV1UJwDGQijcx -4VsQ2N1f0DNoAtoGBqaoGr+RIJ3GUzz+359g7mr8LKR8YZzTDl3q5y70C14woX04 -F9TC/LQEWiKTo0V7QQuRGbM44oDStRkuGuwVxI3kU2e+IW7NTrRzoog5x38LhkVi -8VLxNm7cNfcqfBV7I6G9dc06cd9gvUQvCaPwr0EiSLEEQ2QHriareW9/zhJt+VyB -WwPVX4AvAXzhhj5mCr2kWu/g/G1L//WnGUM/v9X6hL4blG9y9zA+O6DTP8Rdp7Ih -6G8mzUL/zb1yUy8Zzv5a7PEhlfbrIUYRI4LPb1pFSU1EOJtDSqSBLj6YI2nbEuPc -/CBMEPEjFbLKc44LNAqnTW+VdTmXFbUgw/3wkBnvmM1Qmx4yIIb06c+uVHAe/2+i -ZSKFRmPCIlcuUfgMiJtTdPR8Yrg8DKQRIIwfJQrZvH3dS0mX+5Mgy2h6TP2nJ4z7 -cmoc9Ap+GMJ2vWRmhdkBAoIBAQD5A1LHxjnCdtdlIP291YDiflELYMgdIQ0153Ht -LW5tOD75xHoWzXR0oeuEg8TJuHvObJHW7AX9jPwWLE1e7O4J/QTd3BLGrbjYlr2k -qgHmtzS4oLwX7ny4mXcQF4xfzUqMDUr7teMvYaUl14JmbTLnokea59HWdZ3srQnU -NU1UdTkw5C3VlBvWp6dcYjrszHCBRefNh1IS3E56Kc9QyxpkYzZnAt8JW36F44Jw -omG31YC76Sy2b2sPkCsTLwQHTJgGgSnVZIcoI3QrawBzRusesK19e18iaoBlTMPX -4UK66i6S92xsvTd7s95OVK5KOUqBaPFj9Jw0PrPL+JFrIAzBAoIBAQDK/5FYOGKu -A4Onu5+YE8WUPMXl09R6T+QwaTvniEn1LTp4ucUCxHbeN11kZLMfac4hpJ8n/mZZ -GokwUhdeqVT+O4laHJ4GvsM+/HiLTDEl2SzZnfktCQJ5mqZo3CvEjLvWWtnuqUSU -6q6PzriZmJmnpYiuBAXrwcgjMOpk4zZUZklHNlObDbVo2PNYGXst0QUoxCEwkaaS -cfZg3sg2TymDJDPa7kxSgJh4yHuFWbXUp3OrsvwjFz9lFEVOBUPbBcXvq3IPY6Nk -m32IIcIi/I+Hn26m38h7+9hAdQseMqw9jLqKMcoNbKD+hoMF1j8lhjBT5OEtGe9X -Cwmk7cl/rx99AoIBACBOzRO8ZUwUzTeCw0k5BULdrrgyM2SMIEGpKL/kEBq9nHYd -htWkjIKBa6RC0Gp3tCCBN57R+qj9/MsHrnYteWj0WghLAGikq4H2dN6DbEG57W4y -9JSKTcZJqwf8disL4KsxIlRsMfC404uIXZPagStKg83WXAReOEI35TmSPNlDaEsu -n7/BOqdsEyvKVujYUZSENGIXN4/Y45m4CPzrjR4PPUS6O6PkKhNHBGqGtvT6D3zR -8QouhI59XwlbJUOpLWRSp9HW/C46A12KwYEzZ1gRBkFsm1hExyndft9oi5biwW4y -CfBF0IgNOz+gfwiETGVj70XPu+CPbhZ9xqhGfUECggEAPmRkmNfjxJZB7djmCYjy -LkirjqMX6GwLt7Fwfw5NpPS8xwRaACrAWjZsFwI4Z8xOVdrSLCmt7W8RgW+JzUeA -Xw+PfMnU7D/PpNlWd4H4jSh993jtcCEXyKZ2xT3+mc+joBOWbhtKf/bNplekN1lG -yNdA9jM4WqjZya8sK8wGE9TIawHQmpSREGO2D3rteDyJOGlnxhTOPK6YSlfO85pL -KQqEgaLChI0Pz/fWtzrGf1DkZX6Cropjqr6bOoq1u2i/2JU6Xm6Cdtavs5qkn7o+ -pzGigdnANhNG+TDANQzgEZ7iar042/1qUjCnrrCWAgOHcRGlAAa7oGY9FZscVolN -xQKCAQBKtsrcLax718HxYbF9AnoEsqum5AUZBNaY2eP8h7pjYtmwzpwT203FVck7 -ZFkyYlKtIE5OYJ/wK5SRmwjVofVidwC+la8Llp6UQQZ9fly5FNUYeAyzIzGx6CER -L54ptSqARtNhi/vfrdpI2wCR3+I9JwYpVfU1Bd7fLFce4KIN9vBqxmgJafp8/3A+ -1AhNULexxiTk4yd7o3A1d4+WcG5+Dr1R1MLbKAb3ec4CAnQAjZDQ3HhtqAikQ4bM -4YQbChi5KarUtLH8Geji87YGVoslUfpnCGCbtoPZjAlIii1wXTPN9PttsEhjFl+V -Bhph8TxvawTDLethAsoPjB1nRCvf ------END PRIVATE KEY----- diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep deleted file mode 100644 index 09697dce6e1..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep deleted file mode 100644 index 09697dce6e1..00000000000 --- a/hadoop-ozone/integration-test/src/test/resources/webapps/scm/.gitkeep +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml deleted file mode 100644 index 653209b0fa5..00000000000 --- a/hadoop-ozone/ozone-manager/pom.xml +++ /dev/null @@ -1,135 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-ozone-manager - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Manager Server - Apache Hadoop Ozone Manager Server - jar - - - - - org.apache.hadoop - hadoop-ozone-common - - - - org.apache.hadoop - hadoop-ozone-client - - - - org.apache.hadoop - hadoop-hdds-docs - - - - org.bouncycastle - bcprov-jdk15on - - - - org.mockito - mockito-core - 2.28.2 - test - - - com.github.spotbugs - spotbugs - provided - - - org.apache.hadoop - hadoop-common - test - test-jar - - - - org.apache.hadoop - hadoop-hdds-server-scm - test - test-jar - - - - junit - junit - test - - - - org.jmockit - jmockit - 1.24 - test - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-common-html - prepare-package - - unpack - - - - - org.apache.hadoop - hadoop-hdds-server-framework - ${project.build.outputDirectory} - - webapps/static/**/*.* - - - org.apache.hadoop - hadoop-hdds-docs - ${project.build.outputDirectory}/webapps/ozoneManager - docs/**/*.* - - - true - - - - - - - - ${basedir}/../../hadoop-hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - - - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java deleted file mode 100644 index 595ea43df0e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; - -import java.io.IOException; -import java.util.List; - -/** - * BucketManager handles all the bucket level operations. - */ -public interface BucketManager extends IOzoneAcl { - /** - * Creates a bucket. - * @param bucketInfo - OmBucketInfo for creating bucket. - */ - void createBucket(OmBucketInfo bucketInfo) throws IOException; - - - /** - * Returns Bucket Information. - * @param volumeName - Name of the Volume. - * @param bucketName - Name of the Bucket. - */ - OmBucketInfo getBucketInfo(String volumeName, String bucketName) - throws IOException; - - /** - * Sets bucket property from args. - * @param args - BucketArgs. - * @throws IOException - */ - void setBucketProperty(OmBucketArgs args) throws IOException; - - /** - * Deletes an existing empty bucket from volume. - * @param volumeName - Name of the volume. - * @param bucketName - Name of the bucket. - * @throws IOException - */ - void deleteBucket(String volumeName, String bucketName) throws IOException; - - /** - * Returns a list of buckets represented by {@link OmBucketInfo} - * in the given volume. - * - * @param volumeName - * Required parameter volume name determines buckets in which volume - * to return. - * @param startBucket - * Optional start bucket name parameter indicating where to start - * the bucket listing from, this key is excluded from the result. - * @param bucketPrefix - * Optional start key parameter, restricting the response to buckets - * that begin with the specified name. - * @param maxNumOfBuckets - * The maximum number of buckets to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of buckets. - * @throws IOException - */ - List listBuckets(String volumeName, - String startBucket, String bucketPrefix, int maxNumOfBuckets) - throws IOException; - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java deleted file mode 100644 index d64eae4e6e4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ /dev/null @@ -1,590 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.CryptoProtocolVersion; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; - -import com.google.common.base.Preconditions; -import org.iq80.leveldb.DBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -/** - * OM bucket manager. - */ -public class BucketManagerImpl implements BucketManager { - private static final Logger LOG = - LoggerFactory.getLogger(BucketManagerImpl.class); - - /** - * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock. - */ - private final OMMetadataManager metadataManager; - private final KeyProviderCryptoExtension kmsProvider; - - /** - * Constructs BucketManager. - * - * @param metadataManager - */ - public BucketManagerImpl(OMMetadataManager metadataManager) { - this(metadataManager, null, false); - } - - public BucketManagerImpl(OMMetadataManager metadataManager, - KeyProviderCryptoExtension kmsProvider) { - this(metadataManager, kmsProvider, false); - } - - public BucketManagerImpl(OMMetadataManager metadataManager, - KeyProviderCryptoExtension kmsProvider, boolean isRatisEnabled) { - this.metadataManager = metadataManager; - this.kmsProvider = kmsProvider; - } - - KeyProviderCryptoExtension getKMSProvider() { - return kmsProvider; - } - - /** - * MetadataDB is maintained in MetadataManager and shared between - * BucketManager and VolumeManager. (and also by BlockManager) - * - * BucketManager uses MetadataDB to store bucket level information. - * - * Keys used in BucketManager for storing data into MetadataDB - * for BucketInfo: - * {volume/bucket} -> bucketInfo - * - * Work flow of create bucket: - * - * -> Check if the Volume exists in metadataDB, if not throw - * VolumeNotFoundException. - * -> Else check if the Bucket exists in metadataDB, if so throw - * BucketExistException - * -> Else update MetadataDB with VolumeInfo. - */ - - /** - * Creates a bucket. 
- * - * @param bucketInfo - OmBucketInfo. - */ - @Override - public void createBucket(OmBucketInfo bucketInfo) throws IOException { - Preconditions.checkNotNull(bucketInfo); - String volumeName = bucketInfo.getVolumeName(); - String bucketName = bucketInfo.getBucketName(); - boolean acquiredBucketLock = false; - metadataManager.getLock().acquireLock(VOLUME_LOCK, volumeName); - try { - acquiredBucketLock = metadataManager.getLock().acquireLock(BUCKET_LOCK, - volumeName, bucketName); - String volumeKey = metadataManager.getVolumeKey(volumeName); - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - OmVolumeArgs volumeArgs = metadataManager.getVolumeTable().get(volumeKey); - - //Check if the volume exists - if (volumeArgs == null) { - LOG.debug("volume: {} not found ", volumeName); - throw new OMException("Volume doesn't exist", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - //Check if bucket already exists - if (metadataManager.getBucketTable().get(bucketKey) != null) { - LOG.debug("bucket: {} already exists ", bucketName); - throw new OMException("Bucket already exist", - OMException.ResultCodes.BUCKET_ALREADY_EXISTS); - } - BucketEncryptionKeyInfo bek = bucketInfo.getEncryptionKeyInfo(); - BucketEncryptionKeyInfo.Builder bekb = null; - if (bek != null) { - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. - KeyProvider.Metadata metadata = getKMSProvider().getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = new BucketEncryptionKeyInfo.Builder() - .setKeyName(bek.getKeyName()) - .setVersion(CryptoProtocolVersion.ENCRYPTION_ZONES) - .setSuite(CipherSuite.convert(metadata.getCipher())); - } - List acls = new ArrayList<>(); - acls.addAll(bucketInfo.getAcls()); - volumeArgs.getAclMap().getDefaultAclList().forEach( - a -> acls.add(OzoneAcl.fromProtobufWithAccessType(a))); - - OmBucketInfo.Builder omBucketInfoBuilder = OmBucketInfo.newBuilder() - .setVolumeName(bucketInfo.getVolumeName()) - .setBucketName(bucketInfo.getBucketName()) - .setAcls(acls) - .setStorageType(bucketInfo.getStorageType()) - .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) - .setCreationTime(Time.now()) - .addAllMetadata(bucketInfo.getMetadata()); - - if (bekb != null) { - omBucketInfoBuilder.setBucketEncryptionKey(bekb.build()); - } - - OmBucketInfo omBucketInfo = omBucketInfoBuilder.build(); - commitBucketInfoToDB(omBucketInfo); - LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); - } catch (IOException | DBException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Bucket creation failed for bucket:{} in volume:{}", - bucketName, volumeName, ex); - } - throw ex; - } finally { - if (acquiredBucketLock) { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - metadataManager.getLock().releaseLock(VOLUME_LOCK, volumeName); - } - } - - private void 
commitBucketInfoToDB(OmBucketInfo omBucketInfo) - throws IOException { - String dbBucketKey = - metadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - metadataManager.getBucketTable().put(dbBucketKey, - omBucketInfo); - } - - /** - * Returns Bucket Information. - * - * @param volumeName - Name of the Volume. - * @param bucketName - Name of the Bucket. - */ - @Override - public OmBucketInfo getBucketInfo(String volumeName, String bucketName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo value = metadataManager.getBucketTable().get(bucketKey); - if (value == null) { - LOG.debug("bucket: {} not found in volume: {}.", bucketName, - volumeName); - throw new OMException("Bucket not found", - BUCKET_NOT_FOUND); - } - return value; - } catch (IOException | DBException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Exception while getting bucket info for bucket: {}", - bucketName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - /** - * Sets bucket property from args. - * - * @param args - BucketArgs. - * @throws IOException - On Failure. - */ - @Override - public void setBucketProperty(OmBucketArgs args) throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo oldBucketInfo = - metadataManager.getBucketTable().get(bucketKey); - //Check if bucket exist - if (oldBucketInfo == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - BUCKET_NOT_FOUND); - } - OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); - bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName()) - .setBucketName(oldBucketInfo.getBucketName()); - bucketInfoBuilder.addAllMetadata(args.getMetadata()); - - //Check StorageType to update - StorageType storageType = args.getStorageType(); - if (storageType != null) { - bucketInfoBuilder.setStorageType(storageType); - LOG.debug("Updating bucket storage type for bucket: {} in volume: {}", - bucketName, volumeName); - } else { - bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType()); - } - - //Check Versioning to update - Boolean versioning = args.getIsVersionEnabled(); - if (versioning != null) { - bucketInfoBuilder.setIsVersionEnabled(versioning); - LOG.debug("Updating bucket versioning for bucket: {} in volume: {}", - bucketName, volumeName); - } else { - bucketInfoBuilder - .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled()); - } - bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime()); - - // Set acls from oldBucketInfo if it has any. 
- if (oldBucketInfo.getAcls() != null) { - bucketInfoBuilder.setAcls(oldBucketInfo.getAcls()); - } - - OmBucketInfo omBucketInfo = bucketInfoBuilder.build(); - - - commitBucketInfoToDB(omBucketInfo); - } catch (IOException | DBException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Setting bucket property failed for bucket:{} in volume:{}", - bucketName, volumeName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - /** - * Deletes an existing empty bucket from volume. - * - * @param volumeName - Name of the volume. - * @param bucketName - Name of the bucket. - * @throws IOException - on Failure. - */ - @Override - public void deleteBucket(String volumeName, String bucketName) - throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - //Check if bucket exists - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - if (metadataManager.getBucketTable().get(bucketKey) == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - BUCKET_NOT_FOUND); - } - //Check if bucket is empty - if (!metadataManager.isBucketEmpty(volumeName, bucketName)) { - LOG.debug("bucket: {} is not empty ", bucketName); - throw new OMException("Bucket is not empty", - OMException.ResultCodes.BUCKET_NOT_EMPTY); - } - commitDeleteBucketInfoToOMDB(bucketKey); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, - volumeName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - private void commitDeleteBucketInfoToOMDB(String dbBucketKey) - throws IOException { - metadataManager.getBucketTable().delete(dbBucketKey); - } - - /** - * {@inheritDoc} - */ - @Override - public List listBuckets(String volumeName, - String startBucket, String bucketPrefix, int maxNumOfBuckets) - throws IOException { - Preconditions.checkNotNull(volumeName); - return metadataManager.listBuckets( - volumeName, startBucket, bucketPrefix, maxNumOfBuckets); - - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acl); - if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "BucketManager. 
OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - boolean changed = false; - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - String dbBucketKey = metadataManager.getBucketKey(volume, bucket); - OmBucketInfo bucketInfo = - metadataManager.getBucketTable().get(dbBucketKey); - if (bucketInfo == null) { - LOG.debug("Bucket:{}/{} does not exist", volume, bucket); - throw new OMException("Bucket " + bucket + " is not found", - BUCKET_NOT_FOUND); - } - - changed = bucketInfo.addAcl(acl); - if (changed) { - metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); - } - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Add acl operation failed for bucket:{}/{} acl:{}", - volume, bucket, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - - return changed; - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acl); - if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "BucketManager. OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - boolean removed = false; - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - String dbBucketKey = metadataManager.getBucketKey(volume, bucket); - OmBucketInfo bucketInfo = - metadataManager.getBucketTable().get(dbBucketKey); - if (bucketInfo == null) { - LOG.debug("Bucket:{}/{} does not exist", volume, bucket); - throw new OMException("Bucket " + bucket + " is not found", - BUCKET_NOT_FOUND); - } - removed = bucketInfo.removeAcl(acl); - if (removed) { - metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); - } - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Remove acl operation failed for bucket:{}/{} acl:{}", - volume, bucket, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - return removed; - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acls); - if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "BucketManager. 
OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - String dbBucketKey = metadataManager.getBucketKey(volume, bucket); - OmBucketInfo bucketInfo = - metadataManager.getBucketTable().get(dbBucketKey); - if (bucketInfo == null) { - LOG.debug("Bucket:{}/{} does not exist", volume, bucket); - throw new OMException("Bucket " + bucket + " is not found", - BUCKET_NOT_FOUND); - } - bucketInfo.setAcls(acls); - metadataManager.getBucketTable().put(dbBucketKey, bucketInfo); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Set acl operation failed for bucket:{}/{} acl:{}", - volume, bucket, StringUtils.join(",", acls), ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - return true; - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - Objects.requireNonNull(obj); - - if (!obj.getResourceType().equals(OzoneObj.ResourceType.BUCKET)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "BucketManager. OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); - try { - String dbBucketKey = metadataManager.getBucketKey(volume, bucket); - OmBucketInfo bucketInfo = - metadataManager.getBucketTable().get(dbBucketKey); - if (bucketInfo == null) { - LOG.debug("Bucket:{}/{} does not exist", volume, bucket); - throw new OMException("Bucket " + bucket + " is not found", - BUCKET_NOT_FOUND); - } - return bucketInfo.getAcls(); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Get acl operation failed for bucket:{}/{} acl:{}", - volume, bucket, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); - } - } - - /** - * Check access for given ozoneObject. - * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @return true if user has access else false. 
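
A minimal sketch of driving the bucket ACL operations above; bucketObj (a BUCKET-type OzoneObj) and readAcl (an OzoneAcl) are assumed to be constructed by the caller, and the snippet is illustrative rather than part of the removed source.

    void bucketAclExample(BucketManager bucketManager, OzoneObj bucketObj, OzoneAcl readAcl)
        throws IOException {
      boolean added = bucketManager.addAcl(bucketObj, readAcl);    // false if already present
      List<OzoneAcl> current = bucketManager.getAcl(bucketObj);    // ACLs as stored in the DB
      bucketManager.setAcl(bucketObj, current);                    // replaces the whole list
      boolean removed = bucketManager.removeAcl(bucketObj, readAcl);
      // Each call throws OMException(BUCKET_NOT_FOUND) if the named bucket is missing, and
      // IllegalArgumentException if bucketObj is not of ResourceType.BUCKET.
    }
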
- */ - @Override - public boolean checkAccess(OzoneObj ozObject, RequestContext context) - throws OMException { - Objects.requireNonNull(ozObject); - Objects.requireNonNull(context); - - String volume = ozObject.getVolumeName(); - String bucket = ozObject.getBucketName(); - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); - try { - String dbBucketKey = metadataManager.getBucketKey(volume, bucket); - OmBucketInfo bucketInfo = - metadataManager.getBucketTable().get(dbBucketKey); - if (bucketInfo == null) { - LOG.debug("Bucket:{}/{} does not exist", volume, bucket); - throw new OMException("Bucket " + bucket + " is not found", - BUCKET_NOT_FOUND); - } - boolean hasAccess = OzoneAclUtil.checkAclRights(bucketInfo.getAcls(), - context); - if (LOG.isDebugEnabled()) { - LOG.debug("user:{} has access rights for bucket:{} :{} ", - context.getClientUgi(), ozObject.getBucketName(), hasAccess); - } - return hasAccess; - } catch (IOException ex) { - if(ex instanceof OMException) { - throw (OMException) ex; - } - LOG.error("CheckAccess operation failed for bucket:{}/{} acl:{}", - volume, bucket, ex); - throw new OMException("Check access operation failed for " + - "bucket:" + bucket, ex, INTERNAL_ERROR); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java deleted file mode 100644 index 6162ba2700a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/IOzoneAcl.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; - -import java.io.IOException; -import java.util.List; - -/** - * Interface for Ozone Acl management. - */ -public interface IOzoneAcl { - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * - * @throws IOException if there is error. - * */ - boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * - * @throws IOException if there is error. - * */ - boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException; - - /** - * Acls to be set for given Ozone object. This operations reset ACL for - * given object to list of ACLs provided in argument. - * @param obj Ozone object. - * @param acls List of acls. - * - * @throws IOException if there is error. - * */ - boolean setAcl(OzoneObj obj, List acls) throws IOException; - - /** - * Returns list of ACLs for given Ozone object. - * @param obj Ozone object. - * - * @throws IOException if there is error. - * */ - List getAcl(OzoneObj obj) throws IOException; - - /** - * Check access for given ozoneObject. - * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @throws org.apache.hadoop.ozone.om.exceptions.OMException - * @return true if user has access else false. - */ - boolean checkAccess(OzoneObj ozObject, RequestContext context) - throws OMException; -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java deleted file mode 100644 index ff12123f363..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java +++ /dev/null @@ -1,256 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.protobuf.ServiceException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult; - -import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; - -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.ratis.protocol.ClientId; -import org.rocksdb.RocksDBException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This is the background service to delete keys. Scan the metadata of om - * periodically to get the keys from DeletedTable and ask scm to delete - * metadata accordingly, if scm returns success for keys, then clean up those - * keys. - */ -public class KeyDeletingService extends BackgroundService { - private static final Logger LOG = - LoggerFactory.getLogger(KeyDeletingService.class); - - // The thread pool size for key deleting service. - private final static int KEY_DELETING_CORE_POOL_SIZE = 2; - - private final OzoneManager ozoneManager; - private final ScmBlockLocationProtocol scmClient; - private final KeyManager manager; - private ClientId clientId = ClientId.randomId(); - private final int keyLimitPerTask; - private final AtomicLong deletedKeyCount; - private final AtomicLong runCount; - - KeyDeletingService(OzoneManager ozoneManager, - ScmBlockLocationProtocol scmClient, - KeyManager manager, long serviceInterval, - long serviceTimeout, Configuration conf) { - super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS, - KEY_DELETING_CORE_POOL_SIZE, serviceTimeout); - this.ozoneManager = ozoneManager; - this.scmClient = scmClient; - this.manager = manager; - this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK, - OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT); - this.deletedKeyCount = new AtomicLong(0); - this.runCount = new AtomicLong(0); - } - - /** - * Returns the number of times this Background service has run. - * - * @return Long, run count. 
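
A test-style sketch of wiring up the background service described in the class comment above. The caller is assumed to sit in the same package (the constructor is package-private), and scmBlockClient, keyManager and conf are assumed to come from the test fixture; this is illustrative, not part of the removed file.

    void keyDeletingServiceExample(ScmBlockLocationProtocol scmBlockClient,
        KeyManager keyManager, Configuration conf) {
      // Passing a null OzoneManager makes shouldRun() return true (the testing path) and
      // isRatisEnabled() return false, so the direct deleteAllKeys() path is exercised.
      KeyDeletingService service = new KeyDeletingService(
          null, scmBlockClient, keyManager,
          1000,       // serviceInterval in ms
          300_000,    // serviceTimeout in ms
          conf);
      service.start();

      // These counters exist for tests; poll them to observe progress.
      long runs = service.getRunCount().get();
      long purged = service.getDeletedKeyCount().get();
    }
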
- */ - @VisibleForTesting - public AtomicLong getRunCount() { - return runCount; - } - - /** - * Returns the number of keys deleted by the background service. - * - * @return Long count. - */ - @VisibleForTesting - public AtomicLong getDeletedKeyCount() { - return deletedKeyCount; - } - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new KeyDeletingTask()); - return queue; - } - - private boolean shouldRun() { - if (ozoneManager == null) { - // OzoneManager can be null for testing - return true; - } - return ozoneManager.isLeader(); - } - - private boolean isRatisEnabled() { - if (ozoneManager == null) { - return false; - } - return ozoneManager.isRatisEnabled(); - } - - /** - * A key deleting task scans OM DB and looking for a certain number of - * pending-deletion keys, sends these keys along with their associated blocks - * to SCM for deletion. Once SCM confirms keys are deleted (once SCM persisted - * the blocks info in its deletedBlockLog), it removes these keys from the - * DB. - */ - private class KeyDeletingTask implements - BackgroundTask { - - @Override - public int getPriority() { - return 0; - } - - @Override - public BackgroundTaskResult call() throws Exception { - // Check if this is the Leader OM. If not leader, no need to execute this - // task. - if (shouldRun()) { - runCount.incrementAndGet(); - try { - long startTime = Time.monotonicNow(); - List keyBlocksList = manager - .getPendingDeletionKeys(keyLimitPerTask); - if (keyBlocksList != null && keyBlocksList.size() > 0) { - List results = - scmClient.deleteKeyBlocks(keyBlocksList); - if (results != null) { - int delCount; - if (isRatisEnabled()) { - delCount = submitPurgeKeysRequest(results); - } else { - // TODO: Once HA and non-HA paths are merged, we should have - // only one code path here. Purge keys should go through an - // OMRequest model. - delCount = deleteAllKeys(results); - } - LOG.debug("Number of keys deleted: {}, elapsed time: {}ms", - delCount, Time.monotonicNow() - startTime); - deletedKeyCount.addAndGet(delCount); - } - } - } catch (IOException e) { - LOG.error("Error while running delete keys background task. Will " + - "retry at next run.", e); - } - } - // By design, no one cares about the results of this call back. - return EmptyTaskResult.newResult(); - } - - /** - * Deletes all the keys that SCM has acknowledged and queued for delete. - * - * @param results DeleteBlockGroups returned by SCM. - * @throws RocksDBException on Error. - * @throws IOException on Error - */ - private int deleteAllKeys(List results) - throws RocksDBException, IOException { - Table deletedTable = manager.getMetadataManager().getDeletedTable(); - - DBStore store = manager.getMetadataManager().getStore(); - - // Put all keys to delete in a single transaction and call for delete. - int deletedCount = 0; - try (BatchOperation writeBatch = store.initBatchOperation()) { - for (DeleteBlockGroupResult result : results) { - if (result.isSuccess()) { - // Purge key from OM DB. - deletedTable.deleteWithBatch(writeBatch, - result.getObjectKey()); - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); - deletedCount++; - } - } - // Write a single transaction for delete. - store.commitBatchOperation(writeBatch); - } - return deletedCount; - } - - /** - * Submits PurgeKeys request for the keys whose blocks have been deleted - * by SCM. - * - * @param results DeleteBlockGroups returned by SCM. 
- * @throws IOException on Error - */ - public int submitPurgeKeysRequest(List results) { - List purgeKeysList = new ArrayList<>(); - - // Put all keys to be purged in a list - int deletedCount = 0; - for (DeleteBlockGroupResult result : results) { - if (result.isSuccess()) { - // Add key to PurgeKeys list. - String deletedKey = result.getObjectKey(); - purgeKeysList.add(deletedKey); - LOG.debug("Key {} set to be purged from OM DB", deletedKey); - deletedCount++; - } - } - - PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder() - .addAllKeys(purgeKeysList) - .build(); - - OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.PurgeKeys) - .setPurgeKeysRequest(purgeKeysRequest) - .setClientId(clientId.toString()) - .build(); - - // Submit PurgeKeys request to OM - try { - ozoneManager.getOmServerProtocol().submitRequest(null, omRequest); - } catch (ServiceException e) { - LOG.error("PurgeKey request failed. Will retry at next run."); - return 0; - } - - return deletedCount; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java deleted file mode 100644 index c1aeaa9a39d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ /dev/null @@ -1,250 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; -import org.apache.hadoop.hdds.utils.BackgroundService; - -import java.io.IOException; -import java.util.List; - -/** - * Handles key level commands. - */ -public interface KeyManager extends OzoneManagerFS, IOzoneAcl { - - /** - * Start key manager. - * - * @param configuration - * @throws IOException - */ - void start(OzoneConfiguration configuration); - - /** - * Stop key manager. - */ - void stop() throws IOException; - - /** - * After calling commit, the key will be made visible. There can be multiple - * open key writes in parallel (identified by client id). The most recently - * committed one will be the one visible. - * - * @param args the key to commit. - * @param clientID the client that is committing. - * @throws IOException - */ - void commitKey(OmKeyArgs args, long clientID) throws IOException; - - /** - * A client calls this on an open key, to request to allocate a new block, - * and appended to the tail of current block list of the open client. - * - * @param args the key to append - * @param clientID the client requesting block. - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return the reference to the new block. - * @throws IOException - */ - OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException; - - /** - * Given the args of a key to put, write an open key entry to meta data. - * - * In case that the container creation or key write failed on - * DistributedStorageHandler, this key's metadata will still stay in OM. - * TODO garbage collect the open keys that never get closed - * - * @param args the args of the key provided by client. - * @return a OpenKeySession instance client uses to talk to container. - * @throws IOException - */ - OpenKeySession openKey(OmKeyArgs args) throws IOException; - - /** - * Look up an existing key. Return the info of the key to client side, which - * DistributedStorageHandler will use to access the data on datanode. - * - * @param args the args of the key provided by client. - * @param clientAddress a hint to key manager, order the datanode in returned - * pipeline by distance between client and datanode. 
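
A sketch of the open, allocate, commit lifecycle implied by the contract above, for illustration only. keyArgs is assumed to name the volume/bucket/key, carry a requested size, and carry the written block list at commit time; OpenKeySession#getId() is assumed to return the client id of this open write, and null is passed for the optional client-address hint.

    void writeKeyExample(KeyManager keyManager, OmKeyArgs keyArgs) throws IOException {
      OpenKeySession session = keyManager.openKey(keyArgs);  // key exists but is not visible yet

      // More blocks can be requested while writing; they attach to the same open version.
      OmKeyLocationInfo extra =
          keyManager.allocateBlock(keyArgs, session.getId(), new ExcludeList());

      // Only commit makes the key visible; if several clients opened the same key, the
      // most recently committed session is the one readers see.
      keyManager.commitKey(keyArgs, session.getId());
      OmKeyInfo visible = keyManager.lookupKey(keyArgs, null);
    }
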
- * @return a OmKeyInfo instance client uses to talk to container. - * @throws IOException - */ - OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException; - - /** - * Renames an existing key within a bucket. - * - * @param args the args of the key provided by client. - * @param toKeyName New name to be used for the key - * @throws IOException if specified key doesn't exist or - * some other I/O errors while renaming the key. - */ - void renameKey(OmKeyArgs args, String toKeyName) throws IOException; - - /** - * Deletes an object by an object key. The key will be immediately removed - * from OM namespace and become invisible to clients. The object data - * will be removed in async manner that might retain for some time. - * - * @param args the args of the key provided by client. - * @throws IOException if specified key doesn't exist or - * some other I/O errors while deleting an object. - */ - void deleteKey(OmKeyArgs args) throws IOException; - - /** - * Returns a list of keys represented by {@link OmKeyInfo} - * in the given bucket. - * - * @param volumeName - * the name of the volume. - * @param bucketName - * the name of the bucket. - * @param startKey - * the start key name, only the keys whose name is - * after this value will be included in the result. - * This key is excluded from the result. - * @param keyPrefix - * key name prefix, only the keys whose name has - * this prefix will be included in the result. - * @param maxKeys - * the maximum number of keys to return. It ensures - * the size of the result will not exceed this limit. - * @return a list of keys. - * @throws IOException - */ - List listKeys(String volumeName, - String bucketName, String startKey, String keyPrefix, int maxKeys) - throws IOException; - - /** - * Returns a list of pending deletion key info that ups to the given count. - * Each entry is a {@link BlockGroup}, which contains the info about the - * key name and all its associated block IDs. A pending deletion key is - * stored with #deleting# prefix in OM DB. - * - * @param count max number of keys to return. - * @return a list of {@link BlockGroup} representing keys and blocks. - * @throws IOException - */ - List getPendingDeletionKeys(int count) throws IOException; - - /** - * Returns a list of all still open key info. Which contains the info about - * the key name and all its associated block IDs. A pending open key has - * prefix #open# in OM DB. - * - * @return a list of {@link BlockGroup} representing keys and blocks. - * @throws IOException - */ - List getExpiredOpenKeys() throws IOException; - - /** - * Deletes a expired open key by its name. Called when a hanging key has been - * lingering for too long. Once called, the open key entries gets removed - * from OM mdata data. - * - * @param objectKeyName object key name with #open# prefix. - * @throws IOException if specified key doesn't exist or other I/O errors. - */ - void deleteExpiredOpenKey(String objectKeyName) throws IOException; - - /** - * Returns the metadataManager. - * @return OMMetadataManager. - */ - OMMetadataManager getMetadataManager(); - - /** - * Returns the instance of Deleting Service. - * @return Background service. - */ - BackgroundService getDeletingService(); - - - /** - * Initiate multipart upload for the specified key. - * @param keyArgs - * @return MultipartInfo - * @throws IOException - */ - OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException; - - /** - * Commit Multipart upload part file. 
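
A sketch of paging through a bucket with listKeys() as documented above: because startKey itself is excluded from each result, the last key of one page can seed the next call. The volume, bucket and prefix names are hypothetical, and a null startKey is assumed to mean "start from the beginning".

    void listAllKeysExample(KeyManager keyManager) throws IOException {
      final int pageSize = 1000;
      String startKey = null;                    // assumed: null/empty starts at the beginning
      List<OmKeyInfo> page;
      do {
        page = keyManager.listKeys("vol1", "bucket1", startKey, "logs/", pageSize);
        for (OmKeyInfo key : page) {
          // consume key ...
        }
        if (!page.isEmpty()) {
          startKey = page.get(page.size() - 1).getKeyName();
        }
      } while (page.size() == pageSize);         // a short page means the listing is done
    }
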
- * @param omKeyArgs - * @param clientID - * @return OmMultipartCommitUploadPartInfo - * @throws IOException - */ - - OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientID) throws IOException; - - /** - * Complete Multipart upload Request. - * @param omKeyArgs - * @param multipartUploadList - * @return OmMultipartUploadCompleteInfo - * @throws IOException - */ - OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs, - OmMultipartUploadCompleteList multipartUploadList) throws IOException; - - /** - * Abort multipart upload request. - * @param omKeyArgs - * @throws IOException - */ - void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException; - - OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws OMException; - - /** - * Returns list of parts of a multipart upload key. - * @param volumeName - * @param bucketName - * @param keyName - * @param uploadID - * @param partNumberMarker - * @param maxParts - * @return OmMultipartUploadListParts - */ - OmMultipartUploadListParts listParts(String volumeName, String bucketName, - String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException; -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java deleted file mode 100644 index 20b7fdfec53..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ /dev/null @@ -1,2157 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.security.GeneralSecurityException; -import java.security.PrivilegedExceptionAction; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.TreeMap; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.UniqueId; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.CodecRegistry; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import org.apache.commons.codec.digest.DigestUtils; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; -import static org.apache.hadoop.util.Time.monotonicNow; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implementation of keyManager. 
- */ -public class KeyManagerImpl implements KeyManager { - private static final Logger LOG = - LoggerFactory.getLogger(KeyManagerImpl.class); - - /** - * A SCM block client, used to talk to SCM to allocate block during putKey. - */ - private final OzoneManager ozoneManager; - private final ScmClient scmClient; - private final OMMetadataManager metadataManager; - private final long scmBlockSize; - private final boolean useRatis; - - private final int preallocateBlocksMax; - private final String omId; - private final OzoneBlockTokenSecretManager secretManager; - private final boolean grpcBlockTokenEnabled; - - private BackgroundService keyDeletingService; - - private final KeyProviderCryptoExtension kmsProvider; - private final PrefixManager prefixManager; - - - @VisibleForTesting - public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient, - OMMetadataManager metadataManager, OzoneConfiguration conf, String omId, - OzoneBlockTokenSecretManager secretManager) { - this(null, new ScmClient(scmBlockClient, null), metadataManager, - conf, omId, secretManager, null, null); - } - - public KeyManagerImpl(OzoneManager om, ScmClient scmClient, - OzoneConfiguration conf, String omId) { - this (om, scmClient, om.getMetadataManager(), conf, omId, - om.getBlockTokenMgr(), om.getKmsProvider(), om.getPrefixManager()); - } - - @SuppressWarnings("parameternumber") - private KeyManagerImpl(OzoneManager om, ScmClient scmClient, - OMMetadataManager metadataManager, OzoneConfiguration conf, String omId, - OzoneBlockTokenSecretManager secretManager, - KeyProviderCryptoExtension kmsProvider, PrefixManager prefixManager) { - this.scmBlockSize = (long) conf - .getStorageSize(OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, - StorageUnit.BYTES); - this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY, - DFS_CONTAINER_RATIS_ENABLED_DEFAULT); - this.preallocateBlocksMax = conf.getInt( - OZONE_KEY_PREALLOCATION_BLOCKS_MAX, - OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT); - this.grpcBlockTokenEnabled = conf.getBoolean( - HDDS_BLOCK_TOKEN_ENABLED, - HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - - this.ozoneManager = om; - this.omId = omId; - this.scmClient = scmClient; - this.metadataManager = metadataManager; - this.prefixManager = prefixManager; - this.secretManager = secretManager; - this.kmsProvider = kmsProvider; - - } - - @Override - public void start(OzoneConfiguration configuration) { - if (keyDeletingService == null) { - long blockDeleteInterval = configuration.getTimeDuration( - OZONE_BLOCK_DELETING_SERVICE_INTERVAL, - OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long serviceTimeout = configuration.getTimeDuration( - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, - OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - keyDeletingService = new KeyDeletingService(ozoneManager, - scmClient.getBlockClient(), this, blockDeleteInterval, - serviceTimeout, configuration); - keyDeletingService.start(); - } - } - - KeyProviderCryptoExtension getKMSProvider() { - return kmsProvider; - } - - @Override - public void stop() throws IOException { - if (keyDeletingService != null) { - keyDeletingService.shutdown(); - keyDeletingService = null; - } - } - - private OmBucketInfo getBucketInfo(String volumeName, String bucketName) - throws IOException { - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - return metadataManager.getBucketTable().get(bucketKey); - } - - private void validateBucket(String volumeName, String bucketName) - throws IOException { - 
String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - // Check if bucket exists - if (metadataManager.getBucketTable().get(bucketKey) == null) { - String volumeKey = metadataManager.getVolumeKey(volumeName); - // If the volume also does not exist, we should throw volume not found - // exception - if (metadataManager.getVolumeTable().get(volumeKey) == null) { - LOG.error("volume not found: {}", volumeName); - throw new OMException("Volume not found", - VOLUME_NOT_FOUND); - } - - // if the volume exists but bucket does not exist, throw bucket not found - // exception - LOG.error("bucket not found: {}/{} ", volumeName, bucketName); - throw new OMException("Bucket not found", - BUCKET_NOT_FOUND); - } - } - - /** - * Check S3 bucket exists or not. - * @param volumeName - * @param bucketName - * @throws IOException - */ - private OmBucketInfo validateS3Bucket(String volumeName, String bucketName) - throws IOException { - - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable(). - get(bucketKey); - //Check if bucket already exists - if (omBucketInfo == null) { - LOG.error("bucket not found: {}/{} ", volumeName, bucketName); - throw new OMException("Bucket not found", - BUCKET_NOT_FOUND); - } - return omBucketInfo; - } - - @Override - public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException { - Preconditions.checkNotNull(args); - - - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - validateBucket(volumeName, bucketName); - String openKey = metadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - - OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey); - if (keyInfo == null) { - LOG.error("Allocate block for a key not in open status in meta store" + - " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); - throw new OMException("Open Key not found", - KEY_NOT_FOUND); - } - - // current version not committed, so new blocks coming now are added to - // the same version - List locationInfos = - allocateBlock(keyInfo, excludeList, scmBlockSize); - - keyInfo.appendNewBlocks(locationInfos, true); - keyInfo.updateModifcationTime(); - metadataManager.getOpenKeyTable().put(openKey, keyInfo); - - return locationInfos.get(0); - - } - - /** - * This methods avoids multiple rpc calls to SCM by allocating multiple blocks - * in one rpc call. - * @param keyInfo - key info for key to be allocated. - * @param requestedSize requested length for allocation. - * @param excludeList exclude list while allocating blocks. - * @param requestedSize requested size to be allocated. 
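
A worked example of the block count this method computes (the formula is the one in the body below). The 256 MB block size and the cap of 64 blocks are assumed defaults for OZONE_SCM_BLOCK_SIZE and OZONE_KEY_PREALLOCATION_BLOCKS_MAX; treat the concrete numbers as illustrative.

    // numBlocks = min(ceil(requestedSize / scmBlockSize), preallocateBlocksMax)
    long scmBlockSize = 256L * 1024 * 1024;        // assumed 256 MB SCM block size
    int preallocateBlocksMax = 64;                  // assumed preallocation cap

    long requestedSize = 1024L * 1024 * 1024;       // client intends to write 1 GB
    int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1),
        preallocateBlocksMax);                      // ceil(1 GB / 256 MB) = 4 blocks, one SCM RPC

    // A 20 GB request would need 80 blocks but is capped at 64 here; the client fetches
    // the remainder later through explicit allocateBlock() calls as it keeps writing.
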
- * @return - * @throws IOException - */ - private List allocateBlock(OmKeyInfo keyInfo, - ExcludeList excludeList, long requestedSize) throws IOException { - int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1), - preallocateBlocksMax); - List locationInfos = new ArrayList<>(numBlocks); - String remoteUser = getRemoteUser().getShortUserName(); - List allocatedBlocks; - try { - allocatedBlocks = scmClient.getBlockClient() - .allocateBlock(scmBlockSize, numBlocks, keyInfo.getType(), - keyInfo.getFactor(), omId, excludeList); - } catch (SCMException ex) { - if (ex.getResult() - .equals(SCMException.ResultCodes.SAFE_MODE_EXCEPTION)) { - throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); - } - throw ex; - } - for (AllocatedBlock allocatedBlock : allocatedBlocks) { - OmKeyLocationInfo.Builder builder = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(allocatedBlock.getBlockID())) - .setLength(scmBlockSize) - .setOffset(0) - .setPipeline(allocatedBlock.getPipeline()); - if (grpcBlockTokenEnabled) { - builder.setToken(secretManager - .generateToken(remoteUser, allocatedBlock.getBlockID().toString(), - getAclForUser(remoteUser), scmBlockSize)); - } - locationInfos.add(builder.build()); - } - return locationInfos; - } - - /* Optimize ugi lookup for RPC operations to avoid a trip through - * UGI.getCurrentUser which is synch'ed. - */ - public static UserGroupInformation getRemoteUser() throws IOException { - UserGroupInformation ugi = Server.getRemoteUser(); - return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); - } - - /** - * Return acl for user. - * @param user - * - * */ - private EnumSet getAclForUser(String user) { - // TODO: Return correct acl for user. - return EnumSet.allOf(AccessModeProto.class); - } - - private EncryptedKeyVersion generateEDEK( - final String ezKeyName) throws IOException { - if (ezKeyName == null) { - return null; - } - long generateEDEKStartTime = monotonicNow(); - EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser( - new PrivilegedExceptionAction() { - @Override - public EncryptedKeyVersion run() throws IOException { - try { - return getKMSProvider().generateEncryptedKey(ezKeyName); - } catch (GeneralSecurityException e) { - throw new IOException(e); - } - } - }); - long generateEDEKTime = monotonicNow() - generateEDEKStartTime; - LOG.debug("generateEDEK takes {} ms", generateEDEKTime); - Preconditions.checkNotNull(edek); - return edek; - } - - @Override - public OpenKeySession openKey(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args); - Preconditions.checkNotNull(args.getAcls(), "Default acls " + - "should be set."); - - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - validateBucket(volumeName, bucketName); - - long currentTime = UniqueId.next(); - OmKeyInfo keyInfo; - long openVersion; - // NOTE size of a key is not a hard limit on anything, it is a value that - // client should expect, in terms of current size of key. If client sets - // a value, then this value is used, otherwise, we allocate a single - // block which is the current size, if read by the client. - final long size = args.getDataSize() > 0 ? - args.getDataSize() : scmBlockSize; - final List locations = new ArrayList<>(); - - ReplicationFactor factor = args.getFactor(); - if (factor == null) { - factor = useRatis ? 
ReplicationFactor.THREE : ReplicationFactor.ONE; - } - - ReplicationType type = args.getType(); - if (type == null) { - type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE; - } - - String dbKeyName = metadataManager.getOzoneKey( - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - - FileEncryptionInfo encInfo; - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - OmBucketInfo bucketInfo; - try { - bucketInfo = getBucketInfo(volumeName, bucketName); - encInfo = getFileEncryptionInfo(bucketInfo); - keyInfo = prepareKeyInfo(args, dbKeyName, size, locations, encInfo); - } catch (OMException e) { - throw e; - } catch (IOException ex) { - LOG.error("Key open failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes.KEY_ALLOCATION_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - if (keyInfo == null) { - // the key does not exist, create a new object, the new blocks are the - // version 0 - keyInfo = createKeyInfo(args, locations, factor, type, size, - encInfo, bucketInfo); - } - openVersion = keyInfo.getLatestVersionLocations().getVersion(); - LOG.debug("Key {} allocated in volume {} bucket {}", - keyName, volumeName, bucketName); - allocateBlockInKey(keyInfo, size, currentTime); - return new OpenKeySession(currentTime, keyInfo, openVersion); - } - - private void allocateBlockInKey(OmKeyInfo keyInfo, long size, long sessionId) - throws IOException { - String openKey = metadataManager - .getOpenKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), sessionId); - // requested size is not required but more like a optimization: - // SCM looks at the requested, if it 0, no block will be allocated at - // the point, if client needs more blocks, client can always call - // allocateBlock. But if requested size is not 0, OM will preallocate - // some blocks and piggyback to client, to save RPC calls. - if (size > 0) { - List locationInfos = - allocateBlock(keyInfo, new ExcludeList(), size); - keyInfo.appendNewBlocks(locationInfos, true); - } - - metadataManager.getOpenKeyTable().put(openKey, keyInfo); - - } - - private OmKeyInfo prepareKeyInfo( - OmKeyArgs keyArgs, String dbKeyName, long size, - List locations, FileEncryptionInfo encInfo) - throws IOException { - OmKeyInfo keyInfo = null; - if (keyArgs.getIsMultipartKey()) { - keyInfo = prepareMultipartKeyInfo(keyArgs, size, locations, encInfo); - } else if (metadataManager.getKeyTable().isExist(dbKeyName)) { - keyInfo = metadataManager.getKeyTable().get(dbKeyName); - // the key already exist, the new blocks will be added as new version - // when locations.size = 0, the new version will have identical blocks - // as its previous version - keyInfo.addNewVersion(locations, true); - keyInfo.setDataSize(size + keyInfo.getDataSize()); - } - if(keyInfo != null) { - keyInfo.setMetadata(keyArgs.getMetadata()); - } - return keyInfo; - } - - private OmKeyInfo prepareMultipartKeyInfo(OmKeyArgs args, long size, - List locations, FileEncryptionInfo encInfo) - throws IOException { - ReplicationFactor factor; - ReplicationType type; - - Preconditions.checkArgument(args.getMultipartUploadPartNumber() > 0, - "PartNumber Should be greater than zero"); - // When key is multipart upload part key, we should take replication - // type and replication factor from original key which has done - // initiate multipart upload. 
If we have not found any such, we throw - // error no such multipart upload. - String uploadID = args.getMultipartUploadID(); - Preconditions.checkNotNull(uploadID); - String multipartKey = metadataManager - .getMultipartKey(args.getVolumeName(), args.getBucketName(), - args.getKeyName(), uploadID); - OmKeyInfo partKeyInfo = metadataManager.getOpenKeyTable().get( - multipartKey); - if (partKeyInfo == null) { - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - factor = partKeyInfo.getFactor(); - type = partKeyInfo.getType(); - } - // For this upload part we don't need to check in KeyTable. As this - // is not an actual key, it is a part of the key. - return createKeyInfo(args, locations, factor, type, size, encInfo, - getBucketInfo(args.getVolumeName(), args.getBucketName())); - } - - /** - * Create OmKeyInfo object. - * @param keyArgs - * @param locations - * @param factor - * @param type - * @param size - * @param encInfo - * @param omBucketInfo - * @return - */ - private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, - List locations, - ReplicationFactor factor, - ReplicationType type, long size, - FileEncryptionInfo encInfo, - OmBucketInfo omBucketInfo) { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(size) - .setReplicationType(type) - .setReplicationFactor(factor) - .setFileEncryptionInfo(encInfo) - .addAllMetadata(keyArgs.getMetadata()); - builder.setAcls(getAclsForKey(keyArgs, omBucketInfo)); - - if(Boolean.valueOf(omBucketInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { - builder.addMetadata(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString()); - } - return builder.build(); - } - - @Override - public void commitKey(OmKeyArgs args, long clientID) throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - List locationInfoList = args.getLocationInfoList(); - String objectKey = metadataManager - .getOzoneKey(volumeName, bucketName, keyName); - String openKey = metadataManager - .getOpenKey(volumeName, bucketName, keyName, clientID); - Preconditions.checkNotNull(locationInfoList); - try { - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, - bucketName); - validateBucket(volumeName, bucketName); - OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get(openKey); - if (keyInfo == null) { - throw new OMException("Failed to commit key, as " + openKey + "entry " + - "is not found in the openKey table", KEY_NOT_FOUND); - } - keyInfo.setDataSize(args.getDataSize()); - keyInfo.setModificationTime(Time.now()); - - //update the block length for each block - keyInfo.updateLocationInfoList(locationInfoList); - metadataManager.getStore().move( - openKey, - objectKey, - keyInfo, - metadataManager.getOpenKeyTable(), - metadataManager.getKeyTable()); - } catch (OMException e) { - throw e; - } catch (IOException ex) { - LOG.error("Key commit failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.KEY_ALLOCATION_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, 
volumeName, - bucketName); - } - } - - @Override - public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) - throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - String keyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo value = metadataManager.getKeyTable().get(keyBytes); - if (value == null) { - LOG.debug("volume:{} bucket:{} Key:{} not found", - volumeName, bucketName, keyName); - throw new OMException("Key not found", - KEY_NOT_FOUND); - } - if (grpcBlockTokenEnabled) { - String remoteUser = getRemoteUser().getShortUserName(); - for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { - key.getLocationList().forEach(k -> { - k.setToken(secretManager.generateToken(remoteUser, - k.getBlockID().getContainerBlockID().toString(), - getAclForUser(remoteUser), - k.getLength())); - }); - } - } - // Refresh container pipeline info from SCM - // based on OmKeyArgs.refreshPipeline flag - // 1. Client send initial read request OmKeyArgs.refreshPipeline = false - // and uses the pipeline cached in OM to access datanode - // 2. If succeeded, done. - // 3. If failed due to pipeline does not exist or invalid pipeline state - // exception, client should retry lookupKey with - // OmKeyArgs.refreshPipeline = true - if (args.getRefreshPipeline()) { - for (OmKeyLocationInfoGroup key : value.getKeyLocationVersions()) { - key.getLocationList().forEach(k -> { - // TODO: fix Some tests that may not initialize container client - // The production should always have containerClient initialized. - if (scmClient.getContainerClient() != null) { - try { - ContainerWithPipeline cp = scmClient.getContainerClient() - .getContainerWithPipeline(k.getContainerID()); - if (!cp.getPipeline().equals(k.getPipeline())) { - k.setPipeline(cp.getPipeline()); - } - } catch (IOException e) { - LOG.error("Unable to update pipeline for container:{}", - k.getContainerID()); - } - } - }); - } - } - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(value, clientAddress); - } - return value; - } catch (IOException ex) { - LOG.debug("Get key failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - KEY_NOT_FOUND); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { - Preconditions.checkNotNull(args); - Preconditions.checkNotNull(toKeyName); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String fromKeyName = args.getKeyName(); - if (toKeyName.length() == 0 || fromKeyName.length() == 0) { - LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}", - volumeName, bucketName, fromKeyName, toKeyName); - throw new OMException("Key name is empty", - ResultCodes.INVALID_KEY_NAME); - } - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - // fromKeyName should exist - String fromKey = metadataManager.getOzoneKey( - volumeName, bucketName, fromKeyName); - OmKeyInfo fromKeyValue = metadataManager.getKeyTable().get(fromKey); - if (fromKeyValue == null) { - // TODO: Add support for renaming open key - LOG.error( - "Rename key failed for volume:{} bucket:{} fromKey:{} 
toKey:{}. " - + "Key: {} not found.", volumeName, bucketName, fromKeyName, - toKeyName, fromKeyName); - throw new OMException("Key not found", - KEY_NOT_FOUND); - } - - // A rename is a no-op if the target and source name is same. - // TODO: Discuss if we need to throw?. - if (fromKeyName.equals(toKeyName)) { - return; - } - - // toKeyName should not exist - String toKey = - metadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - OmKeyInfo toKeyValue = metadataManager.getKeyTable().get(toKey); - if (toKeyValue != null) { - LOG.error( - "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. " - + "Key: {} already exists.", volumeName, bucketName, - fromKeyName, toKeyName, toKeyName); - throw new OMException("Key already exists", - OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - - fromKeyValue.setKeyName(toKeyName); - fromKeyValue.updateModifcationTime(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - metadataManager.getKeyTable().deleteWithBatch(batch, fromKey); - metadataManager.getKeyTable().putWithBatch(batch, toKey, - fromKeyValue); - store.commitBatchOperation(batch); - } - } catch (IOException ex) { - if (ex instanceof OMException) { - throw ex; - } - LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}", - volumeName, bucketName, fromKeyName, toKeyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.KEY_RENAME_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public void deleteKey(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - String objectKey = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found", - KEY_NOT_FOUND); - } else { - // directly delete key with no blocks from db. This key need not be - // moved to deleted table. 
- if (isKeyEmpty(keyInfo)) { - metadataManager.getKeyTable().delete(objectKey); - LOG.debug("Key {} deleted from OM DB", keyName); - return; - } - } - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable().get(objectKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo, - repeatedOmKeyInfo); - metadataManager.getKeyTable().delete(objectKey); - metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo); - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error(String.format("Delete key failed for volume:%s " - + "bucket:%s key:%s", volumeName, bucketName, keyName), ex); - throw new OMException(ex.getMessage(), ex, - ResultCodes.KEY_DELETION_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - private boolean isKeyEmpty(OmKeyInfo keyInfo) { - for (OmKeyLocationInfoGroup keyLocationList : keyInfo - .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { - return false; - } - } - return true; - } - - @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, - int maxKeys) throws IOException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - - // We don't take a lock in this path, since we walk the - // underlying table using an iterator. That automatically creates a - // snapshot of the data, so we don't need these locks at a higher level - // when we iterate. - return metadataManager.listKeys(volumeName, bucketName, - startKey, keyPrefix, maxKeys); - } - - @Override - public List getPendingDeletionKeys(final int count) - throws IOException { - return metadataManager.getPendingDeletionKeys(count); - } - - @Override - public List getExpiredOpenKeys() throws IOException { - return metadataManager.getExpiredOpenKeys(); - - } - - @Override - public void deleteExpiredOpenKey(String objectKeyName) throws IOException { - Preconditions.checkNotNull(objectKeyName); - // TODO: Fix this in later patches. - } - - @Override - public OMMetadataManager getMetadataManager() { - return metadataManager; - } - - @Override - public BackgroundService getDeletingService() { - return keyDeletingService; - } - - @Override - public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws - IOException { - Preconditions.checkNotNull(omKeyArgs); - String uploadID = UUID.randomUUID().toString() + "-" + UniqueId.next(); - return createMultipartInfo(omKeyArgs, uploadID); - } - - private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, - String multipartUploadID) throws IOException { - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - OmBucketInfo bucketInfo = validateS3Bucket(volumeName, bucketName); - try { - - // We are adding uploadId to key, because if multiple users try to - // perform multipart upload on the same key, each will try to upload, who - // ever finally commit the key, we see that key in ozone. Suppose if we - // don't add id, and use the same key /volume/bucket/key, when multiple - // users try to upload the key, we update the parts of the key's from - // multiple users to same key, and the key output can be a mix of the - // parts from multiple users. - - // So on same key if multiple time multipart upload is initiated we - // store multiple entries in the openKey Table. 
- // Checked AWS S3, when we try to run multipart upload, each time a - // new uploadId is returned. - - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - // Not checking if there is an already key for this in the keyTable, as - // during final complete multipart upload we take care of this. - - - Map partKeyInfoMap = new HashMap<>(); - OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo( - multipartUploadID, partKeyInfoMap); - List locations = new ArrayList<>(); - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setReplicationType(keyArgs.getType()) - .setReplicationFactor(keyArgs.getFactor()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setAcls(getAclsForKey(keyArgs, bucketInfo)) - .build(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - // Create an entry in open key table and multipart info table for - // this key. - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - metadataManager.getOpenKeyTable().putWithBatch(batch, - multipartKey, omKeyInfo); - store.commitBatchOperation(batch); - return new OmMultipartInfo(volumeName, bucketName, keyName, - multipartUploadID); - } - } catch (IOException ex) { - LOG.error("Initiate Multipart upload Failed for volume:{} bucket:{} " + - "key:{}", volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.INITIATE_MULTIPART_UPLOAD_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - private List getAclsForKey(OmKeyArgs keyArgs, - OmBucketInfo bucketInfo) { - List acls = new ArrayList<>(); - - if(keyArgs.getAcls() != null) { - acls.addAll(keyArgs.getAcls()); - } - - // Inherit DEFAULT acls from prefix. - if(prefixManager != null) { - List prefixList = prefixManager.getLongestPrefixPath( - OZONE_URI_DELIMITER + - keyArgs.getVolumeName() + OZONE_URI_DELIMITER + - keyArgs.getBucketName() + OZONE_URI_DELIMITER + - keyArgs.getKeyName()); - - if(prefixList.size() > 0) { - // Add all acls from direct parent to key. - OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { - return acls; - } - } - } - } - - // Inherit DEFAULT acls from bucket only if DEFAULT acls for - // prefix are not set. 
- if (bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { - return acls; - } - } - - // TODO: do we need to further fallback to volume default ACL - return acls; - } - - @Override - public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientID) throws IOException { - Preconditions.checkNotNull(omKeyArgs); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - int partNumber = omKeyArgs.getMultipartUploadPartNumber(); - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - validateS3Bucket(volumeName, bucketName); - String partName; - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - - String openKey = metadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - OmKeyInfo keyInfo = metadataManager.getOpenKeyTable().get( - openKey); - - // set the data size and location info list - keyInfo.setDataSize(omKeyArgs.getDataSize()); - keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList()); - - partName = metadataManager.getOzoneKey(volumeName, bucketName, keyName) - + clientID; - if (multipartKeyInfo == null) { - // This can occur when user started uploading part by the time commit - // of that part happens, in between the user might have requested - // abort multipart upload. If we just throw exception, then the data - // will not be garbage collected, so move this part to delete table - // and throw error - // Move this part to delete table. - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable().get(partName); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - keyInfo, repeatedOmKeyInfo); - metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo); - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - PartKeyInfo oldPartKeyInfo = - multipartKeyInfo.getPartKeyInfo(partNumber); - PartKeyInfo.Builder partKeyInfo = PartKeyInfo.newBuilder(); - partKeyInfo.setPartName(partName); - partKeyInfo.setPartNumber(partNumber); - partKeyInfo.setPartKeyInfo(keyInfo.getProtobuf()); - multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build()); - if (oldPartKeyInfo == null) { - // This is the first time part is being added. - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey); - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - store.commitBatchOperation(batch); - } - } else { - // If we have this part already, that means we are overriding it. - // We need to 3 steps. - // Add the old entry to delete table. - // Remove the new entry from openKey table. - // Add the new entry in to the list of part keys. 
- DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - OmKeyInfo partKey = OmKeyInfo.getFromProtobuf( - oldPartKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable() - .get(oldPartKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - partKey, repeatedOmKeyInfo); - - metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo); - metadataManager.getDeletedTable().putWithBatch(batch, - oldPartKeyInfo.getPartName(), - repeatedOmKeyInfo); - metadataManager.getOpenKeyTable().deleteWithBatch(batch, openKey); - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - store.commitBatchOperation(batch); - } - } - } - } catch (IOException ex) { - LOG.error("Upload part Failed: volume:{} bucket:{} " + - "key:{} PartNumber: {}", volumeName, bucketName, keyName, - partNumber, ex); - throw new OMException(ex.getMessage(), - ResultCodes.MULTIPART_UPLOAD_PARTFILE_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - - return new OmMultipartCommitUploadPartInfo(partName); - - } - - @Override - @SuppressWarnings("methodlength") - public OmMultipartUploadCompleteInfo completeMultipartUpload( - OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) - throws IOException { - Preconditions.checkNotNull(omKeyArgs); - Preconditions.checkNotNull(multipartUploadList); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - validateS3Bucket(volumeName, bucketName); - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - String ozoneKey = metadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(ozoneKey); - - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - if (multipartKeyInfo == null) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } - TreeMap partKeyInfoMap = multipartKeyInfo - .getPartKeyInfoMap(); - - TreeMap multipartMap = multipartUploadList - .getMultipartMap(); - - // Last key in the map should be having key value as size, as map's - // are sorted. Last entry in both maps should have partNumber as size - // of the map. As we have part entries 1, 2, 3, 4 and then we get - // complete multipart upload request so the map last entry should have 4, - // if it is having value greater or less than map size, then there is - // some thing wrong throw error. - - Map.Entry multipartMapLastEntry = multipartMap - .lastEntry(); - Map.Entry partKeyInfoLastEntry = partKeyInfoMap - .lastEntry(); - if (partKeyInfoMap.size() != multipartMap.size()) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.MISMATCH_MULTIPART_LIST); - } - - // Last entry part Number should be the size of the map, otherwise this - // means we have missing some parts but we got a complete request. 
- if (multipartMapLastEntry.getKey() != partKeyInfoMap.size() || - partKeyInfoLastEntry.getKey() != partKeyInfoMap.size()) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.MISSING_UPLOAD_PARTS); - } - ReplicationType type = partKeyInfoLastEntry.getValue().getPartKeyInfo() - .getType(); - ReplicationFactor factor = partKeyInfoLastEntry.getValue() - .getPartKeyInfo().getFactor(); - List locations = new ArrayList<>(); - long size = 0; - int partsCount =1; - int partsMapSize = partKeyInfoMap.size(); - for(Map.Entry partKeyInfoEntry : partKeyInfoMap - .entrySet()) { - int partNumber = partKeyInfoEntry.getKey(); - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - // Check we have all parts to complete multipart upload and also - // check partNames provided match with actual part names - String providedPartName = multipartMap.get(partNumber); - String actualPartName = partKeyInfo.getPartName(); - if (partNumber == partsCount) { - if (!actualPartName.equals(providedPartName)) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.MISMATCH_MULTIPART_LIST); - } - OmKeyInfo currentPartKeyInfo = OmKeyInfo - .getFromProtobuf(partKeyInfo.getPartKeyInfo()); - // Check if any part size is less than 5mb, last part can be less - // than 5 mb. - if (partsCount != partsMapSize && - currentPartKeyInfo.getDataSize() < OM_MULTIPART_MIN_SIZE) { - LOG.error("MultipartUpload: " + ozoneKey + "Part number: " + - partKeyInfo.getPartNumber() + "size " + currentPartKeyInfo - .getDataSize() + " is less than minimum part size " + - OzoneConsts.OM_MULTIPART_MIN_SIZE); - throw new OMException("Complete Multipart Upload Failed: Entity " + - "too small: volume: " + volumeName + "bucket: " + bucketName - + "key: " + keyName, ResultCodes.ENTITY_TOO_SMALL); - } - // As all part keys will have only one version. - OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo - .getKeyLocationVersions().get(0); - locations.addAll(currentKeyInfoGroup.getLocationList()); - size += currentPartKeyInfo.getDataSize(); - } else { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.MISSING_UPLOAD_PARTS); - } - partsCount++; - } - if (keyInfo == null) { - // This is a newly added key, it does not have any versions. - OmKeyLocationInfoGroup keyLocationInfoGroup = new - OmKeyLocationInfoGroup(0, locations); - // A newly created key, this is the first version. - keyInfo = new OmKeyInfo.Builder() - .setVolumeName(omKeyArgs.getVolumeName()) - .setBucketName(omKeyArgs.getBucketName()) - .setKeyName(omKeyArgs.getKeyName()) - .setReplicationFactor(factor) - .setReplicationType(type) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(size) - .setOmKeyLocationInfos( - Collections.singletonList(keyLocationInfoGroup)) - .setAcls(omKeyArgs.getAcls()).build(); - } else { - // Already a version exists, so we should add it as a new version. - // But now as versioning is not supported, just following the commit - // key approach. 
When versioning support comes, then we can uncomment - // below code keyInfo.addNewVersion(locations); - keyInfo.updateLocationInfoList(locations); - } - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - //Remove entry in multipart table and add a entry in to key table - metadataManager.getMultipartInfoTable().deleteWithBatch(batch, - multipartKey); - metadataManager.getKeyTable().putWithBatch(batch, - ozoneKey, keyInfo); - metadataManager.getOpenKeyTable().deleteWithBatch(batch, multipartKey); - store.commitBatchOperation(batch); - } - return new OmMultipartUploadCompleteInfo(omKeyArgs.getVolumeName(), - omKeyArgs.getBucketName(), omKeyArgs.getKeyName(), DigestUtils - .sha256Hex(keyName)); - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Complete Multipart Upload Failed: volume: " + volumeName + - "bucket: " + bucketName + "key: " + keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes - .COMPLETE_MULTIPART_UPLOAD_ERROR); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - - Preconditions.checkNotNull(omKeyArgs); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - Preconditions.checkNotNull(uploadID, "uploadID cannot be null"); - validateS3Bucket(volumeName, bucketName); - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - OmBucketInfo bucketInfo; - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - OmKeyInfo openKeyInfo = metadataManager.getOpenKeyTable().get( - multipartKey); - - // If there is no entry in openKeyTable, then there is no multipart - // upload initiated for this key. 
- if (openKeyInfo == null) { - LOG.error("Abort Multipart Upload Failed: volume: " + volumeName + - "bucket: " + bucketName + "key: " + keyName + "with error no " + - "such uploadID:" + uploadID); - throw new OMException("Abort Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - // Move all the parts to delete table - TreeMap partKeyInfoMap = multipartKeyInfo - .getPartKeyInfoMap(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - for (Map.Entry partKeyInfoEntry : partKeyInfoMap - .entrySet()) { - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf( - partKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable() - .get(partKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - currentKeyPartInfo, repeatedOmKeyInfo); - - metadataManager.getDeletedTable().putWithBatch(batch, - partKeyInfo.getPartName(), repeatedOmKeyInfo); - } - // Finally delete the entry from the multipart info table and open - // key table - metadataManager.getMultipartInfoTable().deleteWithBatch(batch, - multipartKey); - metadataManager.getOpenKeyTable().deleteWithBatch(batch, - multipartKey); - store.commitBatchOperation(batch); - } - } - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Abort Multipart Upload Failed: volume: " + volumeName + - "bucket: " + bucketName + "key: " + keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes - .ABORT_MULTIPART_UPLOAD_FAILED); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - - } - - @Override - public OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws OMException { - Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - - List multipartUploadKeys = - metadataManager - .getMultipartUploadKeys(volumeName, bucketName, prefix); - - List collect = multipartUploadKeys.stream() - .map(OmMultipartUpload::from) - .map(upload -> { - String dbKey = metadataManager - .getOzoneKey(upload.getVolumeName(), - upload.getBucketName(), - upload.getKeyName()); - try { - Table openKeyTable = - metadataManager.getOpenKeyTable(); - - OmKeyInfo omKeyInfo = - openKeyTable.get(upload.getDbKey()); - - upload.setCreationTime( - Instant.ofEpochMilli(omKeyInfo.getCreationTime())); - - upload.setReplicationType(omKeyInfo.getType()); - upload.setReplicationFactor(omKeyInfo.getFactor()); - } catch (IOException e) { - LOG.warn( - "Open key entry for multipart upload record can be read {}", - dbKey); - } - return upload; - }) - .collect(Collectors.toList()); - - return new OmMultipartUploadList(collect); - - } catch (IOException ex) { - LOG.error("List Multipart Uploads Failed: volume: " + volumeName + - "bucket: " + bucketName + "prefix: " + prefix, ex); - throw new OMException(ex.getMessage(), ResultCodes - .LIST_MULTIPART_UPLOAD_PARTS_FAILED); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public OmMultipartUploadListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, - int partNumberMarker, int maxParts) throws IOException { - 
Preconditions.checkNotNull(volumeName); - Preconditions.checkNotNull(bucketName); - Preconditions.checkNotNull(keyName); - Preconditions.checkNotNull(uploadID); - boolean isTruncated = false; - int nextPartNumberMarker = 0; - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - - OmMultipartKeyInfo multipartKeyInfo = - metadataManager.getMultipartInfoTable().get(multipartKey); - - if (multipartKeyInfo == null) { - throw new OMException("No Such Multipart upload exists for this key.", - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - TreeMap partKeyInfoMap = - multipartKeyInfo.getPartKeyInfoMap(); - Iterator> partKeyInfoMapIterator = - partKeyInfoMap.entrySet().iterator(); - - HddsProtos.ReplicationType replicationType = null; - HddsProtos.ReplicationFactor replicationFactor = null; - - int count = 0; - List omPartInfoList = new ArrayList<>(); - - while (count < maxParts && partKeyInfoMapIterator.hasNext()) { - Map.Entry partKeyInfoEntry = - partKeyInfoMapIterator.next(); - nextPartNumberMarker = partKeyInfoEntry.getKey(); - // As we should return only parts with part number greater - // than part number marker - if (partKeyInfoEntry.getKey() > partNumberMarker) { - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), - partKeyInfo.getPartName(), - partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); - omPartInfoList.add(omPartInfo); - - //if there are parts, use replication type from one of the parts - replicationType = partKeyInfo.getPartKeyInfo().getType(); - replicationFactor = partKeyInfo.getPartKeyInfo().getFactor(); - count++; - } - } - - if (replicationType == null) { - //if there are no parts, use the replicationType from the open key. - - OmKeyInfo omKeyInfo = - metadataManager.getOpenKeyTable().get(multipartKey); - - if (omKeyInfo == null) { - throw new IllegalStateException( - "Open key is missing for multipart upload " + multipartKey); - } - - replicationType = omKeyInfo.getType(); - replicationFactor = omKeyInfo.getFactor(); - } - Preconditions.checkNotNull(replicationType, - "Replication type can't be identified"); - Preconditions.checkNotNull(replicationFactor, - "Replication factor can't be identified"); - - if (partKeyInfoMapIterator.hasNext()) { - Map.Entry partKeyInfoEntry = - partKeyInfoMapIterator.next(); - isTruncated = true; - } else { - isTruncated = false; - nextPartNumberMarker = 0; - } - OmMultipartUploadListParts omMultipartUploadListParts = - new OmMultipartUploadListParts(replicationType, replicationFactor, - nextPartNumberMarker, isTruncated); - omMultipartUploadListParts.addPartList(omPartInfoList); - return omMultipartUploadListParts; - } - } catch (OMException ex) { - throw ex; - } catch (IOException ex){ - LOG.error( - "List Multipart Upload Parts Failed: volume: {}, bucket: {}, ,key: " - + "{} ", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes - .LIST_MULTIPART_UPLOAD_PARTS_FAILED); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. 
- */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - validateOzoneObj(obj); - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String keyName = obj.getKeyName(); - boolean changed = false; - - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - validateBucket(volume, bucket); - String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); - } - - if (keyInfo.getAcls() == null) { - keyInfo.setAcls(new ArrayList<>()); - } - changed = keyInfo.addAcl(acl); - if (changed) { - metadataManager.getKeyTable().put(objectKey, keyInfo); - } - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Add acl operation failed for key:{}/{}/{}", volume, - bucket, keyName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - return changed; - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - validateOzoneObj(obj); - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String keyName = obj.getKeyName(); - boolean changed = false; - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - validateBucket(volume, bucket); - String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); - } - - changed = keyInfo.removeAcl(acl); - if (changed) { - metadataManager.getKeyTable().put(objectKey, keyInfo); - } - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Remove acl operation failed for key:{}/{}/{}", volume, - bucket, keyName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - return changed; - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - validateOzoneObj(obj); - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String keyName = obj.getKeyName(); - boolean changed = false; - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volume, bucket); - try { - validateBucket(volume, bucket); - String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found. 
Key:" + objectKey, KEY_NOT_FOUND); - } - - changed = keyInfo.setAcls(acls); - - if (changed) { - metadataManager.getKeyTable().put(objectKey, keyInfo); - } - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Set acl operation failed for key:{}/{}/{}", volume, - bucket, keyName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volume, bucket); - } - return changed; - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - validateOzoneObj(obj); - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String keyName = obj.getKeyName(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); - try { - validateBucket(volume, bucket); - String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); - OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found. Key:" + objectKey, KEY_NOT_FOUND); - } - - return keyInfo.getAcls(); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Get acl operation failed for key:{}/{}/{}", volume, - bucket, keyName, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); - } - } - - /** - * Check access for given ozoneObject. - * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @return true if user has access else false. - */ - @Override - public boolean checkAccess(OzoneObj ozObject, RequestContext context) - throws OMException { - Objects.requireNonNull(ozObject); - Objects.requireNonNull(context); - Objects.requireNonNull(context.getClientUgi()); - - String volume = ozObject.getVolumeName(); - String bucket = ozObject.getBucketName(); - String keyName = ozObject.getKeyName(); - String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName); - OmKeyArgs args = new OmKeyArgs.Builder() - .setVolumeName(volume) - .setBucketName(bucket) - .setKeyName(keyName) - .build(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volume, bucket); - try { - validateBucket(volume, bucket); - OmKeyInfo keyInfo = null; - try { - OzoneFileStatus fileStatus = getFileStatus(args); - keyInfo = fileStatus.getKeyInfo(); - if (keyInfo == null) { - // the key does not exist, but it is a parent "dir" of some key - // let access be determined based on volume/bucket/prefix ACL - if (LOG.isDebugEnabled()) { - LOG.debug("key:{} is non-existent parent, permit access to user:{}", - keyName, context.getClientUgi()); - } - return true; - } - } catch (OMException e) { - if (e.getResult() == FILE_NOT_FOUND) { - keyInfo = metadataManager.getOpenKeyTable().get(objectKey); - } - } - - if (keyInfo == null) { - throw new OMException("Key not found, checkAccess failed. 
Key:" + - objectKey, KEY_NOT_FOUND); - } - - boolean hasAccess = OzoneAclUtil.checkAclRight( - keyInfo.getAcls(), context); - if (LOG.isDebugEnabled()) { - LOG.debug("user:{} has access rights for key:{} :{} ", - context.getClientUgi(), ozObject.getKeyName(), hasAccess); - } - return hasAccess; - } catch (IOException ex) { - if(ex instanceof OMException) { - throw (OMException) ex; - } - LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume, - bucket, keyName, ex); - throw new OMException("Check access operation failed for " + - "key:" + keyName, ex, INTERNAL_ERROR); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volume, bucket); - } - } - - /** - * Helper method to validate ozone object. - * @param obj - * */ - private void validateOzoneObj(OzoneObj obj) throws OMException { - Objects.requireNonNull(obj); - - if (!obj.getResourceType().equals(KEY)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "KeyManager. OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String keyName = obj.getKeyName(); - - if (Strings.isNullOrEmpty(volume)) { - throw new OMException("Volume name is required.", VOLUME_NOT_FOUND); - } - if (Strings.isNullOrEmpty(bucket)) { - throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND); - } - if (Strings.isNullOrEmpty(keyName)) { - throw new OMException("Key name is required.", KEY_NOT_FOUND); - } - } - - /** - * OzoneFS api to get file status for an entry. - * - * @param args Key args - * @throws OMException if file does not exist - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - // Check if this is the root of the filesystem. - if (keyName.length() == 0) { - validateBucket(volumeName, bucketName); - return new OzoneFileStatus(OZONE_URI_DELIMITER); - } - - // Check if the key is a file. 
- String fileKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo fileKeyInfo = metadataManager.getKeyTable().get(fileKeyBytes); - if (fileKeyInfo != null) { - // this is a file - return new OzoneFileStatus(fileKeyInfo, scmBlockSize, false); - } - - String dirKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - String dirKeyBytes = metadataManager.getOzoneKey( - volumeName, bucketName, dirKey); - OmKeyInfo dirKeyInfo = metadataManager.getKeyTable().get(dirKeyBytes); - if (dirKeyInfo != null) { - return new OzoneFileStatus(dirKeyInfo, scmBlockSize, true); - } - - List keys = metadataManager.listKeys(volumeName, bucketName, - null, dirKey, 1); - if (keys.iterator().hasNext()) { - return new OzoneFileStatus(keyName); - } - if (LOG.isDebugEnabled()) { - LOG.debug("Unable to get file status for the key: volume: {}, bucket:" + - " {}, key: {}, with error: No such file exists.", volumeName, - bucketName, keyName); - } - throw new OMException("Unable to get file status: volume: " + - volumeName + " bucket: " + bucketName + " key: " + keyName, - FILE_NOT_FOUND); - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - /** - * Ozone FS api to create a directory. Parent directories if do not exist - * are created for the input directory. - * - * @param args Key args - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - public void createDirectory(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - - // Check if this is the root of the filesystem. - if (keyName.length() == 0) { - return; - } - - Path keyPath = Paths.get(keyName); - OzoneFileStatus status = - verifyNoFilesInPath(volumeName, bucketName, keyPath, false); - if (status != null && OzoneFSUtils.pathToKey(status.getPath()) - .equals(keyName)) { - // if directory already exists - return; - } - OmKeyInfo dirDbKeyInfo = - createDirectoryKey(volumeName, bucketName, keyName, args.getAcls()); - String dirDbKey = metadataManager - .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName()); - metadataManager.getKeyTable().put(dirDbKey, dirDbKeyInfo); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, - String keyName, List acls) throws IOException { - // verify bucket exists - OmBucketInfo bucketInfo = getBucketInfo(volumeName, bucketName); - - String dir = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - FileEncryptionInfo encInfo = getFileEncryptionInfo(bucketInfo); - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(dir) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(0) - .setReplicationType(ReplicationType.RATIS) - .setReplicationFactor(ReplicationFactor.ONE) - .setFileEncryptionInfo(encInfo) - .setAcls(acls) - .build(); - } - - /** - * OzoneFS api to creates an output stream for a file. 
- * - * @param args Key args - * @param isOverWrite if true existing file at the location will be - * overwritten - * @param isRecursive if true file would be created even if parent - * directories do not exist - * @throws OMException if given key is a directory - * if file exists and isOverwrite flag is false - * if an ancestor exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - @Override - public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, - boolean isRecursive) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - OpenKeySession keySession; - - metadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName, bucketName); - try { - OzoneFileStatus fileStatus; - try { - fileStatus = getFileStatus(args); - if (fileStatus.isDirectory()) { - throw new OMException("Can not write to directory: " + keyName, - ResultCodes.NOT_A_FILE); - } else if (fileStatus.isFile()) { - if (!isOverWrite) { - throw new OMException("File " + keyName + " already exists", - ResultCodes.FILE_ALREADY_EXISTS); - } - } - } catch (OMException ex) { - if (ex.getResult() != FILE_NOT_FOUND) { - throw ex; - } - } - - verifyNoFilesInPath(volumeName, bucketName, - Paths.get(keyName).getParent(), !isRecursive); - // TODO: Optimize call to openKey as keyInfo is already available in the - // filestatus. We can avoid some operations in openKey call. - keySession = openKey(args); - } finally { - metadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName, - bucketName); - } - - return keySession; - } - - /** - * OzoneFS api to lookup for a file. - * - * @param args Key args - * @throws OMException if given key is not found or it is not a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - @Override - public OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) - throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - OzoneFileStatus fileStatus = getFileStatus(args); - if (fileStatus.isFile()) { - if (args.getSortDatanodes()) { - sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress); - } - return fileStatus.getKeyInfo(); - } - //if key is not of type file or if key is not found we throw an exception - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - - throw new OMException("Can not write to directory: " + keyName, - ResultCodes.NOT_A_FILE); - } - - /** - * List the status for a file or a directory and its contents. - * - * @param args Key args - * @param recursive For a directory if true all the descendants of a - * particular directory are listed - * @param startKey Key from which listing needs to start. If startKey exists - * its status is included in the final list. 
- * @param numEntries Number of entries to list from the start key - * @return list of file status - */ - public List listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - - List fileStatusList = new ArrayList<>(); - metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, - bucketName); - try { - if (Strings.isNullOrEmpty(startKey)) { - OzoneFileStatus fileStatus = getFileStatus(args); - if (fileStatus.isFile()) { - return Collections.singletonList(fileStatus); - } - startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - } - - String seekKeyInDb = - metadataManager.getOzoneKey(volumeName, bucketName, startKey); - String keyInDb = OzoneFSUtils.addTrailingSlashIfNeeded( - metadataManager.getOzoneKey(volumeName, bucketName, keyName)); - TableIterator> - iterator = metadataManager.getKeyTable().iterator(); - iterator.seek(seekKeyInDb); - - if (!iterator.hasNext()) { - return Collections.emptyList(); - } - - if (iterator.key().equals(keyInDb)) { - // skip the key which needs to be listed - iterator.next(); - } - - while (iterator.hasNext() && numEntries - fileStatusList.size() > 0) { - String entryInDb = iterator.key(); - OmKeyInfo value = iterator.value().getValue(); - if (entryInDb.startsWith(keyInDb)) { - String entryKeyName = value.getKeyName(); - if (recursive) { - // for recursive list all the entries - fileStatusList.add(new OzoneFileStatus(value, scmBlockSize, - !OzoneFSUtils.isFile(entryKeyName))); - iterator.next(); - } else { - // get the child of the directory to list from the entry. For - // example if directory to list is /a and entry is /a/b/c where - // c is a file. The immediate child is b which is a directory. c - // should not be listed as child of a. - String immediateChild = OzoneFSUtils - .getImmediateChild(entryKeyName, keyName); - boolean isFile = OzoneFSUtils.isFile(immediateChild); - if (isFile) { - fileStatusList - .add(new OzoneFileStatus(value, scmBlockSize, !isFile)); - iterator.next(); - } else { - // if entry is a directory - fileStatusList.add(new OzoneFileStatus(immediateChild)); - // skip the other descendants of this child directory. - iterator.seek( - getNextGreaterString(volumeName, bucketName, immediateChild)); - } - } - } else { - break; - } - } - } finally { - metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, - bucketName); - } - return fileStatusList; - } - - private String getNextGreaterString(String volumeName, String bucketName, - String keyPrefix) throws IOException { - // Increment the last character of the string and return the new ozone key. - Preconditions.checkArgument(!Strings.isNullOrEmpty(keyPrefix), - "Key prefix is null or empty"); - CodecRegistry codecRegistry = - ((RDBStore) metadataManager.getStore()).getCodecRegistry(); - byte[] keyPrefixInBytes = codecRegistry.asRawData(keyPrefix); - keyPrefixInBytes[keyPrefixInBytes.length - 1]++; - String nextPrefix = codecRegistry.asObject(keyPrefixInBytes, String.class); - return metadataManager.getOzoneKey(volumeName, bucketName, nextPrefix); - } - - /** - * Verify that none of the parent path exists as file in the filesystem. - * - * @param volumeName Volume name - * @param bucketName Bucket name - * @param path Directory path. This is the absolute path of the - * directory for the ozone filesystem. 
- * @param directoryMustExist throws exception if true and given path does not - * exist as directory - * @return OzoneFileStatus of the first directory found in path in reverse - * order - * @throws OMException if ancestor exists as file in the filesystem - * if directoryMustExist flag is true and parent does - * not exist - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - private OzoneFileStatus verifyNoFilesInPath(String volumeName, - String bucketName, Path path, boolean directoryMustExist) - throws IOException { - OmKeyArgs.Builder argsBuilder = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName); - while (path != null) { - String keyName = path.toString(); - try { - OzoneFileStatus fileStatus = - getFileStatus(argsBuilder.setKeyName(keyName).build()); - if (fileStatus.isFile()) { - LOG.error("Unable to create directory (File already exists): volume: " - + volumeName + "bucket: " + bucketName + "key: " + keyName); - throw new OMException( - "Unable to create directory at : volume: " + volumeName - + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.FILE_ALREADY_EXISTS); - } else if (fileStatus.isDirectory()) { - return fileStatus; - } - } catch (OMException ex) { - if (ex.getResult() != FILE_NOT_FOUND) { - throw ex; - } else if (ex.getResult() == FILE_NOT_FOUND) { - if (directoryMustExist) { - throw new OMException("Parent directory does not exist", - ex.getCause(), DIRECTORY_NOT_FOUND); - } - } - } - path = path.getParent(); - } - return null; - } - - private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) - throws IOException { - FileEncryptionInfo encInfo = null; - BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo(); - if (ezInfo != null) { - if (getKMSProvider() == null) { - throw new OMException("Invalid KMS provider, check configuration " + - HADOOP_SECURITY_KEY_PROVIDER_PATH, - INVALID_KMS_PROVIDER); - } - - final String ezKeyName = ezInfo.getKeyName(); - EncryptedKeyVersion edek = generateEDEK(ezKeyName); - encInfo = new FileEncryptionInfo(ezInfo.getSuite(), ezInfo.getVersion(), - edek.getEncryptedKeyVersion().getMaterial(), - edek.getEncryptedKeyIv(), - ezKeyName, edek.getEncryptionKeyVersionName()); - } - return encInfo; - } - - private void sortDatanodeInPipeline(OmKeyInfo keyInfo, String clientMachine) { - if (keyInfo != null && clientMachine != null && !clientMachine.isEmpty()) { - for (OmKeyLocationInfoGroup key : keyInfo.getKeyLocationVersions()) { - key.getLocationList().forEach(k -> { - List nodes = k.getPipeline().getNodes(); - if (nodes == null || nodes.size() == 0) { - LOG.warn("Datanodes for pipeline {} is empty", - k.getPipeline().getId().toString()); - return; - } - List nodeList = new ArrayList<>(); - nodes.stream().forEach(node -> - nodeList.add(node.getUuidString())); - try { - List sortedNodes = scmClient.getBlockClient() - .sortDatanodes(nodeList, clientMachine); - k.getPipeline().setNodesInOrder(sortedNodes); - if (LOG.isDebugEnabled()) { - LOG.debug("Sort datanodes {} for client {}, return {}", nodes, - clientMachine, sortedNodes); - } - } catch (IOException e) { - LOG.warn("Unable to sort datanodes based on distance to " + - "client, volume=" + keyInfo.getVolumeName() + - ", bucket=" + keyInfo.getBucketName() + - ", key=" + keyInfo.getKeyName() + - ", client=" + clientMachine + - ", datanodes=" + nodes.toString() + - ", exception=" + e.getMessage()); - } - }); - } - } - } -} diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java deleted file mode 100644 index 81031838b23..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; -import static org.apache.hadoop.ozone.OzoneConsts. - OZONE_DB_CHECKPOINT_REQUEST_FLUSH; - -import java.io.IOException; -import java.nio.file.Path; -import java.time.Duration; -import java.time.Instant; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.util.DataTransferThrottler; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Provides the current checkpoint Snapshot of the OM DB. (tar.gz) - */ -public class OMDBCheckpointServlet extends HttpServlet { - - private static final Logger LOG = - LoggerFactory.getLogger(OMDBCheckpointServlet.class); - private static final long serialVersionUID = 1L; - - private transient OzoneManager om; - private transient DBStore omDbStore; - private transient OMMetrics omMetrics; - private transient DataTransferThrottler throttler = null; - - @Override - public void init() throws ServletException { - - om = (OzoneManager) getServletContext() - .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); - - if (om == null) { - LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null"); - return; - } - - omDbStore = om.getMetadataManager().getStore(); - omMetrics = om.getMetrics(); - - OzoneConfiguration configuration = om.getConfiguration(); - long transferBandwidth = configuration.getLongBytes( - OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY, - OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT); - - if (transferBandwidth > 0) { - throttler = new DataTransferThrottler(transferBandwidth); - } - } - - /** - * Process a GET request for the Ozone Manager DB checkpoint snapshot. - * - * @param request The servlet request we are processing - * @param response The servlet response we are creating - */ - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) { - - LOG.info("Received request to obtain OM DB checkpoint snapshot"); - if (omDbStore == null) { - LOG.error( - "Unable to process metadata snapshot request. 
DB Store is null"); - response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - return; - } - - DBCheckpoint checkpoint = null; - try { - - boolean flush = false; - String flushParam = - request.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH); - if (StringUtils.isNotEmpty(flushParam)) { - flush = Boolean.valueOf(flushParam); - } - - boolean takeRatisSnapshot = false; - String snapshotBeforeCheckpointParam = - request.getParameter(OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT); - if (StringUtils.isNotEmpty(snapshotBeforeCheckpointParam)) { - takeRatisSnapshot = Boolean.valueOf(snapshotBeforeCheckpointParam); - } - - long ratisSnapshotIndex; - if (takeRatisSnapshot) { - // If OM follower is downloading the checkpoint, we should save a - // ratis snapshot first. This step also included flushing the OM DB. - // Hence, we can set flush to false. - flush = false; - ratisSnapshotIndex = om.saveRatisSnapshot(); - } else { - ratisSnapshotIndex = om.getRatisSnapshotIndex(); - } - - checkpoint = omDbStore.getCheckpoint(flush); - if (checkpoint == null || checkpoint.getCheckpointLocation() == null) { - LOG.error("Unable to process metadata snapshot request. " + - "Checkpoint request returned null."); - response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - return; - } - omMetrics.setLastCheckpointCreationTimeTaken( - checkpoint.checkpointCreationTimeTaken()); - - Path file = checkpoint.getCheckpointLocation().getFileName(); - if (file == null) { - return; - } - response.setContentType("application/x-tgz"); - response.setHeader("Content-Disposition", - "attachment; filename=\"" + - file.toString() + ".tgz\""); - // Ratis snapshot index used when downloading DB checkpoint to OM follower - response.setHeader(OM_RATIS_SNAPSHOT_INDEX, - String.valueOf(ratisSnapshotIndex)); - - Instant start = Instant.now(); - OmUtils.writeOmDBCheckpointToStream(checkpoint, - response.getOutputStream()); - Instant end = Instant.now(); - - long duration = Duration.between(start, end).toMillis(); - LOG.info("Time taken to write the checkpoint to response output " + - "stream: " + duration + " milliseconds"); - omMetrics.setLastCheckpointStreamingTimeTaken(duration); - - } catch (Exception e) { - LOG.error( - "Unable to process metadata snapshot request. ", e); - response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - } finally { - if (checkpoint != null) { - try { - checkpoint.cleanupCheckpoint(); - } catch (IOException e) { - LOG.error("Error trying to clean checkpoint at {} .", - checkpoint.getCheckpointLocation().toString()); - } - } - } - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java deleted file mode 100644 index 3ab9f47568d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; - -/** - * This is the JMX management interface for OM information. - */ -@InterfaceAudience.Private -public interface OMMXBean extends ServiceRuntimeInfo { - - String getRpcPort(); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java deleted file mode 100644 index 2d1ae30648d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ /dev/null @@ -1,763 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; - -import com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableGaugeLong; - -/** - * This class is for maintaining Ozone Manager statistics. 
- */ -@InterfaceAudience.Private -@Metrics(about="Ozone Manager Metrics", context="dfs") -public class OMMetrics { - private static final String SOURCE_NAME = - OMMetrics.class.getSimpleName(); - - // OM request type op metrics - private @Metric MutableCounterLong numVolumeOps; - private @Metric MutableCounterLong numBucketOps; - private @Metric MutableCounterLong numKeyOps; - private @Metric MutableCounterLong numFSOps; - - // OM op metrics - private @Metric MutableCounterLong numVolumeCreates; - private @Metric MutableCounterLong numVolumeUpdates; - private @Metric MutableCounterLong numVolumeInfos; - private @Metric MutableCounterLong numVolumeCheckAccesses; - private @Metric MutableCounterLong numBucketCreates; - private @Metric MutableCounterLong numVolumeDeletes; - private @Metric MutableCounterLong numBucketInfos; - private @Metric MutableCounterLong numBucketUpdates; - private @Metric MutableCounterLong numBucketDeletes; - private @Metric MutableCounterLong numKeyAllocate; - private @Metric MutableCounterLong numKeyLookup; - private @Metric MutableCounterLong numKeyRenames; - private @Metric MutableCounterLong numKeyDeletes; - private @Metric MutableCounterLong numBucketLists; - private @Metric MutableCounterLong numKeyLists; - private @Metric MutableCounterLong numVolumeLists; - private @Metric MutableCounterLong numKeyCommits; - private @Metric MutableCounterLong numBlockAllocations; - private @Metric MutableCounterLong numGetServiceLists; - private @Metric MutableCounterLong numBucketS3Lists; - private @Metric MutableCounterLong numInitiateMultipartUploads; - private @Metric MutableCounterLong numCompleteMultipartUploads; - - private @Metric MutableCounterLong numGetFileStatus; - private @Metric MutableCounterLong numCreateDirectory; - private @Metric MutableCounterLong numCreateFile; - private @Metric MutableCounterLong numLookupFile; - private @Metric MutableCounterLong numListStatus; - - // Failure Metrics - private @Metric MutableCounterLong numVolumeCreateFails; - private @Metric MutableCounterLong numVolumeUpdateFails; - private @Metric MutableCounterLong numVolumeInfoFails; - private @Metric MutableCounterLong numVolumeDeleteFails; - private @Metric MutableCounterLong numBucketCreateFails; - private @Metric MutableCounterLong numVolumeCheckAccessFails; - private @Metric MutableCounterLong numBucketInfoFails; - private @Metric MutableCounterLong numBucketUpdateFails; - private @Metric MutableCounterLong numBucketDeleteFails; - private @Metric MutableCounterLong numKeyAllocateFails; - private @Metric MutableCounterLong numKeyLookupFails; - private @Metric MutableCounterLong numKeyRenameFails; - private @Metric MutableCounterLong numKeyDeleteFails; - private @Metric MutableCounterLong numBucketListFails; - private @Metric MutableCounterLong numKeyListFails; - private @Metric MutableCounterLong numVolumeListFails; - private @Metric MutableCounterLong numKeyCommitFails; - private @Metric MutableCounterLong numBlockAllocationFails; - private @Metric MutableCounterLong numGetServiceListFails; - private @Metric MutableCounterLong numBucketS3ListFails; - private @Metric MutableCounterLong numInitiateMultipartUploadFails; - private @Metric MutableCounterLong numCommitMultipartUploadParts; - private @Metric MutableCounterLong numCommitMultipartUploadPartFails; - private @Metric MutableCounterLong numCompleteMultipartUploadFails; - private @Metric MutableCounterLong numAbortMultipartUploads; - private @Metric MutableCounterLong numAbortMultipartUploadFails; - private @Metric 
MutableCounterLong numListMultipartUploadParts; - private @Metric MutableCounterLong numListMultipartUploadPartFails; - - private @Metric MutableCounterLong numGetFileStatusFails; - private @Metric MutableCounterLong numCreateDirectoryFails; - private @Metric MutableCounterLong numCreateFileFails; - private @Metric MutableCounterLong numLookupFileFails; - private @Metric MutableCounterLong numListStatusFails; - - // Metrics for total number of volumes, buckets and keys - - private @Metric MutableCounterLong numVolumes; - private @Metric MutableCounterLong numBuckets; - private @Metric MutableCounterLong numS3Buckets; - - //TODO: This metric is an estimate and it may be inaccurate on restart if the - // OM process was not shutdown cleanly. Key creations/deletions in the last - // few minutes before restart may not be included in this count. - private @Metric MutableCounterLong numKeys; - - - - // Metrics to track checkpointing statistics from last run. - private @Metric MutableGaugeLong lastCheckpointCreationTimeTaken; - private @Metric MutableGaugeLong lastCheckpointStreamingTimeTaken; - - private @Metric MutableCounterLong numBucketS3Creates; - private @Metric MutableCounterLong numBucketS3CreateFails; - private @Metric MutableCounterLong numBucketS3Deletes; - private @Metric MutableCounterLong numBucketS3DeleteFails; - - private @Metric MutableCounterLong numListMultipartUploadFails; - private @Metric MutableCounterLong numListMultipartUploads; - - public OMMetrics() { - } - - public static OMMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, - "Ozone Manager Metrics", - new OMMetrics()); - } - - public void incNumS3BucketCreates() { - numBucketOps.incr(); - numBucketS3Creates.incr(); - } - - public void incNumS3BucketCreateFails() { - numBucketS3CreateFails.incr(); - } - - public void incNumS3BucketDeletes() { - numBucketOps.incr(); - numBucketS3Deletes.incr(); - } - - public void incNumS3BucketDeleteFails() { - numBucketOps.incr(); - numBucketS3DeleteFails.incr(); - } - - - public void incNumS3Buckets() { - numS3Buckets.incr(); - } - - public void decNumS3Buckets() { - numS3Buckets.incr(); - } - - public void incNumVolumes() { - numVolumes.incr(); - } - - public void decNumVolumes() { - numVolumes.incr(-1); - } - - public void incNumBuckets() { - numBuckets.incr(); - } - - public void decNumBuckets() { - numBuckets.incr(-1); - } - - public void incNumKeys() { - numKeys.incr(); - } - - public void decNumKeys() { - numKeys.incr(-1); - } - - public void setNumVolumes(long val) { - long oldVal = this.numVolumes.value(); - this.numVolumes.incr(val - oldVal); - } - - public void setNumBuckets(long val) { - long oldVal = this.numBuckets.value(); - this.numBuckets.incr(val - oldVal); - } - - public void setNumKeys(long val) { - long oldVal = this.numKeys.value(); - this.numKeys.incr(val- oldVal); - } - - public long getNumVolumes() { - return numVolumes.value(); - } - - public long getNumBuckets() { - return numBuckets.value(); - } - - public long getNumKeys() { - return numKeys.value(); - } - - - public void incNumVolumeCreates() { - numVolumeOps.incr(); - numVolumeCreates.incr(); - } - - public void incNumVolumeUpdates() { - numVolumeOps.incr(); - numVolumeUpdates.incr(); - } - - public void incNumVolumeInfos() { - numVolumeOps.incr(); - numVolumeInfos.incr(); - } - - public void incNumVolumeDeletes() { - numVolumeOps.incr(); - numVolumeDeletes.incr(); - } - - public void incNumVolumeCheckAccesses() { - numVolumeOps.incr(); - 
numVolumeCheckAccesses.incr(); - } - - public void incNumBucketCreates() { - numBucketOps.incr(); - numBucketCreates.incr(); - } - - public void incNumBucketInfos() { - numBucketOps.incr(); - numBucketInfos.incr(); - } - - public void incNumBucketUpdates() { - numBucketOps.incr(); - numBucketUpdates.incr(); - } - - public void incNumBucketDeletes() { - numBucketOps.incr(); - numBucketDeletes.incr(); - } - - public void incNumBucketLists() { - numBucketOps.incr(); - numBucketLists.incr(); - } - - public void incNumKeyLists() { - numKeyOps.incr(); - numKeyLists.incr(); - } - - public void incNumVolumeLists() { - numVolumeOps.incr(); - numVolumeLists.incr(); - } - - public void incNumListS3Buckets() { - numBucketOps.incr(); - numBucketS3Lists.incr(); - } - - public void incNumListS3BucketsFails() { - numBucketOps.incr(); - numBucketS3ListFails.incr(); - } - - public void incNumInitiateMultipartUploads() { - numKeyOps.incr(); - numInitiateMultipartUploads.incr(); - } - - public void incNumInitiateMultipartUploadFails() { - numInitiateMultipartUploadFails.incr(); - } - - public void incNumCommitMultipartUploadParts() { - numKeyOps.incr(); - numCommitMultipartUploadParts.incr(); - } - - public void incNumCommitMultipartUploadPartFails() { - numCommitMultipartUploadPartFails.incr(); - } - - public void incNumCompleteMultipartUploads() { - numKeyOps.incr(); - numCompleteMultipartUploads.incr(); - } - - public void incNumCompleteMultipartUploadFails() { - numCompleteMultipartUploadFails.incr(); - } - - public void incNumAbortMultipartUploads() { - numKeyOps.incr(); - numAbortMultipartUploads.incr(); - } - - public void incNumListMultipartUploadFails() { - numListMultipartUploadFails.incr(); - } - - public void incNumListMultipartUploads() { - numKeyOps.incr(); - numListMultipartUploads.incr(); - } - - public void incNumAbortMultipartUploadFails() { - numAbortMultipartUploadFails.incr(); - } - public void incNumListMultipartUploadParts() { - numKeyOps.incr(); - numListMultipartUploadParts.incr(); - } - - public void incNumGetFileStatus() { - numKeyOps.incr(); - numFSOps.incr(); - numGetFileStatus.incr(); - } - - public void incNumGetFileStatusFails() { - numGetFileStatusFails.incr(); - } - - public void incNumCreateDirectory() { - numKeyOps.incr(); - numFSOps.incr(); - numCreateDirectory.incr(); - } - - public void incNumCreateDirectoryFails() { - numCreateDirectoryFails.incr(); - } - - public void incNumCreateFile() { - numKeyOps.incr(); - numFSOps.incr(); - numCreateFile.incr(); - } - - public void incNumCreateFileFails() { - numCreateFileFails.incr(); - } - - public void incNumLookupFile() { - numKeyOps.incr(); - numFSOps.incr(); - numLookupFile.incr(); - } - - public void incNumLookupFileFails() { - numLookupFileFails.incr(); - } - - public void incNumListStatus() { - numKeyOps.incr(); - numFSOps.incr(); - numListStatus.incr(); - } - - public void incNumListStatusFails() { - numListStatusFails.incr(); - } - - public void incNumListMultipartUploadPartFails() { - numListMultipartUploadPartFails.incr(); - } - - public void incNumGetServiceLists() { - numGetServiceLists.incr(); - } - - public void incNumVolumeCreateFails() { - numVolumeCreateFails.incr(); - } - - public void incNumVolumeUpdateFails() { - numVolumeUpdateFails.incr(); - } - - public void incNumVolumeInfoFails() { - numVolumeInfoFails.incr(); - } - - public void incNumVolumeDeleteFails() { - numVolumeDeleteFails.incr(); - } - - public void incNumVolumeCheckAccessFails() { - numVolumeCheckAccessFails.incr(); - } - - public void 
incNumBucketCreateFails() { - numBucketCreateFails.incr(); - } - - public void incNumBucketInfoFails() { - numBucketInfoFails.incr(); - } - - public void incNumBucketUpdateFails() { - numBucketUpdateFails.incr(); - } - - public void incNumBucketDeleteFails() { - numBucketDeleteFails.incr(); - } - - public void incNumKeyAllocates() { - numKeyOps.incr(); - numKeyAllocate.incr(); - } - - public void incNumKeyAllocateFails() { - numKeyAllocateFails.incr(); - } - - public void incNumKeyLookups() { - numKeyOps.incr(); - numKeyLookup.incr(); - } - - public void incNumKeyLookupFails() { - numKeyLookupFails.incr(); - } - - public void incNumKeyRenames() { - numKeyOps.incr(); - numKeyRenames.incr(); - } - - public void incNumKeyRenameFails() { - numKeyOps.incr(); - numKeyRenameFails.incr(); - } - - public void incNumKeyDeleteFails() { - numKeyDeleteFails.incr(); - } - - public void incNumKeyDeletes() { - numKeyOps.incr(); - numKeyDeletes.incr(); - } - - public void incNumKeyCommits() { - numKeyOps.incr(); - numKeyCommits.incr(); - } - - public void incNumKeyCommitFails() { - numKeyCommitFails.incr(); - } - - public void incNumBlockAllocateCalls() { - numBlockAllocations.incr(); - } - - public void incNumBlockAllocateCallFails() { - numBlockAllocationFails.incr(); - } - - public void incNumBucketListFails() { - numBucketListFails.incr(); - } - - public void incNumKeyListFails() { - numKeyListFails.incr(); - } - - public void incNumVolumeListFails() { - numVolumeListFails.incr(); - } - - public void incNumGetServiceListFails() { - numGetServiceListFails.incr(); - } - - public void setLastCheckpointCreationTimeTaken(long val) { - this.lastCheckpointCreationTimeTaken.set(val); - } - - public void setLastCheckpointStreamingTimeTaken(long val) { - this.lastCheckpointStreamingTimeTaken.set(val); - } - - @VisibleForTesting - public long getNumVolumeCreates() { - return numVolumeCreates.value(); - } - - @VisibleForTesting - public long getNumVolumeUpdates() { - return numVolumeUpdates.value(); - } - - @VisibleForTesting - public long getNumVolumeInfos() { - return numVolumeInfos.value(); - } - - @VisibleForTesting - public long getNumVolumeDeletes() { - return numVolumeDeletes.value(); - } - - @VisibleForTesting - public long getNumVolumeCheckAccesses() { - return numVolumeCheckAccesses.value(); - } - - @VisibleForTesting - public long getNumBucketCreates() { - return numBucketCreates.value(); - } - - @VisibleForTesting - public long getNumBucketInfos() { - return numBucketInfos.value(); - } - - @VisibleForTesting - public long getNumBucketUpdates() { - return numBucketUpdates.value(); - } - - @VisibleForTesting - public long getNumBucketDeletes() { - return numBucketDeletes.value(); - } - - @VisibleForTesting - public long getNumBucketLists() { - return numBucketLists.value(); - } - - @VisibleForTesting - public long getNumVolumeLists() { - return numVolumeLists.value(); - } - - @VisibleForTesting - public long getNumKeyLists() { - return numKeyLists.value(); - } - - @VisibleForTesting - public long getNumGetServiceLists() { - return numGetServiceLists.value(); - } - - @VisibleForTesting - public long getNumVolumeCreateFails() { - return numVolumeCreateFails.value(); - } - - @VisibleForTesting - public long getNumVolumeUpdateFails() { - return numVolumeUpdateFails.value(); - } - - @VisibleForTesting - public long getNumVolumeInfoFails() { - return numVolumeInfoFails.value(); - } - - @VisibleForTesting - public long getNumVolumeDeleteFails() { - return numVolumeDeleteFails.value(); - } - - @VisibleForTesting 
- public long getNumVolumeCheckAccessFails() { - return numVolumeCheckAccessFails.value(); - } - - @VisibleForTesting - public long getNumBucketCreateFails() { - return numBucketCreateFails.value(); - } - - @VisibleForTesting - public long getNumBucketInfoFails() { - return numBucketInfoFails.value(); - } - - @VisibleForTesting - public long getNumBucketUpdateFails() { - return numBucketUpdateFails.value(); - } - - @VisibleForTesting - public long getNumBucketDeleteFails() { - return numBucketDeleteFails.value(); - } - - @VisibleForTesting - public long getNumKeyAllocates() { - return numKeyAllocate.value(); - } - - @VisibleForTesting - public long getNumKeyAllocateFails() { - return numKeyAllocateFails.value(); - } - - @VisibleForTesting - public long getNumKeyLookups() { - return numKeyLookup.value(); - } - - @VisibleForTesting - public long getNumKeyLookupFails() { - return numKeyLookupFails.value(); - } - - @VisibleForTesting - public long getNumKeyRenames() { - return numKeyRenames.value(); - } - - @VisibleForTesting - public long getNumKeyRenameFails() { - return numKeyRenameFails.value(); - } - - @VisibleForTesting - public long getNumKeyDeletes() { - return numKeyDeletes.value(); - } - - @VisibleForTesting - public long getNumKeyDeletesFails() { - return numKeyDeleteFails.value(); - } - - @VisibleForTesting - public long getNumBucketListFails() { - return numBucketListFails.value(); - } - - @VisibleForTesting - public long getNumKeyListFails() { - return numKeyListFails.value(); - } - - - @VisibleForTesting - public long getNumFSOps() { - return numFSOps.value(); - } - - @VisibleForTesting - public long getNumGetFileStatus() { - return numGetFileStatus.value(); - } - - @VisibleForTesting - public long getNumListStatus() { - return numListStatus.value(); - } - - @VisibleForTesting - public long getNumVolumeListFails() { - return numVolumeListFails.value(); - } - - @VisibleForTesting - public long getNumKeyCommits() { - return numKeyCommits.value(); - } - - @VisibleForTesting - public long getNumKeyCommitFails() { - return numKeyCommitFails.value(); - } - - @VisibleForTesting - public long getNumBlockAllocates() { - return numBlockAllocations.value(); - } - - @VisibleForTesting - public long getNumBlockAllocateFails() { - return numBlockAllocationFails.value(); - } - - @VisibleForTesting - public long getNumGetServiceListFails() { - return numGetServiceListFails.value(); - } - - @VisibleForTesting - public long getNumListS3Buckets() { - return numBucketS3Lists.value(); - } - - @VisibleForTesting - public long getNumListS3BucketsFails() { - return numBucketS3ListFails.value(); - } - - public long getNumInitiateMultipartUploads() { - return numInitiateMultipartUploads.value(); - } - - public long getNumInitiateMultipartUploadFails() { - return numInitiateMultipartUploadFails.value(); - } - - public long getNumAbortMultipartUploads() { - return numAbortMultipartUploads.value(); - } - - public long getNumAbortMultipartUploadFails() { - return numAbortMultipartUploadFails.value(); - } - - @VisibleForTesting - public long getLastCheckpointCreationTimeTaken() { - return lastCheckpointCreationTimeTaken.value(); - } - - @VisibleForTesting - public long getLastCheckpointStreamingTimeTaken() { - return lastCheckpointStreamingTimeTaken.value(); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java deleted file mode 100644 index 67c7eb8bbc4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.security.authorize.PolicyProvider; -import org.apache.hadoop.security.authorize.Service; - -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.ozone.om.OMConfigKeys - .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL; - -/** - * {@link PolicyProvider} for OM protocols. - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -public final class OMPolicyProvider extends PolicyProvider { - - private static AtomicReference atomicReference = - new AtomicReference<>(); - - private OMPolicyProvider() { - } - - @Private - @Unstable - public static OMPolicyProvider getInstance() { - if (atomicReference.get() == null) { - atomicReference.compareAndSet(null, new OMPolicyProvider()); - } - return atomicReference.get(); - } - - private static final Service[] OM_SERVICES = - new Service[]{ - new Service(OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL, - OzoneManagerProtocol.class), - }; - - @SuppressFBWarnings("EI_EXPOSE_REP") - @Override - public Service[] getServices() { - return OM_SERVICES; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java deleted file mode 100644 index f632ad143c4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStarterInterface.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import java.io.IOException; - -/** - * This interface is used by the OzoneManagerStarter class to allow the - * dependencies to be injected to the CLI class. - */ -public interface OMStarterInterface { - void start(OzoneConfiguration conf) throws IOException, - AuthenticationException; - boolean init(OzoneConfiguration conf) throws IOException, - AuthenticationException; -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java deleted file mode 100644 index b84cc5d1cc9..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.Properties; -import java.util.UUID; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.common.Storage; - -import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; - -/** - * OMStorage is responsible for management of the StorageDirectories used by - * the Ozone Manager. - */ -public class OMStorage extends Storage { - - public static final String STORAGE_DIR = "om"; - public static final String OM_ID = "omUuid"; - public static final String OM_CERT_SERIAL_ID = "omCertSerialId"; - - /** - * Construct OMStorage. - * @throws IOException if any directories are inaccessible. 
- */ - public OMStorage(OzoneConfiguration conf) throws IOException { - super(NodeType.OM, OmUtils.getOmDbDir(conf), STORAGE_DIR); - } - - public void setScmId(String scmId) throws IOException { - if (getState() == StorageState.INITIALIZED) { - throw new IOException("OM is already initialized."); - } else { - getStorageInfo().setProperty(SCM_ID, scmId); - } - } - - public void setOmCertSerialId(String certSerialId) throws IOException { - getStorageInfo().setProperty(OM_CERT_SERIAL_ID, certSerialId); - } - - public void setOmId(String omId) throws IOException { - if (getState() == StorageState.INITIALIZED) { - throw new IOException("OM is already initialized."); - } else { - getStorageInfo().setProperty(OM_ID, omId); - } - } - - /** - * Retrieves the SCM ID from the version file. - * @return SCM_ID - */ - public String getScmId() { - return getStorageInfo().getProperty(SCM_ID); - } - - /** - * Retrieves the OM ID from the version file. - * @return OM_ID - */ - public String getOmId() { - return getStorageInfo().getProperty(OM_ID); - } - - /** - * Retrieves the serial id of certificate issued by SCM. - * @return OM_ID - */ - public String getOmCertSerialId() { - return getStorageInfo().getProperty(OM_CERT_SERIAL_ID); - } - - @Override - protected Properties getNodeProperties() { - String omId = getOmId(); - if (omId == null) { - omId = UUID.randomUUID().toString(); - } - Properties omProperties = new Properties(); - omProperties.setProperty(OM_ID, omId); - - if (getOmCertSerialId() != null) { - omProperties.setProperty(OM_CERT_SERIAL_ID, getOmCertSerialId()); - } - return omProperties; - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java deleted file mode 100644 index 95f21ae0ca3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ /dev/null @@ -1,943 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.hdds.utils.db.TypedTable; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.hdds.utils.db.cache.TableCacheImpl; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec; -import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec; -import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec; -import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec; -import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec; -import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec; -import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec; -import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec; -import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.UserVolumeInfo; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; -import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import org.eclipse.jetty.util.StringUtil; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ozone metadata manager interface. 
- */ -public class OmMetadataManagerImpl implements OMMetadataManager { - private static final Logger LOG = - LoggerFactory.getLogger(OmMetadataManagerImpl.class); - - /** - * OM RocksDB Structure . - *
- * OM DB stores metadata as KV pairs in different column families. - *
- * OM DB Schema: - * |----------------------------------------------------------------------| - * | Column Family | VALUE | - * |----------------------------------------------------------------------| - * | userTable | /user->UserVolumeInfo | - * |----------------------------------------------------------------------| - * | volumeTable | /volume->VolumeInfo | - * |----------------------------------------------------------------------| - * | bucketTable | /volume/bucket-> BucketInfo | - * |----------------------------------------------------------------------| - * | keyTable | /volumeName/bucketName/keyName->KeyInfo | - * |----------------------------------------------------------------------| - * | deletedTable | /volumeName/bucketName/keyName->RepeatedKeyInfo | - * |----------------------------------------------------------------------| - * | openKey | /volumeName/bucketName/keyName/id->KeyInfo | - * |----------------------------------------------------------------------| - * | s3Table | s3BucketName -> /volumeName/bucketName | - * |----------------------------------------------------------------------| - * | s3SecretTable | s3g_access_key_id -> s3Secret | - * |----------------------------------------------------------------------| - * | dTokenTable | s3g_access_key_id -> s3Secret | - * |----------------------------------------------------------------------| - * | prefixInfoTable | prefix -> PrefixInfo | - * |----------------------------------------------------------------------| - * | multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->... | - * |----------------------------------------------------------------------| - */ - - public static final String USER_TABLE = "userTable"; - public static final String VOLUME_TABLE = "volumeTable"; - public static final String BUCKET_TABLE = "bucketTable"; - public static final String KEY_TABLE = "keyTable"; - public static final String DELETED_TABLE = "deletedTable"; - public static final String OPEN_KEY_TABLE = "openKeyTable"; - public static final String S3_TABLE = "s3Table"; - public static final String MULTIPARTINFO_TABLE = "multipartInfoTable"; - public static final String S3_SECRET_TABLE = "s3SecretTable"; - public static final String DELEGATION_TOKEN_TABLE = "dTokenTable"; - public static final String PREFIX_TABLE = "prefixTable"; - - private DBStore store; - - private final OzoneManagerLock lock; - private final long openKeyExpireThresholdMS; - - private Table userTable; - private Table volumeTable; - private Table bucketTable; - private Table keyTable; - private Table deletedTable; - private Table openKeyTable; - private Table s3Table; - private Table multipartInfoTable; - private Table s3SecretTable; - private Table dTokenTable; - private Table prefixTable; - private boolean isRatisEnabled; - - public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException { - this.lock = new OzoneManagerLock(conf); - this.openKeyExpireThresholdMS = 1000L * conf.getInt( - OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, - OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT); - // TODO: This is a temporary check. Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = conf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); - start(conf); - } - - /** - * For subclass overriding. 
- */ - protected OmMetadataManagerImpl() { - this.lock = new OzoneManagerLock(new OzoneConfiguration()); - this.openKeyExpireThresholdMS = - OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; - } - - @Override - public Table getUserTable() { - return userTable; - } - - public Table getDelegationTokenTable() { - return dTokenTable; - } - - @Override - public Table getVolumeTable() { - return volumeTable; - } - - @Override - public Table getBucketTable() { - return bucketTable; - } - - @Override - public Table getKeyTable() { - return keyTable; - } - - @Override - public Table getDeletedTable() { - return deletedTable; - } - - @Override - public Table getOpenKeyTable() { - return openKeyTable; - } - - @Override - public Table getS3Table() { - return s3Table; - } - - @Override - public Table getPrefixTable() { - return prefixTable; - } - - @Override - public Table getMultipartInfoTable() { - return multipartInfoTable; - } - - - private void checkTableStatus(Table table, String name) throws IOException { - String logMessage = "Unable to get a reference to %s table. Cannot " + - "continue."; - String errMsg = "Inconsistent DB state, Table - %s. Please check the logs" + - "for more info."; - if (table == null) { - LOG.error(String.format(logMessage, name)); - throw new IOException(String.format(errMsg, name)); - } - } - - /** - * Start metadata manager. - */ - @Override - public void start(OzoneConfiguration configuration) throws IOException { - // We need to create the DB here, as when during restart, stop closes the - // db, so we need to create the store object and initialize DB. - if (store == null) { - File metaDir = OmUtils.getOmDbDir(configuration); - - DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(configuration) - .setName(OM_DB_NAME) - .setPath(Paths.get(metaDir.getPath())); - this.store = addOMTablesAndCodecs(dbStoreBuilder).build(); - initializeOmTables(); - } - } - - protected DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { - - return builder.addTable(USER_TABLE) - .addTable(VOLUME_TABLE) - .addTable(BUCKET_TABLE) - .addTable(KEY_TABLE) - .addTable(DELETED_TABLE) - .addTable(OPEN_KEY_TABLE) - .addTable(S3_TABLE) - .addTable(MULTIPARTINFO_TABLE) - .addTable(DELEGATION_TOKEN_TABLE) - .addTable(S3_SECRET_TABLE) - .addTable(PREFIX_TABLE) - .addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec()) - .addCodec(OmKeyInfo.class, new OmKeyInfoCodec()) - .addCodec(RepeatedOmKeyInfo.class, new RepeatedOmKeyInfoCodec()) - .addCodec(OmBucketInfo.class, new OmBucketInfoCodec()) - .addCodec(OmVolumeArgs.class, new OmVolumeArgsCodec()) - .addCodec(UserVolumeInfo.class, new UserVolumeInfoCodec()) - .addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec()) - .addCodec(S3SecretValue.class, new S3SecretValueCodec()) - .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec()); - } - - /** - * Initialize OM Tables. 
- * - * @throws IOException - */ - protected void initializeOmTables() throws IOException { - userTable = - this.store.getTable(USER_TABLE, String.class, UserVolumeInfo.class); - checkTableStatus(userTable, USER_TABLE); - - TableCacheImpl.CacheCleanupPolicy cleanupPolicy = - TableCacheImpl.CacheCleanupPolicy.NEVER; - - volumeTable = - this.store.getTable(VOLUME_TABLE, String.class, OmVolumeArgs.class, - cleanupPolicy); - checkTableStatus(volumeTable, VOLUME_TABLE); - - bucketTable = - this.store.getTable(BUCKET_TABLE, String.class, OmBucketInfo.class, - cleanupPolicy); - - checkTableStatus(bucketTable, BUCKET_TABLE); - - keyTable = this.store.getTable(KEY_TABLE, String.class, OmKeyInfo.class); - checkTableStatus(keyTable, KEY_TABLE); - - deletedTable = this.store.getTable(DELETED_TABLE, String.class, - RepeatedOmKeyInfo.class); - checkTableStatus(deletedTable, DELETED_TABLE); - - openKeyTable = - this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class); - checkTableStatus(openKeyTable, OPEN_KEY_TABLE); - - s3Table = this.store.getTable(S3_TABLE, String.class, String.class); - checkTableStatus(s3Table, S3_TABLE); - - multipartInfoTable = this.store.getTable(MULTIPARTINFO_TABLE, - String.class, OmMultipartKeyInfo.class); - checkTableStatus(multipartInfoTable, MULTIPARTINFO_TABLE); - - dTokenTable = this.store.getTable(DELEGATION_TOKEN_TABLE, - OzoneTokenIdentifier.class, Long.class); - checkTableStatus(dTokenTable, DELEGATION_TOKEN_TABLE); - - s3SecretTable = this.store.getTable(S3_SECRET_TABLE, String.class, - S3SecretValue.class); - checkTableStatus(s3SecretTable, S3_SECRET_TABLE); - - prefixTable = this.store.getTable(PREFIX_TABLE, String.class, - OmPrefixInfo.class); - checkTableStatus(prefixTable, PREFIX_TABLE); - } - - /** - * Stop metadata manager. - */ - @Override - public void stop() throws Exception { - if (store != null) { - store.close(); - store = null; - } - } - - /** - * Get metadata store. - * - * @return store - metadata store. - */ - @VisibleForTesting - @Override - public DBStore getStore() { - return store; - } - - /** - * Given a volume return the corresponding DB key. - * - * @param volume - Volume name - */ - @Override - public String getVolumeKey(String volume) { - return OzoneConsts.OM_KEY_PREFIX + volume; - } - - /** - * Given a user return the corresponding DB key. - * - * @param user - User name - */ - @Override - public String getUserKey(String user) { - return user; - } - - /** - * Given a volume and bucket, return the corresponding DB key. - * - * @param volume - User name - * @param bucket - Bucket name - */ - @Override - public String getBucketKey(String volume, String bucket) { - StringBuilder builder = - new StringBuilder().append(OM_KEY_PREFIX).append(volume); - - if (StringUtils.isNotBlank(bucket)) { - builder.append(OM_KEY_PREFIX).append(bucket); - } - return builder.toString(); - } - - @Override - public String getOzoneKey(String volume, String bucket, String key) { - StringBuilder builder = new StringBuilder() - .append(OM_KEY_PREFIX).append(volume); - // TODO : Throw if the Bucket is null? 
- builder.append(OM_KEY_PREFIX).append(bucket); - if (StringUtil.isNotBlank(key)) { - builder.append(OM_KEY_PREFIX); - if (!key.equals(OM_KEY_PREFIX)) { - builder.append(key); - } - } - return builder.toString(); - } - - @Override - public String getOzoneDirKey(String volume, String bucket, String key) { - key = OzoneFSUtils.addTrailingSlashIfNeeded(key); - return getOzoneKey(volume, bucket, key); - } - - @Override - public String getOpenKey(String volume, String bucket, - String key, long id) { - String openKey = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + - OM_KEY_PREFIX + key + OM_KEY_PREFIX + id; - return openKey; - } - - @Override - public String getMultipartKey(String volume, String bucket, String key, - String - uploadId) { - return OmMultipartUpload.getDbKey(volume, bucket, key, uploadId); - } - - /** - * Returns the OzoneManagerLock used on Metadata DB. - * - * @return OzoneManagerLock - */ - @Override - public org.apache.hadoop.ozone.om.lock.OzoneManagerLock getLock() { - return lock; - } - - /** - * Returns true if the firstArray startsWith the bytes of secondArray. - * - * @param firstArray - Byte array - * @param secondArray - Byte array - * @return true if the first array bytes match the bytes in the second array. - */ - private boolean startsWith(byte[] firstArray, byte[] secondArray) { - - if (firstArray == null) { - // if both are null, then the arrays match, else if first is null and - // second is not, then this function returns false. - return secondArray == null; - } - - - if (secondArray != null) { - // If the second array is longer then first array cannot be starting with - // the bytes of second array. - if (secondArray.length > firstArray.length) { - return false; - } - - for (int ndx = 0; ndx < secondArray.length; ndx++) { - if (firstArray[ndx] != secondArray[ndx]) { - return false; - } - } - return true; //match, return true. - } - return false; // if first is not null and second is null, we define that - // array does not start with same chars. - } - - /** - * Given a volume, check if it is empty, i.e there are no buckets inside it. - * We iterate in the bucket table and see if there is any key that starts with - * the volume prefix. We actually look for /volume/, since if we don't have - * the trailing slash it is possible that we might match some other volume. - *
- * For example, vol1 and vol122 might match, to avoid that we look for /vol1/ - * - * @param volume - Volume name - * @return true if the volume is empty - */ - @Override - public boolean isVolumeEmpty(String volume) throws IOException { - String volumePrefix = getVolumeKey(volume + OM_KEY_PREFIX); - - // First check in bucket table cache. - Iterator, CacheValue>> iterator = - ((TypedTable< String, OmBucketInfo>) bucketTable).cacheIterator(); - while (iterator.hasNext()) { - Map.Entry< CacheKey< String >, CacheValue< OmBucketInfo > > entry = - iterator.next(); - String key = entry.getKey().getCacheKey(); - OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); - // Making sure that entry is not for delete bucket request. - if (key.startsWith(volumePrefix) && omBucketInfo != null) { - return false; - } - } - - try (TableIterator> - bucketIter = bucketTable.iterator()) { - KeyValue kv = bucketIter.seek(volumePrefix); - - if (kv != null) { - // Check the entry in db is not marked for delete. This can happen - // while entry is marked for delete, but it is not flushed to DB. - CacheValue cacheValue = - bucketTable.getCacheValue(new CacheKey(kv.getKey())); - if (cacheValue != null) { - if (kv.getKey().startsWith(volumePrefix) - && cacheValue.getCacheValue() != null) { - return false; // we found at least one bucket with this volume - // prefix. - } - } else { - if (kv.getKey().startsWith(volumePrefix)) { - return false; // we found at least one bucket with this volume - // prefix. - } - } - } - - } - return true; - } - - /** - * Given a volume/bucket, check if it is empty, i.e there are no keys inside - * it. Prefix is /volume/bucket/, and we lookup the keyTable. - * - * @param volume - Volume name - * @param bucket - Bucket name - * @return true if the bucket is empty - */ - @Override - public boolean isBucketEmpty(String volume, String bucket) - throws IOException { - String keyPrefix = getBucketKey(volume, bucket); - - // First check in key table cache. - Iterator, CacheValue>> iterator = - ((TypedTable< String, OmKeyInfo>) keyTable).cacheIterator(); - while (iterator.hasNext()) { - Map.Entry< CacheKey, CacheValue> entry = - iterator.next(); - String key = entry.getKey().getCacheKey(); - OmKeyInfo omKeyInfo = entry.getValue().getCacheValue(); - // Making sure that entry is not for delete key request. - if (key.startsWith(keyPrefix) && omKeyInfo != null) { - return false; - } - } - try (TableIterator> keyIter = - keyTable.iterator()) { - KeyValue kv = keyIter.seek(keyPrefix); - - if (kv != null) { - // Check the entry in db is not marked for delete. This can happen - // while entry is marked for delete, but it is not flushed to DB. - CacheValue cacheValue = - keyTable.getCacheValue(new CacheKey(kv.getKey())); - if (cacheValue != null) { - if (kv.getKey().startsWith(keyPrefix) - && cacheValue.getCacheValue() != null) { - return false; // we found at least one key with this vol/bucket - // prefix. - } - } else { - if (kv.getKey().startsWith(keyPrefix)) { - return false; // we found at least one key with this vol/bucket - // prefix. 
- } - } - } - - } - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public List listBuckets(final String volumeName, - final String startBucket, final String bucketPrefix, - final int maxNumOfBuckets) throws IOException { - List result = new ArrayList<>(); - if (Strings.isNullOrEmpty(volumeName)) { - throw new OMException("Volume name is required.", - ResultCodes.VOLUME_NOT_FOUND); - } - - String volumeNameBytes = getVolumeKey(volumeName); - if (volumeTable.get(volumeNameBytes) == null) { - throw new OMException("Volume " + volumeName + " not found.", - ResultCodes.VOLUME_NOT_FOUND); - } - - String startKey; - boolean skipStartKey = false; - if (StringUtil.isNotBlank(startBucket)) { - // if the user has specified a start key, we need to seek to that key - // and avoid that key in the response set. - startKey = getBucketKey(volumeName, startBucket); - skipStartKey = true; - } else { - // If the user has specified a prefix key, we need to get to the first - // of the keys with the prefix match. We can leverage RocksDB to do that. - // However, if the user has specified only a prefix, we cannot skip - // the first prefix key we see, the boolean skipStartKey allows us to - // skip the startkey or not depending on what patterns are specified. - startKey = getBucketKey(volumeName, bucketPrefix); - } - - String seekPrefix; - if (StringUtil.isNotBlank(bucketPrefix)) { - seekPrefix = getBucketKey(volumeName, bucketPrefix); - } else { - seekPrefix = getVolumeKey(volumeName + OM_KEY_PREFIX); - } - int currentCount = 0; - - - // For Bucket it is full cache, so we can just iterate in-memory table - // cache. - Iterator, CacheValue>> iterator = - bucketTable.cacheIterator(); - - - while (currentCount < maxNumOfBuckets && iterator.hasNext()) { - Map.Entry, CacheValue> entry = - iterator.next(); - - String key = entry.getKey().getCacheKey(); - OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); - // Making sure that entry in cache is not for delete bucket request. - - if (omBucketInfo != null) { - if (key.equals(startKey) && skipStartKey) { - continue; - } - - // We should return only the keys, whose keys match with prefix and - // the keys after the startBucket. - if (key.startsWith(seekPrefix) && key.compareTo(startKey) > 0) { - result.add(omBucketInfo); - currentCount++; - } - } - } - return result; - } - - @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, int maxKeys) throws IOException { - - List result = new ArrayList<>(); - if (maxKeys <= 0) { - return result; - } - - if (Strings.isNullOrEmpty(volumeName)) { - throw new OMException("Volume name is required.", - ResultCodes.VOLUME_NOT_FOUND); - } - - if (Strings.isNullOrEmpty(bucketName)) { - throw new OMException("Bucket name is required.", - ResultCodes.BUCKET_NOT_FOUND); - } - - String bucketNameBytes = getBucketKey(volumeName, bucketName); - if (getBucketTable().get(bucketNameBytes) == null) { - throw new OMException("Bucket " + bucketName + " not found.", - ResultCodes.BUCKET_NOT_FOUND); - } - - String seekKey; - boolean skipStartKey = false; - if (StringUtil.isNotBlank(startKey)) { - // Seek to the specified key. - seekKey = getOzoneKey(volumeName, bucketName, startKey); - skipStartKey = true; - } else { - // This allows us to seek directly to the first key with the right prefix. 
- seekKey = getOzoneKey(volumeName, bucketName, keyPrefix); - } - - String seekPrefix; - if (StringUtil.isNotBlank(keyPrefix)) { - seekPrefix = getOzoneKey(volumeName, bucketName, keyPrefix); - } else { - seekPrefix = getBucketKey(volumeName, bucketName + OM_KEY_PREFIX); - } - int currentCount = 0; - - - TreeMap cacheKeyMap = new TreeMap<>(); - Set deletedKeySet = new TreeSet<>(); - Iterator, CacheValue>> iterator = - keyTable.cacheIterator(); - - //TODO: We can avoid this iteration if table cache has stored entries in - // treemap. Currently HashMap is used in Cache. HashMap get operation is an - // constant time operation, where as for treeMap get is log(n). - // So if we move to treemap, the get operation will be affected. As get - // is frequent operation on table. So, for now in list we iterate cache map - // and construct treeMap which match with keyPrefix and are greater than or - // equal to startKey. Later we can revisit this, if list operation - // is becoming slow. - while (iterator.hasNext()) { - Map.Entry< CacheKey, CacheValue> entry = - iterator.next(); - - String key = entry.getKey().getCacheKey(); - OmKeyInfo omKeyInfo = entry.getValue().getCacheValue(); - // Making sure that entry in cache is not for delete key request. - - if (omKeyInfo != null) { - if (key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) { - cacheKeyMap.put(key, omKeyInfo); - } - } else { - deletedKeySet.add(key); - } - } - - // Get maxKeys from DB if it has. - - try (TableIterator> - keyIter = getKeyTable().iterator()) { - KeyValue< String, OmKeyInfo > kv; - keyIter.seek(seekKey); - // we need to iterate maxKeys + 1 here because if skipStartKey is true, - // we should skip that entry and return the result. - while (currentCount < maxKeys + 1 && keyIter.hasNext()) { - kv = keyIter.next(); - if (kv != null && kv.getKey().startsWith(seekPrefix)) { - - // Entry should not be marked for delete, consider only those - // entries. - if(!deletedKeySet.contains(kv.getKey())) { - cacheKeyMap.put(kv.getKey(), kv.getValue()); - currentCount++; - } - } else { - // The SeekPrefix does not match any more, we can break out of the - // loop. - break; - } - } - } - - // Finally DB entries and cache entries are merged, then return the count - // of maxKeys from the sorted map. - currentCount = 0; - - for (Map.Entry cacheKey : cacheKeyMap.entrySet()) { - if (cacheKey.getKey().equals(seekKey) && skipStartKey) { - continue; - } - - result.add(cacheKey.getValue()); - currentCount++; - - if (currentCount == maxKeys) { - break; - } - } - - // Clear map and set. 
- cacheKeyMap.clear(); - deletedKeySet.clear(); - - return result; - } - - @Override - public List listVolumes(String userName, - String prefix, String startKey, int maxKeys) throws IOException { - List result = Lists.newArrayList(); - UserVolumeInfo volumes; - if (StringUtil.isBlank(userName)) { - throw new OMException("User name is required to list Volumes.", - ResultCodes.USER_NOT_FOUND); - } - volumes = getVolumesByUser(userName); - - if (volumes == null || volumes.getVolumeNamesCount() == 0) { - return result; - } - - boolean startKeyFound = Strings.isNullOrEmpty(startKey); - for (String volumeName : volumes.getVolumeNamesList()) { - if (!Strings.isNullOrEmpty(prefix)) { - if (!volumeName.startsWith(prefix)) { - continue; - } - } - - if (!startKeyFound && volumeName.equals(startKey)) { - startKeyFound = true; - continue; - } - if (startKeyFound && result.size() < maxKeys) { - OmVolumeArgs volumeArgs = - getVolumeTable().get(this.getVolumeKey(volumeName)); - if (volumeArgs == null) { - // Could not get volume info by given volume name, - // since the volume name is loaded from db, - // this probably means om db is corrupted or some entries are - // accidentally removed. - throw new OMException("Volume info not found for " + volumeName, - ResultCodes.VOLUME_NOT_FOUND); - } - result.add(volumeArgs); - } - } - - return result; - } - - private UserVolumeInfo getVolumesByUser(String userNameKey) - throws OMException { - try { - UserVolumeInfo userVolInfo = getUserTable().get(userNameKey); - if (userVolInfo == null) { - // No volume found for this user, return an empty list - return UserVolumeInfo.newBuilder().build(); - } else { - return userVolInfo; - } - } catch (IOException e) { - throw new OMException("Unable to get volumes info by the given user, " - + "metadata might be corrupted", e, - ResultCodes.METADATA_ERROR); - } - } - - @Override - public List getPendingDeletionKeys(final int keyCount) - throws IOException { - List keyBlocksList = Lists.newArrayList(); - try (TableIterator> - keyIter = getDeletedTable().iterator()) { - int currentCount = 0; - while (keyIter.hasNext() && currentCount < keyCount) { - KeyValue kv = keyIter.next(); - if (kv != null) { - RepeatedOmKeyInfo infoList = kv.getValue(); - // Get block keys as a list. - for(OmKeyInfo info : infoList.getOmKeyInfoList()){ - OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); - List item = latest.getLocationList().stream() - .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) - .collect(Collectors.toList()); - BlockGroup keyBlocks = BlockGroup.newBuilder() - .setKeyName(kv.getKey()) - .addAllBlockIDs(item) - .build(); - keyBlocksList.add(keyBlocks); - currentCount++; - } - } - } - } - return keyBlocksList; - } - - @Override - public List getExpiredOpenKeys() throws IOException { - List keyBlocksList = Lists.newArrayList(); - // TODO: Fix the getExpiredOpenKeys, Not part of this patch. 
- return keyBlocksList; - } - - @Override - public long countRowsInTable(Table table) - throws IOException { - long count = 0; - if (table != null) { - try (TableIterator> - keyValueTableIterator = table.iterator()) { - while (keyValueTableIterator.hasNext()) { - keyValueTableIterator.next(); - count++; - } - } - } - return count; - } - - @Override - public long countEstimatedRowsInTable(Table table) - throws IOException { - long count = 0; - if (table != null) { - count = table.getEstimatedKeyCount(); - } - return count; - } - - @Override - public List getMultipartUploadKeys( - String volumeName, String bucketName, String prefix) throws IOException { - List response = new ArrayList<>(); - - TableIterator> - iterator = getMultipartInfoTable().iterator(); - - String prefixKey = - OmMultipartUpload.getDbKey(volumeName, bucketName, prefix); - iterator.seek(prefixKey); - - while (iterator.hasNext()) { - KeyValue entry = iterator.next(); - if (entry.getKey().startsWith(prefixKey)) { - response.add(entry.getKey()); - } else { - break; - } - } - return response; - } - - @Override - public Table getS3SecretTable() { - return s3SecretTable; - } - - /** - * Update store used by subclass. - * - * @param store DB store. - */ - protected void setStore(DBStore store) { - this.store = store; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java deleted file mode 100644 index e9b1f432fe5..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import com.fasterxml.jackson.annotation.JsonProperty; - -/** - * OmMetricsInfo stored in a file, which will be used during OM restart to - * initialize the metrics. Currently this stores only numKeys. - */ -public class OmMetricsInfo { - - @JsonProperty - private long numKeys; - - OmMetricsInfo() { - this.numKeys = 0; - } - - public long getNumKeys() { - return numKeys; - } - - public void setNumKeys(long numKeys) { - this.numKeys = numKeys; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java deleted file mode 100644 index 79bc39f4984..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.concurrent.TimeUnit; - -/** - * This is the background service to delete hanging open keys. - * Scan the metadata of om periodically to get - * the keys with prefix "#open#" and ask scm to - * delete metadata accordingly, if scm returns - * success for keys, then clean up those keys. - */ -public class OpenKeyCleanupService extends BackgroundService { - - private static final Logger LOG = - LoggerFactory.getLogger(OpenKeyCleanupService.class); - - private final static int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2; - - private final KeyManager keyManager; - private final ScmBlockLocationProtocol scmClient; - - public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient, - KeyManager keyManager, int serviceInterval, - long serviceTimeout) { - super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS, - OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout); - this.keyManager = keyManager; - this.scmClient = scmClient; - } - - @Override - public BackgroundTaskQueue getTasks() { - BackgroundTaskQueue queue = new BackgroundTaskQueue(); - queue.add(new OpenKeyDeletingTask()); - return queue; - } - - private class OpenKeyDeletingTask - implements BackgroundTask { - - @Override - public int getPriority() { - return 0; - } - - @Override - public BackgroundTaskResult call() throws Exception { - try { - List keyBlocksList = keyManager.getExpiredOpenKeys(); - if (keyBlocksList.size() > 0) { - int toDeleteSize = keyBlocksList.size(); - LOG.debug("Found {} to-delete open keys in OM", toDeleteSize); - List results = - scmClient.deleteKeyBlocks(keyBlocksList); - int deletedSize = 0; - for (DeleteBlockGroupResult result : results) { - if (result.isSuccess()) { - try { - keyManager.deleteExpiredOpenKey(result.getObjectKey()); - if (LOG.isDebugEnabled()) { - LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); - } - deletedSize += 1; - } catch (IOException e) { - LOG.warn("Failed to delete hanging-open key {}", - result.getObjectKey(), e); - } - } else { - LOG.warn("Deleting open Key {} failed because some of the blocks" - + " were failed to delete, failed blocks: {}", - result.getObjectKey(), - StringUtils.join(",", result.getFailedBlocks())); - } - } - LOG.info("Found {} expired open key entries, successfully " + - "cleaned up {} entries", toDeleteSize, deletedSize); - return results::size; - } else { - LOG.debug("No hanging open key found in OM"); - } - } catch (IOException e) { - LOG.error("Unable to get hanging open keys, retry in" - + " next interval", e); - } - return BackgroundTaskResult.EmptyTaskResult.newResult(); - } - } -} diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java deleted file mode 100644 index 0cd087eee23..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ /dev/null @@ -1,3295 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.ObjectReader; -import com.fasterxml.jackson.databind.ObjectWriter; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.protobuf.BlockingService; - -import java.net.InetAddress; -import java.nio.file.Path; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.KeyPair; -import java.security.cert.CertificateException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Objects; - -import org.apache.commons.codec.digest.DigestUtils; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; -import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; -import org.apache.hadoop.ozone.om.ha.OMHANodeDetails; -import org.apache.hadoop.ozone.om.ha.OMNodeDetails; -import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerServerProtocol; -import org.apache.hadoop.ozone.om.ratis.OMRatisSnapshotInfo; -import org.apache.hadoop.ozone.om.snapshot.OzoneManagerSnapshotProvider; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocolPB.ProtocolMessageMetrics; -import org.apache.hadoop.ozone.security.OzoneSecurityException; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditLoggerType; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.audit.Auditor; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisClient; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort; -import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; -import org.apache.hadoop.ozone.security.acl.OzoneAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; -import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; 
-import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager; -import org.apache.hadoop.ozone.util.OzoneVersionInfo; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.security.token.SecretManager.InvalidToken; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.util.KMSUtil; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.hdds.utils.RetriableTask; -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.hdds.utils.db.DBStore; - -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.util.FileUtils; -import org.apache.ratis.util.LifeCycle; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; -import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; -import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE; -import static 
org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE; -import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_AUTH_METHOD; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneManagerService - .newReflectiveBlockingService; - -/** - * Ozone Manager is the metadata manager of ozone. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"}) -public final class OzoneManager extends ServiceRuntimeInfoImpl - implements OzoneManagerServerProtocol, OMMXBean, Auditor { - public static final Logger LOG = - LoggerFactory.getLogger(OzoneManager.class); - - private static final AuditLogger AUDIT = new AuditLogger( - AuditLoggerType.OMLOGGER); - - private static final String OM_DAEMON = "om"; - private static boolean securityEnabled = false; - private OzoneDelegationTokenSecretManager delegationTokenMgr; - private OzoneBlockTokenSecretManager blockTokenMgr; - private CertificateClient certClient; - private String caCertPem = null; - private static boolean testSecureOmFlag = false; - private final Text omRpcAddressTxt; - private final OzoneConfiguration configuration; - private RPC.Server omRpcServer; - private InetSocketAddress omRpcAddress; - private String omId; - - private OMMetadataManager metadataManager; - private VolumeManager volumeManager; - private BucketManager bucketManager; - private KeyManager keyManager; - private PrefixManagerImpl prefixManager; - private S3BucketManager s3BucketManager; - - private final OMMetrics metrics; - private final ProtocolMessageMetrics omClientProtocolMetrics; - private OzoneManagerHttpServer httpServer; - private final OMStorage omStorage; - private final ScmBlockLocationProtocol scmBlockClient; - private final StorageContainerLocationProtocol scmContainerClient; - private ObjectName omInfoBeanName; - private Timer metricsTimer; - private ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask; - private static final ObjectWriter WRITER = - new ObjectMapper().writerWithDefaultPrettyPrinter(); - private static final ObjectReader READER = - new ObjectMapper().readerFor(OmMetricsInfo.class); - private static final int 
SHUTDOWN_HOOK_PRIORITY = 30; - private final Runnable shutdownHook; - private final File omMetaDir; - private final boolean isAclEnabled; - private IAccessAuthorizer accessAuthorizer; - private JvmPauseMonitor jvmPauseMonitor; - private final SecurityConfig secConfig; - private S3SecretManager s3SecretManager; - private volatile boolean isOmRpcServerRunning = false; - private String omComponent; - private OzoneManagerProtocolServerSideTranslatorPB omServerProtocol; - - private boolean isRatisEnabled; - private OzoneManagerRatisServer omRatisServer; - private OzoneManagerRatisClient omRatisClient; - private OzoneManagerSnapshotProvider omSnapshotProvider; - private OMNodeDetails omNodeDetails; - private List peerNodes; - private File omRatisSnapshotDir; - private final OMRatisSnapshotInfo omRatisSnapshotInfo; - private final Collection ozAdmins; - - private KeyProviderCryptoExtension kmsProvider = null; - private static String keyProviderUriKeyName = - CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; - - // Adding parameters needed for VolumeRequests here, so that during request - // execution, we can get from ozoneManager. - private long maxUserVolumeCount; - - private final ScmClient scmClient; - private final long scmBlockSize; - private final int preallocateBlocksMax; - private final boolean grpcBlockTokenEnabled; - private final boolean useRatisForReplication; - - private OzoneManager(OzoneConfiguration conf) throws IOException, - AuthenticationException { - super(OzoneVersionInfo.OZONE_VERSION_INFO); - Preconditions.checkNotNull(conf); - configuration = conf; - // Load HA related configurations - OMHANodeDetails omhaNodeDetails = - OMHANodeDetails.loadOMHAConfig(configuration); - - this.peerNodes = omhaNodeDetails.getPeerNodeDetails(); - this.omNodeDetails = omhaNodeDetails.getLocalNodeDetails(); - - omStorage = new OMStorage(conf); - omId = omStorage.getOmId(); - - // In case of single OM Node Service there will be no OM Node ID - // specified, set it to value from om storage - if (this.omNodeDetails.getOMNodeId() == null) { - this.omNodeDetails = - OMHANodeDetails.getOMNodeDetails(conf, omNodeDetails.getOMServiceId(), - omStorage.getOmId(), omNodeDetails.getRpcAddress(), - omNodeDetails.getRatisPort()); - } - - loginOMUserIfSecurityEnabled(conf); - - this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME, - OZONE_OM_USER_MAX_VOLUME_DEFAULT); - Preconditions.checkArgument(this.maxUserVolumeCount > 0, - OZONE_OM_USER_MAX_VOLUME + " value should be greater than zero"); - - if (omStorage.getState() != StorageState.INITIALIZED) { - throw new OMException("OM not initialized.", - ResultCodes.OM_NOT_INITIALIZED); - } - - // Read configuration and set values. - ozAdmins = conf.getTrimmedStringCollection(OZONE_ADMINISTRATORS); - omMetaDir = OmUtils.getOmDbDir(configuration); - this.isAclEnabled = conf.getBoolean(OZONE_ACL_ENABLED, - OZONE_ACL_ENABLED_DEFAULT); - this.scmBlockSize = (long) conf.getStorageSize(OZONE_SCM_BLOCK_SIZE, - OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - this.preallocateBlocksMax = conf.getInt( - OZONE_KEY_PREALLOCATION_BLOCKS_MAX, - OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT); - this.grpcBlockTokenEnabled = conf.getBoolean(HDDS_BLOCK_TOKEN_ENABLED, - HDDS_BLOCK_TOKEN_ENABLED_DEFAULT); - this.useRatisForReplication = conf.getBoolean( - DFS_CONTAINER_RATIS_ENABLED_KEY, DFS_CONTAINER_RATIS_ENABLED_DEFAULT); - // TODO: This is a temporary check. 
Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); - - - InetSocketAddress omNodeRpcAddr = omNodeDetails.getRpcAddress(); - omRpcAddressTxt = new Text(omNodeDetails.getRpcAddressString()); - - scmContainerClient = getScmContainerClient(configuration); - // verifies that the SCM info in the OM Version file is correct. - scmBlockClient = getScmBlockClient(configuration); - this.scmClient = new ScmClient(scmBlockClient, scmContainerClient); - - // For testing purpose only, not hit scm from om as Hadoop UGI can't login - // two principals in the same JVM. - if (!testSecureOmFlag) { - ScmInfo scmInfo = getScmInfo(configuration); - if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo - .getScmId().equals(omStorage.getScmId()))) { - throw new OMException("SCM version info mismatch.", - ResultCodes.SCM_VERSION_MISMATCH_ERROR); - } - } - - RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - - secConfig = new SecurityConfig(configuration); - // Create the KMS Key Provider - try { - kmsProvider = createKeyProviderExt(configuration); - } catch (IOException ioe) { - kmsProvider = null; - LOG.error("Fail to create Key Provider"); - } - if (secConfig.isSecurityEnabled()) { - omComponent = OM_DAEMON + "-" + omId; - if(omStorage.getOmCertSerialId() == null) { - throw new RuntimeException("OzoneManager started in secure mode but " + - "doesn't have SCM signed certificate."); - } - certClient = new OMCertificateClient(new SecurityConfig(conf), - omStorage.getOmCertSerialId()); - } - if (secConfig.isBlockTokenEnabled()) { - blockTokenMgr = createBlockTokenSecretManager(configuration); - } - - instantiateServices(); - - this.omRatisSnapshotInfo = new OMRatisSnapshotInfo( - omStorage.getCurrentDir()); - - initializeRatisServer(); - initializeRatisClient(); - - if (isRatisEnabled) { - // Create Ratis storage dir - String omRatisDirectory = OmUtils.getOMRatisDirectory(configuration); - if (omRatisDirectory == null || omRatisDirectory.isEmpty()) { - throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + - " must be defined."); - } - OmUtils.createOMDir(omRatisDirectory); - // Create Ratis snapshot dir - omRatisSnapshotDir = OmUtils.createOMDir( - OmUtils.getOMRatisSnapshotDirectory(configuration)); - - if (peerNodes != null && !peerNodes.isEmpty()) { - this.omSnapshotProvider = new OzoneManagerSnapshotProvider( - configuration, omRatisSnapshotDir, peerNodes); - } - } - - metrics = OMMetrics.create(); - - omClientProtocolMetrics = ProtocolMessageMetrics - .create("OmClientProtocol", "Ozone Manager RPC endpoint", - OzoneManagerProtocolProtos.Type.values()); - - // Start Om Rpc Server. - omRpcServer = getRpcServer(configuration); - omRpcAddress = updateRPCListenAddress(configuration, - OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer); - - shutdownHook = () -> { - saveOmMetrics(); - }; - ShutdownHookManager.get().addShutdownHook(shutdownHook, - SHUTDOWN_HOOK_PRIORITY); - } - - /** - * Instantiate services which are dependent on the OM DB state. - * When OM state is reloaded, these services are re-initialized with the - * new OM state. 
- */ - private void instantiateServices() throws IOException { - - metadataManager = new OmMetadataManagerImpl(configuration); - volumeManager = new VolumeManagerImpl(metadataManager, configuration); - bucketManager = new BucketManagerImpl(metadataManager, getKmsProvider(), - isRatisEnabled); - s3BucketManager = new S3BucketManagerImpl(configuration, metadataManager, - volumeManager, bucketManager); - if (secConfig.isSecurityEnabled()) { - s3SecretManager = new S3SecretManagerImpl(configuration, metadataManager); - delegationTokenMgr = createDelegationTokenSecretManager(configuration); - } - - prefixManager = new PrefixManagerImpl(metadataManager, isRatisEnabled); - keyManager = new KeyManagerImpl(this, scmClient, configuration, - omStorage.getOmId()); - - if (isAclEnabled) { - accessAuthorizer = getACLAuthorizerInstance(configuration); - if (accessAuthorizer instanceof OzoneNativeAuthorizer) { - OzoneNativeAuthorizer authorizer = - (OzoneNativeAuthorizer) accessAuthorizer; - authorizer.setVolumeManager(volumeManager); - authorizer.setBucketManager(bucketManager); - authorizer.setKeyManager(keyManager); - authorizer.setPrefixManager(prefixManager); - } - } else { - accessAuthorizer = null; - } - } - - /** - * Return configuration value of - * {@link OzoneConfigKeys#DFS_CONTAINER_RATIS_ENABLED_KEY}. - */ - public boolean shouldUseRatis() { - return useRatisForReplication; - } - - /** - * Return scmClient. - */ - public ScmClient getScmClient() { - return scmClient; - } - - /** - * Return SecretManager for OM. - */ - public OzoneBlockTokenSecretManager getBlockTokenSecretManager() { - return blockTokenMgr; - } - - /** - * Return config value of {@link OzoneConfigKeys#OZONE_SCM_BLOCK_SIZE}. - */ - public long getScmBlockSize() { - return scmBlockSize; - } - - /** - * Return config value of - * {@link OzoneConfigKeys#OZONE_KEY_PREALLOCATION_BLOCKS_MAX}. - */ - public int getPreallocateBlocksMax() { - return preallocateBlocksMax; - } - - /** - * Return config value of - * {@link HddsConfigKeys#HDDS_BLOCK_TOKEN_ENABLED}. - */ - public boolean isGrpcBlockTokenEnabled() { - return grpcBlockTokenEnabled; - } - - private KeyProviderCryptoExtension createKeyProviderExt( - OzoneConfiguration conf) throws IOException { - KeyProvider keyProvider = KMSUtil.createKeyProvider(conf, - keyProviderUriKeyName); - if (keyProvider == null) { - return null; - } - KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension - .createKeyProviderCryptoExtension(keyProvider); - return cryptoProvider; - } - - /** - * Returns an instance of {@link IAccessAuthorizer}. - * Looks up the configuration to see if there is custom class specified. - * Constructs the instance by passing the configuration directly to the - * constructor to achieve thread safety using final fields. - * @param conf - * @return IAccessAuthorizer - */ - private IAccessAuthorizer getACLAuthorizerInstance(OzoneConfiguration conf) { - Class clazz = conf.getClass( - OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizer.class, - IAccessAuthorizer.class); - return ReflectionUtils.newInstance(clazz, conf); - } - - @Override - public void close() throws IOException { - stop(); - } - - /** - * Class which schedule saving metrics to a file. 
- */ - private class ScheduleOMMetricsWriteTask extends TimerTask { - public void run() { - saveOmMetrics(); - } - } - - private void saveOmMetrics() { - try { - boolean success; - Files.createDirectories( - getTempMetricsStorageFile().getParentFile().toPath()); - try (BufferedWriter writer = new BufferedWriter( - new OutputStreamWriter(new FileOutputStream( - getTempMetricsStorageFile()), "UTF-8"))) { - OmMetricsInfo metricsInfo = new OmMetricsInfo(); - metricsInfo.setNumKeys(metrics.getNumKeys()); - WRITER.writeValue(writer, metricsInfo); - success = true; - } - - if (success) { - Files.move(getTempMetricsStorageFile().toPath(), - getMetricsStorageFile().toPath(), StandardCopyOption - .ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); - } - } catch (IOException ex) { - LOG.error("Unable to write the om Metrics file", ex); - } - } - - /** - * Returns temporary metrics storage file. - * @return File - */ - private File getTempMetricsStorageFile() { - return new File(omMetaDir, OM_METRICS_TEMP_FILE); - } - - /** - * Returns metrics storage file. - * @return File - */ - private File getMetricsStorageFile() { - return new File(omMetaDir, OM_METRICS_FILE); - } - - - private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager( - OzoneConfiguration conf) throws IOException { - long tokenRemoverScanInterval = - conf.getTimeDuration(OMConfigKeys.DELEGATION_REMOVER_SCAN_INTERVAL_KEY, - OMConfigKeys.DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - long tokenMaxLifetime = - conf.getTimeDuration(OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY, - OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT, - TimeUnit.MILLISECONDS); - long tokenRenewInterval = - conf.getTimeDuration(OMConfigKeys.DELEGATION_TOKEN_RENEW_INTERVAL_KEY, - OMConfigKeys.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - - return new OzoneDelegationTokenSecretManager(conf, tokenMaxLifetime, - tokenRenewInterval, tokenRemoverScanInterval, omRpcAddressTxt, - s3SecretManager, certClient); - } - - private OzoneBlockTokenSecretManager createBlockTokenSecretManager( - OzoneConfiguration conf) { - - long expiryTime = conf.getTimeDuration( - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME, - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT, - TimeUnit.MILLISECONDS); - // TODO: Pass OM cert serial ID. - if (testSecureOmFlag) { - return new OzoneBlockTokenSecretManager(secConfig, expiryTime, "1"); - } - Objects.requireNonNull(certClient); - return new OzoneBlockTokenSecretManager(secConfig, expiryTime, - certClient.getCertificate().getSerialNumber().toString()); - } - - private void stopSecretManager() { - if (blockTokenMgr != null) { - LOG.info("Stopping OM block token manager."); - try { - blockTokenMgr.stop(); - } catch (IOException e) { - LOG.error("Failed to stop block token manager", e); - } - } - - if (delegationTokenMgr != null) { - LOG.info("Stopping OM delegation token secret manager."); - try { - delegationTokenMgr.stop(); - } catch (IOException e) { - LOG.error("Failed to stop delegation token manager", e); - } - } - } - - @VisibleForTesting - public void startSecretManager() { - try { - readKeyPair(); - } catch (OzoneSecurityException e) { - LOG.error("Unable to read key pair for OM.", e); - throw new RuntimeException(e); - } - if (secConfig.isBlockTokenEnabled() && blockTokenMgr != null) { - try { - LOG.info("Starting OM block token secret manager"); - blockTokenMgr.start(certClient); - } catch (IOException e) { - // Unable to start secret manager. 
- LOG.error("Error starting block token secret manager.", e); - throw new RuntimeException(e); - } - } - - if (delegationTokenMgr != null) { - try { - LOG.info("Starting OM delegation token secret manager"); - delegationTokenMgr.start(certClient); - } catch (IOException e) { - // Unable to start secret manager. - LOG.error("Error starting delegation token secret manager.", e); - throw new RuntimeException(e); - } - } - } - - /** - * For testing purpose only. - * */ - public void setCertClient(CertificateClient certClient) { - // TODO: Initialize it in constructor with implementation for certClient. - this.certClient = certClient; - } - - /** - * Read private key from file. - */ - private void readKeyPair() throws OzoneSecurityException { - try { - LOG.info("Reading keypair and certificate from file system."); - PublicKey pubKey = certClient.getPublicKey(); - PrivateKey pvtKey = certClient.getPrivateKey(); - Objects.requireNonNull(pubKey); - Objects.requireNonNull(pvtKey); - Objects.requireNonNull(certClient.getCertificate()); - } catch (Exception e) { - throw new OzoneSecurityException("Error reading keypair & certificate " - + "OzoneManager.", e, OzoneSecurityException - .ResultCodes.OM_PUBLIC_PRIVATE_KEY_FILE_NOT_EXIST); - } - } - - /** - * Login OM service user if security and Kerberos are enabled. - * - * @param conf - * @throws IOException, AuthenticationException - */ - private static void loginOMUser(OzoneConfiguration conf) - throws IOException, AuthenticationException { - - if (SecurityUtil.getAuthenticationMethod(conf).equals( - AuthenticationMethod.KERBEROS)) { - if (LOG.isDebugEnabled()) { - LOG.debug("Ozone security is enabled. Attempting login for OM user. " - + "Principal: {}, keytab: {}", conf.get( - OZONE_OM_KERBEROS_PRINCIPAL_KEY), - conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY)); - } - - UserGroupInformation.setConfiguration(conf); - - InetSocketAddress socAddr = OmUtils.getOmAddress(conf); - SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, - OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); - } else { - throw new AuthenticationException(SecurityUtil.getAuthenticationMethod( - conf) + " authentication method not supported. OM user login " - + "failed."); - } - LOG.info("Ozone Manager login successful."); - } - - /** - * Create a scm block client, used by putKey() and getKey(). - * - * @return {@link ScmBlockLocationProtocol} - * @throws IOException - */ - private static ScmBlockLocationProtocol getScmBlockClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmBlockAddress = - getScmAddressForBlockClients(conf); - ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = - new ScmBlockLocationProtocolClientSideTranslatorPB( - RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion, - scmBlockAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); - return TracingUtil - .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, - conf); - } - - /** - * Returns a scm container client. 
- * - * @return {@link StorageContainerLocationProtocol} - * @throws IOException - */ - private static StorageContainerLocationProtocol getScmContainerClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddr = getScmAddressForClients( - conf); - StorageContainerLocationProtocol scmContainerClient = - TracingUtil.createProxy( - new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, - scmVersion, - scmAddr, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))), - StorageContainerLocationProtocol.class, conf); - return scmContainerClient; - } - - /** - * Starts an RPC server, if configured. - * - * @param conf configuration - * @param addr configured address of RPC server - * @param protocol RPC protocol provided by RPC server - * @param instance RPC protocol implementation instance - * @param handlerCount RPC server handler count - * @return RPC server - * @throws IOException if there is an I/O error while creating RPC server - */ - private RPC.Server startRpcServer(OzoneConfiguration conf, - InetSocketAddress addr, Class protocol, BlockingService instance, - int handlerCount) throws IOException { - RPC.Server rpcServer = new RPC.Builder(conf) - .setProtocol(protocol) - .setInstance(instance) - .setBindAddress(addr.getHostString()) - .setPort(addr.getPort()) - .setNumHandlers(handlerCount) - .setVerbose(false) - .setSecretManager(delegationTokenMgr) - .build(); - - DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer); - - if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - false)) { - rpcServer.refreshServiceAcl(conf, OMPolicyProvider.getInstance()); - } - return rpcServer; - } - - private static boolean isOzoneSecurityEnabled() { - return securityEnabled; - } - - /** - * Constructs OM instance based on the configuration. - * - * @param conf OzoneConfiguration - * @return OM instance - * @throws IOException, AuthenticationException in case OM instance - * creation fails. - */ - public static OzoneManager createOm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - return new OzoneManager(conf); - } - - /** - * Logs in the OM use if security is enabled in the configuration. - * - * @param conf OzoneConfiguration - * @throws IOException, AuthenticationException in case login failes. - */ - private static void loginOMUserIfSecurityEnabled(OzoneConfiguration conf) - throws IOException, AuthenticationException { - securityEnabled = OzoneSecurityUtil.isSecurityEnabled(conf); - if (securityEnabled) { - loginOMUser(conf); - } - } - - /** - * Initializes the OM instance. 
- * - * @param conf OzoneConfiguration - * @return true if OM initialization succeeds, false otherwise - * @throws IOException in case ozone metadata directory path is not - * accessible - */ - @VisibleForTesting - public static boolean omInit(OzoneConfiguration conf) throws IOException, - AuthenticationException { - OMHANodeDetails.loadOMHAConfig(conf); - loginOMUserIfSecurityEnabled(conf); - OMStorage omStorage = new OMStorage(conf); - StorageState state = omStorage.getState(); - if (state != StorageState.INITIALIZED) { - try { - ScmInfo scmInfo = getScmInfo(conf); - String clusterId = scmInfo.getClusterId(); - String scmId = scmInfo.getScmId(); - if (clusterId == null || clusterId.isEmpty()) { - throw new IOException("Invalid Cluster ID"); - } - if (scmId == null || scmId.isEmpty()) { - throw new IOException("Invalid SCM ID"); - } - omStorage.setClusterId(clusterId); - omStorage.setScmId(scmId); - if (OzoneSecurityUtil.isSecurityEnabled(conf)) { - initializeSecurity(conf, omStorage); - } - omStorage.initialize(); - System.out.println( - "OM initialization succeeded.Current cluster id for sd=" - + omStorage.getStorageDir() + ";cid=" + omStorage - .getClusterID()); - - return true; - } catch (IOException ioe) { - LOG.error("Could not initialize OM version file", ioe); - return false; - } - } else { - if(OzoneSecurityUtil.isSecurityEnabled(conf) && - omStorage.getOmCertSerialId() == null) { - LOG.info("OM storage is already initialized. Initializing security"); - initializeSecurity(conf, omStorage); - omStorage.persistCurrentState(); - } - System.out.println( - "OM already initialized.Reusing existing cluster id for sd=" - + omStorage.getStorageDir() + ";cid=" + omStorage - .getClusterID()); - return true; - } - } - - /** - * Initializes secure OzoneManager. - * */ - @VisibleForTesting - public static void initializeSecurity(OzoneConfiguration conf, - OMStorage omStore) - throws IOException { - LOG.info("Initializing secure OzoneManager."); - - CertificateClient certClient = - new OMCertificateClient(new SecurityConfig(conf), - omStore.getOmCertSerialId()); - CertificateClient.InitResponse response = certClient.init(); - LOG.info("Init response: {}", response); - switch (response) { - case SUCCESS: - LOG.info("Initialization successful."); - break; - case GETCERT: - getSCMSignedCert(certClient, conf, omStore); - LOG.info("Successfully stored SCM signed certificate."); - break; - case FAILURE: - LOG.error("OM security initialization failed."); - throw new RuntimeException("OM security initialization failed."); - case RECOVER: - LOG.error("OM security initialization failed. OM certificate is " + - "missing."); - throw new RuntimeException("OM security initialization failed."); - default: - LOG.error("OM security initialization failed. Init response: {}", - response); - throw new RuntimeException("OM security initialization failed."); - } - } - - private static ScmInfo getScmInfo(OzoneConfiguration conf) - throws IOException { - try { - RetryPolicy retryPolicy = retryUpToMaximumCountWithFixedSleep( - 10, 5, TimeUnit.SECONDS); - RetriableTask retriable = new RetriableTask<>( - retryPolicy, "OM#getScmInfo", - () -> getScmBlockClient(conf).getScmInfo()); - return retriable.call(); - } catch (IOException e) { - throw e; - } catch (Exception e) { - throw new IOException("Failed to get SCM info", e); - } - } - - /** - * Builds a message for logging startup information about an RPC server. 
- * - * @param description RPC server description - * @param addr RPC server listening address - * @return server startup message - */ - private static String buildRpcServerStartMessage(String description, - InetSocketAddress addr) { - return addr != null ? String.format("%s is listening at %s", - description, addr.toString()) : - String.format("%s not started", description); - } - - @VisibleForTesting - public KeyManager getKeyManager() { - return keyManager; - } - - @VisibleForTesting - public ScmInfo getScmInfo() throws IOException { - return scmBlockClient.getScmInfo(); - } - - @VisibleForTesting - public OMStorage getOmStorage() { - return omStorage; - } - - @VisibleForTesting - public OzoneManagerRatisServer getOmRatisServer() { - return omRatisServer; - } - - @VisibleForTesting - public OzoneManagerSnapshotProvider getOmSnapshotProvider() { - return omSnapshotProvider; - } - - @VisibleForTesting - public InetSocketAddress getOmRpcServerAddr() { - return omRpcAddress; - } - - @VisibleForTesting - public LifeCycle.State getOmRatisServerState() { - if (omRatisServer == null) { - return null; - } else { - return omRatisServer.getServerState(); - } - } - - @VisibleForTesting - public KeyProviderCryptoExtension getKmsProvider() { - return kmsProvider; - } - - public PrefixManager getPrefixManager() { - return prefixManager; - } - - /** - * Get metadata manager. - * - * @return metadata manager. - */ - public OMMetadataManager getMetadataManager() { - return metadataManager; - } - - public OzoneBlockTokenSecretManager getBlockTokenMgr() { - return blockTokenMgr; - } - - public OzoneManagerProtocolServerSideTranslatorPB getOmServerProtocol() { - return omServerProtocol; - } - - public OMMetrics getMetrics() { - return metrics; - } - - /** - * Start service. - */ - public void start() throws IOException { - - omClientProtocolMetrics.register(); - - LOG.info(buildRpcServerStartMessage("OzoneManager RPC server", - omRpcAddress)); - - DefaultMetricsSystem.initialize("OzoneManager"); - - // Start Ratis services - if (omRatisServer != null) { - omRatisServer.start(); - } - if (omRatisClient != null) { - omRatisClient.connect(); - } - - metadataManager.start(configuration); - startSecretManagerIfNecessary(); - - if (certClient != null) { - caCertPem = CertificateCodec.getPEMEncodedString( - certClient.getCACertificate()); - } - // Set metrics and start metrics back ground thread - metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager - .getVolumeTable())); - metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager - .getBucketTable())); - - if (getMetricsStorageFile().exists()) { - OmMetricsInfo metricsInfo = READER.readValue(getMetricsStorageFile()); - metrics.setNumKeys(metricsInfo.getNumKeys()); - } - - // Schedule save metrics - long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL, - OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask(); - metricsTimer = new Timer(); - metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); - - keyManager.start(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; - try { - httpServer = new OzoneManagerHttpServer(configuration, this); - httpServer.start(); - } catch (Exception ex) { - // Allow OM to start as Http Server failure is not fatal. - LOG.error("OM HttpServer failed to start.", ex); - } - registerMXBean(); - setStartTime(); - } - - /** - * Restarts the service. This method re-initializes the rpc server. 
- */ - public void restart() throws IOException { - LOG.info(buildRpcServerStartMessage("OzoneManager RPC server", - omRpcAddress)); - - HddsUtils.initializeMetrics(configuration, "OzoneManager"); - - instantiateServices(); - - startSecretManagerIfNecessary(); - - // Set metrics and start metrics back ground thread - metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager - .getVolumeTable())); - metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager - .getBucketTable())); - - if (getMetricsStorageFile().exists()) { - OmMetricsInfo metricsInfo = READER.readValue(getMetricsStorageFile()); - metrics.setNumKeys(metricsInfo.getNumKeys()); - } - - // Schedule save metrics - long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL, - OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask(); - metricsTimer = new Timer(); - metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); - - omRpcServer = getRpcServer(configuration); - omRpcServer.start(); - isOmRpcServerRunning = true; - - initializeRatisServer(); - if (omRatisServer != null) { - omRatisServer.start(); - } - initializeRatisClient(); - if (omRatisClient != null) { - omRatisClient.connect(); - } - - try { - httpServer = new OzoneManagerHttpServer(configuration, this); - httpServer.start(); - } catch (Exception ex) { - // Allow OM to start as Http Server failure is not fatal. - LOG.error("OM HttpServer failed to start.", ex); - } - registerMXBean(); - - // Start jvm monitor - jvmPauseMonitor = new JvmPauseMonitor(); - jvmPauseMonitor.init(configuration); - jvmPauseMonitor.start(); - setStartTime(); - } - - /** - * Creates a new instance of rpc server. If an earlier instance is already - * running then returns the same. - */ - private RPC.Server getRpcServer(OzoneConfiguration conf) throws IOException { - if (isOmRpcServerRunning) { - return omRpcServer; - } - - InetSocketAddress omNodeRpcAddr = OmUtils.getOmAddress(conf); - - final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY, - OZONE_OM_HANDLER_COUNT_DEFAULT); - RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class, - ProtobufRpcEngine.class); - this.omServerProtocol = new OzoneManagerProtocolServerSideTranslatorPB( - this, omRatisServer, omClientProtocolMetrics, isRatisEnabled); - - BlockingService omService = newReflectiveBlockingService(omServerProtocol); - - return startRpcServer(configuration, omNodeRpcAddr, - OzoneManagerProtocolPB.class, omService, - handlerCount); - } - - /** - * Creates an instance of ratis server. - */ - private void initializeRatisServer() throws IOException { - if (isRatisEnabled) { - if (omRatisServer == null) { - omRatisServer = OzoneManagerRatisServer.newOMRatisServer( - configuration, this, omNodeDetails, peerNodes); - } - LOG.info("OzoneManager Ratis server initialized at port {}", - omRatisServer.getServerPort()); - } else { - omRatisServer = null; - } - } - - /** - * Creates an instance of ratis client. 
- */ - private void initializeRatisClient() throws IOException { - if (isRatisEnabled) { - if (omRatisClient == null) { - omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient( - omNodeDetails.getOMNodeId(), omRatisServer.getRaftGroup(), - configuration); - } - } else { - omRatisClient = null; - } - } - - public OMRatisSnapshotInfo getSnapshotInfo() { - return omRatisSnapshotInfo; - } - - @VisibleForTesting - public long getRatisSnapshotIndex() { - return omRatisSnapshotInfo.getIndex(); - } - - @Override - public long saveRatisSnapshot() throws IOException { - long snapshotIndex = omRatisServer.getStateMachineLastAppliedIndex(); - - // Flush the OM state to disk - metadataManager.getStore().flush(); - - omRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex); - - return snapshotIndex; - } - - /** - * Stop service. - */ - public void stop() { - try { - // Cancel the metrics timer and set to null. - if (metricsTimer!= null) { - metricsTimer.cancel(); - metricsTimer = null; - scheduleOMMetricsWriteTask = null; - } - omRpcServer.stop(); - // When ratis is not enabled, we need to call stop() to stop - // OzoneManageDoubleBuffer in OM server protocol. - if (!isRatisEnabled) { - omServerProtocol.stop(); - } - if (omRatisServer != null) { - omRatisServer.stop(); - omRatisServer = null; - } - if (omRatisClient != null) { - omRatisClient.close(); - omRatisClient = null; - } - isOmRpcServerRunning = false; - keyManager.stop(); - stopSecretManager(); - if (httpServer != null) { - httpServer.stop(); - } - metadataManager.stop(); - metrics.unRegister(); - omClientProtocolMetrics.unregister(); - unregisterMXBean(); - if (jvmPauseMonitor != null) { - jvmPauseMonitor.stop(); - } - } catch (Exception e) { - LOG.error("OzoneManager stop failed.", e); - } - } - - /** - * Wait until service has completed shutdown. - */ - public void join() { - try { - omRpcServer.join(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - LOG.info("Interrupted during OzoneManager join.", e); - } - } - - private void startSecretManagerIfNecessary() { - boolean shouldRun = isOzoneSecurityEnabled(); - if (shouldRun) { - boolean running = delegationTokenMgr.isRunning() - && blockTokenMgr.isRunning(); - if(!running){ - startSecretManager(); - } - } - } - - /** - * Get SCM signed certificate and store it using certificate client. - * */ - private static void getSCMSignedCert(CertificateClient client, - OzoneConfiguration config, OMStorage omStore) throws IOException { - CertificateSignRequest.Builder builder = client.getCSRBuilder(); - KeyPair keyPair = new KeyPair(client.getPublicKey(), - client.getPrivateKey()); - InetSocketAddress omRpcAdd; - omRpcAdd = OmUtils.getOmAddress(config); - if (omRpcAdd == null || omRpcAdd.getAddress() == null) { - LOG.error("Incorrect om rpc address. omRpcAdd:{}", omRpcAdd); - throw new RuntimeException("Can't get SCM signed certificate. " + - "omRpcAdd: " + omRpcAdd); - } - // Get host name. 
- String hostname = omRpcAdd.getAddress().getHostName(); - String ip = omRpcAdd.getAddress().getHostAddress(); - - String subject = UserGroupInformation.getCurrentUser() - .getShortUserName() + "@" + hostname; - - builder.setCA(false) - .setKey(keyPair) - .setConfiguration(config) - .setScmID(omStore.getScmId()) - .setClusterID(omStore.getClusterID()) - .setSubject(subject) - .addIpAddress(ip); - - LOG.info("Creating csr for OM->dns:{},ip:{},scmId:{},clusterId:{}," + - "subject:{}", hostname, ip, - omStore.getScmId(), omStore.getClusterID(), subject); - - HddsProtos.OzoneManagerDetailsProto.Builder omDetailsProtoBuilder = - HddsProtos.OzoneManagerDetailsProto.newBuilder() - .setHostName(omRpcAdd.getHostName()) - .setIpAddress(ip) - .setUuid(omStore.getOmId()) - .addPorts(HddsProtos.Port.newBuilder() - .setName(RPC_PORT) - .setValue(omRpcAdd.getPort()) - .build()); - - PKCS10CertificationRequest csr = builder.build(); - HddsProtos.OzoneManagerDetailsProto omDetailsProto = - omDetailsProtoBuilder.build(); - LOG.info("OzoneManager ports added:{}", omDetailsProto.getPortsList()); - SCMSecurityProtocolClientSideTranslatorPB secureScmClient = - HddsUtils.getScmSecurityClient(config); - - SCMGetCertResponseProto response = secureScmClient. - getOMCertChain(omDetailsProto, getEncodedString(csr)); - String pemEncodedCert = response.getX509Certificate(); - - try { - - - // Store SCM CA certificate. - if(response.hasX509CACertificate()) { - String pemEncodedRootCert = response.getX509CACertificate(); - client.storeCertificate(pemEncodedRootCert, true, true); - client.storeCertificate(pemEncodedCert, true); - // Persist om cert serial id. - omStore.setOmCertSerialId(CertificateCodec. - getX509Certificate(pemEncodedCert).getSerialNumber().toString()); - } else { - throw new RuntimeException("Unable to retrieve OM certificate " + - "chain"); - } - } catch (IOException | CertificateException e) { - LOG.error("Error while storing SCM signed certificate.", e); - throw new RuntimeException(e); - } - - } - - /** - * - * @return true if delegation token operation is allowed - */ - private boolean isAllowedDelegationTokenOp() throws IOException { - AuthenticationMethod authMethod = getConnectionAuthenticationMethod(); - if (UserGroupInformation.isSecurityEnabled() - && (authMethod != AuthenticationMethod.KERBEROS) - && (authMethod != AuthenticationMethod.KERBEROS_SSL) - && (authMethod != AuthenticationMethod.CERTIFICATE)) { - return false; - } - return true; - } - - /** - * Returns authentication method used to establish the connection. - * @return AuthenticationMethod used to establish connection - * @throws IOException - */ - private AuthenticationMethod getConnectionAuthenticationMethod() - throws IOException { - UserGroupInformation ugi = getRemoteUser(); - AuthenticationMethod authMethod = ugi.getAuthenticationMethod(); - if (authMethod == AuthenticationMethod.PROXY) { - authMethod = ugi.getRealUser().getAuthenticationMethod(); - } - return authMethod; - } - - // optimize ugi lookup for RPC operations to avoid a trip through - // UGI.getCurrentUser which is synch'ed - private static UserGroupInformation getRemoteUser() throws IOException { - UserGroupInformation ugi = Server.getRemoteUser(); - return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); - } - - /** - * Get delegation token from OzoneManager. 
- * @param renewer Renewer information - * @return delegationToken DelegationToken signed by OzoneManager - * @throws IOException on error - */ - @Override - public Token getDelegationToken(Text renewer) - throws OMException { - Token token; - try { - if (!isAllowedDelegationTokenOp()) { - throw new OMException("Delegation Token can be issued only with " - + "kerberos or web authentication", - INVALID_AUTH_METHOD); - } - if (delegationTokenMgr == null || !delegationTokenMgr.isRunning()) { - LOG.warn("trying to get DT with no secret manager running in OM."); - return null; - } - - UserGroupInformation ugi = getRemoteUser(); - String user = ugi.getUserName(); - Text owner = new Text(user); - Text realUser = null; - if (ugi.getRealUser() != null) { - realUser = new Text(ugi.getRealUser().getUserName()); - } - - return delegationTokenMgr.createToken(owner, renewer, realUser); - } catch (OMException oex) { - throw oex; - } catch (IOException ex) { - LOG.error("Get Delegation token failed, cause: {}", ex.getMessage()); - throw new OMException("Get Delegation token failed.", ex, - TOKEN_ERROR_OTHER); - } - } - - /** - * Method to renew a delegationToken issued by OzoneManager. - * @param token token to renew - * @return new expiryTime of the token - * @throws InvalidToken if {@code token} is invalid - * @throws IOException on other errors - */ - @Override - public long renewDelegationToken(Token token) - throws OMException { - long expiryTime; - - try { - - if (!isAllowedDelegationTokenOp()) { - throw new OMException("Delegation Token can be renewed only with " - + "kerberos or web authentication", - INVALID_AUTH_METHOD); - } - String renewer = getRemoteUser().getShortUserName(); - expiryTime = delegationTokenMgr.renewToken(token, renewer); - - } catch (OMException oex) { - throw oex; - } catch (IOException ex) { - OzoneTokenIdentifier id = null; - try { - id = OzoneTokenIdentifier.readProtoBuf(token.getIdentifier()); - } catch (IOException exe) { - } - LOG.error("Delegation token renewal failed for dt id: {}, cause: {}", - id, ex.getMessage()); - throw new OMException("Delegation token renewal failed for dt: " + token, - ex, TOKEN_ERROR_OTHER); - } - return expiryTime; - } - - /** - * Cancels a delegation token. - * @param token token to cancel - * @throws IOException on error - */ - @Override - public void cancelDelegationToken(Token token) - throws OMException { - OzoneTokenIdentifier id = null; - try { - String canceller = getRemoteUser().getUserName(); - id = delegationTokenMgr.cancelToken(token, canceller); - LOG.trace("Delegation token cancelled for dt: {}", id); - } catch (OMException oex) { - throw oex; - } catch (IOException ex) { - LOG.error("Delegation token cancellation failed for dt id: {}, cause: {}", - id, ex.getMessage()); - throw new OMException("Delegation token renewal failed for dt: " + token, - ex, TOKEN_ERROR_OTHER); - } - } - /** - * Creates a volume. - * - * @param args - Arguments to create Volume. - * @throws IOException - */ - @Override - public void createVolume(OmVolumeArgs args) throws IOException { - try { - if(isAclEnabled) { - if (!ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD) && - !ozAdmins.contains(ProtobufRpcEngine.Server.getRemoteUser() - .getUserName())) { - LOG.error("Only admin users are authorized to create " + - "Ozone volumes. 
User :{} is not an admin.", - ProtobufRpcEngine.Server.getRemoteUser().getUserName()); - throw new OMException("Only admin users are authorized to create " + - "Ozone volumes.", ResultCodes.PERMISSION_DENIED); - } - } - metrics.incNumVolumeCreates(); - volumeManager.createVolume(args); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_VOLUME, - (args == null) ? null : args.toAuditMap())); - metrics.incNumVolumes(); - } catch (Exception ex) { - metrics.incNumVolumeCreateFails(); - AUDIT.logWriteFailure( - buildAuditMessageForFailure(OMAction.CREATE_VOLUME, - (args == null) ? null : args.toAuditMap(), ex) - ); - throw ex; - } - } - - /** - * Checks if current caller has acl permissions. - * - * @param resType - Type of ozone resource. Ex volume, bucket. - * @param store - Store type. i.e Ozone, S3. - * @param acl - type of access to be checked. - * @param vol - name of volume - * @param bucket - bucket name - * @param key - key - * @throws OMException - */ - private void checkAcls(ResourceType resType, StoreType store, - ACLType acl, String vol, String bucket, String key) - throws OMException { - checkAcls(resType, store, acl, vol, bucket, key, - ProtobufRpcEngine.Server.getRemoteUser(), - ProtobufRpcEngine.Server.getRemoteIp()); - } - - /** - * CheckAcls for the ozone object. - * @param resType - * @param storeType - * @param aclType - * @param vol - * @param bucket - * @param key - * @param ugi - * @param remoteAddress - * @throws OMException - */ - @SuppressWarnings("parameternumber") - public void checkAcls(ResourceType resType, StoreType storeType, - ACLType aclType, String vol, String bucket, String key, - UserGroupInformation ugi, InetAddress remoteAddress) - throws OMException { - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setResType(resType) - .setStoreType(storeType) - .setVolumeName(vol) - .setBucketName(bucket) - .setKeyName(key).build(); - RequestContext context = RequestContext.newBuilder() - .setClientUgi(ugi) - .setIp(remoteAddress) - .setAclType(ACLIdentityType.USER) - .setAclRights(aclType) - .build(); - if (!accessAuthorizer.checkAccess(obj, context)) { - LOG.warn("User {} doesn't have {} permission to access {}", - ugi.getUserName(), aclType, resType); - throw new OMException("User " + ugi.getUserName() + " doesn't " + - "have " + aclType + " permission to access " + resType, - ResultCodes.PERMISSION_DENIED); - } - } - - /** - * - * Return true if Ozone acl's are enabled, else false. - * @return boolean - */ - public boolean getAclsEnabled() { - return isAclEnabled; - } - - /** - * Changes the owner of a volume. - * - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - @Override - public void setOwner(String volume, String owner) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE_ACL, volume, - null, null); - } - Map auditMap = buildAuditMap(volume); - auditMap.put(OzoneConsts.OWNER, owner); - try { - metrics.incNumVolumeUpdates(); - volumeManager.setOwner(volume, owner); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_OWNER, - auditMap)); - } catch (Exception ex) { - metrics.incNumVolumeUpdateFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_OWNER, - auditMap, ex) - ); - throw ex; - } - } - - /** - * Changes the Quota on a volume. - * - * @param volume - Name of the volume. - * @param quota - Quota in bytes. 
- * @throws IOException - */ - @Override - public void setQuota(String volume, long quota) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.WRITE, volume, - null, null); - } - - Map auditMap = buildAuditMap(volume); - auditMap.put(OzoneConsts.QUOTA, String.valueOf(quota)); - try { - metrics.incNumVolumeUpdates(); - volumeManager.setQuota(volume, quota); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.SET_QUOTA, - auditMap)); - } catch (Exception ex) { - metrics.incNumVolumeUpdateFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.SET_QUOTA, - auditMap, ex)); - throw ex; - } - } - - /** - * Checks if the specified user can access this volume. - * - * @param volume - volume - * @param userAcl - user acls which needs to be checked for access - * @return true if the user has required access for the volume, false - * otherwise - * @throws IOException - */ - @Override - public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, - ACLType.READ, volume, null, null); - } - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volume); - auditMap.put(OzoneConsts.USER_ACL, - (userAcl == null) ? null : userAcl.getName()); - try { - metrics.incNumVolumeCheckAccesses(); - return volumeManager.checkVolumeAccess(volume, userAcl); - } catch (Exception ex) { - metrics.incNumVolumeCheckAccessFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure( - OMAction.CHECK_VOLUME_ACCESS, auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess( - OMAction.CHECK_VOLUME_ACCESS, auditMap)); - } - } - } - - /** - * Gets the volume information. - * - * @param volume - Volume name. - * @return VolumeArgs or exception is thrown. - * @throws IOException - */ - @Override - public OmVolumeArgs getVolumeInfo(String volume) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.READ, volume, - null, null); - } - - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volume); - try { - metrics.incNumVolumeInfos(); - return volumeManager.getVolumeInfo(volume); - } catch (Exception ex) { - metrics.incNumVolumeInfoFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_VOLUME, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_VOLUME, - auditMap)); - } - } - } - - /** - * Deletes an existing empty volume. - * - * @param volume - Name of the volume. - * @throws IOException - */ - @Override - public void deleteVolume(String volume) throws IOException { - try { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.DELETE, volume, - null, null); - } - metrics.incNumVolumeDeletes(); - volumeManager.deleteVolume(volume); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_VOLUME, - buildAuditMap(volume))); - metrics.decNumVolumes(); - } catch (Exception ex) { - metrics.incNumVolumeDeleteFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_VOLUME, - buildAuditMap(volume), ex)); - throw ex; - } - } - - /** - * Lists volume owned by a specific user. - * - * @param userName - user name - * @param prefix - Filter prefix -- Return only entries that match this. 
- * @param prevKey - Previous key -- List starts from the next from the - * prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - @Override - public List listVolumeByUser(String userName, String prefix, - String prevKey, int maxKeys) throws IOException { - if(isAclEnabled) { - UserGroupInformation remoteUserUgi = ProtobufRpcEngine.Server. - getRemoteUser(); - if (remoteUserUgi == null) { - LOG.error("Rpc user UGI is null. Authorization failed."); - throw new OMException("Rpc user UGI is null. Authorization " + - "failed.", ResultCodes.PERMISSION_DENIED); - } - } - boolean auditSuccess = true; - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.PREV_KEY, prevKey); - auditMap.put(OzoneConsts.PREFIX, prefix); - auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); - auditMap.put(OzoneConsts.USERNAME, userName); - try { - metrics.incNumVolumeLists(); - return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys); - } catch (Exception ex) { - metrics.incNumVolumeListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES, - auditMap)); - } - } - } - - /** - * Lists volume all volumes in the cluster. - * - * @param prefix - Filter prefix -- Return only entries that match this. - * @param prevKey - Previous key -- List starts from the next from the - * prevkey - * @param maxKeys - Max number of keys to return. - * @return List of Volumes. - * @throws IOException - */ - @Override - public List listAllVolumes(String prefix, String prevKey, int - maxKeys) throws IOException { - if(isAclEnabled) { - if (!ozAdmins.contains(ProtobufRpcEngine.Server. - getRemoteUser().getUserName()) - && !ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD)) { - LOG.error("Only admin users are authorized to create " + - "Ozone volumes."); - throw new OMException("Only admin users are authorized to create " + - "Ozone volumes.", ResultCodes.PERMISSION_DENIED); - } - } - boolean auditSuccess = true; - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.PREV_KEY, prevKey); - auditMap.put(OzoneConsts.PREFIX, prefix); - auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); - auditMap.put(OzoneConsts.USERNAME, null); - try { - metrics.incNumVolumeLists(); - return volumeManager.listVolumes(null, prefix, prevKey, maxKeys); - } catch (Exception ex) { - metrics.incNumVolumeListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_VOLUMES, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_VOLUMES, - auditMap)); - } - } - } - - /** - * Creates a bucket. - * - * @param bucketInfo - BucketInfo to create bucket. - * @throws IOException - */ - @Override - public void createBucket(OmBucketInfo bucketInfo) throws IOException { - try { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.CREATE, - bucketInfo.getVolumeName(), bucketInfo.getBucketName(), null); - } - metrics.incNumBucketCreates(); - bucketManager.createBucket(bucketInfo); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_BUCKET, - (bucketInfo == null) ? 
null : bucketInfo.toAuditMap())); - metrics.incNumBuckets(); - } catch (Exception ex) { - metrics.incNumBucketCreateFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_BUCKET, - (bucketInfo == null) ? null : bucketInfo.toAuditMap(), ex)); - throw ex; - } - } - - /** - * {@inheritDoc} - */ - @Override - public List listBuckets(String volumeName, - String startKey, String prefix, int maxNumOfBuckets) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.OZONE, ACLType.LIST, volumeName, - null, null); - } - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.START_KEY, startKey); - auditMap.put(OzoneConsts.PREFIX, prefix); - auditMap.put(OzoneConsts.MAX_NUM_OF_BUCKETS, - String.valueOf(maxNumOfBuckets)); - try { - metrics.incNumBucketLists(); - return bucketManager.listBuckets(volumeName, - startKey, prefix, maxNumOfBuckets); - } catch (IOException ex) { - metrics.incNumBucketListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_BUCKETS, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_BUCKETS, - auditMap)); - } - } - } - - /** - * Gets the bucket information. - * - * @param volume - Volume name. - * @param bucket - Bucket name. - * @return OmBucketInfo or exception is thrown. - * @throws IOException - */ - @Override - public OmBucketInfo getBucketInfo(String volume, String bucket) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.READ, volume, - bucket, null); - } - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volume); - auditMap.put(OzoneConsts.BUCKET, bucket); - try { - metrics.incNumBucketInfos(); - return bucketManager.getBucketInfo(volume, bucket); - } catch (Exception ex) { - metrics.incNumBucketInfoFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_BUCKET, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_BUCKET, - auditMap)); - } - } - } - - /** - * Allocate a key. - * - * @param args - attributes of the key. - * @return OmKeyInfo - the info about the allocated key. - * @throws IOException - */ - @Override - public OpenKeySession openKey(OmKeyArgs args) throws IOException { - if(isAclEnabled) { - try { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } catch (OMException ex) { - // For new keys key checkAccess call will fail as key doesn't exist. - // Check user access for bucket. - if (ex.getResult().equals(KEY_NOT_FOUND)) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } else { - throw ex; - } - } - } - boolean auditSuccess = true; - try { - metrics.incNumKeyAllocates(); - return keyManager.openKey(args); - } catch (Exception ex) { - metrics.incNumKeyAllocateFails(); - auditSuccess = false; - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_KEY, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.ALLOCATE_KEY, (args == null) ? 
null : args.toAuditMap())); - } - } - } - - private Map toAuditMap(KeyArgs omKeyArgs) { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, omKeyArgs.getVolumeName()); - auditMap.put(OzoneConsts.BUCKET, omKeyArgs.getBucketName()); - auditMap.put(OzoneConsts.KEY, omKeyArgs.getKeyName()); - auditMap.put(OzoneConsts.DATA_SIZE, - String.valueOf(omKeyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - omKeyArgs.hasType() ? omKeyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - omKeyArgs.hasFactor() ? omKeyArgs.getFactor().name() : null); - auditMap.put(OzoneConsts.KEY_LOCATION_INFO, - (omKeyArgs.getKeyLocationsList() != null) ? - omKeyArgs.getKeyLocationsList().toString() : null); - return auditMap; - } - - @Override - public void commitKey(OmKeyArgs args, long clientID) - throws IOException { - if(isAclEnabled) { - try { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } catch (OMException ex) { - // For new keys key checkAccess call will fail as key doesn't exist. - // Check user access for bucket. - if (ex.getResult().equals(KEY_NOT_FOUND)) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } else { - throw ex; - } - } - } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); - auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); - try { - metrics.incNumKeyCommits(); - keyManager.commitKey(args, clientID); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.COMMIT_KEY, - auditMap)); - // As when we commit the key it is visible, so we should increment here. - // As key also can have multiple versions, we need to increment keys - // only if version is 0. Currently we have not complete support of - // versioning of keys. So, this can be revisited later. - if (args != null && args.getLocationInfoList() != null && - args.getLocationInfoList().size() > 0 && - args.getLocationInfoList().get(0) != null && - args.getLocationInfoList().get(0).getCreateVersion() == 0) { - metrics.incNumKeys(); - } - } catch (Exception ex) { - metrics.incNumKeyCommitFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.COMMIT_KEY, - auditMap, ex)); - throw ex; - } - } - - @Override - public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException { - if(isAclEnabled) { - try { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } catch (OMException ex) { - // For new keys key checkAccess call will fail as key doesn't exist. - // Check user access for bucket. - if (ex.getResult().equals(KEY_NOT_FOUND)) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } else { - throw ex; - } - } - } - boolean auditSuccess = true; - Map auditMap = (args == null) ? 
new LinkedHashMap<>() : - args.toAuditMap(); - auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); - try { - metrics.incNumBlockAllocateCalls(); - return keyManager.allocateBlock(args, clientID, excludeList); - } catch (Exception ex) { - metrics.incNumBlockAllocateCallFails(); - auditSuccess = false; - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.ALLOCATE_BLOCK, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.ALLOCATE_BLOCK, auditMap)); - } - } - } - - /** - * Lookup a key. - * - * @param args - attributes of the key. - * @return OmKeyInfo - the info about the requested key. - * @throws IOException - */ - @Override - public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - boolean auditSuccess = true; - try { - metrics.incNumKeyLookups(); - return keyManager.lookupKey(args, getClientAddress()); - } catch (Exception ex) { - metrics.incNumKeyLookupFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY, - (args == null) ? null : args.toAuditMap())); - } - } - } - - @Override - public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - Map auditMap = (args == null) ? new LinkedHashMap<>() : - args.toAuditMap(); - auditMap.put(OzoneConsts.TO_KEY_NAME, toKeyName); - try { - metrics.incNumKeyRenames(); - keyManager.renameKey(args, toKeyName); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.RENAME_KEY, - auditMap)); - } catch (IOException e) { - metrics.incNumKeyRenameFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.RENAME_KEY, - auditMap, e)); - throw e; - } - } - - /** - * Deletes an existing key. - * - * @param args - attributes of the key. - * @throws IOException - */ - @Override - public void deleteKey(OmKeyArgs args) throws IOException { - try { - if(isAclEnabled) { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.DELETE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - metrics.incNumKeyDeletes(); - keyManager.deleteKey(args); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY, - (args == null) ? null : args.toAuditMap())); - metrics.decNumKeys(); - } catch (Exception ex) { - metrics.incNumKeyDeleteFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY, - (args == null) ? 
null : args.toAuditMap(), ex)); - throw ex; - } - } - - @Override - public List listKeys(String volumeName, String bucketName, - String startKey, String keyPrefix, int maxKeys) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, - StoreType.OZONE, ACLType.LIST, volumeName, bucketName, keyPrefix); - } - boolean auditSuccess = true; - Map auditMap = buildAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - auditMap.put(OzoneConsts.START_KEY, startKey); - auditMap.put(OzoneConsts.MAX_KEYS, String.valueOf(maxKeys)); - auditMap.put(OzoneConsts.KEY_PREFIX, keyPrefix); - try { - metrics.incNumKeyLists(); - return keyManager.listKeys(volumeName, bucketName, - startKey, keyPrefix, maxKeys); - } catch (IOException ex) { - metrics.incNumKeyListFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_KEYS, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.LIST_KEYS, - auditMap)); - } - } - } - - /** - * Sets bucket property from args. - * - * @param args - BucketArgs. - * @throws IOException - */ - @Override - public void setBucketProperty(OmBucketArgs args) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), null); - } - try { - metrics.incNumBucketUpdates(); - bucketManager.setBucketProperty(args); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.UPDATE_BUCKET, - (args == null) ? null : args.toAuditMap())); - } catch (Exception ex) { - metrics.incNumBucketUpdateFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.UPDATE_BUCKET, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } - } - - /** - * Deletes an existing empty bucket from volume. - * - * @param volume - Name of the volume. - * @param bucket - Name of the bucket. - * @throws IOException - */ - @Override - public void deleteBucket(String volume, String bucket) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, volume, - bucket, null); - } - Map auditMap = buildAuditMap(volume); - auditMap.put(OzoneConsts.BUCKET, bucket); - try { - metrics.incNumBucketDeletes(); - bucketManager.deleteBucket(volume, bucket); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_BUCKET, - auditMap)); - metrics.decNumBuckets(); - } catch (Exception ex) { - metrics.incNumBucketDeleteFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_BUCKET, - auditMap, ex)); - throw ex; - } - } - - private Map buildAuditMap(String volume){ - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volume); - return auditMap; - } - - public AuditLogger getAuditLogger() { - return AUDIT; - } - - @Override - public AuditMessage buildAuditMessageForSuccess(AuditAction op, - Map auditMap) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.SUCCESS.toString()) - .withException(null) - .build(); - } - - @Override - public AuditMessage buildAuditMessageForFailure(AuditAction op, - Map auditMap, Throwable throwable) { - return new AuditMessage.Builder() - .setUser((Server.getRemoteUser() == null) ? 
null : - Server.getRemoteUser().getUserName()) - .atIp((Server.getRemoteIp() == null) ? null : - Server.getRemoteIp().getHostAddress()) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(AuditEventStatus.FAILURE.toString()) - .withException(throwable) - .build(); - } - - private void registerMXBean() { - Map jmxProperties = new HashMap<>(); - jmxProperties.put("component", "ServerRuntime"); - this.omInfoBeanName = HddsUtils.registerWithJmxProperties( - "OzoneManager", "OzoneManagerInfo", jmxProperties, this); - } - - private void unregisterMXBean() { - if (this.omInfoBeanName != null) { - MBeans.unregister(this.omInfoBeanName); - this.omInfoBeanName = null; - } - } - - private static String getClientAddress() { - String clientMachine = Server.getRemoteAddress(); - if (clientMachine == null) { //not a RPC client - clientMachine = ""; - } - return clientMachine; - } - - @Override - public String getRpcPort() { - return "" + omRpcAddress.getPort(); - } - - @VisibleForTesting - public OzoneManagerHttpServer getHttpServer() { - return httpServer; - } - - @Override - public List getServiceList() throws IOException { - // When we implement multi-home this call has to be handled properly. - List services = new ArrayList<>(); - ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HddsProtos.NodeType.OM) - .setHostname(omRpcAddress.getHostName()) - .addServicePort(ServicePort.newBuilder() - .setType(ServicePort.Type.RPC) - .setValue(omRpcAddress.getPort()) - .build()); - if (httpServer.getHttpAddress() != null) { - omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() - .setType(ServicePort.Type.HTTP) - .setValue(httpServer.getHttpAddress().getPort()) - .build()); - } - if (httpServer.getHttpsAddress() != null) { - omServiceInfoBuilder.addServicePort(ServicePort.newBuilder() - .setType(ServicePort.Type.HTTPS) - .setValue(httpServer.getHttpsAddress().getPort()) - .build()); - } - services.add(omServiceInfoBuilder.build()); - - // For client we have to return SCM with container protocol port, - // not block protocol. - InetSocketAddress scmAddr = getScmAddressForClients( - configuration); - ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HddsProtos.NodeType.SCM) - .setHostname(scmAddr.getHostName()) - .addServicePort(ServicePort.newBuilder() - .setType(ServicePort.Type.RPC) - .setValue(scmAddr.getPort()).build()); - services.add(scmServiceInfoBuilder.build()); - - List nodes = scmContainerClient.queryNode(HEALTHY, - HddsProtos.QueryScope.CLUSTER, ""); - - for (HddsProtos.Node node : nodes) { - HddsProtos.DatanodeDetailsProto datanode = node.getNodeID(); - - ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HddsProtos.NodeType.DATANODE) - .setHostname(datanode.getHostName()); - - if(DatanodeDetails.getFromProtoBuf(datanode) - .getPort(DatanodeDetails.Port.Name.REST) != null) { - dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder() - .setType(ServicePort.Type.HTTP) - .setValue(DatanodeDetails.getFromProtoBuf(datanode) - .getPort(DatanodeDetails.Port.Name.REST).getValue()) - .build()); - } - - services.add(dnServiceInfoBuilder.build()); - } - - metrics.incNumGetServiceLists(); - // For now there is no exception that can can happen in this call, - // so failure metrics is not handled. 
In future if there is any need to - // handle exception in this method, we need to incorporate - // metrics.incNumGetServiceListFails() - return services; - } - - @Override - public ServiceInfoEx getServiceInfo() throws IOException { - return new ServiceInfoEx(getServiceList(), caCertPem); - } - - @Override - /** - * {@inheritDoc} - */ - public void createS3Bucket(String userName, String s3BucketName) - throws IOException { - - boolean acquiredS3Lock = false; - boolean acquiredVolumeLock = false; - try { - metrics.incNumBucketCreates(); - acquiredS3Lock = metadataManager.getLock().acquireLock(S3_BUCKET_LOCK, - s3BucketName); - try { - acquiredVolumeLock = metadataManager.getLock().acquireLock(VOLUME_LOCK, - s3BucketManager.formatOzoneVolumeName(userName)); - boolean newVolumeCreate = s3BucketManager.createOzoneVolumeIfNeeded( - userName); - if (newVolumeCreate) { - metrics.incNumVolumeCreates(); - metrics.incNumVolumes(); - } - } catch (IOException ex) { - // We need to increment volume creates also because this is first - // time we are trying to create a volume, it failed. As we increment - // ops and create when we try to do that operation. - metrics.incNumVolumeCreates(); - metrics.incNumVolumeCreateFails(); - throw ex; - } - s3BucketManager.createS3Bucket(userName, s3BucketName); - metrics.incNumBuckets(); - } catch (IOException ex) { - metrics.incNumBucketCreateFails(); - throw ex; - } finally { - if (acquiredVolumeLock) { - metadataManager.getLock().releaseLock(VOLUME_LOCK, - s3BucketManager.formatOzoneVolumeName(userName)); - } - if (acquiredS3Lock) { - metadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName); - } - } - } - - @Override - /** - * {@inheritDoc} - */ - public void deleteS3Bucket(String s3BucketName) throws IOException { - try { - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.S3, ACLType.DELETE, - getS3VolumeName(), s3BucketName, null); - } - metrics.incNumBucketDeletes(); - s3BucketManager.deleteS3Bucket(s3BucketName); - metrics.decNumBuckets(); - } catch (IOException ex) { - metrics.incNumBucketDeleteFails(); - } - } - - @Override - /** - * {@inheritDoc} - */ - public S3SecretValue getS3Secret(String kerberosID) throws IOException{ - UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); - - // Check whether user name passed is matching with the current user or not. - if (!user.getUserName().equals(kerberosID)) { - throw new OMException("User mismatch. Requested user name is " + - "mismatched " + kerberosID +", with current user " + - user.getUserName(), OMException.ResultCodes.USER_MISMATCH); - } - return s3SecretManager.getS3Secret(kerberosID); - } - - @Override - /** - * {@inheritDoc} - */ - public String getOzoneBucketMapping(String s3BucketName) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.S3, ACLType.READ, - getS3VolumeName(), s3BucketName, null); - } - return s3BucketManager.getOzoneBucketMapping(s3BucketName); - } - - /** - * Helper function to return volume name for S3 users. 
- * */ - private String getS3VolumeName() { - return s3BucketManager.formatOzoneVolumeName(DigestUtils.md5Hex( - ProtobufRpcEngine.Server.getRemoteUser().getUserName().toLowerCase())); - } - - @Override - public List listS3Buckets(String userName, String startKey, - String prefix, int maxNumOfBuckets) - throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.VOLUME, StoreType.S3, ACLType.LIST, - s3BucketManager.getOzoneVolumeNameForUser(userName), null, null); - } - boolean auditSuccess = true; - Map auditMap = buildAuditMap(userName); - auditMap.put(OzoneConsts.START_KEY, startKey); - auditMap.put(OzoneConsts.PREFIX, prefix); - auditMap.put(OzoneConsts.MAX_NUM_OF_BUCKETS, - String.valueOf(maxNumOfBuckets)); - try { - metrics.incNumListS3Buckets(); - String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName); - return bucketManager.listBuckets(volumeName, startKey, prefix, - maxNumOfBuckets); - } catch (IOException ex) { - metrics.incNumListS3BucketsFails(); - auditSuccess = false; - AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.LIST_S3BUCKETS, - auditMap, ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction - .LIST_S3BUCKETS, auditMap)); - } - } - } - @Override - public OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws - IOException { - OmMultipartInfo multipartInfo; - metrics.incNumInitiateMultipartUploads(); - try { - multipartInfo = keyManager.initiateMultipartUpload(keyArgs); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : - keyArgs.toAuditMap())); - } catch (IOException ex) { - AUDIT.logWriteFailure(buildAuditMessageForFailure( - OMAction.INITIATE_MULTIPART_UPLOAD, - (keyArgs == null) ? null : keyArgs.toAuditMap(), ex)); - metrics.incNumInitiateMultipartUploadFails(); - throw ex; - } - return multipartInfo; - } - - @Override - public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs keyArgs, long clientID) throws IOException { - boolean auditSuccess = false; - OmMultipartCommitUploadPartInfo commitUploadPartInfo; - metrics.incNumCommitMultipartUploadParts(); - try { - commitUploadPartInfo = keyManager.commitMultipartUploadPart(keyArgs, - clientID); - auditSuccess = true; - } catch (IOException ex) { - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .INITIATE_MULTIPART_UPLOAD, (keyArgs == null) ? null : keyArgs - .toAuditMap(), ex)); - metrics.incNumCommitMultipartUploadPartFails(); - throw ex; - } finally { - if(auditSuccess) { - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, (keyArgs == null) ? null : - keyArgs.toAuditMap())); - } - } - return commitUploadPartInfo; - } - - @Override - public OmMultipartUploadCompleteInfo completeMultipartUpload( - OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) - throws IOException { - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; - metrics.incNumCompleteMultipartUploads(); - - Map auditMap = (omKeyArgs == null) ? 
new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); - auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList - .getMultipartMap().toString()); - try { - omMultipartUploadCompleteInfo = keyManager.completeMultipartUpload( - omKeyArgs, multipartUploadList); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction - .COMPLETE_MULTIPART_UPLOAD, auditMap)); - return omMultipartUploadCompleteInfo; - } catch (IOException ex) { - metrics.incNumCompleteMultipartUploadFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .COMPLETE_MULTIPART_UPLOAD, auditMap, ex)); - throw ex; - } - } - - @Override - public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - - Map auditMap = (omKeyArgs == null) ? new LinkedHashMap<>() : - omKeyArgs.toAuditMap(); - metrics.incNumAbortMultipartUploads(); - try { - keyManager.abortMultipartUpload(omKeyArgs); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction - .COMPLETE_MULTIPART_UPLOAD, auditMap)); - } catch (IOException ex) { - metrics.incNumAbortMultipartUploadFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .COMPLETE_MULTIPART_UPLOAD, auditMap, ex)); - throw ex; - } - - } - - @Override - public OmMultipartUploadListParts listParts(String volumeName, - String bucketName, String keyName, String uploadID, int partNumberMarker, - int maxParts) throws IOException { - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - auditMap.put(OzoneConsts.KEY, keyName); - auditMap.put(OzoneConsts.UPLOAD_ID, uploadID); - auditMap.put(OzoneConsts.PART_NUMBER_MARKER, - Integer.toString(partNumberMarker)); - auditMap.put(OzoneConsts.MAX_PARTS, Integer.toString(maxParts)); - metrics.incNumListMultipartUploadParts(); - try { - OmMultipartUploadListParts omMultipartUploadListParts = - keyManager.listParts(volumeName, bucketName, keyName, uploadID, - partNumberMarker, maxParts); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction - .LIST_MULTIPART_UPLOAD_PARTS, auditMap)); - return omMultipartUploadListParts; - } catch (IOException ex) { - metrics.incNumListMultipartUploadPartFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .LIST_MULTIPART_UPLOAD_PARTS, auditMap, ex)); - throw ex; - } - } - - @Override - public OmMultipartUploadList listMultipartUploads(String volumeName, - String bucketName, String prefix) throws IOException { - - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - auditMap.put(OzoneConsts.PREFIX, prefix); - - metrics.incNumListMultipartUploads(); - try { - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads(volumeName, bucketName, prefix); - AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction - .LIST_MULTIPART_UPLOADS, auditMap)); - return omMultipartUploadList; - - } catch (IOException ex) { - metrics.incNumListMultipartUploadFails(); - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction - .LIST_MULTIPART_UPLOADS, auditMap, ex)); - throw ex; - } - - - } - - @Override - public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - boolean auditSuccess = true; - try { - metrics.incNumGetFileStatus(); - return keyManager.getFileStatus(args); - } catch (IOException ex) { - 
metrics.incNumGetFileStatusFails(); - auditSuccess = false; - AUDIT.logWriteFailure( - buildAuditMessageForFailure(OMAction.GET_FILE_STATUS, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(OMAction.GET_FILE_STATUS, - (args == null) ? null : args.toAuditMap())); - } - } - } - - private ResourceType getResourceType(OmKeyArgs args) { - if (args.getKeyName() == null || args.getKeyName().length() == 0) { - return ResourceType.BUCKET; - } - return ResourceType.KEY; - } - - @Override - public void createDirectory(OmKeyArgs args) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - boolean auditSuccess = true; - try { - metrics.incNumCreateDirectory(); - keyManager.createDirectory(args); - } catch (IOException ex) { - metrics.incNumCreateDirectoryFails(); - auditSuccess = false; - AUDIT.logWriteFailure( - buildAuditMessageForFailure(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if (auditSuccess) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(OMAction.CREATE_DIRECTORY, - (args == null) ? null : args.toAuditMap())); - } - } - } - - @Override - public OpenKeySession createFile(OmKeyArgs args, boolean overWrite, - boolean recursive) throws IOException { - if (isAclEnabled) { - checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.WRITE, - args.getVolumeName(), args.getBucketName(), null); - } - boolean auditSuccess = true; - try { - metrics.incNumCreateFile(); - return keyManager.createFile(args, overWrite, recursive); - } catch (Exception ex) { - metrics.incNumCreateFileFails(); - auditSuccess = false; - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_FILE, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.CREATE_FILE, (args == null) ? null : args.toAuditMap())); - } - } - } - - @Override - public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { - if(isAclEnabled) { - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - boolean auditSuccess = true; - try { - metrics.incNumLookupFile(); - return keyManager.lookupFile(args, getClientAddress()); - } catch (Exception ex) { - metrics.incNumLookupFileFails(); - auditSuccess = false; - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LOOKUP_FILE, - (args == null) ? null : args.toAuditMap(), ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.LOOKUP_FILE, (args == null) ? null : args.toAuditMap())); - } - } - } - - @Override - public List listStatus(OmKeyArgs args, boolean recursive, - String startKey, long numEntries) throws IOException { - if(isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - } - boolean auditSuccess = true; - try { - metrics.incNumListStatus(); - return keyManager.listStatus(args, recursive, startKey, numEntries); - } catch (Exception ex) { - metrics.incNumListStatusFails(); - auditSuccess = false; - AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.LIST_STATUS, - (args == null) ? 
null : args.toAuditMap(), ex)); - throw ex; - } finally { - if(auditSuccess){ - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - OMAction.LIST_STATUS, (args == null) ? null : args.toAuditMap())); - } - } - } - - private void auditAcl(OzoneObj ozoneObj, List ozoneAcl, - OMAction omAction, Exception ex) { - Map auditMap = ozoneObj.toAuditMap(); - if(ozoneAcl != null) { - auditMap.put(OzoneConsts.ACL, ozoneAcl.toString()); - } - - if(ex == null) { - AUDIT.logWriteSuccess( - buildAuditMessageForSuccess(omAction, auditMap)); - } else { - AUDIT.logWriteFailure( - buildAuditMessageForFailure(omAction, auditMap, ex)); - } - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - boolean auditSuccess = true; - - try{ - if(isAclEnabled) { - checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, - obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); - } - switch (obj.getResourceType()) { - case VOLUME: - return volumeManager.addAcl(obj, acl); - case BUCKET: - return bucketManager.addAcl(obj, acl); - case KEY: - return keyManager.addAcl(obj, acl); - case PREFIX: - return prefixManager.addAcl(obj, acl); - default: - throw new OMException("Unexpected resource type: " + - obj.getResourceType(), INVALID_REQUEST); - } - } catch(Exception ex) { - auditSuccess = false; - auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, ex); - throw ex; - } finally { - if(auditSuccess){ - auditAcl(obj, Arrays.asList(acl), OMAction.ADD_ACL, null); - } - } - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - boolean auditSuccess = true; - - try{ - if(isAclEnabled) { - checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, - obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); - } - switch (obj.getResourceType()) { - case VOLUME: - return volumeManager.removeAcl(obj, acl); - case BUCKET: - return bucketManager.removeAcl(obj, acl); - case KEY: - return keyManager.removeAcl(obj, acl); - case PREFIX: - return prefixManager.removeAcl(obj, acl); - - default: - throw new OMException("Unexpected resource type: " + - obj.getResourceType(), INVALID_REQUEST); - } - } catch(Exception ex) { - auditSuccess = false; - auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, ex); - throw ex; - } finally { - if(auditSuccess){ - auditAcl(obj, Arrays.asList(acl), OMAction.REMOVE_ACL, null); - } - } - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. 
- */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - boolean auditSuccess = true; - - try{ - if(isAclEnabled) { - checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.WRITE_ACL, - obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); - } - switch (obj.getResourceType()) { - case VOLUME: - return volumeManager.setAcl(obj, acls); - case BUCKET: - return bucketManager.setAcl(obj, acls); - case KEY: - return keyManager.setAcl(obj, acls); - case PREFIX: - return prefixManager.setAcl(obj, acls); - default: - throw new OMException("Unexpected resource type: " + - obj.getResourceType(), INVALID_REQUEST); - } - } catch(Exception ex) { - auditSuccess = false; - auditAcl(obj, acls, OMAction.SET_ACL, ex); - throw ex; - } finally { - if(auditSuccess){ - auditAcl(obj, acls, OMAction.SET_ACL, null); - } - } - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - boolean auditSuccess = true; - - try{ - if(isAclEnabled) { - checkAcls(obj.getResourceType(), obj.getStoreType(), ACLType.READ_ACL, - obj.getVolumeName(), obj.getBucketName(), obj.getKeyName()); - } - switch (obj.getResourceType()) { - case VOLUME: - return volumeManager.getAcl(obj); - case BUCKET: - return bucketManager.getAcl(obj); - case KEY: - return keyManager.getAcl(obj); - case PREFIX: - return prefixManager.getAcl(obj); - - default: - throw new OMException("Unexpected resource type: " + - obj.getResourceType(), INVALID_REQUEST); - } - } catch(Exception ex) { - auditSuccess = false; - auditAcl(obj, null, OMAction.GET_ACL, ex); - throw ex; - } finally { - if(auditSuccess){ - auditAcl(obj, null, OMAction.GET_ACL, null); - } - } - } - - /** - * Download and install latest checkpoint from leader OM. - * If the download checkpoints snapshot index is greater than this OM's - * last applied transaction index, then re-initialize the OM state via this - * checkpoint. Before re-initializing OM state, the OM Ratis server should - * be stopped so that no new transactions can be applied. - * @param leaderId peerNodeID of the leader OM - * @return If checkpoint is installed, return the corresponding termIndex. - * Otherwise, return null. - */ - public TermIndex installSnapshot(String leaderId) { - if (omSnapshotProvider == null) { - LOG.error("OM Snapshot Provider is not configured as there are no peer " + - "nodes."); - return null; - } - - DBCheckpoint omDBcheckpoint = getDBCheckpointFromLeader(leaderId); - Path newDBlocation = omDBcheckpoint.getCheckpointLocation(); - - // Check if current ratis log index is smaller than the downloaded - // snapshot index. If yes, proceed by stopping the ratis server so that - // the OM state can be re-initialized. If no, then do not proceed with - // installSnapshot. - long lastAppliedIndex = omRatisServer.getStateMachineLastAppliedIndex(); - long checkpointSnapshotIndex = omDBcheckpoint.getRatisSnapshotIndex(); - if (checkpointSnapshotIndex <= lastAppliedIndex) { - LOG.error("Failed to install checkpoint from OM leader: {}. The last " + - "applied index: {} is greater than or equal to the checkpoint's " + - "snapshot index: {}. 
Deleting the downloaded checkpoint {}", leaderId, - lastAppliedIndex, checkpointSnapshotIndex, - newDBlocation); - try { - FileUtils.deleteFully(newDBlocation); - } catch (IOException e) { - LOG.error("Failed to fully delete the downloaded DB checkpoint {} " + - "from OM leader {}.", newDBlocation, - leaderId, e); - } - return null; - } - - // Pause the State Machine so that no new transactions can be applied. - // This action also clears the OM Double Buffer so that if there are any - // pending transactions in the buffer, they are discarded. - // TODO: The Ratis server should also be paused here. This is required - // because a leader election might happen while the snapshot - // installation is in progress and the new leader might start sending - // append log entries to the ratis server. - omRatisServer.getOmStateMachine().pause(); - - File dbBackup; - try { - dbBackup = replaceOMDBWithCheckpoint(lastAppliedIndex, newDBlocation); - } catch (Exception e) { - LOG.error("OM DB checkpoint replacement with new downloaded checkpoint " + - "failed.", e); - return null; - } - - // Reload the OM DB store with the new checkpoint. - // Restart (unpause) the state machine and update its last applied index - // to the installed checkpoint's snapshot index. - try { - reloadOMState(checkpointSnapshotIndex); - omRatisServer.getOmStateMachine().unpause(checkpointSnapshotIndex); - } catch (IOException e) { - LOG.error("Failed to reload OM state with new DB checkpoint.", e); - return null; - } - - // Delete the backup DB - try { - FileUtils.deleteFully(dbBackup); - } catch (IOException e) { - LOG.error("Failed to delete the backup of the original DB {}", dbBackup); - } - - // TODO: We should only return the snpashotIndex to the leader. - // Should be fixed after RATIS-586 - TermIndex newTermIndex = TermIndex.newTermIndex(0, - checkpointSnapshotIndex); - - return newTermIndex; - } - - /** - * Download the latest OM DB checkpoint from the leader OM. - * @param leaderId OMNodeID of the leader OM node. - * @return latest DB checkpoint from leader OM. - */ - private DBCheckpoint getDBCheckpointFromLeader(String leaderId) { - LOG.info("Downloading checkpoint from leader OM {} and reloading state " + - "from the checkpoint.", leaderId); - - try { - return omSnapshotProvider.getOzoneManagerDBSnapshot(leaderId); - } catch (IOException e) { - LOG.error("Failed to download checkpoint from OM leader {}", leaderId, e); - } - return null; - } - - /** - * Replace the current OM DB with the new DB checkpoint. - * @param lastAppliedIndex the last applied index in the current OM DB. - * @param checkpointPath path to the new DB checkpoint - * @return location of the backup of the original DB - * @throws Exception - */ - File replaceOMDBWithCheckpoint(long lastAppliedIndex, Path checkpointPath) - throws Exception { - // Stop the DB first - DBStore store = metadataManager.getStore(); - store.close(); - - // Take a backup of the current DB - File db = store.getDbLocation(); - String dbBackupName = OzoneConsts.OM_DB_BACKUP_PREFIX + - lastAppliedIndex + "_" + System.currentTimeMillis(); - File dbBackup = new File(db.getParentFile(), dbBackupName); - - try { - Files.move(db.toPath(), dbBackup.toPath()); - } catch (IOException e) { - LOG.error("Failed to create a backup of the current DB. 
Aborting " + - "snapshot installation."); - throw e; - } - - // Move the new DB checkpoint into the om metadata dir - try { - Files.move(checkpointPath, db.toPath()); - } catch (IOException e) { - LOG.error("Failed to move downloaded DB checkpoint {} to metadata " + - "directory {}. Resetting to original DB.", checkpointPath, - db.toPath()); - Files.move(dbBackup.toPath(), db.toPath()); - throw e; - } - return dbBackup; - } - - /** - * Re-instantiate MetadataManager with new DB checkpoint. - * All the classes which use/ store MetadataManager should also be updated - * with the new MetadataManager instance. - */ - void reloadOMState(long newSnapshotIndex) throws IOException { - - instantiateServices(); - - // Restart required services - metadataManager.start(configuration); - keyManager.start(configuration); - - // Set metrics and start metrics back ground thread - metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager - .getVolumeTable())); - metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager - .getBucketTable())); - metrics.setNumKeys(metadataManager.countEstimatedRowsInTable(metadataManager - .getKeyTable())); - - // Delete the omMetrics file if it exists and save the a new metrics file - // with new data - Files.deleteIfExists(getMetricsStorageFile().toPath()); - saveOmMetrics(); - - // Update OM snapshot index with the new snapshot index (from the new OM - // DB state) and save the snapshot index to disk - omRatisSnapshotInfo.saveRatisSnapshotToDisk(newSnapshotIndex); - } - - public static Logger getLogger() { - return LOG; - } - - public OzoneConfiguration getConfiguration() { - return configuration; - } - - public static void setTestSecureOmFlag(boolean testSecureOmFlag) { - OzoneManager.testSecureOmFlag = testSecureOmFlag; - } - - public String getOMNodeId() { - return omNodeDetails.getOMNodeId(); - } - - public String getOMServiceId() { - return omNodeDetails.getOMServiceId(); - } - - @VisibleForTesting - public List getPeerNodes() { - return peerNodes; - } - - @VisibleForTesting - public CertificateClient getCertificateClient() { - return certClient; - } - - public String getComponent() { - return omComponent; - } - - @Override - public OMFailoverProxyProvider getOMFailoverProxyProvider() { - return null; - } - - /** - * Return maximum volumes count per user. - * @return maxUserVolumeCount - */ - public long getMaxUserVolumeCount() { - return maxUserVolumeCount; - } - - /** - * Checks the Leader status of OM Ratis Server. - * Note that this status has a small window of error. It should not be used - * to determine the absolute leader status. - * If it is the leader, the role status is cached till Ratis server - * notifies of leader change. If it is not leader, the role information is - * retrieved through by submitting a GroupInfoRequest to Ratis server. - * - * If ratis is not enabled, then it always returns true. - * - * @return Return true if this node is the leader, false otherwsie. - */ - public boolean isLeader() { - return isRatisEnabled ? omRatisServer.isLeader() : true; - } - - /** - * Return if Ratis is enabled or not. - * @return - */ - public boolean isRatisEnabled() { - return isRatisEnabled; - } - - /** - * Get DB updates since a specific sequence number. - * @param dbUpdatesRequest request that encapsulates a sequence number. - * @return Wrapper containing the updates. - * @throws SequenceNumberNotFoundException if db is unable to read the data. 
- */ - @Override - public DBUpdatesWrapper getDBUpdates( - DBUpdatesRequest dbUpdatesRequest) - throws SequenceNumberNotFoundException { - return metadataManager.getStore() - .getUpdatesSince(dbUpdatesRequest.getSequenceNumber()); - - } - - public OzoneDelegationTokenSecretManager getDelegationTokenMgr() { - return delegationTokenMgr; - } - - /** - * Return list of OzoneAdministrators. - */ - public Collection getOzoneAdmins() { - return ozAdmins; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java deleted file mode 100644 index b98d6d3a1ef..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -import java.io.IOException; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; - -/** - * HttpServer wrapper for the OzoneManager. - */ -public class OzoneManagerHttpServer extends BaseHttpServer { - - public OzoneManagerHttpServer(Configuration conf, OzoneManager om) - throws IOException { - super(conf, "ozoneManager"); - addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT, - ServiceListJSONServlet.class); - addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT, - OMDBCheckpointServlet.class); - getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om); - } - - @Override protected String getHttpAddressKey() { - return OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY; - } - - @Override protected String getHttpBindHostKey() { - return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY; - } - - @Override protected String getHttpsAddressKey() { - return OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY; - } - - @Override protected String getHttpsBindHostKey() { - return OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY; - } - - @Override protected String getBindHostDefault() { - return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_DEFAULT; - } - - @Override protected int getHttpBindPortDefault() { - return OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT; - } - - @Override protected int getHttpsBindPortDefault() { - return OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT; - } - - @Override protected String getKeytabFile() { - return OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; - } - - @Override protected String getSpnegoPrincipal() { - return OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY; - } - - @Override protected String getEnabledKey() { - return OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java deleted file mode 100644 index fa229aa2eca..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.apache.hadoop.util.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.io.IOException; - -/** - * This class provides a command line interface to start the OM - * using Picocli. - */ -@Command(name = "ozone om", - hidden = true, description = "Start or initialize the Ozone Manager.", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class OzoneManagerStarter extends GenericCli { - - private OzoneConfiguration conf; - private OMStarterInterface receiver; - private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerStarter.class); - - public static void main(String[] args) throws Exception { - TracingUtil.initTracing("OzoneManager"); - new OzoneManagerStarter( - new OzoneManagerStarter.OMStarterHelper()).run(args); - } - - public OzoneManagerStarter(OMStarterInterface receiverObj) { - super(); - receiver = receiverObj; - } - - @Override - public Void call() throws Exception { - /** - * This method is invoked only when a sub-command is not called. Therefore - * if someone runs "ozone om" with no parameters, this is the method - * which runs and starts the OM. - */ - commonInit(); - startOm(); - return null; - } - - /** - * This function is used by the command line to start the OM. - */ - private void startOm() throws Exception { - receiver.start(conf); - } - - /** - * This function implements a sub-command to allow the OM to be - * initialized from the command line. - */ - @CommandLine.Command(name = "--init", - customSynopsis = "ozone om [global options] --init", - hidden = false, - description = "Initialize the Ozone Manager if not already initialized", - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) - public void initOm() - throws Exception { - commonInit(); - boolean result = receiver.init(conf); - if (!result) { - throw new IOException("OM Init failed."); - } - } - - /** - * This function should be called by each command to ensure the configuration - * is set and print the startup banner message. - */ - private void commonInit() { - conf = createOzoneConfiguration(); - - String[] originalArgs = getCmd().getParseResult().originalArgs() - .toArray(new String[0]); - StringUtils.startupShutdownMessage(OzoneManager.class, - originalArgs, LOG); - } - - /** - * This static class wraps the external dependencies needed for this command - * to execute its tasks. This allows the dependency to be injected for unit - * testing. 
- */ - static class OMStarterHelper implements OMStarterInterface{ - - public void start(OzoneConfiguration conf) throws IOException, - AuthenticationException { - OzoneManager om = OzoneManager.createOm(conf); - om.start(); - om.join(); - } - - public boolean init(OzoneConfiguration conf) throws IOException, - AuthenticationException { - return OzoneManager.omInit(conf); - } - } - -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java deleted file mode 100644 index a505b8d7fc0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManager.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; - -import java.util.List; - -/** - * Handles prefix commands. - * //TODO: support OzoneManagerFS for ozfs optimization using prefix tree. - */ -public interface PrefixManager extends IOzoneAcl { - - /** - * Returns the metadataManager. - * @return OMMetadataManager. - */ - OMMetadataManager getMetadataManager(); - - /** - * Get the list of path components that match with obj's path. - * longest prefix. - * Note: the number of the entries include a root "/" - * so if you have a longtest prefix path /a/b/c/ - * the returned list will be ["/", "a", "b", "c"] - * @param path ozone object path - * @return list of longest path components that matches obj's path. - */ - List getLongestPrefixPath(String path); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java deleted file mode 100644 index c89b32ee734..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ /dev/null @@ -1,417 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import com.google.common.base.Strings; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.ozone.util.RadixNode; -import org.apache.hadoop.ozone.util.RadixTree; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; - -/** - * Implementation of PrefixManager. - */ -public class PrefixManagerImpl implements PrefixManager { - private static final Logger LOG = - LoggerFactory.getLogger(PrefixManagerImpl.class); - - private static final List EMPTY_ACL_LIST = new ArrayList<>(); - private final OMMetadataManager metadataManager; - - // In-memory prefix tree to optimize ACL evaluation - private RadixTree prefixTree; - - // TODO: This isRatisEnabled check will be removed as part of HDDS-1909, - // where we integrate both HA and Non-HA code. - private boolean isRatisEnabled; - - public PrefixManagerImpl(OMMetadataManager metadataManager, - boolean isRatisEnabled) { - this.isRatisEnabled = isRatisEnabled; - this.metadataManager = metadataManager; - loadPrefixTree(); - } - - private void loadPrefixTree() { - prefixTree = new RadixTree<>(); - try (TableIterator> iterator = - getMetadataManager().getPrefixTable().iterator()) { - iterator.seekToFirst(); - while (iterator.hasNext()) { - KeyValue kv = iterator.next(); - prefixTree.insert(kv.getKey(), kv.getValue()); - } - } catch (IOException ex) { - LOG.error("Fail to load prefix tree"); - } - } - - - @Override - public OMMetadataManager getMetadataManager() { - return metadataManager; - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. 
- */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - validateOzoneObj(obj); - - String prefixPath = obj.getPath(); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - OmPrefixInfo prefixInfo = - metadataManager.getPrefixTable().get(prefixPath); - - OMPrefixAclOpResult omPrefixAclOpResult = addAcl(obj, acl, prefixInfo); - - return omPrefixAclOpResult.isOperationsResult(); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Add acl operation failed for prefix path:{} acl:{}", - prefixPath, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - validateOzoneObj(obj); - String prefixPath = obj.getPath(); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - OmPrefixInfo prefixInfo = - metadataManager.getPrefixTable().get(prefixPath); - OMPrefixAclOpResult omPrefixAclOpResult = removeAcl(obj, acl, prefixInfo); - - if (!omPrefixAclOpResult.isOperationsResult()) { - if (LOG.isDebugEnabled()) { - LOG.debug("acl {} does not exist for prefix path {} ", - acl, prefixPath); - } - return false; - } - - return omPrefixAclOpResult.isOperationsResult(); - - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Remove prefix acl operation failed for prefix path:{}" + - " acl:{}", prefixPath, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - validateOzoneObj(obj); - String prefixPath = obj.getPath(); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - OmPrefixInfo prefixInfo = - metadataManager.getPrefixTable().get(prefixPath); - - OMPrefixAclOpResult omPrefixAclOpResult = setAcl(obj, acls, prefixInfo); - - return omPrefixAclOpResult.isOperationsResult(); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Set prefix acl operation failed for prefix path:{} acls:{}", - prefixPath, acls, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - validateOzoneObj(obj); - String prefixPath = obj.getPath(); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - String longestPrefix = prefixTree.getLongestPrefix(prefixPath); - if (prefixPath.equals(longestPrefix)) { - RadixNode lastNode = - prefixTree.getLastNodeInPrefixPath(prefixPath); - if (lastNode != null && lastNode.getValue() != null) { - return lastNode.getValue().getAcls(); - } - } - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - return EMPTY_ACL_LIST; - } - - /** - * Check access for given ozoneObject. 
- * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @return true if user has access else false. - */ - @Override - public boolean checkAccess(OzoneObj ozObject, RequestContext context) - throws OMException { - Objects.requireNonNull(ozObject); - Objects.requireNonNull(context); - - String prefixPath = ozObject.getPath(); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - String longestPrefix = prefixTree.getLongestPrefix(prefixPath); - if (prefixPath.equals(longestPrefix)) { - RadixNode lastNode = - prefixTree.getLastNodeInPrefixPath(prefixPath); - if (lastNode != null && lastNode.getValue() != null) { - boolean hasAccess = OzoneAclUtil.checkAclRights(lastNode.getValue(). - getAcls(), context); - if (LOG.isDebugEnabled()) { - LOG.debug("user:{} has access rights for ozObj:{} ::{} ", - context.getClientUgi(), ozObject, hasAccess); - } - return hasAccess; - } else { - return true; - } - } else { - return true; - } - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - } - - @Override - public List getLongestPrefixPath(String path) { - String prefixPath = prefixTree.getLongestPrefix(path); - metadataManager.getLock().acquireLock(PREFIX_LOCK, prefixPath); - try { - return getLongestPrefixPathHelper(prefixPath); - } finally { - metadataManager.getLock().releaseLock(PREFIX_LOCK, prefixPath); - } - } - - /** - * Get longest prefix path assuming caller take prefix lock. - * @param prefixPath - * @return list of prefix info. - */ - private List getLongestPrefixPathHelper(String prefixPath) { - return prefixTree.getLongestPrefixPath(prefixPath).stream() - .map(c -> c.getValue()).collect(Collectors.toList()); - } - - /** - * Helper method to validate ozone object. - * @param obj - * */ - public void validateOzoneObj(OzoneObj obj) throws OMException { - Objects.requireNonNull(obj); - - if (!obj.getResourceType().equals(PREFIX)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "PrefixManager. 
OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - String bucket = obj.getBucketName(); - String prefixName = obj.getPrefixName(); - - if (Strings.isNullOrEmpty(volume)) { - throw new OMException("Volume name is required.", VOLUME_NOT_FOUND); - } - if (Strings.isNullOrEmpty(bucket)) { - throw new OMException("Bucket name is required.", BUCKET_NOT_FOUND); - } - if (Strings.isNullOrEmpty(prefixName)) { - throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND); - } - if (!prefixName.endsWith("/")) { - throw new OMException("Invalid prefix name: " + prefixName, - PREFIX_NOT_FOUND); - } - } - - public OMPrefixAclOpResult addAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, - OmPrefixInfo prefixInfo) throws IOException { - - if (prefixInfo == null) { - prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj - .getPath()).build(); - } - boolean changed = prefixInfo.addAcl(ozoneAcl); - if (changed) { - // update the in-memory prefix tree - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } - } - return new OMPrefixAclOpResult(prefixInfo, changed); - } - - public OMPrefixAclOpResult removeAcl(OzoneObj ozoneObj, OzoneAcl ozoneAcl, - OmPrefixInfo prefixInfo) throws IOException { - boolean removed = false; - if (prefixInfo != null) { - removed = prefixInfo.removeAcl(ozoneAcl); - } - - // Nothing is matching to remove. - if (removed) { - // Update in-memory prefix tree. - if (prefixInfo.getAcls().isEmpty()) { - prefixTree.removePrefixPath(ozoneObj.getPath()); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().delete(ozoneObj.getPath()); - } - } else { - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } - } - } - return new OMPrefixAclOpResult(prefixInfo, removed); - } - - public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, - OmPrefixInfo prefixInfo) throws IOException { - if (prefixInfo == null) { - prefixInfo = new OmPrefixInfo.Builder().setName(ozoneObj - .getPath()).build(); - } - - boolean changed = prefixInfo.setAcls(ozoneAcls); - if (changed) { - List aclsToBeSet = prefixInfo.getAcls(); - // Inherit DEFAULT acls from prefix. - boolean prefixParentFound = false; - List prefixList = getLongestPrefixPathHelper( - prefixTree.getLongestPrefix(ozoneObj.getPath())); - - if (prefixList.size() > 0) { - // Add all acls from direct parent to key. - OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1); - if (parentPrefixInfo != null) { - prefixParentFound = OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, - parentPrefixInfo.getAcls()); - } - } - - // If no parent prefix is found inherit DEFAULT acls from bucket. - if (!prefixParentFound) { - String bucketKey = metadataManager.getBucketKey(ozoneObj - .getVolumeName(), ozoneObj.getBucketName()); - OmBucketInfo bucketInfo = metadataManager.getBucketTable(). - get(bucketKey); - if (bucketInfo != null) { - OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls()); - } - } - - prefixTree.insert(ozoneObj.getPath(), prefixInfo); - if (!isRatisEnabled) { - metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); - } - } - return new OMPrefixAclOpResult(prefixInfo, changed); - } - - /** - * Result of the prefix acl operation. 
- */ - public static class OMPrefixAclOpResult { - private OmPrefixInfo omPrefixInfo; - private boolean operationsResult; - - public OMPrefixAclOpResult(OmPrefixInfo omPrefixInfo, - boolean operationsResult) { - this.omPrefixInfo = omPrefixInfo; - this.operationsResult = operationsResult; - } - - public OmPrefixInfo getOmPrefixInfo() { - return omPrefixInfo; - } - - public boolean isOperationsResult() { - return operationsResult; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java deleted file mode 100644 index dfd0ac3ea34..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; - -/** - * An interface that maps S3 buckets to Ozone - * volume/bucket. - */ -public interface S3BucketManager { - /** - * Creates an s3 bucket and maps it to Ozone volume/bucket. - * @param userName - Name of the user who owns the bucket. - * @param bucketName - S3 Bucket Name. - * @throws IOException in case the bucket cannot be created. - */ - void createS3Bucket(String userName, String bucketName) throws IOException; - - /** - * Deletes an s3 bucket and removes mapping of Ozone volume/bucket. - * @param bucketName - S3 Bucket Name. - * @throws IOException in case the bucket cannot be deleted. - */ - void deleteS3Bucket(String bucketName) throws IOException; - - /** - * Returns the Ozone volume/bucket where the S3 Bucket points to. - * @param s3BucketName - S3 Bucket Name - * @return String - Ozone volume/bucket - * @throws IOException in case of failure to retrieve mapping. - */ - String getOzoneBucketMapping(String s3BucketName) throws IOException; - - /** - * Returns Ozone volume name for a given S3Bucket. - * @param s3BucketName - S3 bucket name. - * @return String - Ozone volume name where is s3bucket resides. - * @throws IOException - in case of failure to retrieve mapping. - */ - String getOzoneVolumeName(String s3BucketName) throws IOException; - - /** - * Returns Ozone bucket name for a given s3Bucket. - * @param s3BucketName - S3 bucket Name. - * @return Ozone bucket name for this given S3 bucket - * @throws IOException - in case of failure to retrieve mapping. - */ - String getOzoneBucketName(String s3BucketName) throws IOException; - - /** - * Returns volume Name for a user. - * @param userName - */ - String getOzoneVolumeNameForUser(String userName) throws IOException; - - /** - * Create ozone volume if required, this will be needed during creates3Bucket. 
- * @param userName - * @return true - if volume is successfully created. false - if volume - * already exists or volume creation failure. - * @throws IOException - incase of volume creation failure. - */ - boolean createOzoneVolumeIfNeeded(String userName) throws IOException; - - /** - * Return volume name from userName. - * @param userName - */ - String formatOzoneVolumeName(String userName); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java deleted file mode 100644 index 8a581bbe259..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java +++ /dev/null @@ -1,254 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.security.UserGroupInformation; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; - -import org.apache.logging.log4j.util.Strings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_VOLUME_PREFIX; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK; - -/** - * S3 Bucket Manager, this class maintains a mapping between S3 Bucket and Ozone - * Volume/bucket. - */ -public class S3BucketManagerImpl implements S3BucketManager { - private static final Logger LOG = - LoggerFactory.getLogger(S3BucketManagerImpl.class); - - private static final String S3_ADMIN_NAME = "OzoneS3Manager"; - private final OzoneConfiguration configuration; - private final OMMetadataManager omMetadataManager; - private final VolumeManager volumeManager; - private final BucketManager bucketManager; - - /** - * Construct an S3 Bucket Manager Object. - * - * @param configuration - Ozone Configuration. - * @param omMetadataManager - Ozone Metadata Manager. 
- */ - public S3BucketManagerImpl( - OzoneConfiguration configuration, - OMMetadataManager omMetadataManager, - VolumeManager volumeManager, - BucketManager bucketManager) { - this.configuration = configuration; - this.omMetadataManager = omMetadataManager; - this.volumeManager = volumeManager; - this.bucketManager = bucketManager; - } - - @Override - public void createS3Bucket(String userName, String bucketName) - throws IOException { - Preconditions.checkArgument(Strings.isNotBlank(bucketName), "Bucket" + - " name cannot be null or empty."); - Preconditions.checkArgument(Strings.isNotBlank(userName), "User name " + - "cannot be null or empty."); - - Preconditions.checkArgument(bucketName.length() >=3 && - bucketName.length() < 64, "Length of the S3 Bucket is not correct."); - - - // TODO: Decide if we want to enforce S3 Bucket Creation Rules in this - // code path? - // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html - - // Generate an Ozone volume name. For the time being, we are going to use - // s3userName as the Ozone volume name. Since S3 advices 100 buckets max - // for a user and we have no limit to the number of Ozone buckets under a - // volume we will stick to very simple model. - // - // s3Bucket -> ozoneVolume/OzoneBucket name - // s3BucketName ->s3userName/s3Bucketname - // - // You might wonder if all names map to this pattern, why we need to - // store the S3 bucketName in a table at all. This is to support - // anonymous access to bucket where the user name is absent. - String ozoneVolumeName = formatOzoneVolumeName(userName); - - omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, bucketName); - try { - String bucket = omMetadataManager.getS3Table().get(bucketName); - - if (bucket != null) { - LOG.debug("Bucket already exists. {}", bucketName); - throw new OMException( - "Unable to create S3 bucket. " + bucketName + " already exists.", - OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS); - } - String ozoneBucketName = bucketName; - createOzoneBucket(ozoneVolumeName, ozoneBucketName); - String finalName = String.format("%s/%s", ozoneVolumeName, - ozoneBucketName); - - omMetadataManager.getS3Table().put(bucketName, finalName); - } finally { - omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, bucketName); - } - - } - - @Override - public void deleteS3Bucket(String bucketName) throws IOException { - Preconditions.checkArgument( - Strings.isNotBlank(bucketName), "Bucket name cannot be null or empty"); - - omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, bucketName); - try { - String map = omMetadataManager.getS3Table().get(bucketName); - - if (map == null) { - throw new OMException("No such S3 bucket. " + bucketName, - OMException.ResultCodes.S3_BUCKET_NOT_FOUND); - } - - bucketManager.deleteBucket(getOzoneVolumeName(bucketName), bucketName); - omMetadataManager.getS3Table().delete(bucketName); - } catch(IOException ex) { - throw ex; - } finally { - omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, bucketName); - } - - } - - @Override - public String formatOzoneVolumeName(String userName) { - return String.format(OM_S3_VOLUME_PREFIX + "%s", userName); - } - - @Override - public boolean createOzoneVolumeIfNeeded(String userName) - throws IOException { - // We don't have to time of check. time of use problem here because - // this call is invoked while holding the s3Bucket lock. 
- boolean newVolumeCreate = true; - String ozoneVolumeName = formatOzoneVolumeName(userName); - try { - OmVolumeArgs.Builder builder = - OmVolumeArgs.newBuilder() - .setAdminName(S3_ADMIN_NAME) - .setOwnerName(userName) - .setVolume(ozoneVolumeName) - .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES); - for (OzoneAcl acl : getDefaultAcls(userName)) { - builder.addOzoneAcls(OzoneAcl.toProtobuf(acl)); - } - - OmVolumeArgs args = builder.build(); - - volumeManager.createVolume(args); - - } catch (OMException exp) { - newVolumeCreate = false; - if (exp.getResult().compareTo(VOLUME_ALREADY_EXISTS) == 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Volume already exists. {}", exp.getMessage()); - } - } else { - throw exp; - } - } - - return newVolumeCreate; - } - - /** - * Get default acls. - * */ - private List getDefaultAcls(String userName) { - UserGroupInformation ugi = ProtobufRpcEngine.Server.getRemoteUser(); - return OzoneAcl.parseAcls("user:" + (ugi == null ? userName : - ugi.getUserName()) + ":a,user:" + S3_ADMIN_NAME + ":a"); - } - - private void createOzoneBucket(String volumeName, String bucketName) - throws IOException { - OmBucketInfo.Builder builder = OmBucketInfo.newBuilder(); - OmBucketInfo bucketInfo = - builder - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setIsVersionEnabled(Boolean.FALSE) - .setStorageType(StorageType.DEFAULT) - .setAcls(getDefaultAcls(null)) - .build(); - bucketManager.createBucket(bucketInfo); - } - - @Override - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - Preconditions.checkArgument( - Strings.isNotBlank(s3BucketName), - "Bucket name cannot be null or empty."); - Preconditions.checkArgument(s3BucketName.length() >=3 && - s3BucketName.length() < 64, - "Length of the S3 Bucket is not correct."); - omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK, s3BucketName); - try { - String mapping = omMetadataManager.getS3Table().get(s3BucketName); - if (mapping != null) { - return mapping; - } - throw new OMException("No such S3 bucket.", - OMException.ResultCodes.S3_BUCKET_NOT_FOUND); - } finally { - omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName); - } - } - - @Override - public String getOzoneVolumeName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[0]; - } - - @Override - public String getOzoneBucketName(String s3BucketName) throws IOException { - String mapping = getOzoneBucketMapping(s3BucketName); - return mapping.split("/")[1]; - } - - @Override - public String getOzoneVolumeNameForUser(String userName) throws IOException { - Objects.requireNonNull(userName, "UserName cannot be null"); - return formatOzoneVolumeName(userName); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java deleted file mode 100644 index 6a8ef375553..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ScmClient.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
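For reference, the S3-to-Ozone naming convention that the S3BucketManagerImpl code above describes ("s3Bucket -> ozoneVolume/OzoneBucket", "s3BucketName -> s3userName/s3Bucketname") can be summed up in a minimal, self-contained sketch. The prefix value "s3" and the sample user and bucket names below are illustrative assumptions, not values taken from this patch.

// Minimal sketch of the S3-to-Ozone name mapping convention, assuming the
// volume prefix constant resolves to "s3"; names are placeholders.
public final class S3NameMappingSketch {
  private static final String OM_S3_VOLUME_PREFIX = "s3"; // assumed value

  static String volumeNameFor(String userName) {
    // mirrors the pattern used by formatOzoneVolumeName(userName)
    return String.format(OM_S3_VOLUME_PREFIX + "%s", userName);
  }

  public static void main(String[] args) {
    String user = "alice";        // hypothetical S3 user
    String s3Bucket = "photos";   // hypothetical S3 bucket
    String volume = volumeNameFor(user);                       // "s3alice"
    String mapping = String.format("%s/%s", volume, s3Bucket); // "s3alice/photos", the value kept in the S3 table
    System.out.println("volume: " + mapping.split("/")[0]);    // what getOzoneVolumeName would return
    System.out.println("bucket: " + mapping.split("/")[1]);    // what getOzoneBucketName would return
  }
}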

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; - -/** - * Wrapper class for Scm protocol clients. - */ -public class ScmClient { - - private final ScmBlockLocationProtocol blockClient; - private final StorageContainerLocationProtocol containerClient; - - ScmClient(ScmBlockLocationProtocol blockClient, - StorageContainerLocationProtocol containerClient) { - this.containerClient = containerClient; - this.blockClient = blockClient; - } - - public ScmBlockLocationProtocol getBlockClient() { - return this.blockClient; - } - - StorageContainerLocationProtocol getContainerClient() { - return this.containerClient; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java deleted file mode 100644 index 9aab823d49b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; -import java.io.PrintWriter; - - -/** - * Provides REST access to Ozone Service List. - *

- * This servlet is generally placed under the /serviceList URL of - * the OzoneManager HttpServer. - * - * The return format is JSON, in the form - *

- *


- *  {
- *    "services" : [
- *      {
- *        "NodeType":"OM",
- *        "Hostname" "$hostname",
- *        "ports" : {
- *          "$PortType" : "$port",
- *          ...
- *        }
- *      }
- *    ]
- *  }
- *  
- *
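A document in the shape shown above can be produced with Jackson's ObjectMapper with indentation enabled, which is essentially what the servlet does with om.getServiceList(). The sketch below is only illustrative: it serializes a hand-built Map payload with placeholder host and port values rather than the OM's real service-info objects.

// Minimal sketch: pretty-printed JSON in the shape of the sample above.
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import java.util.List;
import java.util.Map;

public final class ServiceListJsonSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical stand-in for om.getServiceList().
    Map<String, Object> omNode = Map.of(
        "NodeType", "OM",
        "Hostname", "om1.example.com",
        "ports", Map.of("RPC", "9862", "HTTP", "9874"));
    Map<String, Object> serviceList = Map.of("services", List.of(omNode));

    ObjectMapper mapper = new ObjectMapper();
    mapper.enable(SerializationFeature.INDENT_OUTPUT); // pretty-print, as the servlet does
    System.out.println(mapper.writeValueAsString(serviceList));
  }
}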

- * - */ -public class ServiceListJSONServlet extends HttpServlet { - - private static final Logger LOG = - LoggerFactory.getLogger(ServiceListJSONServlet.class); - private static final long serialVersionUID = 1L; - - private transient OzoneManager om; - - @Override - public void init() throws ServletException { - this.om = (OzoneManager) getServletContext() - .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); - } - - /** - * Process a GET request for the specified resource. - * - * @param request - * The servlet request we are processing - * @param response - * The servlet response we are creating - */ - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) { - try { - ObjectMapper objectMapper = new ObjectMapper(); - objectMapper.enable(SerializationFeature.INDENT_OUTPUT); - response.setContentType("application/json; charset=utf8"); - PrintWriter writer = response.getWriter(); - try { - writer.write(objectMapper.writeValueAsString(om.getServiceList())); - } finally { - if (writer != null) { - writer.close(); - } - } - } catch (IOException e) { - LOG.error( - "Caught an exception while processing ServiceList request", e); - response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - } - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java deleted file mode 100644 index 01c277fc977..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; - -import java.io.IOException; -import java.util.List; - -/** - * OM volume manager interface. - */ -public interface VolumeManager extends IOzoneAcl { - - /** - * Create a new volume. - * @param args - Volume args to create a volume - */ - void createVolume(OmVolumeArgs args) - throws IOException; - - /** - * Changes the owner of a volume. - * - * @param volume - Name of the volume. - * @param owner - Name of the owner. - * @throws IOException - */ - void setOwner(String volume, String owner) - throws IOException; - - /** - * Changes the Quota on a volume. - * - * @param volume - Name of the volume. - * @param quota - Quota in bytes. - * @throws IOException - */ - void setQuota(String volume, long quota) throws IOException; - - /** - * Gets the volume information. - * @param volume - Volume name. - * @return VolumeArgs or exception is thrown. - * @throws IOException - */ - OmVolumeArgs getVolumeInfo(String volume) throws IOException; - - /** - * Deletes an existing empty volume. - * - * @param volume - Name of the volume. - * @throws IOException - */ - void deleteVolume(String volume) throws IOException; - - /** - * Checks if the specified user with a role can access this volume. - * - * @param volume - volume - * @param userAcl - user acl which needs to be checked for access - * @return true if the user has access for the volume, false otherwise - * @throws IOException - */ - boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) - throws IOException; - - /** - * Returns a list of volumes owned by a given user; if user is null, - * returns all volumes. - * - * @param userName - * volume owner - * @param prefix - * the volume prefix used to filter the listing result. - * @param startKey - * the start volume name determines where to start listing from, - * this key is excluded from the result. - * @param maxKeys - * the maximum number of volumes to return. - * @return a list of {@link OmVolumeArgs} - * @throws IOException - */ - List listVolumes(String userName, String prefix, - String startKey, int maxKeys) throws IOException; - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java deleted file mode 100644 index 7375eb89b26..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ /dev/null @@ -1,705 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.ozone.protocol.proto - .OzoneManagerProtocolProtos.UserVolumeInfo; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.RequestContext; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME_DEFAULT; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * OM volume management code. - */ -public class VolumeManagerImpl implements VolumeManager { - private static final Logger LOG = - LoggerFactory.getLogger(VolumeManagerImpl.class); - - private final OMMetadataManager metadataManager; - private final int maxUserVolumeCount; - private final boolean aclEnabled; - - - /** - * Constructor. - * @param conf - Ozone configuration. 
- * @throws IOException - */ - public VolumeManagerImpl(OMMetadataManager metadataManager, - OzoneConfiguration conf) { - this.metadataManager = metadataManager; - this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME, - OZONE_OM_USER_MAX_VOLUME_DEFAULT); - aclEnabled = conf.getBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, - OzoneConfigKeys.OZONE_ACL_ENABLED_DEFAULT); - } - - // Helpers to add and delete volume from user list - private UserVolumeInfo addVolumeToOwnerList(String volume, String owner) - throws IOException { - // Get the volume list - String dbUserKey = metadataManager.getUserKey(owner); - UserVolumeInfo volumeList = metadataManager.getUserTable().get(dbUserKey); - List prevVolList = new ArrayList<>(); - if (volumeList != null) { - prevVolList.addAll(volumeList.getVolumeNamesList()); - } - - // Check the volume count - if (prevVolList.size() >= maxUserVolumeCount) { - LOG.debug("Too many volumes for user:{}", owner); - throw new OMException("Too many volumes for user:" + owner, - ResultCodes.USER_TOO_MANY_VOLUMES); - } - - // Add the new volume to the list - prevVolList.add(volume); - UserVolumeInfo newVolList = UserVolumeInfo.newBuilder() - .addAllVolumeNames(prevVolList).build(); - - return newVolList; - } - - private UserVolumeInfo delVolumeFromOwnerList(String volume, String owner) - throws IOException { - // Get the volume list - UserVolumeInfo volumeList = metadataManager.getUserTable().get(owner); - List prevVolList = new ArrayList<>(); - if (volumeList != null) { - prevVolList.addAll(volumeList.getVolumeNamesList()); - } else { - LOG.debug("volume:{} not found for user:{}", volume, owner); - throw new OMException(ResultCodes.USER_NOT_FOUND); - } - - // Remove the volume from the list - prevVolList.remove(volume); - UserVolumeInfo newVolList = UserVolumeInfo.newBuilder() - .addAllVolumeNames(prevVolList).build(); - return newVolList; - } - - /** - * Creates a volume. - * @param omVolumeArgs - OmVolumeArgs. 
- */ - @Override - public void createVolume(OmVolumeArgs omVolumeArgs) throws IOException { - Preconditions.checkNotNull(omVolumeArgs); - - boolean acquiredUserLock = false; - metadataManager.getLock().acquireLock(VOLUME_LOCK, - omVolumeArgs.getVolume()); - try { - acquiredUserLock = metadataManager.getLock().acquireLock(USER_LOCK, - omVolumeArgs.getOwnerName()); - String dbVolumeKey = metadataManager.getVolumeKey( - omVolumeArgs.getVolume()); - String dbUserKey = metadataManager.getUserKey( - omVolumeArgs.getOwnerName()); - OmVolumeArgs volumeInfo = - metadataManager.getVolumeTable().get(dbVolumeKey); - - // Check of the volume already exists - if (volumeInfo != null) { - LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); - throw new OMException(ResultCodes.VOLUME_ALREADY_EXISTS); - } - - UserVolumeInfo volumeList = addVolumeToOwnerList(omVolumeArgs.getVolume(), - omVolumeArgs.getOwnerName()); - - // Set creation time - omVolumeArgs.setCreationTime(System.currentTimeMillis()); - - - createVolumeCommitToDB(omVolumeArgs, volumeList, dbVolumeKey, - dbUserKey); - - LOG.debug("created volume:{} user:{}", omVolumeArgs.getVolume(), - omVolumeArgs.getOwnerName()); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Volume creation failed for user:{} volume:{}", - omVolumeArgs.getOwnerName(), omVolumeArgs.getVolume(), ex); - } - throw ex; - } finally { - if (acquiredUserLock) { - metadataManager.getLock().releaseLock(USER_LOCK, - omVolumeArgs.getOwnerName()); - } - metadataManager.getLock().releaseLock(VOLUME_LOCK, - omVolumeArgs.getVolume()); - } - } - - private void createVolumeCommitToDB(OmVolumeArgs omVolumeArgs, - UserVolumeInfo volumeList, String dbVolumeKey, String dbUserKey) - throws IOException { - try (BatchOperation batch = metadataManager.getStore() - .initBatchOperation()) { - // Write the vol info - metadataManager.getVolumeTable().putWithBatch(batch, dbVolumeKey, - omVolumeArgs); - metadataManager.getUserTable().putWithBatch(batch, dbUserKey, - volumeList); - // Add volume to user list - metadataManager.getStore().commitBatchOperation(batch); - } catch (IOException ex) { - throw ex; - } - } - - /** - * Changes the owner of a volume. - * - * @param volume - Name of the volume. - * @param owner - Name of the owner. 
- * @throws IOException - */ - @Override - public void setOwner(String volume, String owner) - throws IOException { - Preconditions.checkNotNull(volume); - Preconditions.checkNotNull(owner); - boolean acquiredUsersLock = false; - String actualOwner = null; - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = metadataManager - .getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("Changing volume ownership failed for user:{} volume:{}", - owner, volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - - actualOwner = volumeArgs.getOwnerName(); - String originalOwner = metadataManager.getUserKey(actualOwner); - - acquiredUsersLock = metadataManager.getLock().acquireMultiUserLock(owner, - originalOwner); - UserVolumeInfo oldOwnerVolumeList = delVolumeFromOwnerList(volume, - originalOwner); - - String newOwner = metadataManager.getUserKey(owner); - UserVolumeInfo newOwnerVolumeList = addVolumeToOwnerList(volume, - newOwner); - - volumeArgs.setOwnerName(owner); - setOwnerCommitToDB(oldOwnerVolumeList, newOwnerVolumeList, - volumeArgs, owner); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Changing volume ownership failed for user:{} volume:{}", - owner, volume, ex); - } - throw ex; - } finally { - if (acquiredUsersLock) { - metadataManager.getLock().releaseMultiUserLock(owner, actualOwner); - } - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - } - - - private void setOwnerCommitToDB(UserVolumeInfo oldOwnerVolumeList, - UserVolumeInfo newOwnerVolumeList, OmVolumeArgs newOwnerVolumeArgs, - String oldOwner) throws IOException { - try (BatchOperation batch = metadataManager.getStore() - .initBatchOperation()) { - if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) { - metadataManager.getUserTable().deleteWithBatch(batch, oldOwner); - } else { - metadataManager.getUserTable().putWithBatch(batch, oldOwner, - oldOwnerVolumeList); - } - metadataManager.getUserTable().putWithBatch(batch, - newOwnerVolumeArgs.getOwnerName(), - newOwnerVolumeList); - - String dbVolumeKey = - metadataManager.getVolumeKey(newOwnerVolumeArgs.getVolume()); - metadataManager.getVolumeTable().putWithBatch(batch, - dbVolumeKey, newOwnerVolumeArgs); - metadataManager.getStore().commitBatchOperation(batch); - } - } - - - /** - * Changes the Quota on a volume. - * - * @param volume - Name of the volume. - * @param quota - Quota in bytes. 
- * - * @throws IOException - */ - @Override - public void setQuota(String volume, long quota) throws IOException { - Preconditions.checkNotNull(volume); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException(ResultCodes.VOLUME_NOT_FOUND); - } - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - - volumeArgs.setQuotaInBytes(quota); - - metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Changing volume quota failed for volume:{} quota:{}", volume, - quota, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - } - - /** - * Gets the volume information. - * @param volume - Volume name. - * @return VolumeArgs or exception is thrown. - * @throws IOException - */ - @Override - public OmVolumeArgs getVolumeInfo(String volume) throws IOException { - Preconditions.checkNotNull(volume); - metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - return volumeArgs; - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.warn("Info volume failed for volume:{}", volume, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); - } - } - - /** - * Deletes an existing empty volume. - * - * @param volume - Name of the volume. 
- * @throws IOException - */ - @Override - public void deleteVolume(String volume) throws IOException { - Preconditions.checkNotNull(volume); - String owner = null; - boolean acquiredUserLock = false; - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - owner = getVolumeInfo(volume).getOwnerName(); - acquiredUserLock = metadataManager.getLock().acquireLock(USER_LOCK, - owner); - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - if (!metadataManager.isVolumeEmpty(volume)) { - LOG.debug("volume:{} is not empty", volume); - throw new OMException(ResultCodes.VOLUME_NOT_EMPTY); - } - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - // delete the volume from the owner list - // as well as delete the volume entry - UserVolumeInfo newVolumeList = delVolumeFromOwnerList(volume, - volumeArgs.getOwnerName()); - - - deleteVolumeCommitToDB(newVolumeList, volume, owner); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Delete volume failed for volume:{}", volume, ex); - } - throw ex; - } finally { - if (acquiredUserLock) { - metadataManager.getLock().releaseLock(USER_LOCK, owner); - } - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - - } - } - - - private void deleteVolumeCommitToDB(UserVolumeInfo newVolumeList, - String volume, String owner) throws IOException { - try (BatchOperation batch = metadataManager.getStore() - .initBatchOperation()) { - String dbUserKey = metadataManager.getUserKey(owner); - if (newVolumeList.getVolumeNamesList().size() == 0) { - metadataManager.getUserTable().deleteWithBatch(batch, dbUserKey); - } else { - metadataManager.getUserTable().putWithBatch(batch, dbUserKey, - newVolumeList); - } - metadataManager.getVolumeTable().deleteWithBatch(batch, - metadataManager.getVolumeKey(volume)); - metadataManager.getStore().commitBatchOperation(batch); - } - } - - /** - * Checks if the specified user with a role can access this volume. 
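A minimal sketch of how the volume write path above was typically driven (illustrative only, not part of the deleted sources; the volume name, owner and quota are hypothetical, and volumeManager stands in for the enclosing VolumeManager implementation):

    static void volumeLifecycleExample(VolumeManager volumeManager)
        throws IOException {
      volumeManager.setOwner("vol-finance", "hive");                   // reassign ownership
      volumeManager.setQuota("vol-finance", 10L * 1024 * 1024 * 1024); // 10 GB quota
      OmVolumeArgs info = volumeManager.getVolumeInfo("vol-finance");
      assert "hive".equals(info.getOwnerName());
      volumeManager.deleteVolume("vol-finance"); // succeeds only while the volume is empty
    }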
- * - * @param volume - volume - * @param userAcl - user acl which needs to be checked for access - * @return true if the user has access for the volume, false otherwise - * @throws IOException - */ - @Override - public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) - throws IOException { - Preconditions.checkNotNull(volume); - Preconditions.checkNotNull(userAcl); - metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - return volumeArgs.getAclMap().hasAccess(userAcl); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Check volume access failed for volume:{} user:{} rights:{}", - volume, userAcl.getName(), userAcl.getRights().toString(), ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); - } - } - - /** - * {@inheritDoc} - */ - @Override - public List listVolumes(String userName, - String prefix, String startKey, int maxKeys) throws IOException { - metadataManager.getLock().acquireLock(USER_LOCK, userName); - try { - List volumes = metadataManager.listVolumes( - userName, prefix, startKey, maxKeys); - UserGroupInformation userUgi = ProtobufRpcEngine.Server. - getRemoteUser(); - if (userUgi == null || !aclEnabled) { - return volumes; - } - - List filteredVolumes = volumes.stream(). - filter(v -> v.getAclMap(). - hasAccess(IAccessAuthorizer.ACLType.LIST, userUgi)) - .collect(Collectors.toList()); - return filteredVolumes; - } finally { - metadataManager.getLock().releaseLock(USER_LOCK, userName); - } - } - - /** - * Add acl for Ozone object. Return true if acl is added successfully else - * false. - * - * @param obj Ozone object for which acl should be added. - * @param acl ozone acl top be added. - * @throws IOException if there is error. - */ - @Override - public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acl); - if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "VolumeManager. 
OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - try { - volumeArgs.addAcl(acl); - } catch (OMException ex) { - if (LOG.isDebugEnabled()) { - LOG.debug("Add acl failed.", ex); - } - return false; - } - metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - //return volumeArgs.getAclMap().hasAccess(userAcl); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Add acl operation failed for volume:{} acl:{}", - volume, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - - return true; - } - - /** - * Remove acl for Ozone object. Return true if acl is removed successfully - * else false. - * - * @param obj Ozone object. - * @param acl Ozone acl to be removed. - * @throws IOException if there is error. - */ - @Override - public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acl); - if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "VolumeManager. OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - try { - volumeArgs.removeAcl(acl); - } catch (OMException ex) { - if (LOG.isDebugEnabled()) { - LOG.debug("Remove acl failed.", ex); - } - return false; - } - metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - //return volumeArgs.getAclMap().hasAccess(userAcl); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Remove acl operation failed for volume:{} acl:{}", - volume, acl, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - - return true; - } - - /** - * Acls to be set for given Ozone object. This operations reset ACL for given - * object to list of ACLs provided in argument. - * - * @param obj Ozone object. - * @param acls List of acls. - * @throws IOException if there is error. - */ - @Override - public boolean setAcl(OzoneObj obj, List acls) throws IOException { - Objects.requireNonNull(obj); - Objects.requireNonNull(acls); - - if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "VolumeManager. 
OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - volumeArgs.setAcls(acls); - metadataManager.getVolumeTable().put(dbVolumeKey, volumeArgs); - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - //return volumeArgs.getAclMap().hasAccess(userAcl); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Set acl operation failed for volume:{} acls:{}", - volume, acls, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - - return true; - } - - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ - @Override - public List getAcl(OzoneObj obj) throws IOException { - Objects.requireNonNull(obj); - - if (!obj.getResourceType().equals(OzoneObj.ResourceType.VOLUME)) { - throw new IllegalArgumentException("Unexpected argument passed to " + - "VolumeManager. OzoneObj type:" + obj.getResourceType()); - } - String volume = obj.getVolumeName(); - metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - return volumeArgs.getAclMap().getAcl(); - } catch (IOException ex) { - if (!(ex instanceof OMException)) { - LOG.error("Get acl operation failed for volume:{}", volume, ex); - } - throw ex; - } finally { - metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volume); - } - } - - /** - * Check access for given ozoneObject. - * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @return true if user has access else false. 
- */ - @Override - public boolean checkAccess(OzoneObj ozObject, RequestContext context) - throws OMException { - Objects.requireNonNull(ozObject); - Objects.requireNonNull(context); - - String volume = ozObject.getVolumeName(); - metadataManager.getLock().acquireLock(VOLUME_LOCK, volume); - try { - String dbVolumeKey = metadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - metadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException("Volume " + volume + " is not found", - ResultCodes.VOLUME_NOT_FOUND); - } - - Preconditions.checkState(volume.equals(volumeArgs.getVolume())); - boolean hasAccess = volumeArgs.getAclMap().hasAccess( - context.getAclRights(), context.getClientUgi()); - if (LOG.isDebugEnabled()) { - LOG.debug("user:{} has access rights for volume:{} :{} ", - context.getClientUgi(), ozObject.getVolumeName(), hasAccess); - } - return hasAccess; - } catch (IOException ex) { - LOG.error("Check access operation failed for volume:{}", volume, ex); - throw new OMException("Check access operation failed for " + - "volume:" + volume, ex, ResultCodes.INTERNAL_ERROR); - } finally { - metadataManager.getLock().releaseLock(VOLUME_LOCK, volume); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java deleted file mode 100644 index 647931af0d0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.fs; - -import org.apache.hadoop.ozone.om.IOzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; - -import java.io.IOException; -import java.util.List; - -/** - * Ozone Manager FileSystem interface. - */ -public interface OzoneManagerFS extends IOzoneAcl { - OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException; - - void createDirectory(OmKeyArgs args) throws IOException; - - OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, - boolean isRecursive) throws IOException; - - /** - * Look up a file. Return the info of the file to client side. - * - * @param args the args of the key provided by client. - * @param clientAddress a hint to key manager, order the datanode in returned - * pipeline by distance between client and datanode. - * @return a OmKeyInfo instance client uses to talk to container. - * @throws IOException - */ - OmKeyInfo lookupFile(OmKeyArgs args, String clientAddress) throws IOException; - - List listStatus(OmKeyArgs keyArgs, boolean recursive, - String startKey, long numEntries) throws IOException; -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java deleted file mode 100644 index 32551852032..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.fs; -/* - This package contains the Ozone Manager FileSystem interface classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java deleted file mode 100644 index 8d9e70977a3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMHANodeDetails.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ha; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.OzoneIllegalArgumentException; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODE_ID_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -/** - * Class which maintains peer information and its own OM node information. - */ -public class OMHANodeDetails { - - private static String[] genericConfigKeys = new String[] { - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, - OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, - OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY, - OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY, - OMConfigKeys.OZONE_OM_DB_DIRS, - OMConfigKeys.OZONE_OM_ADDRESS_KEY, - }; - - public static final Logger LOG = - LoggerFactory.getLogger(OMHANodeDetails.class); - private final OMNodeDetails localNodeDetails; - private final List peerNodeDetails; - - public OMHANodeDetails(OMNodeDetails localNodeDetails, - List peerNodeDetails) { - this.localNodeDetails = localNodeDetails; - this.peerNodeDetails = peerNodeDetails; - } - - public OMNodeDetails getLocalNodeDetails() { - return localNodeDetails; - } - - public List< OMNodeDetails > getPeerNodeDetails() { - return peerNodeDetails; - } - - - /** - * Inspects and loads OM node configurations. - * - * If {@link OMConfigKeys#OZONE_OM_SERVICE_IDS_KEY} is configured with - * multiple ids and/or if {@link OMConfigKeys#OZONE_OM_NODE_ID_KEY} is not - * specifically configured, this method determines the omServiceId - * and omNodeId by matching the node's address with the configured - * addresses. When a match is found, it sets the omServiceId and omNodeId from - * the corresponding configuration key. This method also finds the OM peer - * nodes belonging to the same OM service.
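A minimal sketch of the HA configuration that loadOMHAConfig (below) resolves, assuming the OMConfigKeys constants and OmUtils.addKeySuffixes helper referenced in this class; the service id, node ids and host names are hypothetical:

    static OMHANodeDetails haConfigExample() {
      OzoneConfiguration conf = new OzoneConfiguration();
      conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "omservice");
      conf.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_NODES_KEY,
          "omservice"), "om1,om2,om3");
      conf.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
          "omservice", "om1"), "om1.example.com:9862");
      conf.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
          "omservice", "om2"), "om2.example.com:9862");
      conf.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
          "omservice", "om3"), "om3.example.com:9862");
      // The node whose RPC address resolves to a local interface becomes the
      // local node; the remaining nodes are returned as peers.
      return OMHANodeDetails.loadOMHAConfig(conf);
    }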
- * - * @param conf - */ - public static OMHANodeDetails loadOMHAConfig(OzoneConfiguration conf) { - InetSocketAddress localRpcAddress = null; - String localOMServiceId = null; - String localOMNodeId = null; - int localRatisPort = 0; - Collection omServiceIds = conf.getTrimmedStringCollection( - OZONE_OM_SERVICE_IDS_KEY); - - String knownOMNodeId = conf.get(OZONE_OM_NODE_ID_KEY); - int found = 0; - boolean isOMAddressSet = false; - - for (String serviceId : omServiceIds) { - Collection omNodeIds = OmUtils.getOMNodeIds(conf, serviceId); - - if (omNodeIds.size() == 0) { - String msg = "Configuration does not have any value set for " + - OZONE_OM_NODES_KEY + " for service ID " + serviceId + ". List of " + - "OM Node ID's should be specified for the service ID"; - throw new OzoneIllegalArgumentException(msg); - } - - List peerNodesList = new ArrayList<>(); - boolean isPeer; - for (String nodeId : omNodeIds) { - if (knownOMNodeId != null && !knownOMNodeId.equals(nodeId)) { - isPeer = true; - } else { - isPeer = false; - } - String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, - serviceId, nodeId); - String rpcAddrStr = OmUtils.getOmRpcAddress(conf, rpcAddrKey); - if (rpcAddrStr == null || rpcAddrStr.isEmpty()) { - String msg = "Configuration does not have any value set for " + - rpcAddrKey + "." + "OM Rpc Address should be set for all node " + - "IDs for a service ID."; - throw new OzoneIllegalArgumentException(msg); - } - - // If OM address is set for any node id, we will not fallback to the - // default - isOMAddressSet = true; - - String ratisPortKey = OmUtils.addKeySuffixes(OZONE_OM_RATIS_PORT_KEY, - serviceId, nodeId); - int ratisPort = conf.getInt(ratisPortKey, OZONE_OM_RATIS_PORT_DEFAULT); - - InetSocketAddress addr = null; - try { - addr = NetUtils.createSocketAddr(rpcAddrStr); - } catch (Exception e) { - LOG.warn("Exception in creating socket address " + addr, e); - continue; - } - if (!addr.isUnresolved()) { - if (!isPeer && OmUtils.isAddressLocal(addr)) { - localRpcAddress = addr; - localOMServiceId = serviceId; - localOMNodeId = nodeId; - localRatisPort = ratisPort; - found++; - } else { - // This OMNode belongs to same OM service as the current OMNode. - // Add it to peerNodes list. - // This OMNode belongs to same OM service as the current OMNode. - // Add it to peerNodes list. - peerNodesList.add(getHAOMNodeDetails(conf, serviceId, - nodeId, addr, ratisPort)); - } - } - } - if (found == 1) { - LOG.debug("Found one matching OM address with service ID: {} and node" + - " ID: {}", localOMServiceId, localOMNodeId); - - LOG.info("Found matching OM address with OMServiceId: {}, " + - "OMNodeId: {}, RPC Address: {} and Ratis port: {}", - localOMServiceId, localOMNodeId, - NetUtils.getHostPortString(localRpcAddress), localRatisPort); - - - setOMNodeSpecificConfigs(conf, localOMServiceId, localOMNodeId); - return new OMHANodeDetails(getHAOMNodeDetails(conf, localOMServiceId, - localOMNodeId, localRpcAddress, localRatisPort), peerNodesList); - - } else if (found > 1) { - String msg = "Configuration has multiple " + OZONE_OM_ADDRESS_KEY + - " addresses that match local node's address. Please configure the" + - " system with " + OZONE_OM_SERVICE_IDS_KEY + " and " + - OZONE_OM_ADDRESS_KEY; - throw new OzoneIllegalArgumentException(msg); - } - } - - if (!isOMAddressSet) { - // No OM address is set. 
Fallback to default - InetSocketAddress omAddress = OmUtils.getOmAddress(conf); - int ratisPort = conf.getInt(OZONE_OM_RATIS_PORT_KEY, - OZONE_OM_RATIS_PORT_DEFAULT); - - LOG.info("Configuration either no {} set. Falling back to the default " + - "OM address {}", OZONE_OM_ADDRESS_KEY, omAddress); - - return new OMHANodeDetails(getOMNodeDetails(conf, null, - null, omAddress, ratisPort), new ArrayList<>()); - - } else { - String msg = "Configuration has no " + OZONE_OM_ADDRESS_KEY + " " + - "address that matches local node's address. Please configure the " + - "system with " + OZONE_OM_ADDRESS_KEY; - LOG.info(msg); - throw new OzoneIllegalArgumentException(msg); - } - } - - /** - * Create Local OM Node Details. - * @param serviceId - Service ID this OM belongs to, - * @param nodeId - Node ID of this OM. - * @param rpcAddress - Rpc Address of the OM. - * @param ratisPort - Ratis port of the OM. - * @return OMNodeDetails - */ - public static OMNodeDetails getOMNodeDetails(OzoneConfiguration conf, - String serviceId, String nodeId, InetSocketAddress rpcAddress, - int ratisPort) { - - if (serviceId == null) { - // If no serviceId is set, take the default serviceID om-service - serviceId = OzoneConsts.OM_SERVICE_ID_DEFAULT; - LOG.info("OM Service ID is not set. Setting it to the default ID: {}", - serviceId); - } - - - // We need to pass null for serviceID and nodeID as this is set for - // non-HA cluster. This means one node OM cluster. - String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf, - null, null, rpcAddress.getHostName()); - String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf, - null, null, rpcAddress.getHostName()); - - return new OMNodeDetails.Builder() - .setOMServiceId(serviceId) - .setOMNodeId(nodeId) - .setRpcAddress(rpcAddress) - .setRatisPort(ratisPort) - .setHttpAddress(httpAddr) - .setHttpsAddress(httpsAddr) - .build(); - - } - - - /** - * Create Local OM Node Details. - * @param serviceId - Service ID this OM belongs to, - * @param nodeId - Node ID of this OM. - * @param rpcAddress - Rpc Address of the OM. - * @param ratisPort - Ratis port of the OM. - * @return OMNodeDetails - */ - public static OMNodeDetails getHAOMNodeDetails(OzoneConfiguration conf, - String serviceId, String nodeId, InetSocketAddress rpcAddress, - int ratisPort) { - Preconditions.checkNotNull(serviceId); - Preconditions.checkNotNull(nodeId); - - String httpAddr = OmUtils.getHttpAddressForOMPeerNode(conf, - serviceId, nodeId, rpcAddress.getHostName()); - String httpsAddr = OmUtils.getHttpsAddressForOMPeerNode(conf, - serviceId, nodeId, rpcAddress.getHostName()); - - return new OMNodeDetails.Builder() - .setOMServiceId(serviceId) - .setOMNodeId(nodeId) - .setRpcAddress(rpcAddress) - .setRatisPort(ratisPort) - .setHttpAddress(httpAddr) - .setHttpsAddress(httpsAddr) - .build(); - - } - - - /** - * Check if any of the following configuration keys have been set using OM - * Node ID suffixed to the key. If yes, then set the base key with the - * configured valued. - * 1. {@link OMConfigKeys#OZONE_OM_HTTP_ADDRESS_KEY} - * 2. {@link OMConfigKeys#OZONE_OM_HTTPS_ADDRESS_KEY} - * 3. {@link OMConfigKeys#OZONE_OM_HTTP_BIND_HOST_KEY} - * 4. {@link OMConfigKeys#OZONE_OM_HTTPS_BIND_HOST_KEY}\ - * 5. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE} - * 6. {@link OMConfigKeys#OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY} - * 7. {@link OMConfigKeys#OZONE_OM_KERBEROS_KEYTAB_FILE_KEY} - * 8. {@link OMConfigKeys#OZONE_OM_KERBEROS_PRINCIPAL_KEY} - * 9. {@link OMConfigKeys#OZONE_OM_DB_DIRS} - * 10. 
{@link OMConfigKeys#OZONE_OM_ADDRESS_KEY} - */ - private static void setOMNodeSpecificConfigs( - OzoneConfiguration ozoneConfiguration, String omServiceId, - String omNodeId) { - - for (String confKey : genericConfigKeys) { - String confValue = OmUtils.getConfSuffixedWithOMNodeId( - ozoneConfiguration, confKey, omServiceId, omNodeId); - if (confValue != null) { - LOG.info("Setting configuration key {} with value of key {}: {}", - confKey, OmUtils.addKeySuffixes(confKey, omNodeId), confValue); - ozoneConfiguration.set(confKey, confValue); - } - } - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java deleted file mode 100644 index 7d69b933e82..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/OMNodeDetails.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ha; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.net.NetUtils; - -import java.net.InetAddress; -import java.net.InetSocketAddress; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; - -/** - * This class stores OM node details. - */ -public final class OMNodeDetails { - private String omServiceId; - private String omNodeId; - private InetSocketAddress rpcAddress; - private int rpcPort; - private int ratisPort; - private String httpAddress; - private String httpsAddress; - - /** - * Constructs OMNodeDetails object. - */ - private OMNodeDetails(String serviceId, String nodeId, - InetSocketAddress rpcAddr, int rpcPort, int ratisPort, - String httpAddress, String httpsAddress) { - this.omServiceId = serviceId; - this.omNodeId = nodeId; - this.rpcAddress = rpcAddr; - this.rpcPort = rpcPort; - this.ratisPort = ratisPort; - this.httpAddress = httpAddress; - this.httpsAddress = httpsAddress; - } - - @Override - public String toString() { - return "OMNodeDetails[" - + "omServiceId=" + omServiceId + - ", omNodeId=" + omNodeId + - ", rpcAddress=" + rpcAddress + - ", rpcPort=" + rpcPort + - ", ratisPort=" + ratisPort + - ", httpAddress=" + httpAddress + - ", httpsAddress=" + httpsAddress + - "]"; - } - - /** - * Builder class for OMNodeDetails. 
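A minimal sketch of building an OMNodeDetails instance with the builder declared below (illustrative only; the service id, node id, host name and ports are hypothetical):

    static OMNodeDetails nodeDetailsExample() {
      InetSocketAddress rpcAddr =
          NetUtils.createSocketAddr("om1.example.com:9862");
      return new OMNodeDetails.Builder()
          .setOMServiceId("omservice")
          .setOMNodeId("om1")
          .setRpcAddress(rpcAddr)   // also records the RPC port from the address
          .setRatisPort(9872)
          .setHttpAddress("om1.example.com:9874")
          .setHttpsAddress("om1.example.com:9875")
          .build();
    }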
- */ - public static class Builder { - private String omServiceId; - private String omNodeId; - private InetSocketAddress rpcAddress; - private int rpcPort; - private int ratisPort; - private String httpAddr; - private String httpsAddr; - - public Builder setRpcAddress(InetSocketAddress rpcAddr) { - this.rpcAddress = rpcAddr; - this.rpcPort = rpcAddress.getPort(); - return this; - } - - public Builder setRatisPort(int port) { - this.ratisPort = port; - return this; - } - - public Builder setOMServiceId(String serviceId) { - this.omServiceId = serviceId; - return this; - } - - public Builder setOMNodeId(String nodeId) { - this.omNodeId = nodeId; - return this; - } - - public Builder setHttpAddress(String httpAddress) { - this.httpAddr = httpAddress; - return this; - } - - public Builder setHttpsAddress(String httpsAddress) { - this.httpsAddr = httpsAddress; - return this; - } - - public OMNodeDetails build() { - return new OMNodeDetails(omServiceId, omNodeId, rpcAddress, rpcPort, - ratisPort, httpAddr, httpsAddr); - } - } - - public String getOMServiceId() { - return omServiceId; - } - - public String getOMNodeId() { - return omNodeId; - } - - public InetSocketAddress getRpcAddress() { - return rpcAddress; - } - - public InetAddress getAddress() { - return rpcAddress.getAddress(); - } - - public int getRatisPort() { - return ratisPort; - } - - public int getRpcPort() { - return rpcPort; - } - - public String getRpcAddressString() { - return NetUtils.getHostPortString(rpcAddress); - } - - public String getOMDBCheckpointEnpointUrl(HttpConfig.Policy httpPolicy) { - if (httpPolicy.isHttpEnabled()) { - if (StringUtils.isNotEmpty(httpAddress)) { - return "http://" + httpAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT - + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true"; - } - } else { - if (StringUtils.isNotEmpty(httpsAddress)) { - return "https://" + httpsAddress + OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT - + "?" + OM_RATIS_SNAPSHOT_BEFORE_DB_CHECKPOINT + "=true"; - } - } - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java deleted file mode 100644 index 3c40c88b954..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ha/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ha; - -/** - * This package contains classes related to OM HA. 
- */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java deleted file mode 100644 index 7904d5da083..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; -/* - This package contains the Ozone Manager classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java deleted file mode 100644 index 520c1171f83..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OMRatisSnapshotInfo.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.storage.FileInfo; -import org.apache.ratis.statemachine.SnapshotInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.Yaml; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.util.List; - -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; - -/** - * This class captures the snapshotIndex and term of the latest snapshot in - * the OM. - * Ratis server loads the snapshotInfo during startup and updates the - * lastApplied index to this snapshotIndex. OM SnapshotInfo does not contain - * any files. It is used only to store/ update the last applied index and term. - */ -public class OMRatisSnapshotInfo implements SnapshotInfo { - - static final Logger LOG = LoggerFactory.getLogger(OMRatisSnapshotInfo.class); - - private volatile long term = 0; - private volatile long snapshotIndex = -1; - - private final File ratisSnapshotFile; - - public OMRatisSnapshotInfo(File ratisDir) throws IOException { - ratisSnapshotFile = new File(ratisDir, OM_RATIS_SNAPSHOT_INDEX); - loadRatisSnapshotIndex(); - } - - public void updateTerm(long newTerm) { - term = newTerm; - } - - private void updateSnapshotIndex(long newSnapshotIndex) { - snapshotIndex = newSnapshotIndex; - } - - private void updateTermIndex(long newTerm, long newIndex) { - this.term = newTerm; - this.snapshotIndex = newIndex; - } - - /** - * Load the snapshot index and term from the snapshot file on disk, - * if it exists. - * @throws IOException - */ - private void loadRatisSnapshotIndex() throws IOException { - if (ratisSnapshotFile.exists()) { - RatisSnapshotYaml ratisSnapshotYaml = readRatisSnapshotYaml(); - updateTermIndex(ratisSnapshotYaml.term, ratisSnapshotYaml.snapshotIndex); - } - } - - /** - * Read and parse the snapshot yaml file. - */ - private RatisSnapshotYaml readRatisSnapshotYaml() throws IOException { - try (FileInputStream inputFileStream = new FileInputStream( - ratisSnapshotFile)) { - Yaml yaml = new Yaml(); - try { - return yaml.loadAs(inputFileStream, RatisSnapshotYaml.class); - } catch (Exception e) { - throw new IOException("Unable to parse RatisSnapshot yaml file.", e); - } - } - } - - /** - * Update and persist the snapshot index and term to disk. - * @param index new snapshot index to be persisted to disk. - * @throws IOException - */ - public void saveRatisSnapshotToDisk(long index) throws IOException { - updateSnapshotIndex(index); - writeRatisSnapshotYaml(); - LOG.info("Saved Ratis Snapshot on the OM with snapshotIndex {}", index); - } - - /** - * Write snapshot details to disk in yaml format. 
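A minimal sketch of the lifecycle this class manages (illustrative only; the Ratis directory and the term/index values are hypothetical):

    static void snapshotInfoExample(File ratisDir) throws IOException {
      OMRatisSnapshotInfo snapshotInfo = new OMRatisSnapshotInfo(ratisDir);
      snapshotInfo.updateTerm(3);               // typically driven by Ratis role changes
      snapshotInfo.saveRatisSnapshotToDisk(42); // persists term=3, snapshotIndex=42 as YAML
    }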
- */ - private void writeRatisSnapshotYaml() throws IOException { - DumperOptions options = new DumperOptions(); - options.setPrettyFlow(true); - options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW); - Yaml yaml = new Yaml(options); - - RatisSnapshotYaml ratisSnapshotYaml = new RatisSnapshotYaml(term, - snapshotIndex); - - try (Writer writer = new OutputStreamWriter( - new FileOutputStream(ratisSnapshotFile), "UTF-8")) { - yaml.dump(ratisSnapshotYaml, writer); - } - } - - @Override - public TermIndex getTermIndex() { - return TermIndex.newTermIndex(term, snapshotIndex); - } - - @Override - public long getTerm() { - return term; - } - - @Override - public long getIndex() { - return snapshotIndex; - } - - @Override - public List getFiles() { - return null; - } - - /** - * Ratis Snapshot details to be written to the yaml file. - */ - public static class RatisSnapshotYaml { - private long term; - private long snapshotIndex; - - public RatisSnapshotYaml() { - // Needed for snake-yaml introspection. - } - - RatisSnapshotYaml(long term, long snapshotIndex) { - this.term = term; - this.snapshotIndex = snapshotIndex; - } - - public void setTerm(long term) { - this.term = term; - } - - public long getTerm() { - return this.term; - } - - public void setSnapshotIndex(long index) { - this.snapshotIndex = index; - } - - public long getSnapshotIndex() { - return this.snapshotIndex; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java deleted file mode 100644 index e5cadffc400..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ /dev/null @@ -1,350 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import java.io.IOException; -import java.util.Queue; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.ratis.helpers.DoubleBufferEntry; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.ratis.util.ExitUtils; - -/** - * This class implements a double buffer of OMClientResponse entries. It - * maintains two buffers: currentBuffer and readyBuffer. Incoming OM requests - * are always added to currentBuffer. A background flush thread checks whether - * currentBuffer has any entries; if so, it swaps the two buffers, builds a - * batch from readyBuffer and commits it to the DB. Adding an OM request to - * the double buffer and swapping the buffers are synchronized methods. - */ -public class OzoneManagerDoubleBuffer { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerDoubleBuffer.class); - - // This is an unbounded queue; if the sync thread is slow, the buffered - // entries can consume a large amount of memory. - // TODO: We could avoid this by using a bounded queue and waiting for the - // sync to complete when the queue is full. But that may also block RPC - // handlers, since entries are cleared only after the sync. A better - // approach is still needed. - private Queue> currentBuffer; - private Queue> readyBuffer; - - - // Future objects holding the futures returned by the add method. - private volatile Queue> currentFutureQueue; - - // Once we have an entry in current buffer, we swap the currentFutureQueue - // with readyFutureQueue. After flush is completed in the flushTransactions - // daemon thread, we complete the futures in readyFutureQueue and clear them.
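A minimal sketch of the handshake between a request handler and the flush thread described above (illustrative only; the response object and transaction index are hypothetical, and the returned future is non-null only when Ratis is disabled):

    static void doubleBufferExample(OzoneManagerDoubleBuffer doubleBuffer,
        OMClientResponse response) throws Exception {
      // Handler side: queue the response; in the non-Ratis case the returned
      // future completes once the flush thread has committed the batch to the DB.
      CompletableFuture<Void> flushed = doubleBuffer.add(response, 100L);
      if (flushed != null) {
        flushed.get();
      }
      doubleBuffer.stop(); // interrupts and joins the flush daemon on shutdown
    }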
- private volatile Queue> readyFutureQueue; - - private Daemon daemon; - private final OMMetadataManager omMetadataManager; - private final AtomicLong flushedTransactionCount = new AtomicLong(0); - private final AtomicLong flushIterations = new AtomicLong(0); - private final AtomicBoolean isRunning = new AtomicBoolean(false); - private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics; - private long maxFlushedTransactionsInOneIteration; - - private final OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot; - - private final boolean isRatisEnabled; - - public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, - OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot) { - this(omMetadataManager, ozoneManagerRatisSnapShot, true); - } - - public OzoneManagerDoubleBuffer(OMMetadataManager omMetadataManager, - OzoneManagerRatisSnapshot ozoneManagerRatisSnapShot, - boolean isRatisEnabled) { - this.currentBuffer = new ConcurrentLinkedQueue<>(); - this.readyBuffer = new ConcurrentLinkedQueue<>(); - - this.isRatisEnabled = isRatisEnabled; - - if (!isRatisEnabled) { - this.currentFutureQueue = new ConcurrentLinkedQueue<>(); - this.readyFutureQueue = new ConcurrentLinkedQueue<>(); - } else { - this.currentFutureQueue = null; - this.readyFutureQueue = null; - } - - this.omMetadataManager = omMetadataManager; - this.ozoneManagerRatisSnapShot = ozoneManagerRatisSnapShot; - this.ozoneManagerDoubleBufferMetrics = - OzoneManagerDoubleBufferMetrics.create(); - - isRunning.set(true); - // Daemon thread which runs in back ground and flushes transactions to DB. - daemon = new Daemon(this::flushTransactions); - daemon.setName("OMDoubleBufferFlushThread"); - daemon.start(); - - } - - - - - /** - * Runs in a background thread and batches the transaction in currentBuffer - * and commit to DB. - */ - private void flushTransactions() { - while (isRunning.get()) { - try { - if (canFlush()) { - setReadyBuffer(); - final BatchOperation batchOperation = omMetadataManager.getStore() - .initBatchOperation(); - - readyBuffer.iterator().forEachRemaining((entry) -> { - try { - entry.getResponse().addToDBBatch(omMetadataManager, - batchOperation); - } catch (IOException ex) { - // During Adding to RocksDB batch entry got an exception. - // We should terminate the OM. - terminate(ex); - } - }); - - omMetadataManager.getStore().commitBatchOperation(batchOperation); - int flushedTransactionsSize = readyBuffer.size(); - flushedTransactionCount.addAndGet(flushedTransactionsSize); - flushIterations.incrementAndGet(); - - if (LOG.isDebugEnabled()) { - LOG.debug("Sync Iteration {} flushed transactions in this " + - "iteration{}", flushIterations.get(), - flushedTransactionsSize); - } - - long lastRatisTransactionIndex = - readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex) - .max(Long::compareTo).get(); - - readyBuffer.clear(); - - // cleanup cache. - cleanupCache(lastRatisTransactionIndex); - - // TODO: Need to revisit this logic, once we have multiple - // executors for volume/bucket request handling. As for now - // transactions are serialized this should be fine. - // update the last updated index in OzoneManagerStateMachine. - ozoneManagerRatisSnapShot.updateLastAppliedIndex( - lastRatisTransactionIndex); - - // set metrics. - updateMetrics(flushedTransactionsSize); - - if (!isRatisEnabled) { - // Once all entries are flushed, we can complete their future. 
- readyFutureQueue.iterator().forEachRemaining((entry) -> { - entry.complete(null); - }); - - readyFutureQueue.clear(); - } - } - } catch (InterruptedException ex) { - Thread.currentThread().interrupt(); - if (isRunning.get()) { - final String message = "OMDoubleBuffer flush thread " + - Thread.currentThread().getName() + " encountered Interrupted " + - "exception while running"; - ExitUtils.terminate(1, message, ex, LOG); - } else { - LOG.info("OMDoubleBuffer flush thread " + - Thread.currentThread().getName() + " is interrupted and will " + - "exit. {}", Thread.currentThread().getName()); - } - } catch (IOException ex) { - terminate(ex); - } catch (Throwable t) { - final String s = "OMDoubleBuffer flush thread" + - Thread.currentThread().getName() + "encountered Throwable error"; - ExitUtils.terminate(2, s, t, LOG); - } - } - } - - private void cleanupCache(long lastRatisTransactionIndex) { - // As now only volume and bucket transactions are handled only called - // cleanupCache on bucketTable. - // TODO: After supporting all write operations we need to call - // cleanupCache on the tables only when buffer has entries for that table. - omMetadataManager.getBucketTable().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getVolumeTable().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getUserTable().cleanupCache(lastRatisTransactionIndex); - - //TODO: Optimization we can do here is for key transactions we can only - // cleanup cache when it is key commit transaction. In this way all - // intermediate transactions for a key will be read from in-memory cache. - omMetadataManager.getOpenKeyTable().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getKeyTable().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getDeletedTable().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getS3Table().cleanupCache(lastRatisTransactionIndex); - omMetadataManager.getMultipartInfoTable().cleanupCache( - lastRatisTransactionIndex); - - } - - /** - * Update OzoneManagerDoubleBuffer metrics values. - * @param flushedTransactionsSize - */ - private void updateMetrics( - long flushedTransactionsSize) { - ozoneManagerDoubleBufferMetrics.incrTotalNumOfFlushOperations(); - ozoneManagerDoubleBufferMetrics.incrTotalSizeOfFlushedTransactions( - flushedTransactionsSize); - if (maxFlushedTransactionsInOneIteration < flushedTransactionsSize) { - maxFlushedTransactionsInOneIteration = flushedTransactionsSize; - ozoneManagerDoubleBufferMetrics - .setMaxNumberOfTransactionsFlushedInOneIteration( - flushedTransactionsSize); - } - } - - /** - * Stop OM DoubleBuffer flush thread. - */ - public void stop() { - if (isRunning.compareAndSet(true, false)) { - LOG.info("Stopping OMDoubleBuffer flush thread"); - daemon.interrupt(); - try { - // Wait for daemon thread to exit - daemon.join(); - } catch (InterruptedException e) { - LOG.error("Interrupted while waiting for daemon to exit."); - } - - // stop metrics. - ozoneManagerDoubleBufferMetrics.unRegister(); - } else { - LOG.info("OMDoubleBuffer flush thread is not running."); - } - - } - - private void terminate(IOException ex) { - String message = "During flush to DB encountered error in " + - "OMDoubleBuffer flush thread " + Thread.currentThread().getName(); - ExitUtils.terminate(1, message, ex, LOG); - } - - /** - * Returns the flushed transaction count to OM DB. 
- * @return flushedTransactionCount - */ - public long getFlushedTransactionCount() { - return flushedTransactionCount.get(); - } - - /** - * Returns total number of flush iterations run by sync thread. - * @return flushIterations - */ - public long getFlushIterations() { - return flushIterations.get(); - } - - /** - * Add OmResponseBufferEntry to buffer. - * @param response - * @param transactionIndex - */ - public synchronized CompletableFuture add(OMClientResponse response, - long transactionIndex) { - currentBuffer.add(new DoubleBufferEntry<>(transactionIndex, response)); - notify(); - - if (!isRatisEnabled) { - CompletableFuture future = new CompletableFuture<>(); - currentFutureQueue.add(future); - return future; - } else { - // In Non-HA case we don't need future to be returned, and this return - // status is not used. - return null; - } - } - - /** - * Check can we flush transactions or not. This method wait's until - * currentBuffer size is greater than zero, once currentBuffer size is - * greater than zero it gets notify signal, and it returns true - * indicating that we are ready to flush. - * - * @return boolean - */ - private synchronized boolean canFlush() throws InterruptedException { - // When transactions are added to buffer it notifies, then we check if - // currentBuffer size once and return from this method. - while (currentBuffer.size() == 0) { - wait(Long.MAX_VALUE); - } - return true; - } - - /** - * Prepares the readyBuffer which is used by sync thread to flush - * transactions to OM DB. This method swaps the currentBuffer and readyBuffer. - */ - private synchronized void setReadyBuffer() { - Queue> temp = currentBuffer; - currentBuffer = readyBuffer; - readyBuffer = temp; - - if (!isRatisEnabled) { - // Swap future queue. - Queue> tempFuture = currentFutureQueue; - currentFutureQueue = readyFutureQueue; - readyFutureQueue = tempFuture; - } - } - - @VisibleForTesting - public OzoneManagerDoubleBufferMetrics getOzoneManagerDoubleBufferMetrics() { - return ozoneManagerDoubleBufferMetrics; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java deleted file mode 100644 index 6f97f56241b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisClient.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CompletionException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.ratis.client.RaftClient; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftException; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.retry.RetryPolicies; -import org.apache.ratis.retry.RetryPolicy; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ServiceException; - -/** - * OM Ratis client to interact with OM Ratis server endpoint. 
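A minimal sketch of how the OM submitted a request through this client (illustrative only; the node id is hypothetical, and the RaftGroup, OMRequest and Configuration are assumed to come from the surrounding OM setup):

    static OMResponse ratisClientExample(RaftGroup raftGroup, OMRequest omRequest,
        Configuration conf) throws ServiceException {
      OzoneManagerRatisClient client = OzoneManagerRatisClient
          .newOzoneManagerRatisClient("om1", raftGroup, conf);
      client.connect(); // builds the underlying RaftClient
      try {
        // Dispatched via sendReadOnlyAsync or sendAsync depending on
        // OmUtils.isReadOnly(request).
        return client.sendCommand(omRequest);
      } finally {
        client.close();
      }
    }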
- */ -public final class OzoneManagerRatisClient implements Closeable { - static final Logger LOG = LoggerFactory.getLogger( - OzoneManagerRatisClient.class); - - private final RaftGroup raftGroup; - private final String omNodeID; - private final RpcType rpcType; - private RaftClient raftClient; - private final RetryPolicy retryPolicy; - private final Configuration conf; - - private OzoneManagerRatisClient(String omNodeId, RaftGroup raftGroup, - RpcType rpcType, RetryPolicy retryPolicy, - Configuration config) { - this.raftGroup = raftGroup; - this.omNodeID = omNodeId; - this.rpcType = rpcType; - this.retryPolicy = retryPolicy; - this.conf = config; - } - - public static OzoneManagerRatisClient newOzoneManagerRatisClient( - String omNodeId, RaftGroup raftGroup, Configuration conf) { - final String rpcType = conf.get( - OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY, - OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT); - - final int maxRetryCount = conf.getInt( - OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, - OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT); - final long retryInterval = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, - OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT - .toIntExact(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS); - final TimeDuration sleepDuration = TimeDuration.valueOf( - retryInterval, TimeUnit.MILLISECONDS); - final RetryPolicy retryPolicy = RetryPolicies - .retryUpToMaximumCountWithFixedSleep(maxRetryCount, sleepDuration); - - return new OzoneManagerRatisClient(omNodeId, raftGroup, - SupportedRpcType.valueOfIgnoreCase(rpcType), retryPolicy, conf); - } - - public void connect() { - if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to OM Ratis Server GroupId:{} OM:{}", - raftGroup.getGroupId().getUuid().toString(), omNodeID); - } - - // TODO : XceiverClient ratis should pass the config value of - // maxOutstandingRequests so as to set the upper bound on max no of async - // requests to be handled by raft client - - raftClient = OMRatisHelper.newRaftClient(rpcType, omNodeID, raftGroup, - retryPolicy, conf); - } - - @Override - public void close() { - if (raftClient != null) { - try { - raftClient.close(); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - } - - /** - * Sends a given request to server and gets the reply back. 
- * @param request Request - * @return Response to the command - */ - public OMResponse sendCommand(OMRequest request) throws ServiceException { - try { - CompletableFuture reply = sendCommandAsync(request); - return reply.get(); - } catch (ExecutionException | InterruptedException e) { - if (e.getCause() instanceof StateMachineException) { - OMResponse.Builder omResponse = OMResponse.newBuilder(); - omResponse.setCmdType(request.getCmdType()); - omResponse.setSuccess(false); - omResponse.setMessage(e.getCause().getMessage()); - omResponse.setStatus(parseErrorStatus(e.getCause().getMessage())); - return omResponse.build(); - } - throw new ServiceException(e); - } - } - - private OzoneManagerProtocolProtos.Status parseErrorStatus(String message) { - if (message.contains(STATUS_CODE)) { - String errorCode = message.substring(message.indexOf(STATUS_CODE) + - STATUS_CODE.length()); - LOG.debug("Parsing error message for error code {}", errorCode); - return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim()); - } else { - return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; - } - - } - - /** - * Sends a given command to server gets a waitable future back. - * - * @param request Request - * @return Response to the command - */ - private CompletableFuture sendCommandAsync(OMRequest request) { - CompletableFuture raftClientReply = - sendRequestAsync(request); - - return raftClientReply.whenComplete((reply, e) -> { - if (LOG.isDebugEnabled()) { - LOG.debug("received reply {} for request: cmdType={} traceID={} " + - "exception: {}", reply, request.getCmdType(), - request.getTraceID(), e); - } - }).thenApply(reply -> { - try { - Preconditions.checkNotNull(reply); - if (!reply.isSuccess()) { - RaftException exception = reply.getException(); - Preconditions.checkNotNull(exception, "Raft reply failure " + - "but no exception propagated."); - throw new CompletionException(exception); - } - return OMRatisHelper.getOMResponseFromRaftClientReply(reply); - - } catch (InvalidProtocolBufferException e) { - throw new CompletionException(e); - } - }); - } - - /** - * Submits {@link RaftClient#sendReadOnlyAsync(Message)} request to Ratis - * server if the request is readOnly. Otherwise, submits - * {@link RaftClient#sendAsync(Message)} request. - * @param request OMRequest - * @return RaftClient response - */ - private CompletableFuture sendRequestAsync( - OMRequest request) { - boolean isReadOnlyRequest = OmUtils.isReadOnly(request); - ByteString byteString = OMRatisHelper.convertRequestToByteString(request); - if (LOG.isDebugEnabled()) { - LOG.debug("sendOMRequestAsync {} {}", isReadOnlyRequest, request); - } - return isReadOnlyRequest ? raftClient.sendReadOnlyAsync(() -> byteString) : - raftClient.sendAsync(() -> byteString); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java deleted file mode 100644 index 7cab9d2738a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
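Both OzoneManagerRatisClient above and the Ratis server that follows recover the OM result code by scanning the exception message for the OMException.STATUS_CODE marker and feeding the trailing token to OzoneManagerProtocolProtos.Status.valueOf, falling back to INTERNAL_ERROR when the marker is missing. A minimal stand-alone sketch of that parsing step, assuming a hypothetical marker value (the real constant is defined in OMException, outside this hunk):

    // Hedged sketch of the STATUS_CODE parsing used by parseErrorStatus above.
    public final class StatusCodeParser {
      // Hypothetical marker value; the real OMException.STATUS_CODE is defined elsewhere.
      private static final String STATUS_CODE = "STATUS_CODE=";

      // Returns the trimmed token after the marker, or null when no marker is present
      // (callers map the null case to Status.INTERNAL_ERROR).
      public static String parseErrorCode(String message) {
        if (message == null || !message.contains(STATUS_CODE)) {
          return null;
        }
        return message.substring(
            message.indexOf(STATUS_CODE) + STATUS_CODE.length()).trim();
      }

      public static void main(String[] args) {
        System.out.println(parseErrorCode("Key not found STATUS_CODE=KEY_NOT_FOUND"));
        System.out.println(parseErrorCode("unrelated failure")); // null
      }
    }

The substring approach is only safe because the status token is appended at the very end of the message; anything after the marker is treated as the enum name.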
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import com.google.common.annotations.VisibleForTesting; -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.ha.OMNodeDetails; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.ratis.RaftConfigKeys; -import org.apache.ratis.client.RaftClientConfigKeys; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.grpc.GrpcConfigKeys; -import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.RaftPeerRole; -import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.GroupInfoReply; -import org.apache.ratis.protocol.GroupInfoRequest; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.NotLeaderException; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftClientRequest; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.StateMachineException; -import org.apache.ratis.rpc.RpcType; -import org.apache.ratis.rpc.SupportedRpcType; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.RaftServerConfigKeys; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.LifeCycle; -import org.apache.ratis.util.SizeInBytes; -import org.apache.ratis.util.TimeDuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.STATUS_CODE; - -/** - * Creates a Ratis server endpoint for OM. 
- */ -public final class OzoneManagerRatisServer { - private static final Logger LOG = LoggerFactory - .getLogger(OzoneManagerRatisServer.class); - - private final int port; - private final InetSocketAddress omRatisAddress; - private final RaftServer server; - private final RaftGroupId raftGroupId; - private final RaftGroup raftGroup; - private final RaftPeerId raftPeerId; - - private final OzoneManager ozoneManager; - private final OzoneManagerStateMachine omStateMachine; - private final ClientId clientId = ClientId.randomId(); - - private final ScheduledExecutorService scheduledRoleChecker; - private long roleCheckInitialDelayMs = 1000; // 1 second default - private long roleCheckIntervalMs; - private ReentrantReadWriteLock roleCheckLock = new ReentrantReadWriteLock(); - private Optional cachedPeerRole = Optional.empty(); - private Optional cachedLeaderPeerId = Optional.empty(); - - private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); - - private static long nextCallId() { - return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; - } - - /** - * Submit request to Ratis server. - * @param omRequest - * @return OMResponse - response returned to the client. - * @throws ServiceException - */ - public OMResponse submitRequest(OMRequest omRequest) throws ServiceException { - RaftClientRequest raftClientRequest = - createWriteRaftClientRequest(omRequest); - RaftClientReply raftClientReply; - try { - raftClientReply = server.submitClientRequestAsync(raftClientRequest) - .get(); - } catch (Exception ex) { - throw new ServiceException(ex.getMessage(), ex); - } - - return processReply(omRequest, raftClientReply); - } - - /** - * Create Write RaftClient request from OMRequest. - * @param omRequest - * @return RaftClientRequest - Raft Client request which is submitted to - * ratis server. - */ - private RaftClientRequest createWriteRaftClientRequest(OMRequest omRequest) { - return new RaftClientRequest(clientId, server.getId(), raftGroupId, - nextCallId(), - Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)), - RaftClientRequest.writeRequestType(), null); - } - - /** - * Process the raftClientReply and return OMResponse. - * @param omRequest - * @param reply - * @return OMResponse - response which is returned to client. - * @throws ServiceException - */ - private OMResponse processReply(OMRequest omRequest, RaftClientReply reply) - throws ServiceException { - // NotLeader exception is thrown only when the raft server to which the - // request is submitted is not the leader. This can happen first time - // when client is submitting request to OM. - NotLeaderException notLeaderException = reply.getNotLeaderException(); - if (notLeaderException != null) { - throw new ServiceException(notLeaderException); - } - StateMachineException stateMachineException = - reply.getStateMachineException(); - if (stateMachineException != null) { - OMResponse.Builder omResponse = OMResponse.newBuilder(); - omResponse.setCmdType(omRequest.getCmdType()); - omResponse.setSuccess(false); - omResponse.setMessage(stateMachineException.getCause().getMessage()); - omResponse.setStatus(parseErrorStatus( - stateMachineException.getCause().getMessage())); - if (LOG.isDebugEnabled()) { - LOG.debug("Error while executing ratis request. 
" + - "stateMachineException: ", stateMachineException); - } - return omResponse.build(); - } - - try { - return OMRatisHelper.getOMResponseFromRaftClientReply(reply); - } catch (InvalidProtocolBufferException ex) { - if (ex.getMessage() != null) { - throw new ServiceException(ex.getMessage(), ex); - } else { - throw new ServiceException(ex); - } - } - - // TODO: Still need to handle RaftRetry failure exception and - // NotReplicated exception. - } - - /** - * Parse errorMessage received from the exception and convert to - * {@link OzoneManagerProtocolProtos.Status}. - * @param errorMessage - * @return OzoneManagerProtocolProtos.Status - */ - private OzoneManagerProtocolProtos.Status parseErrorStatus( - String errorMessage) { - if (errorMessage.contains(STATUS_CODE)) { - String errorCode = errorMessage.substring( - errorMessage.indexOf(STATUS_CODE) + STATUS_CODE.length()); - LOG.debug("Parsing error message for error code " + - errorCode); - return OzoneManagerProtocolProtos.Status.valueOf(errorCode.trim()); - } else { - return OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; - } - - } - - - /** - * Returns an OM Ratis server. - * @param conf configuration - * @param om the OM instance starting the ratis server - * @param raftGroupIdStr raft group id string - * @param localRaftPeerId raft peer id of this Ratis server - * @param addr address of the ratis server - * @param raftPeers peer nodes in the raft ring - * @throws IOException - */ - private OzoneManagerRatisServer(Configuration conf, - OzoneManager om, - String raftGroupIdStr, RaftPeerId localRaftPeerId, - InetSocketAddress addr, List raftPeers) - throws IOException { - this.ozoneManager = om; - this.omRatisAddress = addr; - this.port = addr.getPort(); - RaftProperties serverProperties = newRaftProperties(conf); - - this.raftPeerId = localRaftPeerId; - this.raftGroupId = RaftGroupId.valueOf( - getRaftGroupIdFromOmServiceId(raftGroupIdStr)); - this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); - - StringBuilder raftPeersStr = new StringBuilder(); - for (RaftPeer peer : raftPeers) { - raftPeersStr.append(", ").append(peer.getAddress()); - } - LOG.info("Instantiating OM Ratis server with GroupID: {} and " + - "Raft Peers: {}", raftGroupIdStr, raftPeersStr.toString().substring(2)); - - this.omStateMachine = getStateMachine(); - - this.server = RaftServer.newBuilder() - .setServerId(this.raftPeerId) - .setGroup(this.raftGroup) - .setProperties(serverProperties) - .setStateMachine(omStateMachine) - .build(); - - // Run a scheduler to check and update the server role on the leader - // periodically - this.scheduledRoleChecker = Executors.newSingleThreadScheduledExecutor(); - this.scheduledRoleChecker.scheduleWithFixedDelay(new Runnable() { - @Override - public void run() { - // Run this check only on the leader OM - if (cachedPeerRole.isPresent() && - cachedPeerRole.get() == RaftPeerRole.LEADER) { - updateServerRole(); - } - } - }, roleCheckInitialDelayMs, roleCheckIntervalMs, TimeUnit.MILLISECONDS); - } - - /** - * Creates an instance of OzoneManagerRatisServer. 
- */ - public static OzoneManagerRatisServer newOMRatisServer( - Configuration ozoneConf, OzoneManager omProtocol, - OMNodeDetails omNodeDetails, List peerNodes) - throws IOException { - - // RaftGroupId is the omServiceId - String omServiceId = omNodeDetails.getOMServiceId(); - - String omNodeId = omNodeDetails.getOMNodeId(); - RaftPeerId localRaftPeerId = RaftPeerId.getRaftPeerId(omNodeId); - - InetSocketAddress ratisAddr = new InetSocketAddress( - omNodeDetails.getAddress(), omNodeDetails.getRatisPort()); - - RaftPeer localRaftPeer = new RaftPeer(localRaftPeerId, ratisAddr); - - List raftPeers = new ArrayList<>(); - // Add this Ratis server to the Ratis ring - raftPeers.add(localRaftPeer); - - for (OMNodeDetails peerInfo : peerNodes) { - String peerNodeId = peerInfo.getOMNodeId(); - InetSocketAddress peerRatisAddr = new InetSocketAddress( - peerInfo.getAddress(), peerInfo.getRatisPort()); - RaftPeerId raftPeerId = RaftPeerId.valueOf(peerNodeId); - RaftPeer raftPeer = new RaftPeer(raftPeerId, peerRatisAddr); - - // Add other OM nodes belonging to the same OM service to the Ratis ring - raftPeers.add(raftPeer); - } - - return new OzoneManagerRatisServer(ozoneConf, omProtocol, omServiceId, - localRaftPeerId, ratisAddr, raftPeers); - } - - public RaftGroup getRaftGroup() { - return this.raftGroup; - } - - /** - * Initializes and returns OzoneManager StateMachine. - */ - private OzoneManagerStateMachine getStateMachine() { - return new OzoneManagerStateMachine(this); - } - - @VisibleForTesting - public OzoneManagerStateMachine getOmStateMachine() { - return omStateMachine; - } - - public OzoneManager getOzoneManager() { - return ozoneManager; - } - - /** - * Start the Ratis server. - * @throws IOException - */ - public void start() throws IOException { - LOG.info("Starting {} {} at port {}", getClass().getSimpleName(), - server.getId(), port); - server.start(); - } - - public void stop() { - try { - server.close(); - omStateMachine.stop(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - //TODO simplify it to make it shorter - @SuppressWarnings("methodlength") - private RaftProperties newRaftProperties(Configuration conf) { - final RaftProperties properties = new RaftProperties(); - - // Set RPC type - final String rpcType = conf.get( - OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_KEY, - OMConfigKeys.OZONE_OM_RATIS_RPC_TYPE_DEFAULT); - final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); - RaftConfigKeys.Rpc.setType(properties, rpc); - - // Set the ratis port number - if (rpc == SupportedRpcType.GRPC) { - GrpcConfigKeys.Server.setPort(properties, port); - } else if (rpc == SupportedRpcType.NETTY) { - NettyConfigKeys.Server.setPort(properties, port); - } - - // Set Ratis storage directory - String storageDir = OmUtils.getOMRatisDirectory(conf); - RaftServerConfigKeys.setStorageDirs(properties, - Collections.singletonList(new File(storageDir))); - - // Set RAFT segment size - final int raftSegmentSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_KEY, - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_SIZE_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.setSegmentSizeMax(properties, - SizeInBytes.valueOf(raftSegmentSize)); - - // Set RAFT segment pre-allocated size - final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OMConfigKeys.OZONE_OM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); - int logAppenderQueueNumElements = conf.getInt( - 
OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); - RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, - logAppenderQueueNumElements); - RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); - RaftServerConfigKeys.Log.setPreallocatedSize(properties, - SizeInBytes.valueOf(raftSegmentPreallocatedSize)); - RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, - false); - final int logPurgeGap = conf.getInt( - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, - OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT); - RaftServerConfigKeys.Log.setPurgeGap(properties, logPurgeGap); - - // For grpc set the maximum message size - // TODO: calculate the optimal max message size - GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); - - // Set the server request timeout - TimeUnit serverRequestTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit(); - long serverRequestTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT - .getDuration(), serverRequestTimeoutUnit); - final TimeDuration serverRequestTimeout = TimeDuration.valueOf( - serverRequestTimeoutDuration, serverRequestTimeoutUnit); - RaftServerConfigKeys.Rpc.setRequestTimeout(properties, - serverRequestTimeout); - - // Set timeout for server retry cache entry - TimeUnit retryCacheTimeoutUnit = OMConfigKeys - .OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit(); - long retryCacheTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT - .getDuration(), retryCacheTimeoutUnit); - final TimeDuration retryCacheTimeout = TimeDuration.valueOf( - retryCacheTimeoutDuration, retryCacheTimeoutUnit); - RaftServerConfigKeys.RetryCache.setExpiryTime(properties, - retryCacheTimeout); - - // Set the server min and max timeout - TimeUnit serverMinTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit(); - long serverMinTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, - OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT - .getDuration(), serverMinTimeoutUnit); - final TimeDuration serverMinTimeout = TimeDuration.valueOf( - serverMinTimeoutDuration, serverMinTimeoutUnit); - long serverMaxTimeoutDuration = - serverMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; - final TimeDuration serverMaxTimeout = TimeDuration.valueOf( - serverMaxTimeoutDuration, serverMinTimeoutUnit); - RaftServerConfigKeys.Rpc.setTimeoutMin(properties, - serverMinTimeout); - RaftServerConfigKeys.Rpc.setTimeoutMax(properties, - serverMaxTimeout); - - // Set the number of maximum cached segments - RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2); - - // Set the client request timeout - TimeUnit clientRequestTimeoutUnit = OMConfigKeys - .OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT .getUnit(); - long clientRequestTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY, - 
OMConfigKeys.OZONE_OM_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT - .getDuration(), clientRequestTimeoutUnit); - final TimeDuration clientRequestTimeout = TimeDuration.valueOf( - clientRequestTimeoutDuration, clientRequestTimeoutUnit); - RaftClientConfigKeys.Rpc.setRequestTimeout(properties, - clientRequestTimeout); - - // TODO: set max write buffer size - - // Set the ratis leader election timeout - TimeUnit leaderElectionMinTimeoutUnit = - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getUnit(); - long leaderElectionMinTimeoutduration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getDuration(), leaderElectionMinTimeoutUnit); - final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf( - leaderElectionMinTimeoutduration, leaderElectionMinTimeoutUnit); - RaftServerConfigKeys.Rpc.setTimeoutMin(properties, - leaderElectionMinTimeout); - long leaderElectionMaxTimeout = leaderElectionMinTimeout.toLong( - TimeUnit.MILLISECONDS) + 200; - RaftServerConfigKeys.Rpc.setTimeoutMax(properties, - TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS)); - - TimeUnit nodeFailureTimeoutUnit = - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getUnit(); - long nodeFailureTimeoutDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getDuration(), nodeFailureTimeoutUnit); - final TimeDuration nodeFailureTimeout = TimeDuration.valueOf( - nodeFailureTimeoutDuration, nodeFailureTimeoutUnit); - RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, - nodeFailureTimeout); - RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, - nodeFailureTimeout); - - TimeUnit roleCheckIntervalUnit = - OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT - .getUnit(); - long roleCheckIntervalDuration = conf.getTimeDuration( - OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY, - OMConfigKeys.OZONE_OM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT - .getDuration(), nodeFailureTimeoutUnit); - this.roleCheckIntervalMs = TimeDuration.valueOf( - roleCheckIntervalDuration, roleCheckIntervalUnit) - .toLong(TimeUnit.MILLISECONDS); - this.roleCheckInitialDelayMs = leaderElectionMinTimeout - .toLong(TimeUnit.MILLISECONDS); - - long snapshotAutoTriggerThreshold = conf.getLong( - OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, - OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_DEFAULT); - RaftServerConfigKeys.Snapshot.setAutoTriggerEnabled( - properties, true); - RaftServerConfigKeys.Snapshot.setAutoTriggerThreshold( - properties, snapshotAutoTriggerThreshold); - - return properties; - } - - /** - * Check the cached leader status. - * @return true if cached role is Leader, false otherwise. - */ - private boolean checkCachedPeerRoleIsLeader() { - this.roleCheckLock.readLock().lock(); - try { - if (cachedPeerRole.isPresent() && - cachedPeerRole.get() == RaftPeerRole.LEADER) { - return true; - } - return false; - } finally { - this.roleCheckLock.readLock().unlock(); - } - } - - /** - * Check if the current OM node is the leader node. - * @return true if Leader, false otherwise. - */ - public boolean isLeader() { - if (checkCachedPeerRoleIsLeader()) { - return true; - } - - // Get the server role from ratis server and update the cached values. 
- updateServerRole(); - - // After updating the server role, check and return if leader or not. - return checkCachedPeerRoleIsLeader(); - } - - /** - * Get the suggested leader peer id. - * @return RaftPeerId of the suggested leader node. - */ - public Optional getCachedLeaderPeerId() { - this.roleCheckLock.readLock().lock(); - try { - return cachedLeaderPeerId; - } finally { - this.roleCheckLock.readLock().unlock(); - } - } - - /** - * Get the gorup info (peer role and leader peer id) from Ratis server and - * update the OM server role. - */ - public void updateServerRole() { - try { - GroupInfoReply groupInfo = getGroupInfo(); - RoleInfoProto roleInfoProto = groupInfo.getRoleInfoProto(); - RaftPeerRole thisNodeRole = roleInfoProto.getRole(); - - if (thisNodeRole.equals(RaftPeerRole.LEADER)) { - setServerRole(thisNodeRole, raftPeerId); - - } else if (thisNodeRole.equals(RaftPeerRole.FOLLOWER)) { - ByteString leaderNodeId = roleInfoProto.getFollowerInfo() - .getLeaderInfo().getId().getId(); - // There may be a chance, here we get leaderNodeId as null. For - // example, in 3 node OM Ratis, if 2 OM nodes are down, there will - // be no leader. - RaftPeerId leaderPeerId = null; - if (leaderNodeId != null && !leaderNodeId.isEmpty()) { - leaderPeerId = RaftPeerId.valueOf(leaderNodeId); - } - - setServerRole(thisNodeRole, leaderPeerId); - - } else { - setServerRole(thisNodeRole, null); - - } - } catch (IOException e) { - LOG.error("Failed to retrieve RaftPeerRole. Setting cached role to " + - "{} and resetting leader info.", RaftPeerRole.UNRECOGNIZED, e); - setServerRole(null, null); - } - } - - /** - * Set the current server role and the leader peer id. - */ - private void setServerRole(RaftPeerRole currentRole, - RaftPeerId leaderPeerId) { - this.roleCheckLock.writeLock().lock(); - try { - this.cachedPeerRole = Optional.ofNullable(currentRole); - this.cachedLeaderPeerId = Optional.ofNullable(leaderPeerId); - } finally { - this.roleCheckLock.writeLock().unlock(); - } - } - - private GroupInfoReply getGroupInfo() throws IOException { - GroupInfoRequest groupInfoRequest = new GroupInfoRequest(clientId, - raftPeerId, raftGroupId, nextCallId()); - GroupInfoReply groupInfo = server.getGroupInfo(groupInfoRequest); - return groupInfo; - } - - public int getServerPort() { - return port; - } - - @VisibleForTesting - public LifeCycle.State getServerState() { - return server.getLifeCycleState(); - } - - @VisibleForTesting - public RaftPeerId getRaftPeerId() { - return this.raftPeerId; - } - - private UUID getRaftGroupIdFromOmServiceId(String omServiceId) { - return UUID.nameUUIDFromBytes(omServiceId.getBytes(StandardCharsets.UTF_8)); - } - - public long getStateMachineLastAppliedIndex() { - return omStateMachine.getLastAppliedIndex(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java deleted file mode 100644 index 518026184a9..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisSnapshot.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
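OzoneManagerRatisServer above caches its last known Raft role and leader id in two Optional fields guarded by a ReentrantReadWriteLock: isLeader() and getCachedLeaderPeerId() take only the read lock, while updateServerRole() publishes changes under the write lock. A generic sketch of that pattern (class and method names here are illustrative, not taken from the deleted code):

    import java.util.Optional;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustrative cached-role holder: lookups use the read lock, the periodic
    // role check publishes updates under the write lock.
    public final class CachedRole<R, P> {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private Optional<R> role = Optional.empty();
      private Optional<P> leader = Optional.empty();

      public boolean is(R expected) {
        lock.readLock().lock();
        try {
          return role.isPresent() && role.get().equals(expected);
        } finally {
          lock.readLock().unlock();
        }
      }

      public Optional<P> leader() {
        lock.readLock().lock();
        try {
          return leader;
        } finally {
          lock.readLock().unlock();
        }
      }

      public void update(R newRole, P newLeader) {
        lock.writeLock().lock();
        try {
          this.role = Optional.ofNullable(newRole);
          this.leader = Optional.ofNullable(newLeader);
        } finally {
          lock.writeLock().unlock();
        }
      }
    }

The read/write split keeps the frequent lookups cheap while still letting the scheduled checker publish role changes safely.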
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -/** - * Functional interface for OM RatisSnapshot. - */ - -public interface OzoneManagerRatisSnapshot { - - /** - * Update lastAppliedIndex with the specified value in OzoneManager - * StateMachine. - * @param lastAppliedIndex - */ - void updateLastAppliedIndex(long lastAppliedIndex); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java deleted file mode 100644 index e302956d396..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ /dev/null @@ -1,377 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import com.google.protobuf.ServiceException; -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocolPB.OzoneManagerHARequestHandler; -import org.apache.hadoop.ozone.protocolPB.OzoneManagerHARequestHandlerImpl; -import org.apache.hadoop.util.concurrent.HadoopExecutors; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.server.RaftServer; -import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.storage.RaftStorage; -import org.apache.ratis.statemachine.SnapshotInfo; -import org.apache.ratis.statemachine.TransactionContext; -import org.apache.ratis.statemachine.impl.BaseStateMachine; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.util.LifeCycle; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The OM StateMachine is the state machine for OM Ratis server. It is - * responsible for applying ratis committed transactions to - * {@link OzoneManager}. 
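OzoneManagerRatisSnapshot, deleted a little earlier in this patch, declares a single updateLastAppliedIndex(long) method, so an implementation can be supplied as a lambda or method reference; the OzoneManagerStateMachine constructor below hands this::updateLastAppliedIndex to the double buffer in exactly that shape. A small illustrative sketch (AppliedIndexTracker is hypothetical; only the interface comes from the deleted code):

    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisSnapshot;

    // Hypothetical tracker that satisfies the single-method callback with a
    // method reference.
    public final class AppliedIndexTracker {
      private final AtomicLong lastAppliedIndex = new AtomicLong(-1);

      public OzoneManagerRatisSnapshot asCallback() {
        return lastAppliedIndex::set;   // implements updateLastAppliedIndex(long)
      }

      public long current() {
        return lastAppliedIndex.get();
      }
    }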
- */ -public class OzoneManagerStateMachine extends BaseStateMachine { - - static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerStateMachine.class); - private final SimpleStateMachineStorage storage = - new SimpleStateMachineStorage(); - private final OzoneManagerRatisServer omRatisServer; - private final OzoneManager ozoneManager; - private OzoneManagerHARequestHandler handler; - private RaftGroupId raftGroupId; - private long lastAppliedIndex; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; - private final OMRatisSnapshotInfo snapshotInfo; - private final ExecutorService executorService; - private final ExecutorService installSnapshotExecutor; - - public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer) { - this.omRatisServer = ratisServer; - this.ozoneManager = omRatisServer.getOzoneManager(); - - this.snapshotInfo = ozoneManager.getSnapshotInfo(); - updateLastAppliedIndexWithSnaphsotIndex(); - - this.ozoneManagerDoubleBuffer = - new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), - this::updateLastAppliedIndex); - - this.handler = new OzoneManagerHARequestHandlerImpl(ozoneManager, - ozoneManagerDoubleBuffer); - - ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("OM StateMachine ApplyTransaction Thread - %d").build(); - this.executorService = HadoopExecutors.newSingleThreadExecutor(build); - this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor(); - } - - /** - * Initializes the State Machine with the given server, group and storage. - */ - @Override - public void initialize(RaftServer server, RaftGroupId id, - RaftStorage raftStorage) throws IOException { - lifeCycle.startAndTransition(() -> { - super.initialize(server, id, raftStorage); - this.raftGroupId = id; - storage.init(raftStorage); - }); - } - - @Override - public SnapshotInfo getLatestSnapshot() { - return snapshotInfo; - } - - /** - * Called to notify state machine about indexes which are processed - * internally by Raft Server, this currently happens when conf entries are - * processed in raft Server. This keep state machine to keep a track of index - * updates. - * @param term term of the current log entry - * @param index index which is being updated - */ - @Override - public void notifyIndexUpdate(long term, long index) { - // SnapshotInfo should be updated when the term changes. - // The index here refers to the log entry index and the index in - // SnapshotInfo represents the snapshotIndex i.e. the index of the last - // transaction included in the snapshot. Hence, snaphsotInfo#index is not - // updated here. - snapshotInfo.updateTerm(term); - } - - /** - * Validate/pre-process the incoming update request in the state machine. - * @return the content to be written to the log entry. Null means the request - * should be rejected. 
- * @throws IOException thrown by the state machine while validating - */ - @Override - public TransactionContext startTransaction( - RaftClientRequest raftClientRequest) throws IOException { - ByteString messageContent = raftClientRequest.getMessage().getContent(); - OMRequest omRequest = OMRatisHelper.convertByteStringToOMRequest( - messageContent); - - Preconditions.checkArgument(raftClientRequest.getRaftGroupId().equals( - raftGroupId)); - try { - handler.validateRequest(omRequest); - } catch (IOException ioe) { - TransactionContext ctxt = TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .build(); - ctxt.setException(ioe); - return ctxt; - } - return handleStartTransactionRequests(raftClientRequest, omRequest); - } - - /* - * Apply a committed log entry to the state machine. - */ - @Override - public CompletableFuture applyTransaction(TransactionContext trx) { - try { - OMRequest request = OMRatisHelper.convertByteStringToOMRequest( - trx.getStateMachineLogEntry().getLogData()); - long trxLogIndex = trx.getLogEntry().getIndex(); - // In the current approach we have one single global thread executor. - // with single thread. Right now this is being done for correctness, as - // applyTransaction will be run on multiple OM's we want to execute the - // transactions in the same order on all OM's, otherwise there is a - // chance that OM replica's can be out of sync. - // TODO: In this way we are making all applyTransactions in - // OM serial order. Revisit this in future to use multiple executors for - // volume/bucket. - - // Reason for not immediately implementing executor per volume is, if - // one executor operations are slow, we cannot update the - // lastAppliedIndex in OzoneManager StateMachine, even if other - // executor has completed the transactions with id more. - - // We have 300 transactions, And for each volume we have transactions - // of 150. Volume1 transactions 0 - 149 and Volume2 transactions 150 - - // 299. - // Example: Executor1 - Volume1 - 100 (current completed transaction) - // Example: Executor2 - Volume2 - 299 (current completed transaction) - - // Now we have applied transactions of 0 - 100 and 149 - 299. We - // cannot update lastAppliedIndex to 299. We need to update it to 100, - // since 101 - 149 are not applied. When OM restarts it will - // applyTransactions from lastAppliedIndex. - // We can update the lastAppliedIndex to 100, and update it to 299, - // only after completing 101 - 149. In initial stage, we are starting - // with single global executor. Will revisit this when needed. - - CompletableFuture future = CompletableFuture.supplyAsync( - () -> runCommand(request, trxLogIndex), executorService); - return future; - } catch (IOException e) { - return completeExceptionally(e); - } - } - - /** - * Query the state machine. The request must be read-only. - */ - @Override - public CompletableFuture query(Message request) { - try { - OMRequest omRequest = OMRatisHelper.convertByteStringToOMRequest( - request.getContent()); - return CompletableFuture.completedFuture(queryCommand(omRequest)); - } catch (IOException e) { - return completeExceptionally(e); - } - } - - @Override - public void pause() { - lifeCycle.transition(LifeCycle.State.PAUSING); - lifeCycle.transition(LifeCycle.State.PAUSED); - ozoneManagerDoubleBuffer.stop(); - } - - /** - * Unpause the StateMachine, re-initialize the DoubleBuffer and update the - * lastAppliedIndex. 
This should be done after uploading new state to the - * StateMachine. - */ - public void unpause(long newLastAppliedSnaphsotIndex) { - lifeCycle.startAndTransition(() -> { - this.ozoneManagerDoubleBuffer = - new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), - this::updateLastAppliedIndex); - this.updateLastAppliedIndex(newLastAppliedSnaphsotIndex); - }); - } - - /** - * Take OM Ratis snapshot. Write the snapshot index to file. Snapshot index - * is the log index corresponding to the last applied transaction on the OM - * State Machine. - * - * @return the last applied index on the state machine which has been - * stored in the snapshot file. - */ - @Override - public long takeSnapshot() throws IOException { - LOG.info("Saving Ratis snapshot on the OM."); - if (ozoneManager != null) { - return ozoneManager.saveRatisSnapshot(); - } - return 0; - } - - /** - * Leader OM has purged entries from its log. To catch up, OM must download - * the latest checkpoint from the leader OM and install it. - * @param roleInfoProto the leader node information - * @param firstTermIndexInLog TermIndex of the first append entry available - * in the Leader's log. - * @return the last term index included in the installed snapshot. - */ - @Override - public CompletableFuture notifyInstallSnapshotFromLeader( - RaftProtos.RoleInfoProto roleInfoProto, TermIndex firstTermIndexInLog) { - - String leaderNodeId = RaftPeerId.valueOf(roleInfoProto.getSelf().getId()) - .toString(); - - LOG.info("Received install snapshot notificaiton form OM leader: {} with " + - "term index: {}", leaderNodeId, firstTermIndexInLog); - - if (!roleInfoProto.getRole().equals(RaftProtos.RaftPeerRole.LEADER)) { - // A non-leader Ratis server should not send this notification. - LOG.error("Received Install Snapshot notification from non-leader OM " + - "node: {}. Ignoring the notification.", leaderNodeId); - return completeExceptionally(new OMException("Received notification to " + - "install snaphost from non-leader OM node", - OMException.ResultCodes.RATIS_ERROR)); - } - - CompletableFuture future = CompletableFuture.supplyAsync( - () -> ozoneManager.installSnapshot(leaderNodeId), - installSnapshotExecutor); - return future; - } - - /** - * Notifies the state machine that the raft peer is no longer leader. - */ - @Override - public void notifyNotLeader(Collection pendingEntries) - throws IOException { - omRatisServer.updateServerRole(); - } - - /** - * Handle the RaftClientRequest and return TransactionContext object. - * @param raftClientRequest - * @param omRequest - * @return TransactionContext - */ - private TransactionContext handleStartTransactionRequests( - RaftClientRequest raftClientRequest, OMRequest omRequest) { - - return TransactionContext.newBuilder() - .setClientRequest(raftClientRequest) - .setStateMachine(this) - .setServerRole(RaftProtos.RaftPeerRole.LEADER) - .setLogData(raftClientRequest.getMessage().getContent()) - .build(); - } - - /** - * Submits write request to OM and returns the response Message. 
- * @param request OMRequest - * @return response from OM - * @throws ServiceException - */ - private Message runCommand(OMRequest request, long trxLogIndex) { - OMResponse response = handler.handleApplyTransaction(request, trxLogIndex); - lastAppliedIndex = trxLogIndex; - return OMRatisHelper.convertResponseToMessage(response); - } - - @SuppressWarnings("HiddenField") - public void updateLastAppliedIndex(long lastAppliedIndex) { - this.lastAppliedIndex = lastAppliedIndex; - } - - public void updateLastAppliedIndexWithSnaphsotIndex() { - this.lastAppliedIndex = snapshotInfo.getIndex(); - } - - /** - * Submits read request to OM and returns the response Message. - * @param request OMRequest - * @return response from OM - * @throws ServiceException - */ - private Message queryCommand(OMRequest request) { - OMResponse response = handler.handle(request); - return OMRatisHelper.convertResponseToMessage(response); - } - - public long getLastAppliedIndex() { - return lastAppliedIndex; - } - - private static CompletableFuture completeExceptionally(Exception e) { - final CompletableFuture future = new CompletableFuture<>(); - future.completeExceptionally(e); - return future; - } - - @VisibleForTesting - public void setHandler(OzoneManagerHARequestHandler handler) { - this.handler = handler; - } - - @VisibleForTesting - public void setRaftGroupId(RaftGroupId raftGroupId) { - this.raftGroupId = raftGroupId; - } - - public void stop() { - ozoneManagerDoubleBuffer.stop(); - HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); - HadoopExecutors.shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java deleted file mode 100644 index cd4c5ae8b25..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/DoubleBufferEntry.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis.helpers; - -import org.apache.hadoop.ozone.om.response.OMClientResponse; - -/** - * Entry in OzoneManagerDouble Buffer. - * @param - */ -public class DoubleBufferEntry { - - private long trxLogIndex; - private Response response; - - public DoubleBufferEntry(long trxLogIndex, Response response) { - this.trxLogIndex = trxLogIndex; - this.response = response; - } - - public long getTrxLogIndex() { - return trxLogIndex; - } - - public Response getResponse() { - return response; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java deleted file mode 100644 index b12a324d681..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/helpers/package-info.java +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -/** - * package which contains helper classes for each OM request response. - */ -package org.apache.hadoop.ozone.om.ratis.helpers; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java deleted file mode 100644 index e2d7f72e44c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis.metrics; - -import org.apache.hadoop.metrics2.MetricsSystem; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; - -/** - * Class which maintains metrics related to OzoneManager DoubleBuffer. - */ -public class OzoneManagerDoubleBufferMetrics { - - private static final String SOURCE_NAME = - OzoneManagerDoubleBufferMetrics.class.getSimpleName(); - - @Metric(about = "Total Number of flush operations happened in " + - "OzoneManagerDoubleBuffer.") - private MutableCounterLong totalNumOfFlushOperations; - - @Metric(about = "Total Number of flushed transactions happened in " + - "OzoneManagerDoubleBuffer.") - private MutableCounterLong totalNumOfFlushedTransactions; - - @Metric(about = "Max Number of transactions flushed in a iteration in " + - "OzoneManagerDoubleBuffer. This will provide a value which is maximum " + - "number of transactions flushed in a single flush iteration till now.") - private MutableCounterLong maxNumberOfTransactionsFlushedInOneIteration; - - - public static OzoneManagerDoubleBufferMetrics create() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - return ms.register(SOURCE_NAME, - "OzoneManager DoubleBuffer Metrics", - new OzoneManagerDoubleBufferMetrics()); - } - - public void incrTotalNumOfFlushOperations() { - this.totalNumOfFlushOperations.incr(); - } - - public void incrTotalSizeOfFlushedTransactions( - long flushedTransactions) { - this.totalNumOfFlushedTransactions.incr(flushedTransactions); - } - - public void setMaxNumberOfTransactionsFlushedInOneIteration( - long maxTransactions) { - // We should set the value with maxTransactions, so decrement old value - // first and then add the new value. - this.maxNumberOfTransactionsFlushedInOneIteration.incr( - Math.negateExact(getMaxNumberOfTransactionsFlushedInOneIteration()) - + maxTransactions); - } - - public long getTotalNumOfFlushOperations() { - return totalNumOfFlushOperations.value(); - } - - public long getTotalNumOfFlushedTransactions() { - return totalNumOfFlushedTransactions.value(); - } - - public long getMaxNumberOfTransactionsFlushedInOneIteration() { - return maxNumberOfTransactionsFlushedInOneIteration.value(); - } - - public void unRegister() { - MetricsSystem ms = DefaultMetricsSystem.instance(); - ms.unregisterSource(SOURCE_NAME); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java deleted file mode 100644 index e41c645b581..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
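MutableCounterLong only exposes increments, so setMaxNumberOfTransactionsFlushedInOneIteration above turns the counter into a settable high-water mark by adding the delta between the new maximum and the current value. A stand-alone sketch of the same bookkeeping with an AtomicLong, leaving the Hadoop metrics2 wiring out (the class name is illustrative):

    import java.util.concurrent.atomic.AtomicLong;

    public final class MaxFlushTracker {
      private final AtomicLong maxFlushedInOneIteration = new AtomicLong();

      // Mirrors the counter trick above: add the delta so the stored value becomes
      // the new maximum, because the underlying metric only supports incr().
      public void setMax(long newMax) {
        long current = maxFlushedInOneIteration.get();
        maxFlushedInOneIteration.addAndGet(Math.negateExact(current) + newMax);
      }

      public long get() {
        return maxFlushedInOneIteration.get();
      }

      public static void main(String[] args) {
        MaxFlushTracker tracker = new MaxFlushTracker();
        tracker.setMax(40);
        tracker.setMax(65);
        System.out.println(tracker.get()); // 65
      }
    }

Like the original, the read-then-add is not atomic, which is acceptable as long as a single flush thread performs the update.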
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * package which contains metrics classes. - */ -package org.apache.hadoop.ozone.om.ratis.metrics; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java deleted file mode 100644 index ea25f133ebb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.ratis; - -/** - * This package contains classes for the OM Ratis server implementation. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java deleted file mode 100644 index d893f522f6c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerDoubleBufferHelper.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis.utils; - -import org.apache.hadoop.ozone.om.response.OMClientResponse; - -import java.util.concurrent.CompletableFuture; - -/** - * Helper interface for OzoneManagerDoubleBuffer. - * - */ -public interface OzoneManagerDoubleBufferHelper { - - CompletableFuture add(OMClientResponse response, - long transactionIndex); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java deleted file mode 100644 index 4f01960ff57..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.ratis.utils; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest; -import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest; -import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest; -import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest; -import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest; -import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest; -import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest; -import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest; -import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest; -import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest; -import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixRemoveAclRequest; -import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixSetAclRequest; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketDeleteRequest; -import org.apache.hadoop.ozone.om.request.s3.multipart.S3InitiateMultipartUploadRequest; -import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCommitPartRequest; -import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest; -import org.apache.hadoop.ozone.om.request.security.OMCancelDelegationTokenRequest; -import org.apache.hadoop.ozone.om.request.security.OMGetDelegationTokenRequest; -import org.apache.hadoop.ozone.om.request.security.OMRenewDelegationTokenRequest; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetQuotaRequest; -import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeAddAclRequest; -import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeRemoveAclRequest; -import org.apache.hadoop.ozone.om.request.volume.acl.OMVolumeSetAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; - -import java.io.IOException; - -/** - * Utility class used by OzoneManager HA. - */ -public final class OzoneManagerRatisUtils { - - private OzoneManagerRatisUtils() { - } - /** - * Create OMClientRequest which enacpsulates the OMRequest. - * @param omRequest - * @return OMClientRequest - * @throws IOException - */ - public static OMClientRequest createClientRequest(OMRequest omRequest) { - Type cmdType = omRequest.getCmdType(); - switch (cmdType) { - case CreateVolume: - return new OMVolumeCreateRequest(omRequest); - case SetVolumeProperty: - boolean hasQuota = omRequest.getSetVolumePropertyRequest() - .hasQuotaInBytes(); - boolean hasOwner = omRequest.getSetVolumePropertyRequest().hasOwnerName(); - Preconditions.checkState(hasOwner || hasQuota, "Either Quota or owner " + - "should be set in the SetVolumeProperty request"); - Preconditions.checkState(!(hasOwner && hasQuota), "Either Quota or " + - "owner should be set in the SetVolumeProperty request. Should not " + - "set both"); - if (hasQuota) { - return new OMVolumeSetQuotaRequest(omRequest); - } else { - return new OMVolumeSetOwnerRequest(omRequest); - } - case DeleteVolume: - return new OMVolumeDeleteRequest(omRequest); - case CreateBucket: - return new OMBucketCreateRequest(omRequest); - case DeleteBucket: - return new OMBucketDeleteRequest(omRequest); - case SetBucketProperty: - return new OMBucketSetPropertyRequest(omRequest); - case AllocateBlock: - return new OMAllocateBlockRequest(omRequest); - case CreateKey: - return new OMKeyCreateRequest(omRequest); - case CommitKey: - return new OMKeyCommitRequest(omRequest); - case DeleteKey: - return new OMKeyDeleteRequest(omRequest); - case RenameKey: - return new OMKeyRenameRequest(omRequest); - case CreateDirectory: - return new OMDirectoryCreateRequest(omRequest); - case CreateFile: - return new OMFileCreateRequest(omRequest); - case PurgeKeys: - return new OMKeyPurgeRequest(omRequest); - case CreateS3Bucket: - return new S3BucketCreateRequest(omRequest); - case DeleteS3Bucket: - return new S3BucketDeleteRequest(omRequest); - case InitiateMultiPartUpload: - return new S3InitiateMultipartUploadRequest(omRequest); - case CommitMultiPartUpload: - return new S3MultipartUploadCommitPartRequest(omRequest); - case AbortMultiPartUpload: - return new S3MultipartUploadAbortRequest(omRequest); - case CompleteMultiPartUpload: - return new S3MultipartUploadCompleteRequest(omRequest); - case AddAcl: - case RemoveAcl: - case SetAcl: - return getOMAclRequest(omRequest); - case GetDelegationToken: - return new OMGetDelegationTokenRequest(omRequest); - case CancelDelegationToken: - return new OMCancelDelegationTokenRequest(omRequest); - case RenewDelegationToken: - return new OMRenewDelegationTokenRequest(omRequest); - case GetS3Secret: - return new S3GetSecretRequest(omRequest); - default: - return null; - } - } - - private static OMClientRequest getOMAclRequest(OMRequest omRequest) { - Type cmdType = omRequest.getCmdType(); - if (Type.AddAcl == cmdType) { - ObjectType type = omRequest.getAddAclRequest().getObj().getResType(); - if (ObjectType.VOLUME == type) { - return new OMVolumeAddAclRequest(omRequest); - } else if (ObjectType.BUCKET == type) { - return new OMBucketAddAclRequest(omRequest); - } else if (ObjectType.KEY == type) { - return new 
OMKeyAddAclRequest(omRequest); - } else { - return new OMPrefixAddAclRequest(omRequest); - } - } else if (Type.RemoveAcl == cmdType) { - ObjectType type = omRequest.getRemoveAclRequest().getObj().getResType(); - if (ObjectType.VOLUME == type) { - return new OMVolumeRemoveAclRequest(omRequest); - } else if (ObjectType.BUCKET == type) { - return new OMBucketRemoveAclRequest(omRequest); - } else if (ObjectType.KEY == type) { - return new OMKeyRemoveAclRequest(omRequest); - } else { - return new OMPrefixRemoveAclRequest(omRequest); - } - } else { - ObjectType type = omRequest.getSetAclRequest().getObj().getResType(); - if (ObjectType.VOLUME == type) { - return new OMVolumeSetAclRequest(omRequest); - } else if (ObjectType.BUCKET == type) { - return new OMBucketSetAclRequest(omRequest); - } else if (ObjectType.KEY == type) { - return new OMKeySetAclRequest(omRequest); - } else { - return new OMPrefixSetAclRequest(omRequest); - } - } - } - - /** - * Convert exception result to {@link OzoneManagerProtocolProtos.Status}. - * @param exception - * @return OzoneManagerProtocolProtos.Status - */ - public static Status exceptionToResponseStatus(IOException exception) { - if (exception instanceof OMException) { - return Status.values()[((OMException) exception).getResult().ordinal()]; - } else { - return Status.INTERNAL_ERROR; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java deleted file mode 100644 index 94fd0c89565..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
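The deleted OzoneManagerRatisUtils above is essentially a factory: it maps the protobuf command type of an OMRequest to the concrete write-request handler, and translates OMException results back into protobuf status codes. A minimal, self-contained sketch of that dispatch shape (CmdType, OmRequest and the request records below are illustrative stand-ins, not the real Ozone classes):

    final class RequestFactorySketch {

      enum CmdType { CREATE_VOLUME, CREATE_BUCKET, DELETE_BUCKET, UNKNOWN }

      // Stand-ins for the protobuf OMRequest and the OMClientRequest hierarchy.
      record OmRequest(CmdType cmdType) { }
      interface OmClientRequest { }
      record VolumeCreateRequest(OmRequest req) implements OmClientRequest { }
      record BucketCreateRequest(OmRequest req) implements OmClientRequest { }
      record BucketDeleteRequest(OmRequest req) implements OmClientRequest { }

      private RequestFactorySketch() { }

      // One switch, one handler per command type; commands without a Ratis
      // write path fall through to null, as in the deleted createClientRequest().
      static OmClientRequest createClientRequest(OmRequest req) {
        switch (req.cmdType()) {
        case CREATE_VOLUME: return new VolumeCreateRequest(req);
        case CREATE_BUCKET: return new BucketCreateRequest(req);
        case DELETE_BUCKET: return new BucketDeleteRequest(req);
        default: return null;
        }
      }
    }

Unknown command types return null, which the caller treats as "no Ratis write path for this command".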
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om.ratis.utils; - -/** - * Utility class used by OzoneManager HA. - */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java deleted file mode 100644 index 306527f2a4f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ /dev/null @@ -1,220 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request; - -import java.io.IOException; -import java.net.InetAddress; -import java.util.LinkedHashMap; -import java.util.Map; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditEventStatus; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.security.UserGroupInformation; - -import javax.annotation.Nonnull; - -/** - * OMClientRequest provides methods which every write OM request should - * implement. - */ -public abstract class OMClientRequest implements RequestAuditor { - - private OMRequest omRequest; - - public OMClientRequest(OMRequest omRequest) { - Preconditions.checkNotNull(omRequest); - this.omRequest = omRequest; - } - /** - * Perform pre-execute steps on a OMRequest. - * - * Called from the RPC context, and generates a OMRequest object which has - * all the information that will be either persisted - * in RocksDB or returned to the caller once this operation - * is executed. - * - * @return OMRequest that will be serialized and handed off to Ratis for - * consensus. - */ - public OMRequest preExecute(OzoneManager ozoneManager) - throws IOException { - omRequest = getOmRequest().toBuilder().setUserInfo(getUserInfo()).build(); - return omRequest; - } - - /** - * Validate the OMRequest and update the cache. - * This step should verify that the request can be executed, perform - * any authorization steps and update the in-memory cache. - - * This step does not persist the changes to the database. - * - * @return the response that will be returned to the client. - */ - public abstract OMClientResponse validateAndUpdateCache( - OzoneManager ozoneManager, long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper); - - @VisibleForTesting - public OMRequest getOmRequest() { - return omRequest; - } - - /** - * Get User information which needs to be set in the OMRequest object. - * @return User Info. 
- */ - public OzoneManagerProtocolProtos.UserInfo getUserInfo() { - UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); - InetAddress remoteAddress = ProtobufRpcEngine.Server.getRemoteIp(); - OzoneManagerProtocolProtos.UserInfo.Builder userInfo = - OzoneManagerProtocolProtos.UserInfo.newBuilder(); - - // Added not null checks, as in UT's these values might be null. - if (user != null) { - userInfo.setUserName(user.getUserName()); - } - - if (remoteAddress != null) { - userInfo.setRemoteAddress(remoteAddress.getHostAddress()).build(); - } - - return userInfo.build(); - } - - /** - * Check Acls of ozone object. - * @param ozoneManager - * @param resType - * @param storeType - * @param aclType - * @param vol - * @param bucket - * @param key - * @throws IOException - */ - public void checkAcls(OzoneManager ozoneManager, - OzoneObj.ResourceType resType, - OzoneObj.StoreType storeType, IAccessAuthorizer.ACLType aclType, - String vol, String bucket, String key) throws IOException { - ozoneManager.checkAcls(resType, storeType, aclType, vol, bucket, key, - createUGI(), getRemoteAddress()); - } - - /** - * Return UGI object created from OMRequest userInfo. If userInfo is not - * set, returns null. - * @return UserGroupInformation. - */ - @VisibleForTesting - public UserGroupInformation createUGI() { - if (omRequest.hasUserInfo() && - !StringUtils.isBlank(omRequest.getUserInfo().getUserName())) { - return UserGroupInformation.createRemoteUser( - omRequest.getUserInfo().getUserName()); - } else { - // This will never happen, as for every OM request preExecute, we - // should add userInfo. - return null; - } - } - - /** - * Return InetAddress created from OMRequest userInfo. If userInfo is not - * set, returns null. - * @return InetAddress - * @throws IOException - */ - @VisibleForTesting - public InetAddress getRemoteAddress() throws IOException { - if (omRequest.hasUserInfo()) { - return InetAddress.getByName(omRequest.getUserInfo() - .getRemoteAddress()); - } else { - return null; - } - } - - /** - * Set parameters needed for return error response to client. - * @param omResponse - * @param ex - IOException - * @return error response need to be returned to client - OMResponse. - */ - protected OMResponse createErrorOMResponse( - @Nonnull OMResponse.Builder omResponse, @Nonnull IOException ex) { - - omResponse.setSuccess(false); - if (ex.getMessage() != null) { - omResponse.setMessage(ex.getMessage()); - } - omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex)); - return omResponse.build(); - } - - /** - * Log the auditMessage. - * @param auditLogger - * @param auditMessage - */ - protected void auditLog(AuditLogger auditLogger, AuditMessage auditMessage) { - auditLogger.logWrite(auditMessage); - } - - @Override - public AuditMessage buildAuditMessage(AuditAction op, - Map< String, String > auditMap, Throwable throwable, - OzoneManagerProtocolProtos.UserInfo userInfo) { - return new AuditMessage.Builder() - .setUser(userInfo != null ? userInfo.getUserName() : null) - .atIp(userInfo != null ? userInfo.getRemoteAddress() : null) - .forOperation(op.getAction()) - .withParams(auditMap) - .withResult(throwable != null ? 
AuditEventStatus.FAILURE.toString() : - AuditEventStatus.SUCCESS.toString()) - .withException(throwable) - .build(); - } - - @Override - public Map buildVolumeAuditMap(String volume) { - Map auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, volume); - return auditMap; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java deleted file mode 100644 index 9aa8fc48132..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
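OMClientRequest, removed above, fixes the two-phase write protocol every OM write request follows: preExecute() runs once on the leader before the request is submitted to Ratis, and validateAndUpdateCache() runs after consensus and may only touch the in-memory table cache. A rough sketch of that contract, with simplified stand-in types rather than the real OMRequest/OMResponse protos:

    abstract class ClientRequestSketch {

      record Response(boolean success, String message) { }

      private String user;     // stamped by preExecute, before Ratis consensus

      // Phase 1: runs once on the leader; enriches the request (user info,
      // timestamps, resolved KMS data) before it is handed to Ratis.
      public void preExecute(String remoteUser) {
        this.user = remoteUser;
      }

      // Phase 2: runs on every OM after consensus; may only update the
      // in-memory table cache, the RocksDB flush is deferred to the double buffer.
      public abstract Response validateAndUpdateCache(long transactionLogIndex);

      // Mirrors createErrorOMResponse(): mark failure, keep the message if any.
      protected Response errorResponse(Exception ex) {
        return new Response(false,
            ex.getMessage() == null ? "INTERNAL_ERROR" : ex.getMessage());
      }

      protected String user() {
        return user;
      }
    }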
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request; - -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.AuditAction; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserInfo; - -/** - * Interface for OM Requests to convert to audit objects. - */ -public interface RequestAuditor { - - /** - * Build AuditMessage. - * @param op - * @param auditMap - * @param throwable - * @param userInfo - * @return - */ - AuditMessage buildAuditMessage(AuditAction op, - Map auditMap, Throwable throwable, UserInfo userInfo); - - /** - * Build auditMap with specified volume. - * @param volume - * @return auditMap. - */ - Map buildVolumeAuditMap(String volume); - - /** - * Build auditMap for KeyArgs. - * @param keyArgs - */ - default Map buildKeyArgsAuditMap(KeyArgs keyArgs) { - - if (keyArgs == null) { - return new HashMap<>(0); - } else { - Map< String, String > auditMap = new LinkedHashMap<>(); - auditMap.put(OzoneConsts.VOLUME, keyArgs.getVolumeName()); - auditMap.put(OzoneConsts.BUCKET, keyArgs.getBucketName()); - auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName()); - auditMap.put(OzoneConsts.DATA_SIZE, - String.valueOf(keyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (keyArgs.getType() != null) ? keyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (keyArgs.getFactor() != null) ? keyArgs.getFactor().name() : null); - auditMap.put(OzoneConsts.KEY_LOCATION_INFO, - (keyArgs.getKeyLocationsList() != null) ? - keyArgs.getKeyLocationsList().toString() : null); - return auditMap; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java deleted file mode 100644 index 2b2448db770..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ /dev/null @@ -1,280 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
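The RequestAuditor interface deleted above standardizes how each request builds its audit parameters. The key detail is the use of LinkedHashMap, so parameters appear in a stable order in the audit log. A small illustrative sketch (key names are plain literals here, not the OzoneConsts constants):

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class AuditMapSketch {

      private AuditMapSketch() { }

      // Insertion order is preserved, so every CREATE_KEY audit line lists
      // volume, bucket, key and size in the same order.
      static Map<String, String> buildKeyAuditMap(String volume, String bucket,
          String key, long dataSize) {
        Map<String, String> auditMap = new LinkedHashMap<>();
        auditMap.put("volume", volume);
        auditMap.put("bucket", bucket);
        auditMap.put("key", key);
        auditMap.put("dataSize", String.valueOf(dataSize));
        return auditMap;
      }
    }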
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.crypto.CipherSuite; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketEncryptionInfoProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CryptoProtocolVersionProto.ENCRYPTION_ZONES; - -/** - * Handles CreateBucket Request. - */ -public class OMBucketCreateRequest extends OMClientRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketCreateRequest.class); - - public OMBucketCreateRequest(OMRequest omRequest) { - super(omRequest); - } - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - - // Get original request. - CreateBucketRequest createBucketRequest = - getOmRequest().getCreateBucketRequest(); - BucketInfo bucketInfo = createBucketRequest.getBucketInfo(); - - // Get KMS provider. 
- KeyProviderCryptoExtension kmsProvider = - ozoneManager.getKmsProvider(); - - // Create new Bucket request with new bucket info. - CreateBucketRequest.Builder newCreateBucketRequest = - createBucketRequest.toBuilder(); - - BucketInfo.Builder newBucketInfo = bucketInfo.toBuilder(); - - // Set creation time. - newBucketInfo.setCreationTime(Time.now()); - - if (bucketInfo.hasBeinfo()) { - newBucketInfo.setBeinfo(getBeinfo(kmsProvider, bucketInfo)); - } - - newCreateBucketRequest.setBucketInfo(newBucketInfo.build()); - - return getOmRequest().toBuilder().setUserInfo(getUserInfo()) - .setCreateBucketRequest(newCreateBucketRequest.build()).build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumBucketCreates(); - - OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); - - BucketInfo bucketInfo = getBucketInfoFromRequest(); - - String volumeName = bucketInfo.getVolumeName(); - String bucketName = bucketInfo.getBucketName(); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CreateBucket).setStatus( - OzoneManagerProtocolProtos.Status.OK); - OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - - String volumeKey = metadataManager.getVolumeKey(volumeName); - String bucketKey = metadataManager.getBucketKey(volumeName, bucketName); - IOException exception = null; - boolean acquiredBucketLock = false; - boolean acquiredVolumeLock = false; - OMClientResponse omClientResponse = null; - - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.CREATE, - volumeName, bucketName, null); - } - - acquiredVolumeLock = - metadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); - acquiredBucketLock = metadataManager.getLock().acquireWriteLock( - BUCKET_LOCK, volumeName, bucketName); - - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - //Check if the volume exists - if (omVolumeArgs == null) { - LOG.debug("volume: {} not found ", volumeName); - throw new OMException("Volume doesn't exist", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - //Check if bucket already exists - if (metadataManager.getBucketTable().get(bucketKey) != null) { - LOG.debug("bucket: {} already exists ", bucketName); - throw new OMException("Bucket already exist", - OMException.ResultCodes.BUCKET_ALREADY_EXISTS); - } - - // Add default acls from volume. - addDefaultAcls(omBucketInfo, omVolumeArgs); - - // Update table cache. 
- metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - - omResponse.setCreateBucketResponse( - CreateBucketResponse.newBuilder().build()); - omClientResponse = new OMBucketCreateResponse(omBucketInfo, - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMBucketCreateResponse(omBucketInfo, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredBucketLock) { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - if (acquiredVolumeLock) { - metadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName); - } - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_BUCKET, - omBucketInfo.toAuditMap(), exception, userInfo)); - - // return response. - if (exception == null) { - LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName); - omMetrics.incNumBuckets(); - return omClientResponse; - } else { - omMetrics.incNumBucketCreateFails(); - LOG.error("Bucket creation failed for bucket:{} in volume:{}", - bucketName, volumeName, exception); - return omClientResponse; - } - } - - - /** - * Add default acls for bucket. These acls are inherited from volume - * default acl list. - * @param omBucketInfo - * @param omVolumeArgs - */ - private void addDefaultAcls(OmBucketInfo omBucketInfo, - OmVolumeArgs omVolumeArgs) { - // Add default acls from volume. - List acls = new ArrayList<>(); - if (omBucketInfo.getAcls() != null) { - acls.addAll(omBucketInfo.getAcls()); - } - - List defaultVolumeAclList = omVolumeArgs.getAclMap() - .getDefaultAclList().stream().map(OzoneAcl::fromProtobuf) - .collect(Collectors.toList()); - - OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAclList); - omBucketInfo.setAcls(acls); - } - - - private BucketInfo getBucketInfoFromRequest() { - CreateBucketRequest createBucketRequest = - getOmRequest().getCreateBucketRequest(); - return createBucketRequest.getBucketInfo(); - } - - private BucketEncryptionInfoProto getBeinfo( - KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo) - throws IOException { - BucketEncryptionInfoProto bek = bucketInfo.getBeinfo(); - BucketEncryptionInfoProto.Builder bekb = null; - if (kmsProvider == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - if (bek.getKeyName() == null) { - throw new OMException("Bucket encryption key needed.", OMException - .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // Talk to KMS to retrieve the bucket encryption key info. 
- KeyProvider.Metadata metadata = kmsProvider.getMetadata( - bek.getKeyName()); - if (metadata == null) { - throw new OMException("Bucket encryption key " + bek.getKeyName() - + " doesn't exist.", - OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND); - } - // If the provider supports pool for EDEKs, this will fill in the pool - kmsProvider.warmUpEncryptedKeys(bek.getKeyName()); - bekb = BucketEncryptionInfoProto.newBuilder() - .setKeyName(bek.getKeyName()) - .setCryptoProtocolVersion(ENCRYPTION_ZONES) - .setSuite(OMPBHelper.convert( - CipherSuite.convert(metadata.getCipher()))); - return bekb.build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java deleted file mode 100644 index 9469f887e18..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ /dev/null @@ -1,174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
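OMBucketCreateRequest, like the other removed write handlers, never writes to RocksDB directly: after validation it only adds an entry to the bucket table cache, tagged with the Ratis transaction index, and leaves persistence to the double buffer. A simplified sketch of that cache-first pattern, using stand-in types instead of OmBucketInfo and CacheValue:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class BucketCacheSketch {

      record BucketInfo(String volume, String bucket) { }
      record CacheValue(BucketInfo value, long txIndex) { }  // a null value would mean "deleted"

      private final Map<String, CacheValue> bucketTableCache = new ConcurrentHashMap<>();

      // Only the cache is touched here; persisting to RocksDB is left to the
      // double buffer, which flushes entries in transaction-index order.
      void createBucket(BucketInfo info, long transactionLogIndex) {
        String bucketKey = "/" + info.volume() + "/" + info.bucket();
        if (bucketTableCache.containsKey(bucketKey)) {
          throw new IllegalStateException("BUCKET_ALREADY_EXISTS: " + bucketKey);
        }
        bucketTableCache.put(bucketKey, new CacheValue(info, transactionLogIndex));
      }
    }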
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.io.IOException; -import java.util.Map; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -/** - * Handles DeleteBucket Request. 
- */ -public class OMBucketDeleteRequest extends OMClientRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketDeleteRequest.class); - - public OMBucketDeleteRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumBucketDeletes(); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - OMRequest omRequest = getOmRequest(); - String volumeName = omRequest.getDeleteBucketRequest().getVolumeName(); - String bucketName = omRequest.getDeleteBucketRequest().getBucketName(); - - // Generate end user response - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(omRequest.getCmdType()); - - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - Map auditMap = buildVolumeAuditMap(volumeName); - auditMap.put(OzoneConsts.BUCKET, bucketName); - - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - IOException exception = null; - - boolean acquiredBucketLock = false; - boolean acquiredVolumeLock = false; - OMClientResponse omClientResponse = null; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, - volumeName, bucketName, null); - } - - - // acquire lock - acquiredVolumeLock = - omMetadataManager.getLock().acquireReadLock(VOLUME_LOCK, volumeName); - acquiredBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - // No need to check volume exists here, as bucket cannot be created - // with out volume creation. - //Check if bucket exists - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo omBucketInfo = - omMetadataManager.getBucketTable().get(bucketKey); - if (omBucketInfo == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); - } - //Check if bucket is empty - if (!omMetadataManager.isBucketEmpty(volumeName, bucketName)) { - LOG.debug("bucket: {} is not empty ", bucketName); - throw new OMException("Bucket is not empty", - OMException.ResultCodes.BUCKET_NOT_EMPTY); - } - omMetrics.decNumBuckets(); - - // Update table cache. - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(bucketKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - - omResponse.setDeleteBucketResponse( - DeleteBucketResponse.newBuilder().build()); - - // Add to double buffer. - omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName, - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMBucketDeleteResponse(volumeName, bucketName, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseReadLock(VOLUME_LOCK, volumeName); - } - } - - // Performing audit logging outside of the lock. 
- auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_BUCKET, - auditMap, exception, userInfo)); - - // return response. - if (exception == null) { - LOG.debug("Deleted bucket:{} in volume:{}", bucketName, volumeName); - return omClientResponse; - } else { - omMetrics.incNumBucketDeleteFails(); - LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName, - volumeName, exception); - return omClientResponse; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java deleted file mode 100644 index 6c5f5fa4146..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
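The delete-bucket handler above also shows the lock discipline shared by these requests: take the volume lock in read mode, then the bucket lock in write mode, and release both in reverse order from a finally block so an exception cannot leak a lock. A minimal sketch of that ordering with plain JDK locks (the real code uses OzoneManagerLock, not ReentrantReadWriteLock):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class DeleteBucketLockSketch {

      private final ReentrantReadWriteLock volumeLock = new ReentrantReadWriteLock();
      private final ReentrantReadWriteLock bucketLock = new ReentrantReadWriteLock();

      void deleteBucket(Runnable checkEmptyAndRemoveFromCache) {
        boolean haveVolume = false;
        boolean haveBucket = false;
        try {
          volumeLock.readLock().lock();
          haveVolume = true;
          bucketLock.writeLock().lock();
          haveBucket = true;
          checkEmptyAndRemoveFromCache.run();
        } finally {
          // Release in reverse acquisition order, only what was acquired.
          if (haveBucket) {
            bucketLock.writeLock().unlock();
          }
          if (haveVolume) {
            volumeLock.readLock().unlock();
          }
        }
      }
    }

Audit logging and metric updates happen after the locks are released, exactly as in the deleted handler.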
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.io.IOException; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.om.request.OMClientRequest; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetPropertyResponse; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handle SetBucketProperty Request. 
- */ -public class OMBucketSetPropertyRequest extends OMClientRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketSetPropertyRequest.class); - - public OMBucketSetPropertyRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - - SetBucketPropertyRequest setBucketPropertyRequest = - getOmRequest().getSetBucketPropertyRequest(); - - Preconditions.checkNotNull(setBucketPropertyRequest); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumBucketUpdates(); - - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - BucketArgs bucketArgs = setBucketPropertyRequest.getBucketArgs(); - OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs); - - String volumeName = bucketArgs.getVolumeName(); - String bucketName = bucketArgs.getBucketName(); - - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CreateBucket).setStatus( - OzoneManagerProtocolProtos.Status.OK); - OmBucketInfo omBucketInfo = null; - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - IOException exception = null; - boolean acquiredBucketLock = false; - OMClientResponse omClientResponse = null; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, - volumeName, bucketName, null); - } - - // acquire lock. - acquiredBucketLock = omMetadataManager.getLock().acquireWriteLock( - BUCKET_LOCK, volumeName, bucketName); - - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - OmBucketInfo oldBucketInfo = - omMetadataManager.getBucketTable().get(bucketKey); - //Check if bucket exist - if (oldBucketInfo == null) { - LOG.debug("bucket: {} not found ", bucketName); - throw new OMException("Bucket doesn't exist", - OMException.ResultCodes.BUCKET_NOT_FOUND); - } - OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder(); - bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName()) - .setBucketName(oldBucketInfo.getBucketName()); - bucketInfoBuilder.addAllMetadata(KeyValueUtil - .getFromProtobuf(bucketArgs.getMetadataList())); - - //Check StorageType to update - StorageType storageType = omBucketArgs.getStorageType(); - if (storageType != null) { - bucketInfoBuilder.setStorageType(storageType); - LOG.debug("Updating bucket storage type for bucket: {} in volume: {}", - bucketName, volumeName); - } else { - bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType()); - } - - //Check Versioning to update - Boolean versioning = omBucketArgs.getIsVersionEnabled(); - if (versioning != null) { - bucketInfoBuilder.setIsVersionEnabled(versioning); - LOG.debug("Updating bucket versioning for bucket: {} in volume: {}", - bucketName, volumeName); - } else { - bucketInfoBuilder - .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled()); - } - - bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime()); - - // Set acls from oldBucketInfo if it has any. - if (oldBucketInfo.getAcls() != null) { - bucketInfoBuilder.setAcls(oldBucketInfo.getAcls()); - } - - omBucketInfo = bucketInfoBuilder.build(); - - // Update table cache. 
- omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(bucketKey), - new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - - omResponse.setSetBucketPropertyResponse( - SetBucketPropertyResponse.newBuilder().build()); - omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo, - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMBucketSetPropertyResponse(omBucketInfo, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.UPDATE_BUCKET, - omBucketArgs.toAuditMap(), exception, userInfo)); - - // return response. - if (exception == null) { - LOG.debug("Setting bucket property for bucket:{} in volume:{}", - bucketName, volumeName); - return omClientResponse; - } else { - LOG.error("Setting bucket property failed for bucket:{} in volume:{}", - bucketName, volumeName, exception); - omMetrics.incNumBucketUpdateFails(); - return omClientResponse; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java deleted file mode 100644 index 87ad6000bc5..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
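OMBucketSetPropertyRequest merges the incoming bucket args over the stored bucket info: only fields actually supplied in the request (storage type, versioning) are replaced, while everything else, including creation time and acls, is carried over. A hedged sketch of that merge, with simplified stand-ins for OmBucketInfo and OmBucketArgs:

    final class BucketPropertyMergeSketch {

      record BucketInfo(String storageType, boolean versioning, long creationTime) { }
      record BucketArgs(String storageType, Boolean versioning) { }  // null means "not set"

      private BucketPropertyMergeSketch() { }

      // Fields absent from the request fall back to the stored values, so a
      // partial update never wipes unrelated properties.
      static BucketInfo merge(BucketInfo old, BucketArgs args) {
        String storageType = args.storageType() != null
            ? args.storageType() : old.storageType();
        boolean versioning = args.versioning() != null
            ? args.versioning() : old.versioning();
        return new BucketInfo(storageType, versioning, old.creationTime());
      }
    }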
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket.acl; - -import java.io.IOException; -import java.util.List; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.util.BooleanBiFunction; -import org.apache.hadoop.ozone.om.request.util.ObjectParser; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Base class for Bucket acl request. - */ -public abstract class OMBucketAclRequest extends OMClientRequest { - - private BooleanBiFunction, OmBucketInfo> omBucketAclOp; - - public OMBucketAclRequest(OMRequest omRequest, - BooleanBiFunction, OmBucketInfo> aclOp) { - super(omRequest); - omBucketAclOp = aclOp; - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - // protobuf guarantees acls are non-null. - List ozoneAcls = getAcls(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumBucketUpdates(); - OmBucketInfo omBucketInfo = null; - - OMResponse.Builder omResponse = onInit(); - OMClientResponse omClientResponse = null; - IOException exception = null; - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean lockAcquired = false; - String volume = null; - String bucket = null; - boolean operationResult = false; - try { - ObjectParser objectParser = new ObjectParser(getPath(), - ObjectType.BUCKET); - - volume = objectParser.getVolume(); - bucket = objectParser.getBucket(); - - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, null, null); - } - lockAcquired = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, - bucket); - - String dbBucketKey = omMetadataManager.getBucketKey(volume, bucket); - omBucketInfo = omMetadataManager.getBucketTable().get(dbBucketKey); - if (omBucketInfo == null) { - throw new OMException(OMException.ResultCodes.BUCKET_NOT_FOUND); - } - - operationResult = omBucketAclOp.apply(ozoneAcls, omBucketInfo); - - if (operationResult) { - // update cache. 
- omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(dbBucketKey), - new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - } - - omClientResponse = onSuccess(omResponse, omBucketInfo, operationResult); - - } catch (IOException ex) { - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (lockAcquired) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume, - bucket); - } - } - - - onComplete(operationResult, exception, ozoneManager.getMetrics()); - - return omClientResponse; - } - - /** - * Get the Acls from the request. - * @return List of OzoneAcls, for add/remove it is a single element list - * for set it can be non-single element list. - */ - abstract List getAcls(); - - /** - * Get the path name from the request. - * @return path name - */ - abstract String getPath(); - - // TODO: Finer grain metrics can be moved to these callbacks. They can also - // be abstracted into separate interfaces in future. - /** - * Get the initial om response builder with lock. - * @return om response builder. - */ - abstract OMResponse.Builder onInit(); - - /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omBucketInfo - * @param operationResult - * @return OMClientResponse - */ - abstract OMClientResponse onSuccess( - OMResponse.Builder omResponse, OmBucketInfo omBucketInfo, - boolean operationResult); - - /** - * Get the om client response on failure case with lock. - * @param omResponse - * @param exception - * @return OMClientResponse - */ - abstract OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception); - - /** - * Completion hook for final processing before return without lock. - * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - * @param omMetrics - */ - abstract void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics); - - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java deleted file mode 100644 index 41aef6db6d3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
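The abstract OMBucketAclRequest above is a template method: the base class owns locking, the existence check and the cache update, while subclasses contribute the acl list and a boolean-returning operation (add, remove or set). A compact sketch of that shape, using BiPredicate in place of the project's BooleanBiFunction and simplified types:

    import java.util.List;
    import java.util.function.BiPredicate;

    abstract class BucketAclRequestSketch {

      interface BucketInfo { boolean applyAcl(List<String> acls); }

      private final BiPredicate<List<String>, BucketInfo> aclOp;

      protected BucketAclRequestSketch(BiPredicate<List<String>, BucketInfo> aclOp) {
        this.aclOp = aclOp;
      }

      abstract List<String> getAcls();
      abstract void onComplete(boolean changed, Exception failure);

      // Base class drives the flow; subclasses only plug in behaviour.
      final void run(BucketInfo bucket) {
        boolean changed = false;
        Exception failure = null;
        try {
          changed = aclOp.test(getAcls(), bucket);   // add / remove / set
        } catch (Exception ex) {
          failure = ex;
        }
        onComplete(changed, failure);                // logging and metrics, outside locks
      }
    }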
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket.acl; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.util.BooleanBiFunction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AddAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - -/** - * Handle add Acl request for bucket. - */ -public class OMBucketAddAclRequest extends OMBucketAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketAddAclRequest.class); - - private static BooleanBiFunction, OmBucketInfo> bucketAddAclOp; - private String path; - private List ozoneAcls; - - static { - bucketAddAclOp = (ozoneAcls, omBucketInfo) -> { - return omBucketInfo.addAcl(ozoneAcls.get(0)); - }; - } - - public OMBucketAddAclRequest(OMRequest omRequest) { - super(omRequest, bucketAddAclOp); - OzoneManagerProtocolProtos.AddAclRequest addAclRequest = - getOmRequest().getAddAclRequest(); - path = addAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); - } - - @Override - List getAcls() { - return ozoneAcls; - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.AddAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmBucketInfo omBucketInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setAddAclResponse(AddAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMBucketAclResponse(omBucketInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMBucketAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - LOG.debug("Add acl: {} to path: {} success!", getAcls(), getPath()); - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.error("Add acl {} to path {} failed, because acl already exist", - getAcls(), getPath()); - } else { - LOG.error("Add acl {} to path {} failed!", getAcls(), getPath(), - exception); - } - } - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java deleted file mode 100644 index 1d62677b348..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
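The concrete subclasses (OMBucketAddAclRequest above, and the remove/set variants that follow) each supply a single lambda plus their own success and failure logging; the boolean returned by the acl operation decides whether the change is treated as a no-op. A sketch of one such subclass, built on the base sketched earlier (names are illustrative, not the real Ozone API):

    import java.util.List;

    final class AddAclRequestSketch extends BucketAclRequestSketch {

      private final List<String> acls;

      AddAclRequestSketch(String acl) {
        // Add/remove requests carry exactly one acl; set carries the full list.
        super((aclList, bucket) -> bucket.applyAcl(aclList.subList(0, 1)));
        this.acls = List.of(acl);
      }

      @Override List<String> getAcls() {
        return acls;
      }

      @Override void onComplete(boolean changed, Exception failure) {
        if (!changed && failure == null) {
          // Mirrors the "acl already exist" branch of the deleted onComplete().
          System.err.println("acl already present: " + acls);
        }
      }
    }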
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket.acl; - -import java.io.IOException; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.util.BooleanBiFunction; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; - - -/** - * Handle removeAcl request for bucket. - */ -public class OMBucketRemoveAclRequest extends OMBucketAclRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketAddAclRequest.class); - - private static BooleanBiFunction, OmBucketInfo> bucketAddAclOp; - private String path; - private List ozoneAcls; - - static { - bucketAddAclOp = (ozoneAcls, omBucketInfo) -> { - return omBucketInfo.removeAcl(ozoneAcls.get(0)); - }; - } - - public OMBucketRemoveAclRequest(OMRequest omRequest) { - super(omRequest, bucketAddAclOp); - OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = - getOmRequest().getRemoveAclRequest(); - path = removeAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(removeAclRequest.getAcl())); - } - - @Override - List getAcls() { - return ozoneAcls; - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmBucketInfo omBucketInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMBucketAclResponse(omBucketInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMBucketAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - LOG.debug("Remove acl: {} for path: {} success!", getAcls(), getPath()); - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.error("Remove acl {} for path {} failed, because acl does not " + - "exist", - getAcls(), getPath()); - } else { - LOG.error("Remove acl {} for path {} failed!", getAcls(), getPath(), - exception); - } - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java deleted file mode 100644 index b97de955a51..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.bucket.acl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.util.BooleanBiFunction; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.bucket.acl.OMBucketAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; - -/** - * Handle setAcl request for bucket. - */ -public class OMBucketSetAclRequest extends OMBucketAclRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMBucketAddAclRequest.class); - - private static BooleanBiFunction< List, - OmBucketInfo > bucketAddAclOp; - private String path; - private List ozoneAcls; - - static { - bucketAddAclOp = (ozoneAcls, omBucketInfo) -> { - return omBucketInfo.setAcls(ozoneAcls); - }; - } - - public OMBucketSetAclRequest(OMRequest omRequest) { - super(omRequest, bucketAddAclOp); - OzoneManagerProtocolProtos.SetAclRequest setAclRequest = - getOmRequest().getSetAclRequest(); - path = setAclRequest.getObj().getPath(); - ozoneAcls = new ArrayList<>(); - setAclRequest.getAclList().forEach(aclInfo -> - ozoneAcls.add(OzoneAcl.fromProtobuf(aclInfo))); - } - - @Override - List getAcls() { - return ozoneAcls; - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.SetAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmBucketInfo omBucketInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setSetAclResponse(SetAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMBucketAclResponse(omBucketInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMBucketAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - if (LOG.isDebugEnabled()) { - LOG.debug("Set acl: {} for path: {} success!", getAcls(), getPath()); - } - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.error("Set acl {} for path {} failed", getAcls(), getPath()); - } else { - LOG.error("Set acl {} for path {} failed!", getAcls(), getPath(), - exception); - } - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java deleted file mode 100644 index 7b3b43d5678..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains classes for handling acl requests for bucket. - */ -package org.apache.hadoop.ozone.om.request.bucket.acl; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java deleted file mode 100644 index f0ca3b4d23a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to bucket requests. - */ -package org.apache.hadoop.ozone.om.request.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java deleted file mode 100644 index 4b591dbed2d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ /dev/null @@ -1,239 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.file; - -import java.io.IOException; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Map; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateDirectoryRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateDirectoryResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; -/** - * Handle create directory request. 
- */ -public class OMDirectoryCreateRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMDirectoryCreateRequest.class); - - public OMDirectoryCreateRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) { - CreateDirectoryRequest createDirectoryRequest = - getOmRequest().getCreateDirectoryRequest(); - Preconditions.checkNotNull(createDirectoryRequest); - - KeyArgs.Builder newKeyArgs = createDirectoryRequest.getKeyArgs() - .toBuilder().setModificationTime(Time.now()); - - CreateDirectoryRequest.Builder newCreateDirectoryRequest = - createDirectoryRequest.toBuilder().setKeyArgs(newKeyArgs); - - return getOmRequest().toBuilder().setCreateDirectoryRequest( - newCreateDirectoryRequest).setUserInfo(getUserInfo()).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - KeyArgs keyArgs = getOmRequest().getCreateDirectoryRequest().getKeyArgs(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - OMResponse.Builder omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CreateDirectory).setStatus( - OzoneManagerProtocolProtos.Status.OK); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumCreateDirectory(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - - Map auditMap = buildKeyArgsAuditMap(keyArgs); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean acquiredLock = false; - IOException exception = null; - OMClientResponse omClientResponse = null; - try { - // check Acl - checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - - // Check if this is the root of the filesystem. - if (keyName.length() == 0) { - return new OMDirectoryCreateResponse(null, - omResponse.setCreateDirectoryResponse( - CreateDirectoryResponse.newBuilder()).build()); - } - // acquire lock - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - // TODO: Not checking volume exist here, once we have full cache we can - // add volume exist check also. - - OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)); - - if (omBucketInfo == null) { - throw new OMException("Bucket not found " + bucketName, - BUCKET_NOT_FOUND); - } - - // Need to check if any files exist in the given path, if they exist we - // cannot create a directory with the given key. 
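      // A brief worked illustration of the conflict rule enforced below
      // (a sketch; the key names are hypothetical). Suppose the request is to
      // create the directory "a/b/c" in volume1/bucket1:
      //
      //   OMFileRequest.OMDirectoryResult result =
      //       OMFileRequest.verifyFilesInPath(omMetadataManager,
      //           "volume1", "bucket1", "a/b/c", Paths.get("a/b/c"));
      //
      //   FILE_EXISTS              -> key "a/b/c" is a file, reject with
      //                               FILE_ALREADY_EXISTS
      //   FILE_EXISTS_IN_GIVENPATH -> e.g. "a/b" is a file, reject likewise
      //   DIRECTORY_EXISTS_IN_GIVENPATH or NONE -> safe to create the entry
      //   DIRECTORY_EXISTS         -> the directory already exists; nothing
      //                               is written (treated as a no-op below)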
- OMFileRequest.OMDirectoryResult omDirectoryResult = - OMFileRequest.verifyFilesInPath(omMetadataManager, - volumeName, bucketName, keyName, Paths.get(keyName)); - - OmKeyInfo dirKeyInfo = null; - if (omDirectoryResult == FILE_EXISTS || - omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { - throw new OMException("Unable to create directory: " +keyName - + " in volume/bucket: " + volumeName + "/" + bucketName, - FILE_ALREADY_EXISTS); - } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH || - omDirectoryResult == NONE) { - dirKeyInfo = createDirectoryKeyInfo(ozoneManager, omBucketInfo, - volumeName, bucketName, keyName, keyArgs); - - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, - dirKeyInfo.getKeyName())), - new CacheValue<>(Optional.of(dirKeyInfo), transactionLogIndex)); - } - // if directory already exists do nothing or do we need to throw - // exception? Current KeyManagerImpl code does just return, following - // similar approach. - - omResponse.setCreateDirectoryResponse( - CreateDirectoryResponse.newBuilder()); - omClientResponse = new OMDirectoryCreateResponse(dirKeyInfo, - omResponse.build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMDirectoryCreateResponse(null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY, - auditMap, exception, userInfo)); - - if (exception == null) { - LOG.debug("Directory is successfully created for Key: {} in " + - "volume/bucket:{}/{}", keyName, volumeName, bucketName); - return omClientResponse; - } else { - LOG.error("CreateDirectory failed for Key: {} in volume/bucket:{}/{}", - keyName, volumeName, bucketName, exception); - omMetrics.incNumCreateDirectoryFails(); - return omClientResponse; - } - } - - private OmKeyInfo createDirectoryKeyInfo(OzoneManager ozoneManager, - OmBucketInfo omBucketInfo, String volumeName, String bucketName, - String keyName, KeyArgs keyArgs) - throws IOException { - Optional encryptionInfo = - getFileEncryptionInfo(ozoneManager, omBucketInfo); - String dirName = OzoneFSUtils.addTrailingSlashIfNeeded(keyName); - - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(dirName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(0) - .setReplicationType(HddsProtos.ReplicationType.RATIS) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setFileEncryptionInfo(encryptionInfo.orNull()) - .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) - .build(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java deleted file mode 100644 index 20b51747caa..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ /dev/null @@ -1,357 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.file; - -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import javax.annotation.Nonnull; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateFileRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.UniqueId; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - - -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.NONE; - -/** - * Handles create file request. - */ -public class OMFileCreateRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMFileCreateRequest.class); - public OMFileCreateRequest(OMRequest omRequest) { - super(omRequest); - } - - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); - Preconditions.checkNotNull(createFileRequest); - - KeyArgs keyArgs = createFileRequest.getKeyArgs(); - - if (keyArgs.getKeyName().length() == 0) { - // Check if this is the root of the filesystem. - // Not throwing exception here, as need to throw exception after - // checking volume/bucket exists. 
- return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build(); - } - - long scmBlockSize = ozoneManager.getScmBlockSize(); - - // NOTE size of a key is not a hard limit on anything, it is a value that - // client should expect, in terms of current size of key. If client sets - // a value, then this value is used, otherwise, we allocate a single - // block which is the current size, if read by the client. - final long requestedSize = keyArgs.getDataSize() > 0 ? - keyArgs.getDataSize() : scmBlockSize; - - boolean useRatis = ozoneManager.shouldUseRatis(); - - HddsProtos.ReplicationFactor factor = keyArgs.getFactor(); - if (factor == null) { - factor = useRatis ? HddsProtos.ReplicationFactor.THREE : - HddsProtos.ReplicationFactor.ONE; - } - - HddsProtos.ReplicationType type = keyArgs.getType(); - if (type == null) { - type = useRatis ? HddsProtos.ReplicationType.RATIS : - HddsProtos.ReplicationType.STAND_ALONE; - } - - // TODO: Here we are allocating block with out any check for - // bucket/key/volume or not and also with out any authorization checks. - - List< OmKeyLocationInfo > omKeyLocationInfoList = - allocateBlock(ozoneManager.getScmClient(), - ozoneManager.getBlockTokenSecretManager(), type, factor, - new ExcludeList(), requestedSize, scmBlockSize, - ozoneManager.getPreallocateBlocksMax(), - ozoneManager.isGrpcBlockTokenEnabled(), - ozoneManager.getOMNodeId()); - - KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() - .setModificationTime(Time.now()).setType(type).setFactor(factor) - .setDataSize(requestedSize); - - newKeyArgs.addAllKeyLocations(omKeyLocationInfoList.stream() - .map(OmKeyLocationInfo::getProtobuf).collect(Collectors.toList())); - - CreateFileRequest.Builder newCreateFileRequest = - createFileRequest.toBuilder().setKeyArgs(newKeyArgs) - .setClientID(UniqueId.next()); - - return getOmRequest().toBuilder() - .setCreateFileRequest(newCreateFileRequest).setUserInfo(getUserInfo()) - .build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); - KeyArgs keyArgs = createFileRequest.getKeyArgs(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - // if isRecursive is true, file would be created even if parent - // directories does not exist. - boolean isRecursive = createFileRequest.getIsRecursive(); - - // if isOverWrite is true, file would be over written. 
- boolean isOverWrite = createFileRequest.getIsOverwrite(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumCreateFile(); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - boolean acquiredLock = false; - IOException exception = null; - Optional encryptionInfo = Optional.absent(); - OmKeyInfo omKeyInfo = null; - - final List locations = new ArrayList<>(); - OMClientResponse omClientResponse = null; - try { - // check Acl - checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - - // acquire lock - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - OmBucketInfo bucketInfo = - omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)); - - if (bucketInfo == null) { - throw new OMException("Bucket " + bucketName + " not found", - OMException.ResultCodes.BUCKET_NOT_FOUND); - } - - if (keyName.length() == 0) { - // Check if this is the root of the filesystem. - throw new OMException("Can not write to directory: " + keyName, - OMException.ResultCodes.NOT_A_FILE); - } - - OMFileRequest.OMDirectoryResult omDirectoryResult = - OMFileRequest.verifyFilesInPath(omMetadataManager, volumeName, - bucketName, keyName, Paths.get(keyName)); - - // Check if a file or directory exists with same key name. - if (omDirectoryResult == FILE_EXISTS) { - if (!isOverWrite) { - throw new OMException("File " + keyName + " already exists", - OMException.ResultCodes.FILE_ALREADY_EXISTS); - } - } else if (omDirectoryResult == DIRECTORY_EXISTS) { - throw new OMException("Can not write to directory: " + keyName, - OMException.ResultCodes.NOT_A_FILE); - } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { - throw new OMException("Can not create file: " + keyName + "as there " + - "is already file in the given path", - OMException.ResultCodes.NOT_A_FILE); - } - - if (!isRecursive) { - // We cannot create a file if complete parent directories does not exist - - // verifyFilesInPath, checks only the path and its parent directories. - // But there may be some keys below the given path. So this method - // checks them. - - // Example: - // Existing keys in table - // a/b/c/d/e - // a/b/c/d/f - // a/b - - // Take an example if given key to be created with isRecursive set - // to false is "a/b/c/e". - - // There is no key in keyTable with the provided path. - // Check in case if there are keys exist in given path. (This can - // happen if keys are directly created using key requests.) - - // We need to do this check only in the case of non-recursive, so - // not included the checks done in checkKeysUnderPath in - // verifyFilesInPath method, as that method is common method for - // directory and file create request. This also avoid's this - // unnecessary check which is not required for those cases. 
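      // A minimal worked example of the parent check performed below (a
      // sketch; the key names are hypothetical). Suppose the key table only
      // holds "a/b/c/d/e" and "a/b/c/d/f", created directly through key
      // requests, so no explicit directory entries exist for "a", "a/b" or
      // "a/b/c". A non-recursive create of "a/b/c/e" then yields
      // omDirectoryResult == NONE, and only the prefix scan inside
      // checkKeysUnderPath can reveal that the parent path already has
      // children:
      //
      //   String parentDirKey = omMetadataManager.getOzoneDirKey(
      //       volumeName, bucketName, "a/b/c");   // parent of "a/b/c/e"
      //   // seeking the key table at parentDirKey lands on "a/b/c/d/e",
      //   // which starts with that prefix, so checkKeysUnderPath returns
      //   // true and the create proceeds without intermediate directories.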
- if (omDirectoryResult == NONE || - omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH) { - boolean canBeCreated = checkKeysUnderPath(omMetadataManager, - volumeName, bucketName, keyName); - if (!canBeCreated) { - throw new OMException("Can not create file: " + keyName + "as one" + - " of parent directory is not created", - OMException.ResultCodes.NOT_A_FILE); - } - } - } - - // do open key - encryptionInfo = getFileEncryptionInfo(ozoneManager, bucketInfo); - omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, - omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName), keyArgs.getDataSize(), locations, - encryptionInfo.orNull(), ozoneManager.getPrefixManager(), bucketInfo); - - omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, - locations, encryptionInfo.orNull(), exception, - createFileRequest.getClientID(), transactionLogIndex, volumeName, - bucketName, keyName, ozoneManager, - OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), bucketInfo); - } catch (IOException ex) { - exception = ex; - omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, - locations, encryptionInfo.orNull(), exception, - createFileRequest.getClientID(), transactionLogIndex, - volumeName, bucketName, keyName, ozoneManager, - OMAction.CREATE_FILE, ozoneManager.getPrefixManager(), null); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - return omClientResponse; - } - - - - /** - * Check if any keys exist under given path. - * @param omMetadataManager - * @param volumeName - * @param bucketName - * @param keyName - * @return if exists true, else false. If key name is one level path return - * true. - * @throws IOException - */ - private boolean checkKeysUnderPath(OMMetadataManager omMetadataManager, - @Nonnull String volumeName, @Nonnull String bucketName, - @Nonnull String keyName) throws IOException { - - Path parentPath = Paths.get(keyName).getParent(); - - if (parentPath != null) { - String dbKeyPath = omMetadataManager.getOzoneDirKey(volumeName, - bucketName, parentPath.toString()); - - // First check in key table cache. - Iterator< Map.Entry, CacheValue>> iterator = - omMetadataManager.getKeyTable().cacheIterator(); - - while (iterator.hasNext()) { - Map.Entry< CacheKey< String >, CacheValue< OmKeyInfo > > entry = - iterator.next(); - String key = entry.getKey().getCacheKey(); - OmKeyInfo omKeyInfo = entry.getValue().getCacheValue(); - // Making sure that entry is not for delete key request. - if (key.startsWith(dbKeyPath) && omKeyInfo != null) { - return true; - } - } - try (TableIterator> - keyIter = omMetadataManager.getKeyTable().iterator()) { - Table.KeyValue kv = keyIter.seek(dbKeyPath); - - - if (kv != null) { - // Check the entry in db is not marked for delete. This can happen - // while entry is marked for delete, but it is not flushed to DB. - CacheValue cacheValue = omMetadataManager.getKeyTable() - .getCacheValue(new CacheKey<>(kv.getKey())); - if (cacheValue != null) { - if (kv.getKey().startsWith(dbKeyPath) - && cacheValue.getCacheValue() != null) { - return true; // we found at least one key with this db key path - } - } else { - if (kv.getKey().startsWith(dbKeyPath)) { - return true; // we found at least one key with this db key path - } - } - } - } - } else { - // one level key path. 
- // We can safely return true, as this method is called after - // verifyFilesInPath, so with this keyName there is no file and directory. - return true; - } - return false; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java deleted file mode 100644 index dbe056cd2a9..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.file; - -import java.io.IOException; -import java.nio.file.Path; - -import org.apache.hadoop.ozone.om.OMMetadataManager; - -import javax.annotation.Nonnull; - -/** - * Base class for file requests. - */ -public final class OMFileRequest { - - private OMFileRequest() { - } - /** - * Verify any files exist in the given path in the specified volume/bucket. - * @param omMetadataManager - * @param volumeName - * @param bucketName - * @param keyPath - * @return true - if file exist in the given path, else false. - * @throws IOException - */ - public static OMDirectoryResult verifyFilesInPath( - @Nonnull OMMetadataManager omMetadataManager, - @Nonnull String volumeName, - @Nonnull String bucketName, @Nonnull String keyName, - @Nonnull Path keyPath) throws IOException { - - String fileNameFromDetails = omMetadataManager.getOzoneKey(volumeName, - bucketName, keyName); - String dirNameFromDetails = omMetadataManager.getOzoneDirKey(volumeName, - bucketName, keyName); - - while (keyPath != null) { - String pathName = keyPath.toString(); - - String dbKeyName = omMetadataManager.getOzoneKey(volumeName, - bucketName, pathName); - String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName, - bucketName, pathName); - - if (omMetadataManager.getKeyTable().get(dbKeyName) != null) { - // Found a file in the given path. - // Check if this is actual file or a file in the given path - if (dbKeyName.equals(fileNameFromDetails)) { - return OMDirectoryResult.FILE_EXISTS; - } else { - return OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; - } - } else if (omMetadataManager.getKeyTable().get(dbDirKeyName) != null) { - // Found a directory in the given path. - // Check if this is actual directory or a directory in the given path - if (dbDirKeyName.equals(dirNameFromDetails)) { - return OMDirectoryResult.DIRECTORY_EXISTS; - } else { - return OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; - } - } - keyPath = keyPath.getParent(); - } - - // Found no files/ directories in the given path. - return OMDirectoryResult.NONE; - } - - /** - * Return codes used by verifyFilesInPath method. - */ - enum OMDirectoryResult { - - // In below examples path is assumed as "a/b/c" in volume volume1 and - // bucket b1. - - // When a directory exists in given path. - // If we have a directory with name "a/b" we return this enum value. - DIRECTORY_EXISTS_IN_GIVENPATH, - - // When a file exists in given path. - // If we have a file with name "a/b" we return this enum value. - FILE_EXISTS_IN_GIVENPATH, - - // When file already exists with the given path. - // If we have a file with name "a/b/c" we return this enum value. - FILE_EXISTS, - - // When directory exists with the given path. - // If we have a file with name "a/b/c" we return this enum value. - DIRECTORY_EXISTS, - - // If no file/directory exists with the given path. - // If we don't have any file/directory name with "a/b/c" or any - // sub-directory or file name from the given path we return this enum value. 
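    // Summarising the cases documented above for the example path "a/b/c"
    // in volume volume1 and bucket b1 (how verifyFilesInPath maps key-table
    // contents to a result):
    //
    //   entry present in the table      result
    //   "a/b/c"  stored as a file   ->  FILE_EXISTS
    //   "a/b"    stored as a file   ->  FILE_EXISTS_IN_GIVENPATH
    //   "a/b/c"  stored as a dir    ->  DIRECTORY_EXISTS
    //   "a/b"    stored as a dir    ->  DIRECTORY_EXISTS_IN_GIVENPATH
    //   none of the above           ->  NONE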
- NONE - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java deleted file mode 100644 index 3184500f385..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to file requests. - */ -package org.apache.hadoop.ozone.om.request.file; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java deleted file mode 100644 index e8009270e45..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AllocateBlockRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .KEY_NOT_FOUND; - -/** - * Handles allocate block request. - */ -public class OMAllocateBlockRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMAllocateBlockRequest.class); - - public OMAllocateBlockRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - - AllocateBlockRequest allocateBlockRequest = - getOmRequest().getAllocateBlockRequest(); - - Preconditions.checkNotNull(allocateBlockRequest); - - KeyArgs keyArgs = allocateBlockRequest.getKeyArgs(); - - ExcludeList excludeList = new ExcludeList(); - if (allocateBlockRequest.hasExcludeList()) { - excludeList = - ExcludeList.getFromProtoBuf(allocateBlockRequest.getExcludeList()); - } - - // TODO: Here we are allocating block with out any check for key exist in - // open table or not and also with out any authorization checks. - // Assumption here is that allocateBlocks with out openKey will be less. - // There is a chance some one can misuse this api to flood allocateBlock - // calls. But currently allocateBlock is internally called from - // BlockOutputStreamEntryPool, so we are fine for now. But if one some - // one uses direct omclient we might be in trouble. - - - // To allocate atleast one block passing requested size and scmBlockSize - // as same value. 
When allocating block requested size is same as - // scmBlockSize. - List omKeyLocationInfoList = - allocateBlock(ozoneManager.getScmClient(), - ozoneManager.getBlockTokenSecretManager(), keyArgs.getType(), - keyArgs.getFactor(), excludeList, ozoneManager.getScmBlockSize(), - ozoneManager.getScmBlockSize(), - ozoneManager.getPreallocateBlocksMax(), - ozoneManager.isGrpcBlockTokenEnabled(), ozoneManager.getOMNodeId()); - - // Set modification time - KeyArgs.Builder newKeyArgs = keyArgs.toBuilder() - .setModificationTime(Time.now()); - - AllocateBlockRequest.Builder newAllocatedBlockRequest = - AllocateBlockRequest.newBuilder() - .setClientID(allocateBlockRequest.getClientID()) - .setKeyArgs(newKeyArgs); - - - - if (allocateBlockRequest.hasExcludeList()) { - newAllocatedBlockRequest.setExcludeList( - allocateBlockRequest.getExcludeList()); - } - - // Add allocated block info. - newAllocatedBlockRequest.setKeyLocation( - omKeyLocationInfoList.get(0).getProtobuf()); - - return getOmRequest().toBuilder().setUserInfo(getUserInfo()) - .setAllocateBlockRequest(newAllocatedBlockRequest).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest = - getOmRequest().getAllocateBlockRequest(); - - OzoneManagerProtocolProtos.KeyArgs keyArgs = - allocateBlockRequest.getKeyArgs(); - - OzoneManagerProtocolProtos.KeyLocation blockLocation = - allocateBlockRequest.getKeyLocation(); - Preconditions.checkNotNull(blockLocation); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - long clientID = allocateBlockRequest.getClientID(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumBlockAllocateCalls(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - - Map auditMap = buildKeyArgsAuditMap(keyArgs); - auditMap.put(OzoneConsts.CLIENT_ID, String.valueOf(clientID)); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.AllocateBlock).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - IOException exception = null; - OmKeyInfo omKeyInfo = null; - try { - // check Acl - checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - validateBucketAndVolume(omMetadataManager, volumeName, - bucketName); - - String openKey = omMetadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - - // Here we don't acquire bucket/volume lock because for a single client - // allocateBlock is called in serial fashion. - - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - if (omKeyInfo == null) { - throw new OMException("Open Key not found " + openKey, KEY_NOT_FOUND); - } - - // Append new block - omKeyInfo.appendNewBlocks(Collections.singletonList( - OmKeyLocationInfo.getFromProtobuf(blockLocation)), false); - - // Set modification time. - omKeyInfo.setModificationTime(keyArgs.getModificationTime()); - - // Add to cache. 
- omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(openKey), new CacheValue<>(Optional.of(omKeyInfo), - transactionLogIndex)); - - } catch (IOException ex) { - exception = ex; - } - - auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, - exception, getOmRequest().getUserInfo())); - - OMClientResponse omClientResponse = null; - if (exception == null) { - omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder() - .setKeyLocation(blockLocation).build()); - omClientResponse = new OMAllocateBlockResponse(omKeyInfo, - clientID, omResponse.build()); - } else { - omMetrics.incNumBlockAllocateCallFails(); - omClientResponse = new OMAllocateBlockResponse(null, -1L, - createErrorOMResponse(omResponse, exception)); - } - - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - return omClientResponse; - - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java deleted file mode 100644 index 196d61c15d0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CommitKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CommitKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handles CommitKey request. 
- */ -public class OMKeyCommitRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyCommitRequest.class); - - public OMKeyCommitRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); - Preconditions.checkNotNull(commitKeyRequest); - - KeyArgs keyArgs = commitKeyRequest.getKeyArgs(); - - KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); - - return getOmRequest().toBuilder() - .setCommitKeyRequest(commitKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); - - KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs(); - - String volumeName = commitKeyArgs.getVolumeName(); - String bucketName = commitKeyArgs.getBucketName(); - String keyName = commitKeyArgs.getKeyName(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumKeyCommits(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - - Map auditMap = buildKeyArgsAuditMap(commitKeyArgs); - - OzoneManagerProtocolProtos.OMResponse.Builder omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CommitKey).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - IOException exception = null; - OmKeyInfo omKeyInfo = null; - OMClientResponse omClientResponse = null; - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - try { - // check Acl - checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - - List locationInfoList = commitKeyArgs - .getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); - - String dbOzoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, commitKeyRequest.getClientID()); - - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); - if (omKeyInfo == null) { - throw new OMException("Failed to commit key, as " + dbOpenKey + - "entry is not found in the openKey table", KEY_NOT_FOUND); - } - omKeyInfo.setDataSize(commitKeyArgs.getDataSize()); - - omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime()); - - //update the block length for each block - omKeyInfo.updateLocationInfoList(locationInfoList); - - // Add to cache of open key table and key table. 
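      // Note on the two cache updates that follow (intent only, as a sketch):
      // the open-key entry is cached with Optional.absent(), which serves as
      // a delete marker so the uncommitted entry stops being visible, while
      // the key-table entry caches the committed OmKeyInfo. Both carry
      // transactionLogIndex; the actual RocksDB delete/put is expected to be
      // applied later when the double buffer flushes the corresponding
      // OMKeyCommitResponse.
      //
      //   openKeyTable cache:  dbOpenKey  -> absent (tombstone)
      //   keyTable cache:      dbOzoneKey -> committed omKeyInfo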
- omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(dbOpenKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(dbOzoneKey), - new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex)); - - omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder().build()); - omClientResponse = - new OMKeyCommitResponse(omKeyInfo, commitKeyRequest.getClientID(), - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMKeyCommitResponse(null, -1L, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap, - exception, getOmRequest().getUserInfo())); - - // return response after releasing lock. - if (exception == null) { - omResponse.setCommitKeyResponse(CommitKeyResponse.newBuilder().build()); - - // As when we commit the key, then it is visible in ozone, so we should - // increment here. - // As key also can have multiple versions, we need to increment keys - // only if version is 0. Currently we have not complete support of - // versioning of keys. So, this can be revisited later. - - if (omKeyInfo.getKeyLocationVersions().size() == 1) { - omMetrics.incNumKeys(); - } - return omClientResponse; - } else { - LOG.error("CommitKey failed for Key: {} in volume/bucket:{}/{}", - keyName, bucketName, volumeName, exception); - omMetrics.incNumKeyCommitFails(); - return omClientResponse; - } - - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java deleted file mode 100644 index baa13ad87fe..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ /dev/null @@ -1,209 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.UniqueId; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -/** - * Handles CreateKey request. - */ - -public class OMKeyCreateRequest extends OMKeyRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyCreateRequest.class); - - public OMKeyCreateRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); - Preconditions.checkNotNull(createKeyRequest); - - KeyArgs keyArgs = createKeyRequest.getKeyArgs(); - - // We cannot allocate block for multipart upload part when - // createMultipartKey is called, as we will not know type and factor with - // which initiateMultipartUpload has started for this key. When - // allocateBlock call happen's we shall know type and factor, as we set - // the type and factor read from multipart table, and set the KeyInfo in - // validateAndUpdateCache and return to the client. TODO: See if we can fix - // this. We do not call allocateBlock in openKey for multipart upload. - - CreateKeyRequest.Builder newCreateKeyRequest = null; - KeyArgs.Builder newKeyArgs = null; - if (!keyArgs.getIsMultipartKey()) { - - long scmBlockSize = ozoneManager.getScmBlockSize(); - - // NOTE size of a key is not a hard limit on anything, it is a value that - // client should expect, in terms of current size of key. If client sets - // a value, then this value is used, otherwise, we allocate a single - // block which is the current size, if read by the client. - final long requestedSize = keyArgs.getDataSize() > 0 ? 
- keyArgs.getDataSize() : scmBlockSize; - - boolean useRatis = ozoneManager.shouldUseRatis(); - - HddsProtos.ReplicationFactor factor = keyArgs.getFactor(); - if (factor == null) { - factor = useRatis ? HddsProtos.ReplicationFactor.THREE : - HddsProtos.ReplicationFactor.ONE; - } - - HddsProtos.ReplicationType type = keyArgs.getType(); - if (type == null) { - type = useRatis ? HddsProtos.ReplicationType.RATIS : - HddsProtos.ReplicationType.STAND_ALONE; - } - - // TODO: Here we are allocating block with out any check for - // bucket/key/volume or not and also with out any authorization checks. - // As for a client for the first time this can be executed on any OM, - // till leader is identified. - - List< OmKeyLocationInfo > omKeyLocationInfoList = - allocateBlock(ozoneManager.getScmClient(), - ozoneManager.getBlockTokenSecretManager(), type, factor, - new ExcludeList(), requestedSize, scmBlockSize, - ozoneManager.getPreallocateBlocksMax(), - ozoneManager.isGrpcBlockTokenEnabled(), - ozoneManager.getOMNodeId()); - - newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now()) - .setType(type).setFactor(factor) - .setDataSize(requestedSize); - - newKeyArgs.addAllKeyLocations(omKeyLocationInfoList.stream() - .map(OmKeyLocationInfo::getProtobuf).collect(Collectors.toList())); - } else { - newKeyArgs = keyArgs.toBuilder().setModificationTime(Time.now()); - } - - newCreateKeyRequest = - createKeyRequest.toBuilder().setKeyArgs(newKeyArgs) - .setClientID(UniqueId.next()); - - return getOmRequest().toBuilder() - .setCreateKeyRequest(newCreateKeyRequest).setUserInfo(getUserInfo()) - .build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); - - - KeyArgs keyArgs = createKeyRequest.getKeyArgs(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumKeyAllocates(); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - OmKeyInfo omKeyInfo = null; - final List< OmKeyLocationInfo > locations = new ArrayList<>(); - Optional encryptionInfo = Optional.absent(); - IOException exception = null; - boolean acquireLock = false; - OMClientResponse omClientResponse = null; - try { - // check Acl - checkBucketAcls(ozoneManager, volumeName, bucketName, keyName); - - acquireLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - //TODO: We can optimize this get here, if getKmsProvider is null, then - // bucket encryptionInfo will be not set. If this assumption holds - // true, we can avoid get from bucket table. 
- - OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)); - - encryptionInfo = getFileEncryptionInfo(ozoneManager, bucketInfo); - - omKeyInfo = prepareKeyInfo(omMetadataManager, keyArgs, - omMetadataManager.getOzoneKey(volumeName, bucketName, keyName), - keyArgs.getDataSize(), locations, encryptionInfo.orNull(), - ozoneManager.getPrefixManager(), bucketInfo); - omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, - locations, encryptionInfo.orNull(), exception, - createKeyRequest.getClientID(), transactionLogIndex, volumeName, - bucketName, keyName, ozoneManager, OMAction.ALLOCATE_KEY, - ozoneManager.getPrefixManager(), bucketInfo); - } catch (IOException ex) { - exception = ex; - omClientResponse = prepareCreateKeyResponse(keyArgs, omKeyInfo, locations, - encryptionInfo.orNull(), exception, createKeyRequest.getClientID(), - transactionLogIndex, volumeName, bucketName, keyName, ozoneManager, - OMAction.ALLOCATE_KEY, ozoneManager.getPrefixManager(), null); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquireLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - return omClientResponse; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java deleted file mode 100644 index ee4b9b2dc0d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
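Note on the OMKeyCreateRequest hunk above: preExecute fills in whatever the client left unset before the request goes through Ratis, defaulting the requested size to one SCM block and the replication settings to the cluster's Ratis choice. A minimal sketch of those defaulting rules, with invented stand-in enums instead of the real HddsProtos types:

    /** Simplified stand-ins for the KeyArgs fields that preExecute defaults. */
    final class CreateKeyDefaults {

      enum ReplicationType { RATIS, STAND_ALONE }
      enum ReplicationFactor { ONE, THREE }

      /** If the client did not declare a size, reserve one SCM block up front. */
      static long requestedSize(long clientDataSize, long scmBlockSize) {
        return clientDataSize > 0 ? clientDataSize : scmBlockSize;
      }

      /** Missing replication type falls back to the cluster-wide Ratis choice. */
      static ReplicationType defaultType(ReplicationType fromClient, boolean useRatis) {
        if (fromClient != null) {
          return fromClient;
        }
        return useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
      }

      /** Missing factor defaults to THREE with Ratis, ONE for stand-alone. */
      static ReplicationFactor defaultFactor(ReplicationFactor fromClient, boolean useRatis) {
        if (fromClient != null) {
          return fromClient;
        }
        return useRatis ? ReplicationFactor.THREE : ReplicationFactor.ONE;
      }
    }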

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.Map; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handles DeleteKey request. 
- */ -public class OMKeyDeleteRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyDeleteRequest.class); - - public OMKeyDeleteRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); - Preconditions.checkNotNull(deleteKeyRequest); - - OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); - - OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - keyArgs.toBuilder().setModificationTime(Time.now()); - - return getOmRequest().toBuilder() - .setDeleteKeyRequest(deleteKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); - - OzoneManagerProtocolProtos.KeyArgs deleteKeyArgs = - deleteKeyRequest.getKeyArgs(); - - String volumeName = deleteKeyArgs.getVolumeName(); - String bucketName = deleteKeyArgs.getBucketName(); - String keyName = deleteKeyArgs.getKeyName(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumKeyDeletes(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - - Map auditMap = buildKeyArgsAuditMap(deleteKeyArgs); - - OzoneManagerProtocolProtos.OMResponse.Builder omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.DeleteKey).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - IOException exception = null; - boolean acquiredLock = false; - OMClientResponse omClientResponse = null; - try { - // check Acl - checkKeyAcls(ozoneManager, volumeName, bucketName, keyName); - - String objectKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName); - - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - // Not doing bucket/volume checks here. In this way we can avoid db - // checks for them. - // TODO: Once we have volume/bucket full cache, we can add - // them back, as these checks will be inexpensive at that time. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey); - - if (omKeyInfo == null) { - throw new OMException("Key not found", KEY_NOT_FOUND); - } - - // Update table cache. - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName)), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - - // No need to add cache entries to delete table. As delete table will - // be used by DeleteKeyService only, not used for any client response - // validation, so we don't need to add to cache. - // TODO: Revisit if we need it later. 
- - omClientResponse = new OMKeyDeleteResponse(omKeyInfo, - omResponse.setDeleteKeyResponse( - DeleteKeyResponse.newBuilder()).build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMKeyDeleteResponse(null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap, - exception, userInfo)); - - // return response. - if (exception == null) { - omMetrics.decNumKeys(); - return omClientResponse; - } else { - omMetrics.incNumKeyDeleteFails(); - return omClientResponse; - } - - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java deleted file mode 100644 index 0699b2a1bca..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
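Note on the OMKeyDeleteRequest hunk above: the delete never touches RocksDB inside validateAndUpdateCache; it records a cache entry whose value is Optional.absent(), and the real DB mutation happens later when the double buffer flushes the response. A toy model of that tombstone idea, using java.util.Optional and a HashMap instead of the actual Guava Optional and TableCache/CacheValue API:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    /**
     * Toy model of the table-cache "tombstone" used by the delete request:
     * a deletion is recorded as an absent value tagged with the transaction
     * index, so reads through the cache see the key as gone before the
     * double buffer flushes the real DB delete.
     */
    final class TombstoneCache<K, V> {

      private static final class Entry<V> {
        final Optional<V> value;   // empty => deleted
        final long txIndex;
        Entry(Optional<V> value, long txIndex) {
          this.value = value;
          this.txIndex = txIndex;
        }
      }

      private final Map<K, Entry<V>> cache = new HashMap<>();

      void put(K key, V value, long txIndex) {
        cache.put(key, new Entry<>(Optional.of(value), txIndex));
      }

      void markDeleted(K key, long txIndex) {
        cache.put(key, new Entry<>(Optional.empty(), txIndex));
      }

      /** Returns empty if the key is unknown or has been tombstoned. */
      Optional<V> get(K key) {
        Entry<V> e = cache.get(key);
        return e == null ? Optional.empty() : e.value;
      }
    }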

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * Handles purging of keys from OM DB. - */ -public class OMKeyPurgeRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyPurgeRequest.class); - - public OMKeyPurgeRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); - List purgeKeysList = purgeKeysRequest.getKeysList(); - - LOG.debug("Processing Purge Keys for {} number of keys.", - purgeKeysList.size()); - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(Type.PurgeKeys) - .setPurgeKeysResponse( - OzoneManagerProtocolProtos.PurgeKeysResponse.newBuilder().build()) - .setStatus(Status.OK) - .setSuccess(true) - .build(); - - OMClientResponse omClientResponse = new OMKeyPurgeResponse(purgeKeysList, - omResponse); - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - return omClientResponse; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java deleted file mode 100644 index 526473c2399..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ /dev/null @@ -1,202 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
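Note on the OMKeyPurgeRequest hunk above, and on the finally blocks of the other requests: every OMClientResponse is handed to ozoneManagerDoubleBufferHelper.add(response, transactionLogIndex), and the returned future is kept so callers can wait for the batched DB flush. A much-simplified sketch of that pattern; SimpleDoubleBuffer and its methods are invented for illustration, not the OzoneManagerDoubleBuffer API:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    /**
     * Responses are queued with their transaction index plus a future that
     * completes once the batch holding them has been written to the DB.
     */
    final class SimpleDoubleBuffer<R> {

      private static final class Pending<R> {
        final R response;
        final long txIndex;
        final CompletableFuture<Void> flushed = new CompletableFuture<>();
        Pending(R response, long txIndex) {
          this.response = response;
          this.txIndex = txIndex;
        }
      }

      private List<Pending<R>> current = new ArrayList<>();

      /** Mirrors doubleBufferHelper.add(response, txIndex). */
      synchronized CompletableFuture<Void> add(R response, long txIndex) {
        Pending<R> p = new Pending<>(response, txIndex);
        current.add(p);
        return p.flushed;
      }

      /** Swap buffers and complete the futures of everything just flushed. */
      synchronized void flush() {
        List<Pending<R>> toFlush = current;
        current = new ArrayList<>();
        // ... write toFlush to the DB in one batch here ...
        toFlush.forEach(p -> p.flushed.complete(null));
      }
    }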

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.Map; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RenameKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RenameKeyResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handles rename key request. - */ -public class OMKeyRenameRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyRenameRequest.class); - - public OMKeyRenameRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - - RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); - Preconditions.checkNotNull(renameKeyRequest); - - // Set modification time. 
- KeyArgs.Builder newKeyArgs = renameKeyRequest.getKeyArgs().toBuilder() - .setModificationTime(Time.now()); - - return getOmRequest().toBuilder() - .setRenameKeyRequest(renameKeyRequest.toBuilder() - .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); - - OzoneManagerProtocolProtos.KeyArgs renameKeyArgs = - renameKeyRequest.getKeyArgs(); - - String volumeName = renameKeyArgs.getVolumeName(); - String bucketName = renameKeyArgs.getBucketName(); - String fromKeyName = renameKeyArgs.getKeyName(); - String toKeyName = renameKeyRequest.getToKeyName(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumKeyRenames(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - - Map auditMap = buildKeyArgsAuditMap(renameKeyArgs); - - OzoneManagerProtocolProtos.OMResponse.Builder omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CommitKey).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean acquiredLock = false; - OMClientResponse omClientResponse = null; - IOException exception = null; - OmKeyInfo fromKeyValue = null; - try { - if (toKeyName.length() == 0 || fromKeyName.length() == 0) { - throw new OMException("Key name is empty", - OMException.ResultCodes.INVALID_KEY_NAME); - } - // check Acl - checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName); - - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - // Not doing bucket/volume checks here. In this way we can avoid db - // checks for them. - // TODO: Once we have volume/bucket full cache, we can add - // them back, as these checks will be inexpensive at that time. - - // fromKeyName should exist - String fromKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, fromKeyName); - fromKeyValue = omMetadataManager.getKeyTable().get(fromKey); - if (fromKeyValue == null) { - // TODO: Add support for renaming open key - throw new OMException("Key not found " + fromKey, KEY_NOT_FOUND); - } - - // toKeyName should not exist - String toKey = - omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - OmKeyInfo toKeyValue = omMetadataManager.getKeyTable().get(toKey); - if (toKeyValue != null) { - throw new OMException("Key already exists " + toKeyName, - OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - - fromKeyValue.setKeyName(toKeyName); - - //Set modification time - fromKeyValue.setModificationTime(renameKeyArgs.getModificationTime()); - - // Add to cache. - // fromKey should be deleted, toKey should be added with newly updated - // omKeyInfo. 
- Table keyTable = omMetadataManager.getKeyTable(); - - keyTable.addCacheEntry(new CacheKey<>(fromKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - - keyTable.addCacheEntry(new CacheKey<>(toKey), - new CacheValue<>(Optional.of(fromKeyValue), transactionLogIndex)); - - omClientResponse = new OMKeyRenameResponse(fromKeyValue, toKeyName, - fromKeyName, omResponse.setRenameKeyResponse( - RenameKeyResponse.newBuilder()).build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMKeyRenameResponse(null, null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - - auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap, - exception, getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" + - " fromKey:{} toKey:{}. ", volumeName, bucketName, fromKeyName, - toKeyName); - return omClientResponse; - } else { - ozoneManager.getMetrics().incNumKeyRenameFails(); - LOG.error( - "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. " - + "Key: {} not found.", volumeName, bucketName, fromKeyName, - toKeyName, fromKeyName); - return omClientResponse; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java deleted file mode 100644 index 8e1e7605a12..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ /dev/null @@ -1,536 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
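Note on the OMKeyRenameRequest hunk above: a rename is two cache updates published together, a tombstone for the old name and the existing key info under the new name, after checking that the source exists and the destination does not. The sketch below keeps only that ordering; plain maps stand in for the key table cache, and details such as rewriting keyName and modification time inside OmKeyInfo are elided:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    final class RenameSketch {

      static void rename(Map<String, Optional<String>> keyTable,
          String fromKey, String toKey) {
        Optional<String> fromValue = keyTable.getOrDefault(fromKey, Optional.empty());
        if (!fromValue.isPresent()) {
          throw new IllegalStateException("KEY_NOT_FOUND: " + fromKey);
        }
        if (keyTable.getOrDefault(toKey, Optional.empty()).isPresent()) {
          throw new IllegalStateException("KEY_ALREADY_EXISTS: " + toKey);
        }
        // In the real request both cache entries carry the same transaction
        // index, so they are flushed to the DB as one batch.
        keyTable.put(fromKey, Optional.empty());   // tombstone the old name
        keyTable.put(toKey, fromValue);            // same key info, new name
      }

      public static void main(String[] args) {
        Map<String, Optional<String>> table = new HashMap<>();
        table.put("/vol/bucket/a.txt", Optional.of("a.txt key info"));
        rename(table, "/vol/bucket/a.txt", "/vol/bucket/b.txt");
        System.out.println(table);
      }
    }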

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; -import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension - .EncryptedKeyVersion; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ScmClient; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateFileResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static 
org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes - .VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateFile; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CreateKey; -import static org.apache.hadoop.util.Time.monotonicNow; - -/** - * Interface for key write requests. - */ -public abstract class OMKeyRequest extends OMClientRequest { - - private static final Logger LOG = LoggerFactory.getLogger(OMKeyRequest.class); - - public OMKeyRequest(OMRequest omRequest) { - super(omRequest); - } - - /** - * This methods avoids multiple rpc calls to SCM by allocating multiple blocks - * in one rpc call. - * @throws IOException - */ - @SuppressWarnings("parameternumber") - protected List< OmKeyLocationInfo > allocateBlock(ScmClient scmClient, - OzoneBlockTokenSecretManager secretManager, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, - ExcludeList excludeList, long requestedSize, long scmBlockSize, - int preallocateBlocksMax, boolean grpcBlockTokenEnabled, String omID) - throws IOException { - - int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1), - preallocateBlocksMax); - - List locationInfos = new ArrayList<>(numBlocks); - String remoteUser = getRemoteUser().getShortUserName(); - List allocatedBlocks; - try { - allocatedBlocks = scmClient.getBlockClient() - .allocateBlock(scmBlockSize, numBlocks, replicationType, - replicationFactor, omID, excludeList); - } catch (SCMException ex) { - if (ex.getResult() - .equals(SCMException.ResultCodes.SAFE_MODE_EXCEPTION)) { - throw new OMException(ex.getMessage(), - OMException.ResultCodes.SCM_IN_SAFE_MODE); - } - throw ex; - } - for (AllocatedBlock allocatedBlock : allocatedBlocks) { - OmKeyLocationInfo.Builder builder = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(allocatedBlock.getBlockID())) - .setLength(scmBlockSize) - .setOffset(0) - .setPipeline(allocatedBlock.getPipeline()); - if (grpcBlockTokenEnabled) { - builder.setToken(secretManager - .generateToken(remoteUser, allocatedBlock.getBlockID().toString(), - getAclForUser(remoteUser), scmBlockSize)); - } - locationInfos.add(builder.build()); - } - return locationInfos; - } - - /* Optimize ugi lookup for RPC operations to avoid a trip through - * UGI.getCurrentUser which is synch'ed. - */ - private UserGroupInformation getRemoteUser() throws IOException { - UserGroupInformation ugi = Server.getRemoteUser(); - return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser(); - } - - /** - * Return acl for user. - * @param user - * - * */ - private EnumSet< HddsProtos.BlockTokenSecretProto.AccessModeProto> - getAclForUser(String user) { - // TODO: Return correct acl for user. - return EnumSet.allOf( - HddsProtos.BlockTokenSecretProto.AccessModeProto.class); - } - - /** - * Validate bucket and volume exists or not. 
- * @param omMetadataManager - * @param volumeName - * @param bucketName - * @throws IOException - */ - public void validateBucketAndVolume(OMMetadataManager omMetadataManager, - String volumeName, String bucketName) - throws IOException { - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - // Check if bucket exists - if (!omMetadataManager.getBucketTable().isExist(bucketKey)) { - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - // If the volume also does not exist, we should throw volume not found - // exception - if (!omMetadataManager.getVolumeTable().isExist(volumeKey)) { - throw new OMException("Volume not found " + volumeName, - VOLUME_NOT_FOUND); - } - - // if the volume exists but bucket does not exist, throw bucket not found - // exception - throw new OMException("Bucket not found " + bucketName, BUCKET_NOT_FOUND); - } - } - - protected Optional getFileEncryptionInfo( - OzoneManager ozoneManager, OmBucketInfo bucketInfo) throws IOException { - Optional encInfo = Optional.absent(); - BucketEncryptionKeyInfo ezInfo = bucketInfo.getEncryptionKeyInfo(); - if (ezInfo != null) { - if (ozoneManager.getKmsProvider() == null) { - throw new OMException("Invalid KMS provider, check configuration " + - CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH, - OMException.ResultCodes.INVALID_KMS_PROVIDER); - } - - final String ezKeyName = ezInfo.getKeyName(); - EncryptedKeyVersion edek = generateEDEK(ozoneManager, ezKeyName); - encInfo = Optional.of(new FileEncryptionInfo(ezInfo.getSuite(), - ezInfo.getVersion(), - edek.getEncryptedKeyVersion().getMaterial(), - edek.getEncryptedKeyIv(), ezKeyName, - edek.getEncryptionKeyVersionName())); - } - return encInfo; - } - - private EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager, - String ezKeyName) throws IOException { - if (ezKeyName == null) { - return null; - } - long generateEDEKStartTime = monotonicNow(); - EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser( - new PrivilegedExceptionAction() { - @Override - public EncryptedKeyVersion run() throws IOException { - try { - return ozoneManager.getKmsProvider() - .generateEncryptedKey(ezKeyName); - } catch (GeneralSecurityException e) { - throw new IOException(e); - } - } - }); - long generateEDEKTime = monotonicNow() - generateEDEKStartTime; - LOG.debug("generateEDEK takes {} ms", generateEDEKTime); - Preconditions.checkNotNull(edek); - return edek; - } - - /** - * Prepare the response returned to the client. 
- * @return OMClientResponse - */ - @SuppressWarnings("parameternumber") - protected OMClientResponse prepareCreateKeyResponse(@Nonnull KeyArgs keyArgs, - OmKeyInfo omKeyInfo, @Nonnull List locations, - FileEncryptionInfo encryptionInfo, @Nullable IOException exception, - long clientID, long transactionLogIndex, @Nonnull String volumeName, - @Nonnull String bucketName, @Nonnull String keyName, - @Nonnull OzoneManager ozoneManager, @Nonnull OMAction omAction, - @Nonnull PrefixManager prefixManager, - @Nullable OmBucketInfo omBucketInfo) { - - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setStatus(OzoneManagerProtocolProtos.Status.OK); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - Map auditMap = buildKeyArgsAuditMap(keyArgs); - - OMClientResponse omClientResponse = null; - if (exception == null) { - if (omKeyInfo == null) { - // the key does not exist, create a new object, the new blocks are the - // version 0 - omKeyInfo = createKeyInfo(keyArgs, locations, keyArgs.getFactor(), - keyArgs.getType(), keyArgs.getDataSize(), - encryptionInfo, prefixManager, omBucketInfo); - } - - long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); - - // Append blocks - try { - omKeyInfo.appendNewBlocks(keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()), false); - - } catch (IOException ex) { - exception = ex; - } - - if (exception != null) { - LOG.error("{} failed for Key: {} in volume/bucket:{}/{}", - omAction.getAction(), keyName, bucketName, volumeName, exception); - omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(), - omAction, exception, omResponse); - } else { - String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, - bucketName, keyName, clientID); - - // Add to cache entry can be done outside of lock for this openKey. - // Even if bucket gets deleted, when commitKey we shall identify if - // bucket gets deleted. - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(dbOpenKeyName), - new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex)); - - LOG.debug("{} for Key: {} in volume/bucket: {}/{}", - omAction.getAction(), keyName, volumeName, bucketName); - - - if (omAction == OMAction.CREATE_FILE) { - omResponse.setCreateFileResponse(CreateFileResponse.newBuilder() - .setKeyInfo(omKeyInfo.getProtobuf()) - .setID(clientID) - .setOpenVersion(openVersion).build()); - omResponse.setCmdType(CreateFile); - omClientResponse = new OMFileCreateResponse(omKeyInfo, clientID, - omResponse.build()); - } else { - omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder() - .setKeyInfo(omKeyInfo.getProtobuf()) - .setID(clientID).setOpenVersion(openVersion) - .build()); - omResponse.setCmdType(CreateKey); - omClientResponse = new OMKeyCreateResponse(omKeyInfo, clientID, - omResponse.build()); - } - } - - } else { - LOG.error("{} failed for Key: {} in volume/bucket:{}/{}", - omAction.getAction(), keyName, volumeName, bucketName, exception); - omClientResponse = createKeyErrorResponse(ozoneManager.getMetrics(), - omAction, exception, omResponse); - } - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(omAction, - auditMap, exception, getOmRequest().getUserInfo())); - return omClientResponse; - } - - /** - * Create OmKeyInfo object. 
- * @return OmKeyInfo - */ - @SuppressWarnings("parameterNumber") - protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs, - @Nonnull List locations, - @Nonnull HddsProtos.ReplicationFactor factor, - @Nonnull HddsProtos.ReplicationType type, long size, - @Nullable FileEncryptionInfo encInfo, - @Nonnull PrefixManager prefixManager, - @Nullable OmBucketInfo omBucketInfo) { - return new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(size) - .setReplicationType(type) - .setReplicationFactor(factor) - .setFileEncryptionInfo(encInfo) - .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager)) - .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList())) - .build(); - } - - private List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, - OmBucketInfo bucketInfo, PrefixManager prefixManager) { - List acls = new ArrayList<>(); - - if(keyArgs.getAclsList() != null) { - acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); - } - - // Inherit DEFAULT acls from prefix. - if(prefixManager != null) { - List< OmPrefixInfo > prefixList = prefixManager.getLongestPrefixPath( - OZONE_URI_DELIMITER + - keyArgs.getVolumeName() + OZONE_URI_DELIMITER + - keyArgs.getBucketName() + OZONE_URI_DELIMITER + - keyArgs.getKeyName()); - - if(prefixList.size() > 0) { - // Add all acls from direct parent to key. - OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { - return acls; - } - } - } - } - - // Inherit DEFAULT acls from bucket only if DEFAULT acls for - // prefix are not set. - if (bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { - return acls; - } - } - - return acls; - } - - /** - * Prepare OmKeyInfo which will be persisted to openKeyTable. - * @return OmKeyInfo - * @throws IOException - */ - @SuppressWarnings("parameternumber") - protected OmKeyInfo prepareKeyInfo( - @Nonnull OMMetadataManager omMetadataManager, - @Nonnull KeyArgs keyArgs, @Nonnull String dbKeyName, long size, - @Nonnull List locations, - @Nullable FileEncryptionInfo encInfo, - @Nonnull PrefixManager prefixManager, @Nullable OmBucketInfo omBucketInfo) - throws IOException { - OmKeyInfo keyInfo = null; - if (keyArgs.getIsMultipartKey()) { - keyInfo = prepareMultipartKeyInfo(omMetadataManager, keyArgs, size, - locations, encInfo, prefixManager, omBucketInfo); - //TODO args.getMetadata - } else if (omMetadataManager.getKeyTable().isExist(dbKeyName)) { - // TODO: Need to be fixed, as when key already exists, we are - // appending new blocks to existing key. - keyInfo = omMetadataManager.getKeyTable().get(dbKeyName); - // the key already exist, the new blocks will be added as new version - // when locations.size = 0, the new version will have identical blocks - // as its previous version - keyInfo.addNewVersion(locations, false); - keyInfo.setDataSize(size + keyInfo.getDataSize()); - // The modification time is set in preExecute, use the same as - // modification time when key already exists. 
- keyInfo.setModificationTime(keyArgs.getModificationTime()); - } - return keyInfo; - } - - /** - * Prepare OmKeyInfo for multi-part upload part key which will be persisted - * to openKeyTable. - * @return OmKeyInfo - * @throws IOException - */ - private OmKeyInfo prepareMultipartKeyInfo( - @Nonnull OMMetadataManager omMetadataManager, - @Nonnull KeyArgs args, long size, - @Nonnull List locations, - FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager, - @Nullable OmBucketInfo omBucketInfo) throws IOException { - HddsProtos.ReplicationFactor factor; - HddsProtos.ReplicationType type; - - Preconditions.checkArgument(args.getMultipartNumber() > 0, - "PartNumber Should be greater than zero"); - // When key is multipart upload part key, we should take replication - // type and replication factor from original key which has done - // initiate multipart upload. If we have not found any such, we throw - // error no such multipart upload. - String uploadID = args.getMultipartUploadID(); - Preconditions.checkNotNull(uploadID); - String multipartKey = omMetadataManager - .getMultipartKey(args.getVolumeName(), args.getBucketName(), - args.getKeyName(), uploadID); - OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get( - multipartKey); - if (partKeyInfo == null) { - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, - OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - factor = partKeyInfo.getFactor(); - type = partKeyInfo.getType(); - } - // For this upload part we don't need to check in KeyTable. As this - // is not an actual key, it is a part of the key. - return createKeyInfo(args, locations, factor, type, size, encInfo, - prefixManager, omBucketInfo); - } - - - private OMClientResponse createKeyErrorResponse(@Nonnull OMMetrics omMetrics, - @Nonnull OMAction omAction, @Nonnull IOException exception, - @Nonnull OMResponse.Builder omResponse) { - if (omAction == OMAction.CREATE_FILE) { - omMetrics.incNumCreateFileFails(); - omResponse.setCmdType(CreateFile); - return new OMFileCreateResponse(null, -1L, - createErrorOMResponse(omResponse, exception)); - } else { - omMetrics.incNumKeyAllocateFails(); - omResponse.setCmdType(CreateKey); - return new OMKeyCreateResponse(null, -1L, - createErrorOMResponse(omResponse, exception)); - } - } - - /** - * Check Acls for the ozone bucket. - * @param ozoneManager - * @param volume - * @param bucket - * @param key - * @throws IOException - */ - protected void checkBucketAcls(OzoneManager ozoneManager, String volume, - String bucket, String key) throws IOException { - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, - volume, bucket, key); - } - } - - - /** - * Check Acls for the ozone key. 
- * @param ozoneManager - * @param volume - * @param bucket - * @param key - * @throws IOException - */ - protected void checkKeyAcls(OzoneManager ozoneManager, String volume, - String bucket, String key) throws IOException { - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.KEY, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, - volume, bucket, key); - } - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java deleted file mode 100644 index d1fac4feb74..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
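Note on OMKeyRequest.allocateBlock above: the number of pre-allocated blocks is a ceiling division of the requested size by the SCM block size, capped by the preallocateBlocksMax value passed in from OM configuration. A standalone check of the same arithmetic; the 256 MB block size below is only an example value:

    final class BlockCountSketch {

      static int numBlocks(long requestedSize, long scmBlockSize, int preallocateMax) {
        // (size - 1) / blockSize + 1 is ceil(size / blockSize) for size >= 1.
        int needed = (int) ((requestedSize - 1) / scmBlockSize + 1);
        return Math.min(needed, preallocateMax);
      }

      public static void main(String[] args) {
        long blockSize = 256L * 1024 * 1024;                              // example SCM block size
        System.out.println(numBlocks(1, blockSize, 64));                  // 1
        System.out.println(numBlocks(blockSize, blockSize, 64));          // 1
        System.out.println(numBlocks(blockSize + 1, blockSize, 64));      // 2
        System.out.println(numBlocks(100L * blockSize, blockSize, 64));   // capped at 64
      }
    }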

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl; - -import java.io.IOException; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.util.ObjectParser; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Base class for Bucket acl request. - */ -public abstract class OMKeyAclRequest extends OMClientRequest { - - - public OMKeyAclRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - OmKeyInfo omKeyInfo = null; - - OMResponse.Builder omResponse = onInit(); - OMClientResponse omClientResponse = null; - IOException exception = null; - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean lockAcquired = false; - String volume = null; - String bucket = null; - String key = null; - boolean operationResult = false; - try { - ObjectParser objectParser = new ObjectParser(getPath(), - ObjectType.KEY); - - volume = objectParser.getVolume(); - bucket = objectParser.getBucket(); - key = objectParser.getKey(); - - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, bucket, key); - } - lockAcquired = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volume, - bucket); - - String dbKey = omMetadataManager.getOzoneKey(volume, bucket, key); - omKeyInfo = omMetadataManager.getKeyTable().get(dbKey); - - if (omKeyInfo == null) { - throw new OMException(OMException.ResultCodes.KEY_NOT_FOUND); - } - - operationResult = apply(omKeyInfo); - - if (operationResult) { - // update cache. 
- omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(dbKey), - new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex)); - } - - omClientResponse = onSuccess(omResponse, omKeyInfo, operationResult); - - } catch (IOException ex) { - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (lockAcquired) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volume, - bucket); - } - } - - - onComplete(operationResult, exception); - - return omClientResponse; - } - - /** - * Get the path name from the request. - * @return path name - */ - abstract String getPath(); - - // TODO: Finer grain metrics can be moved to these callbacks. They can also - // be abstracted into separate interfaces in future. - /** - * Get the initial om response builder with lock. - * @return om response builder. - */ - abstract OMResponse.Builder onInit(); - - /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omKeyInfo - * @param operationResult - * @return OMClientResponse - */ - abstract OMClientResponse onSuccess( - OMResponse.Builder omResponse, OmKeyInfo omKeyInfo, - boolean operationResult); - - /** - * Get the om client response on failure case with lock. - * @param omResponse - * @param exception - * @return OMClientResponse - */ - abstract OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception); - - /** - * Completion hook for final processing before return without lock. - * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - */ - abstract void onComplete(boolean operationResult, IOException exception); - - /** - * Apply the acl operation, if successfully completed returns true, - * else false. - * @param omKeyInfo - */ - abstract boolean apply(OmKeyInfo omKeyInfo); -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java deleted file mode 100644 index 8d69a245905..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
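Note on the OMKeyAclRequest hunk above: its validateAndUpdateCache is a template method; the base class owns the ACL check, the bucket lock, the cache update and the flush future, while subclasses only implement getPath/onInit/apply/onSuccess/onFailure/onComplete. A condensed skeleton of that call order, with a plain ReentrantLock and invented hook names standing in for the striped bucket lock and the real response types:

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantLock;

    /**
     * Only the ordering is the point: apply() and the cache update run under
     * the lock, onComplete() runs after the lock has been released.
     */
    abstract class AclRequestSkeleton<K> {

      private final ReentrantLock bucketLock = new ReentrantLock();

      final void process(K keyInfo, long txIndex) {
        boolean changed = false;
        IOException failure = null;
        bucketLock.lock();
        try {
          changed = apply(keyInfo);          // subclass mutates the ACL list
          if (changed) {
            updateCache(keyInfo, txIndex);   // publish only when something changed
          }
        } catch (IOException e) {
          failure = e;
        } finally {
          bucketLock.unlock();
        }
        onComplete(changed, failure);        // logging and metrics outside the lock
      }

      abstract boolean apply(K keyInfo) throws IOException;
      abstract void updateCache(K keyInfo, long txIndex);
      abstract void onComplete(boolean changed, IOException failure);
    }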

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; - -/** - * Handle add Acl request for bucket. - */ -public class OMKeyAddAclRequest extends OMKeyAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyAddAclRequest.class); - - private String path; - private List ozoneAcls; - - public OMKeyAddAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.AddAclRequest addAclRequest = - getOmRequest().getAddAclRequest(); - path = addAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.AddAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmKeyInfo omKeyInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setAddAclResponse(AddAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMKeyAclResponse(omKeyInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMKeyAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception) { - if (operationResult) { - LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, path); - } else { - if (exception == null) { - LOG.debug("Add acl {} to path {} failed, because acl already exist", - ozoneAcls, path); - } else { - LOG.error("Add acl {} to path {} failed!", ozoneAcls, path, exception); - } - } - } - - @Override - boolean apply(OmKeyInfo omKeyInfo) { - // No need to check not null here, this will be never called with null. 
- return omKeyInfo.addAcl(ozoneAcls.get(0)); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java deleted file mode 100644 index 0bd81d31a22..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
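Note on the apply() implementations in these ACL requests: the boolean result is the whole contract. As the debug message above suggests, adding an ACL that is already present returns false (and presumably removing one that is absent does the same), and the request then reports success=false rather than throwing. A small illustration, with strings standing in for OzoneAcl entries:

    import java.util.ArrayList;
    import java.util.List;

    final class AclListSketch {

      private final List<String> acls = new ArrayList<>();

      /** Returns false if the ACL was already present, mirroring addAcl(). */
      boolean add(String acl) {
        if (acls.contains(acl)) {
          return false;
        }
        return acls.add(acl);
      }

      /** Returns false if there was nothing to remove. */
      boolean remove(String acl) {
        return acls.remove(acl);
      }
    }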

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; - -/** - * Handle add Acl request for bucket. - */ -public class OMKeyRemoveAclRequest extends OMKeyAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyAddAclRequest.class); - - private String path; - private List ozoneAcls; - - public OMKeyRemoveAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = - getOmRequest().getRemoveAclRequest(); - path = removeAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(removeAclRequest.getAcl())); - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmKeyInfo omKeyInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMKeyAclResponse(omKeyInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMKeyAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception) { - if (operationResult) { - LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, path); - } else { - if (exception == null) { - LOG.debug("Remove acl {} to path {} failed, because acl already exist", - ozoneAcls, path); - } else { - LOG.error("Remove acl {} to path {} failed!", ozoneAcls, path, - exception); - } - } - } - - @Override - boolean apply(OmKeyInfo omKeyInfo) { - // No need to check not null here, this will be never called with null. 
- return omKeyInfo.removeAcl(ozoneAcls.get(0)); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java deleted file mode 100644 index 24d46f83deb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
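One nit worth recording about the classes being removed here: OMKeyRemoveAclRequest above and OMKeySetAclRequest below both declare their logger against OMKeyAddAclRequest.class, and both reuse the "Handle add Acl request for bucket." javadoc, apparently copy-paste leftovers. Had the code stayed, the corrected declaration in the remove variant would read:

    // In OMKeyRemoveAclRequest (and analogously in OMKeySetAclRequest), so that
    // log lines are attributed to the class that actually emits them:
    private static final Logger LOG =
        LoggerFactory.getLogger(OMKeyRemoveAclRequest.class);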

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; - -/** - * Handle add Acl request for bucket. - */ -public class OMKeySetAclRequest extends OMKeyAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMKeyAddAclRequest.class); - - private String path; - private List ozoneAcls; - - public OMKeySetAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.SetAclRequest setAclRequest = - getOmRequest().getSetAclRequest(); - path = setAclRequest.getObj().getPath(); - ozoneAcls = Lists.newArrayList( - OzoneAclUtil.fromProtobuf(setAclRequest.getAclList())); - } - - @Override - String getPath() { - return path; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.SetAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmKeyInfo omKeyInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setSetAclResponse(SetAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMKeyAclResponse(omKeyInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMKeyAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception) { - if (operationResult) { - LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, path); - } else { - if (exception == null) { - LOG.debug("Set acl {} to path {} failed!", ozoneAcls, path); - } else { - LOG.error("Set acl {} to path {} failed!", ozoneAcls, path, exception); - } - } - } - - @Override - boolean apply(OmKeyInfo omKeyInfo) { - // No need to check not null here, this will be never called with null. 
- return omKeyInfo.setAcls(ozoneAcls); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java deleted file mode 100644 index c532519dd35..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to acl requests for keys. - */ -package org.apache.hadoop.ozone.om.request.key.acl; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java deleted file mode 100644 index 3b30e4a3f19..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ /dev/null @@ -1,197 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl.prefix; - -import java.io.IOException; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; - -/** - * Base class for Prefix acl request. - */ -public abstract class OMPrefixAclRequest extends OMClientRequest { - - public OMPrefixAclRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - - OmPrefixInfo omPrefixInfo = null; - - OMResponse.Builder omResponse = onInit(); - OMClientResponse omClientResponse = null; - IOException exception = null; - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean lockAcquired = false; - String volume = null; - String bucket = null; - String key = null; - OMPrefixAclOpResult operationResult = null; - boolean result = false; - - PrefixManagerImpl prefixManager = - (PrefixManagerImpl) ozoneManager.getPrefixManager(); - try { - String prefixPath = getOzoneObj().getPath(); - - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.PREFIX, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, bucket, key); - } - - lockAcquired = - omMetadataManager.getLock().acquireWriteLock(PREFIX_LOCK, prefixPath); - - omPrefixInfo = omMetadataManager.getPrefixTable().get(prefixPath); - - try { - operationResult = apply(prefixManager, omPrefixInfo); - } catch (IOException ex) { - // In HA case this will never happen. - // As in add/remove/setAcl method we have logic to update database, - // that can throw exception. But in HA case we shall not update DB. - // The code in prefixManagerImpl is being done, because update - // in-memory should be done after DB update for Non-HA code path. - operationResult = new OMPrefixAclOpResult(null, false); - } - - if (operationResult.isOperationsResult()) { - // As for remove acl list, for a prefix if after removing acl from - // the existing acl list, if list size becomes zero, delete the - // prefix from prefix table. 
- if (getOmRequest().hasRemoveAclRequest() && - operationResult.getOmPrefixInfo().getAcls().size() == 0) { - omMetadataManager.getPrefixTable().addCacheEntry( - new CacheKey<>(prefixPath), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - } else { - // update cache. - omMetadataManager.getPrefixTable().addCacheEntry( - new CacheKey<>(prefixPath), - new CacheValue<>(Optional.of(operationResult.getOmPrefixInfo()), - transactionLogIndex)); - } - } - - result = operationResult.isOperationsResult(); - omClientResponse = onSuccess(omResponse, - operationResult.getOmPrefixInfo(), result); - - } catch (IOException ex) { - exception = ex; - omClientResponse = onFailure(omResponse, ex); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (lockAcquired) { - omMetadataManager.getLock().releaseWriteLock(PREFIX_LOCK, - getOzoneObj().getPath()); - } - } - - onComplete(result, exception, ozoneManager.getMetrics()); - - return omClientResponse; - } - - /** - * Get the path name from the request. - * @return path name - */ - abstract OzoneObj getOzoneObj(); - - // TODO: Finer grain metrics can be moved to these callbacks. They can also - // be abstracted into separate interfaces in future. - /** - * Get the initial om response builder with lock. - * @return om response builder. - */ - abstract OMResponse.Builder onInit(); - - /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omPrefixInfo - * @param operationResult - * @return OMClientResponse - */ - abstract OMClientResponse onSuccess( - OMResponse.Builder omResponse, OmPrefixInfo omPrefixInfo, - boolean operationResult); - - /** - * Get the om client response on failure case with lock. - * @param omResponse - * @param exception - * @return OMClientResponse - */ - abstract OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception); - - /** - * Completion hook for final processing before return without lock. - * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - * @param omMetrics - */ - abstract void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics); - - /** - * Apply the acl operation, if successfully completed returns true, - * else false. - * @param prefixManager - * @param omPrefixInfo - * @throws IOException - */ - abstract OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, - OmPrefixInfo omPrefixInfo) throws IOException; - - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java deleted file mode 100644 index 086190aa31a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl.prefix; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AddAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - -/** - * Handle add Acl request for prefix. - */ -public class OMPrefixAddAclRequest extends OMPrefixAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - - private OzoneObj ozoneObj; - private List ozoneAcls; - - public OMPrefixAddAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.AddAclRequest addAclRequest = - getOmRequest().getAddAclRequest(); - // TODO: conversion of OzoneObj to protobuf can be avoided when we have - // single code path for HA and Non-HA - ozoneObj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj()); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); - } - - @Override - OzoneObj getOzoneObj() { - return ozoneObj; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.AddAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmPrefixInfo omPrefixInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setAddAclResponse(AddAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMPrefixAclResponse(omPrefixInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMPrefixAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.debug("Add acl {} to path {} failed, because acl already exist", - ozoneAcls, ozoneObj.getPath()); - } else { - LOG.error("Add acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); - } - } - } - - @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, - OmPrefixInfo omPrefixInfo) throws 
IOException { - return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java deleted file mode 100644 index 32d9b22bfff..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl.prefix; - -import java.io.IOException; -import java.util.List; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; - -/** - * Handle add Acl request for prefix. - */ -public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - - private OzoneObj ozoneObj; - private List ozoneAcls; - - public OMPrefixRemoveAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = - getOmRequest().getRemoveAclRequest(); - // TODO: conversion of OzoneObj to protobuf can be avoided when we have - // single code path for HA and Non-HA - ozoneObj = OzoneObjInfo.fromProtobuf(removeAclRequest.getObj()); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(removeAclRequest.getAcl())); - } - - @Override - OzoneObj getOzoneObj() { - return ozoneObj; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.RemoveAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmPrefixInfo omPrefixInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setRemoveAclResponse(RemoveAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMPrefixAclResponse(omPrefixInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMPrefixAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.debug("Remove acl {} to path {} failed, because acl does not exist", - ozoneAcls, ozoneObj.getPath()); - } else { - LOG.error("Remove acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); - } - } - } - - @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, - 
OmPrefixInfo omPrefixInfo) throws IOException { - return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java deleted file mode 100644 index 563d76e7442..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key.acl.prefix; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.PrefixManagerImpl.OMPrefixAclOpResult; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.response.key.acl.prefix.OMPrefixAclResponse; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; - -/** - * Handle add Acl request for prefix. - */ -public class OMPrefixSetAclRequest extends OMPrefixAclRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - - private OzoneObj ozoneObj; - private List ozoneAcls; - - public OMPrefixSetAclRequest(OMRequest omRequest) { - super(omRequest); - OzoneManagerProtocolProtos.SetAclRequest setAclRequest = - getOmRequest().getSetAclRequest(); - // TODO: conversion of OzoneObj to protobuf can be avoided when we have - // single code path for HA and Non-HA - ozoneObj = OzoneObjInfo.fromProtobuf(setAclRequest.getObj()); - ozoneAcls = new ArrayList<>(); - setAclRequest.getAclList().forEach(aclInfo -> - ozoneAcls.add(OzoneAcl.fromProtobuf(aclInfo))); - } - - @Override - OzoneObj getOzoneObj() { - return ozoneObj; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.SetAcl).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmPrefixInfo omPrefixInfo, boolean operationResult) { - omResponse.setSuccess(operationResult); - omResponse.setSetAclResponse(SetAclResponse.newBuilder() - .setResponse(operationResult)); - return new OMPrefixAclResponse(omPrefixInfo, - omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException exception) { - return new OMPrefixAclResponse(null, - createErrorOMResponse(omResponse, exception)); - } - - @Override - void onComplete(boolean operationResult, IOException exception, - OMMetrics omMetrics) { - if (operationResult) { - LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); - } else { - omMetrics.incNumBucketUpdateFails(); - if (exception == null) { - LOG.debug("Set acl {} to path {} failed", ozoneAcls, - ozoneObj.getPath()); - } else { - LOG.error("Set acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); - } - } - } - - @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, - OmPrefixInfo omPrefixInfo) throws 
IOException { - return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo); - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java deleted file mode 100644 index 0a027cc916e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to acl requests for prefix. - */ -package org.apache.hadoop.ozone.om.request.key.acl.prefix; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java deleted file mode 100644 index af20fe1f18e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to key requests. - */ -package org.apache.hadoop.ozone.om.request.key; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java deleted file mode 100644 index ee324cf7df9..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains classes for handling OMRequests. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java deleted file mode 100644 index f3a352a2fbf..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketCreateRequest.java +++ /dev/null @@ -1,391 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.bucket; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketCreateResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .S3CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .S3CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .S3CreateVolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - - -import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_VOLUME_PREFIX; -import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MAX_LENGTH; -import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MIN_LENGTH; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -/** - * Handles S3 Bucket create request. 
- */ -public class S3BucketCreateRequest extends OMVolumeRequest { - - private static final String S3_ADMIN_NAME = "OzoneS3Manager"; - - private static final Logger LOG = - LoggerFactory.getLogger(S3CreateBucketRequest.class); - - public S3BucketCreateRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - S3CreateBucketRequest s3CreateBucketRequest = - getOmRequest().getCreateS3BucketRequest(); - Preconditions.checkNotNull(s3CreateBucketRequest); - - S3CreateBucketRequest.Builder newS3CreateBucketRequest = - s3CreateBucketRequest.toBuilder().setS3CreateVolumeInfo( - S3CreateVolumeInfo.newBuilder().setCreationTime(Time.now())); - - // TODO: Do we need to enforce the bucket rules in this code path? - // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html - - // For now only checked the length. - int bucketLength = s3CreateBucketRequest.getS3Bucketname().length(); - if (bucketLength < S3_BUCKET_MIN_LENGTH || - bucketLength >= S3_BUCKET_MAX_LENGTH) { - throw new OMException("S3BucketName must be at least 3 and not more " + - "than 63 characters long", - OMException.ResultCodes.S3_BUCKET_INVALID_LENGTH); - } - - return getOmRequest().toBuilder() - .setCreateS3BucketRequest(newS3CreateBucketRequest) - .setUserInfo(getUserInfo()).build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - S3CreateBucketRequest s3CreateBucketRequest = - getOmRequest().getCreateS3BucketRequest(); - - String userName = s3CreateBucketRequest.getUserName(); - String s3BucketName = s3CreateBucketRequest.getS3Bucketname(); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CreateS3Bucket).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumS3BucketCreates(); - - // When s3 Bucket is created, we internally create ozone volume/ozone - // bucket. - - // ozone volume name is generated from userName by calling - // formatOzoneVolumeName. - - // ozone bucket name is same as s3 bucket name. - // In S3 buckets are unique, so we create a mapping like s3BucketName -> - // ozoneVolume/ozoneBucket and add it to s3 mapping table. If - // s3BucketName exists in mapping table, bucket already exist or we go - // ahead and create a bucket. - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - IOException exception = null; - - boolean volumeCreated = false; - boolean acquiredVolumeLock = false; - boolean acquiredUserLock = false; - boolean acquiredS3Lock = false; - String volumeName = formatOzoneVolumeName(userName); - OMClientResponse omClientResponse = null; - try { - - // TODO to support S3 ACL later. 
- acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock( - S3_BUCKET_LOCK, s3BucketName); - - // First check if this s3Bucket exists - if (omMetadataManager.getS3Table().isExist(s3BucketName)) { - throw new OMException("S3Bucket " + s3BucketName + " already exists", - OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS); - } - - OMVolumeCreateResponse omVolumeCreateResponse = null; - try { - acquiredVolumeLock = - omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK, - volumeName); - acquiredUserLock = omMetadataManager.getLock().acquireWriteLock( - USER_LOCK, userName); - // Check if volume exists, if it does not exist create - // ozone volume. - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - if (!omMetadataManager.getVolumeTable().isExist(volumeKey)) { - OmVolumeArgs omVolumeArgs = createOmVolumeArgs(volumeName, userName, - s3CreateBucketRequest.getS3CreateVolumeInfo() - .getCreationTime()); - UserVolumeInfo volumeList = omMetadataManager.getUserTable().get( - omMetadataManager.getUserKey(userName)); - volumeList = addVolumeToOwnerList(volumeList, - volumeName, userName, ozoneManager.getMaxUserVolumeCount(), - transactionLogIndex); - createVolume(omMetadataManager, omVolumeArgs, volumeList, volumeKey, - omMetadataManager.getUserKey(userName), transactionLogIndex); - volumeCreated = true; - omVolumeCreateResponse = new OMVolumeCreateResponse(omVolumeArgs, - volumeList, omResponse.build()); - } - } finally { - if (acquiredUserLock) { - omMetadataManager.getLock().releaseWriteLock(USER_LOCK, userName); - } - if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volumeName); - } - } - - // check if ozone bucket exists, if it does not exist create ozone - // bucket - OmBucketInfo omBucketInfo = createBucket(omMetadataManager, volumeName, - s3BucketName, userName, - s3CreateBucketRequest.getS3CreateVolumeInfo().getCreationTime(), - transactionLogIndex); - - // Now finally add it to s3 table cache. - omMetadataManager.getS3Table().addCacheEntry( - new CacheKey<>(s3BucketName), new CacheValue<>( - Optional.of(formatS3MappingName(volumeName, s3BucketName)), - transactionLogIndex)); - - OMBucketCreateResponse omBucketCreateResponse = - new OMBucketCreateResponse(omBucketInfo, omResponse.build()); - - omClientResponse = new S3BucketCreateResponse(omVolumeCreateResponse, - omBucketCreateResponse, s3BucketName, - formatS3MappingName(volumeName, s3BucketName), - omResponse.setCreateS3BucketResponse( - S3CreateBucketResponse.newBuilder()).build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3BucketCreateResponse(null, null, null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredS3Lock) { - omMetadataManager.getLock().releaseWriteLock( - S3_BUCKET_LOCK, s3BucketName); - } - } - - // Performing audit logging outside of the lock. 
- auditLog(ozoneManager.getAuditLogger(), - buildAuditMessage(OMAction.CREATE_S3_BUCKET, - buildAuditMap(userName, s3BucketName), exception, - getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("S3Bucket is successfully created for userName: {}, " + - "s3BucketName {}, volumeName {}", userName, s3BucketName, volumeName); - OMVolumeCreateResponse omVolumeCreateResponse = null; - if (volumeCreated) { - omMetrics.incNumVolumes(); - } - omMetrics.incNumBuckets(); - omMetrics.incNumS3Buckets(); - - return omClientResponse; - } else { - LOG.error("S3Bucket Creation Failed for userName: {}, s3BucketName {}, " + - "VolumeName {}", userName, s3BucketName, volumeName); - omMetrics.incNumS3BucketCreateFails(); - return omClientResponse; - } - } - - - private OmBucketInfo createBucket(OMMetadataManager omMetadataManager, - String volumeName, String s3BucketName, String userName, - long creationTime, long transactionLogIndex) throws IOException { - // check if ozone bucket exists, if it does not exist create ozone - // bucket - boolean acquireBucketLock = false; - OmBucketInfo omBucketInfo = null; - try { - acquireBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - s3BucketName); - String bucketKey = omMetadataManager.getBucketKey(volumeName, - s3BucketName); - if (!omMetadataManager.getBucketTable().isExist(bucketKey)) { - omBucketInfo = createOmBucketInfo(volumeName, s3BucketName, userName, - creationTime); - // Add to bucket table cache. - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(bucketKey), - new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); - } else { - // This can happen when a ozone bucket exists already in the - // volume, but this is not a s3 bucket. - throw new OMException("Bucket " + s3BucketName + " already exists", - OMException.ResultCodes.BUCKET_ALREADY_EXISTS); - } - } finally { - if (acquireBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - s3BucketName); - } - } - return omBucketInfo; - } - - /** - * Generate Ozone volume name from userName. - * @param userName - * @return volume name - */ - @VisibleForTesting - public static String formatOzoneVolumeName(String userName) { - return String.format(OM_S3_VOLUME_PREFIX + "%s", userName); - } - - /** - * Generate S3Mapping for provided volume and bucket. This information will - * be persisted in s3 table in OM DB. - * @param volumeName - * @param bucketName - * @return s3Mapping - */ - @VisibleForTesting - public static String formatS3MappingName(String volumeName, - String bucketName) { - return String.format("%s" + OzoneConsts.OM_KEY_PREFIX + "%s", volumeName, - bucketName); - } - - /** - * Create {@link OmVolumeArgs} which needs to be persisted in volume table - * in OM DB. - * @param volumeName - * @param userName - * @param creationTime - * @return {@link OmVolumeArgs} - */ - private OmVolumeArgs createOmVolumeArgs(String volumeName, String userName, - long creationTime) throws IOException { - OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder() - .setAdminName(S3_ADMIN_NAME).setVolume(volumeName) - .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES) - .setOwnerName(userName) - .setCreationTime(creationTime); - - // Set default acls. - for (OzoneAcl acl : getDefaultAcls(userName)) { - builder.addOzoneAcls(OzoneAcl.toProtobuf(acl)); - } - - return builder.build(); - } - - /** - * Create {@link OmBucketInfo} which needs to be persisted in to bucket table - * in OM DB. 
- * @param volumeName - * @param s3BucketName - * @param creationTime - * @return {@link OmBucketInfo} - */ - private OmBucketInfo createOmBucketInfo(String volumeName, - String s3BucketName, String userName, long creationTime) { - //TODO: Now S3Bucket API takes only bucketName as param. In future if we - // support some configurable options we need to fix this. - OmBucketInfo.Builder builder = - OmBucketInfo.newBuilder().setVolumeName(volumeName) - .setBucketName(s3BucketName).setIsVersionEnabled(Boolean.FALSE) - .setStorageType(StorageType.DEFAULT).setCreationTime(creationTime); - - // Set default acls. - builder.setAcls(getDefaultAcls(userName)); - - return builder.build(); - } - - /** - * Build auditMap. - * @param userName - * @param s3BucketName - * @return auditMap - */ - private Map buildAuditMap(String userName, - String s3BucketName) { - Map auditMap = new HashMap<>(); - auditMap.put(userName, OzoneConsts.USERNAME); - auditMap.put(s3BucketName, OzoneConsts.S3_BUCKET); - return auditMap; - } - - /** - * Get default acls. - * */ - private List getDefaultAcls(String userName) { - UserGroupInformation ugi = createUGI(); - return OzoneAcl.parseAcls("user:" + (ugi == null ? userName : - ugi.getUserName()) + ":a,user:" + S3_ADMIN_NAME + ":a"); - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java deleted file mode 100644 index 5d5932ff3f5..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.bucket; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketDeleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .S3DeleteBucketRequest; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MAX_LENGTH; -import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MIN_LENGTH; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK; - -/** - * Handle Create S3Bucket request. - */ -public class S3BucketDeleteRequest extends OMVolumeRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(S3BucketDeleteRequest.class); - - public S3BucketDeleteRequest(OMRequest omRequest) { - super(omRequest); - } - - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - S3DeleteBucketRequest s3DeleteBucketRequest = - getOmRequest().getDeleteS3BucketRequest(); - - // TODO: Do we need to enforce the bucket rules in this code path? - // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html - - // For now only checked the length. 
- int bucketLength = s3DeleteBucketRequest.getS3BucketName().length(); - if (bucketLength < S3_BUCKET_MIN_LENGTH || - bucketLength >= S3_BUCKET_MAX_LENGTH) { - throw new OMException("S3BucketName must be at least 3 and not more " + - "than 63 characters long", - OMException.ResultCodes.S3_BUCKET_INVALID_LENGTH); - } - - return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - S3DeleteBucketRequest s3DeleteBucketRequest = - getOmRequest().getDeleteS3BucketRequest(); - - String s3BucketName = s3DeleteBucketRequest.getS3BucketName(); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumS3BucketDeletes(); - IOException exception = null; - boolean acquiredS3Lock = false; - boolean acquiredBucketLock = false; - String volumeName = null; - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - OMClientResponse omClientResponse = null; - try { - // TODO to support S3 ACL later. - acquiredS3Lock = omMetadataManager.getLock().acquireWriteLock( - S3_BUCKET_LOCK, s3BucketName); - - String s3Mapping = omMetadataManager.getS3Table().get(s3BucketName); - - if (s3Mapping == null) { - throw new OMException("S3Bucket " + s3BucketName + " not found", - OMException.ResultCodes.S3_BUCKET_NOT_FOUND); - } else { - volumeName = getOzoneVolumeName(s3Mapping); - - acquiredBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, s3BucketName); - - String bucketKey = omMetadataManager.getBucketKey(volumeName, - s3BucketName); - - // Update bucket table cache and s3 table cache. - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(bucketKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - omMetadataManager.getS3Table().addCacheEntry( - new CacheKey<>(s3BucketName), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - } - - omResponse.setDeleteS3BucketResponse( - OzoneManagerProtocolProtos.S3DeleteBucketResponse.newBuilder()); - - omClientResponse = new S3BucketDeleteResponse(s3BucketName, volumeName, - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3BucketDeleteResponse(null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - s3BucketName); - } - if (acquiredS3Lock) { - omMetadataManager.getLock().releaseWriteLock(S3_BUCKET_LOCK, - s3BucketName); - } - } - - // Performing audit logging outside of the lock. - auditLog(ozoneManager.getAuditLogger(), - buildAuditMessage(OMAction.DELETE_S3_BUCKET, - buildAuditMap(s3BucketName), exception, - getOmRequest().getUserInfo())); - - if (exception == null) { - // Decrement s3 bucket and ozone bucket count. As S3 bucket is mapped to - // ozonevolume/ozone bucket. 
- LOG.debug("S3Bucket {} successfully deleted", s3BucketName); - omMetrics.decNumS3Buckets(); - omMetrics.decNumBuckets(); - - return omClientResponse; - } else { - LOG.error("S3Bucket Deletion failed for S3Bucket:{}", s3BucketName, - exception); - omMetrics.incNumS3BucketDeleteFails(); - return omClientResponse; - } - } - - /** - * Extract volumeName from s3Mapping. - * @param s3Mapping - * @return volumeName - * @throws IOException - */ - private String getOzoneVolumeName(String s3Mapping) throws IOException { - return s3Mapping.split("/")[0]; - } - - private Map buildAuditMap(String s3BucketName) { - Map auditMap = new HashMap<>(); - auditMap.put(s3BucketName, OzoneConsts.S3_BUCKET); - return auditMap; - } - -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java deleted file mode 100644 index 7296585886a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to s3 bucket requests. - */ -package org.apache.hadoop.ozone.om.request.s3.bucket; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java deleted file mode 100644 index df0e168e2e0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ /dev/null @@ -1,219 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.multipart.S3InitiateMultipartUploadResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.UniqueId; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.UUID; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handles initiate multipart upload request. 
- */ -public class S3InitiateMultipartUploadRequest extends OMKeyRequest { - - - private static final Logger LOG = - LoggerFactory.getLogger(S3InitiateMultipartUploadRequest.class); - - public S3InitiateMultipartUploadRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) { - MultipartInfoInitiateRequest multipartInfoInitiateRequest = - getOmRequest().getInitiateMultiPartUploadRequest(); - Preconditions.checkNotNull(multipartInfoInitiateRequest); - - OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs = - multipartInfoInitiateRequest.getKeyArgs().toBuilder() - .setMultipartUploadID(UUID.randomUUID().toString() + "-" + - UniqueId.next()).setModificationTime(Time.now()); - - return getOmRequest().toBuilder() - .setUserInfo(getUserInfo()) - .setInitiateMultiPartUploadRequest( - multipartInfoInitiateRequest.toBuilder().setKeyArgs(newKeyArgs)) - .build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - MultipartInfoInitiateRequest multipartInfoInitiateRequest = - getOmRequest().getInitiateMultiPartUploadRequest(); - - OzoneManagerProtocolProtos.KeyArgs keyArgs = - multipartInfoInitiateRequest.getKeyArgs(); - - Preconditions.checkNotNull(keyArgs.getMultipartUploadID()); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - ozoneManager.getMetrics().incNumInitiateMultipartUploads(); - boolean acquiredBucketLock = false; - IOException exception = null; - OmMultipartKeyInfo multipartKeyInfo = null; - OmKeyInfo omKeyInfo = null; - - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - OMClientResponse omClientResponse = null; - try { - // TODO to support S3 ACL later. - acquiredBucketLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - - // We are adding uploadId to key, because if multiple users try to - // perform multipart upload on the same key, each will try to upload, who - // ever finally commit the key, we see that key in ozone. Suppose if we - // don't add id, and use the same key /volume/bucket/key, when multiple - // users try to upload the key, we update the parts of the key's from - // multiple users to same key, and the key output can be a mix of the - // parts from multiple users. - - // So on same key if multiple time multipart upload is initiated we - // store multiple entries in the openKey Table. - // Checked AWS S3, when we try to run multipart upload, each time a - // new uploadId is returned. And also even if a key exist when initiate - // multipart upload request is received, it returns multipart upload id - // for the key. - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); - - // Not checking if there is an already key for this in the keyTable, as - // during final complete multipart upload we take care of this. AWS S3 - // behavior is also like this, even when key exists in a bucket, user - // can still initiate MPU. 
- - - multipartKeyInfo = new OmMultipartKeyInfo( - keyArgs.getMultipartUploadID(), new HashMap<>()); - - omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setReplicationType(keyArgs.getType()) - .setReplicationFactor(keyArgs.getFactor()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) - .build(); - - - // Add to cache - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex)); - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.of(multipartKeyInfo), transactionLogIndex)); - - - omClientResponse = - new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo, - omResponse.setInitiateMultiPartUploadResponse( - MultipartInfoInitiateResponse.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setMultipartUploadID(keyArgs.getMultipartUploadID())) - .build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3InitiateMultipartUploadResponse(null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredBucketLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - - - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.INITIATE_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), - exception, getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("S3 InitiateMultipart Upload request for Key {} in " + - "Volume/Bucket {}/{} is successfully completed", keyName, - volumeName, bucketName); - - return omClientResponse; - - } else { - ozoneManager.getMetrics().incNumInitiateMultipartUploadFails(); - LOG.error("S3 InitiateMultipart Upload request for Key {} in " + - "Volume/Bucket {}/{} is failed", keyName, volumeName, bucketName, - exception); - return omClientResponse; - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java deleted file mode 100644 index b65328d325a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.io.IOException; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.multipart - .S3MultipartUploadAbortResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadAbortResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handles Abort of multipart upload request. 
- */ -public class S3MultipartUploadAbortRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(S3MultipartUploadAbortRequest.class); - - public S3MultipartUploadAbortRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - KeyArgs keyArgs = - getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs(); - - return getOmRequest().toBuilder().setAbortMultiPartUploadRequest( - getOmRequest().getAbortMultiPartUploadRequest().toBuilder() - .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now()))) - .setUserInfo(getUserInfo()).build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - OzoneManagerProtocolProtos.KeyArgs keyArgs = - getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - ozoneManager.getMetrics().incNumAbortMultipartUploads(); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean acquiredLock = false; - IOException exception = null; - OmMultipartKeyInfo multipartKeyInfo = null; - String multipartKey = null; - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - OMClientResponse omClientResponse = null; - try { - // TODO to support S3 ACL later. - acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, keyArgs.getMultipartUploadID()); - - OmKeyInfo omKeyInfo = - omMetadataManager.getOpenKeyTable().get(multipartKey); - - // If there is no entry in openKeyTable, then there is no multipart - // upload initiated for this key. - if (omKeyInfo == null) { - throw new OMException("Abort Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - multipartKeyInfo = omMetadataManager - .getMultipartInfoTable().get(multipartKey); - - - // Update cache of openKeyTable and multipartInfo table. - // No need to add the cache entries to delete table, as the entries - // in delete table are not used by any read/write operations. 
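The cache handling described above, where entries are marked absent rather than removed and are tagged with the transaction log index, is the pattern shared by the other deleted request handlers as well. A minimal sketch of that pattern, separate from the removed file and assuming only the Guava Optional the removed code already imports; the nested Entry class is a stand-in for CacheValue, not the real type:

import com.google.common.base.Optional;

import java.util.HashMap;
import java.util.Map;

public class TombstoneCacheSketch<K, V> {
  // Value plus the transaction index that produced it, mirroring CacheValue.
  static final class Entry<T> {
    final Optional<T> value;
    final long txIndex;
    Entry(Optional<T> value, long txIndex) {
      this.value = value;
      this.txIndex = txIndex;
    }
  }

  private final Map<K, Entry<V>> cache = new HashMap<>();

  void put(K key, V value, long txIndex) {
    cache.put(key, new Entry<>(Optional.of(value), txIndex));
  }

  // Abort does not delete from the backing store here; it records an absent
  // value so readers treat the key as gone until the real delete is flushed.
  void markDeleted(K key, long txIndex) {
    cache.put(key, new Entry<>(Optional.<V>absent(), txIndex));
  }

  boolean isVisible(K key) {
    Entry<V> entry = cache.get(key);
    return entry != null && entry.value.isPresent();
  }

  public static void main(String[] args) {
    TombstoneCacheSketch<String, String> cache = new TombstoneCacheSketch<>();
    cache.put("/vol/bucket/key/upload-1", "open", 10L);
    cache.markDeleted("/vol/bucket/key/upload-1", 11L);
    System.out.println(cache.isVisible("/vol/bucket/key/upload-1")); // false
  }
}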
- omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - } - - omClientResponse = new S3MultipartUploadAbortResponse(multipartKey, - multipartKeyInfo, - omResponse.setAbortMultiPartUploadResponse( - MultipartUploadAbortResponse.newBuilder()).build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3MultipartUploadAbortResponse(multipartKey, - multipartKeyInfo, createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.ABORT_MULTIPART_UPLOAD, buildKeyArgsAuditMap(keyArgs), - exception, getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("Abort Multipart request is successfully completed for " + - "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName, - bucketName); - } else { - ozoneManager.getMetrics().incNumAbortMultipartUploadFails(); - LOG.error("Abort Multipart request is failed for " + - "KeyName {} in VolumeName/Bucket {}/{}", keyName, volumeName, - bucketName, exception); - } - return omClientResponse; - - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java deleted file mode 100644 index cf7db655a02..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ /dev/null @@ -1,228 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.multipart - .S3MultipartUploadCommitPartResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartCommitUploadPartResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; - -/** - * Handle Multipart upload commit upload part file. 
- */ -public class S3MultipartUploadCommitPartRequest extends OMKeyRequest { - - - private static final Logger LOG = - LoggerFactory.getLogger(S3MultipartUploadCommitPartRequest.class); - - public S3MultipartUploadCommitPartRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) { - MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = - getOmRequest().getCommitMultiPartUploadRequest(); - - return getOmRequest().toBuilder().setCommitMultiPartUploadRequest( - multipartCommitUploadPartRequest.toBuilder() - .setKeyArgs(multipartCommitUploadPartRequest.getKeyArgs() - .toBuilder().setModificationTime(Time.now()))) - .setUserInfo(getUserInfo()).build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = - getOmRequest().getCommitMultiPartUploadRequest(); - - OzoneManagerProtocolProtos.KeyArgs keyArgs = - multipartCommitUploadPartRequest.getKeyArgs(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - ozoneManager.getMetrics().incNumCommitMultipartUploadParts(); - - boolean acquiredLock = false; - - IOException exception = null; - String partName = null; - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - OMClientResponse omClientResponse = null; - OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo = null; - String openKey = null; - OmKeyInfo omKeyInfo = null; - String multipartKey = null; - OmMultipartKeyInfo multipartKeyInfo = null; - try { - // TODO to support S3 ACL later. - acquiredLock = - omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - - String uploadID = keyArgs.getMultipartUploadID(); - multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - - multipartKeyInfo = - omMetadataManager.getMultipartInfoTable().get(multipartKey); - - long clientID = multipartCommitUploadPartRequest.getClientID(); - - openKey = omMetadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - if (omKeyInfo == null) { - throw new OMException("Failed to commit Multipart Upload key, as " + - openKey + "entry is not found in the openKey table", KEY_NOT_FOUND); - } - - // set the data size and location info list - omKeyInfo.setDataSize(keyArgs.getDataSize()); - omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList())); - // Set Modification time - omKeyInfo.setModificationTime(keyArgs.getModificationTime()); - - partName = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName) + clientID; - - if (multipartKeyInfo == null) { - // This can occur when user started uploading part by the time commit - // of that part happens, in between the user might have requested - // abort multipart upload. 
If we just throw exception, then the data - // will not be garbage collected, so move this part to delete table - // and throw error - // Move this part to delete table. - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, - OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - int partNumber = keyArgs.getMultipartNumber(); - oldPartKeyInfo = multipartKeyInfo.getPartKeyInfo(partNumber); - - // Build this multipart upload part info. - OzoneManagerProtocolProtos.PartKeyInfo.Builder partKeyInfo = - OzoneManagerProtocolProtos.PartKeyInfo.newBuilder(); - partKeyInfo.setPartName(partName); - partKeyInfo.setPartNumber(partNumber); - partKeyInfo.setPartKeyInfo(omKeyInfo.getProtobuf()); - - // Add this part information in to multipartKeyInfo. - multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build()); - - // Add to cache. - - // Delete from open key table and add it to multipart info table. - // No need to add cache entries to delete table, as no - // read/write requests that info for validation. - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.of(multipartKeyInfo), - transactionLogIndex)); - - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(openKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - } - - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder().setPartName(partName)); - omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey, - openKey, omKeyInfo, multipartKeyInfo, - oldPartKeyInfo, omResponse.build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3MultipartUploadCommitPartResponse(multipartKey, - openKey, omKeyInfo, multipartKeyInfo, - oldPartKeyInfo, createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.COMMIT_MULTIPART_UPLOAD_PARTKEY, buildKeyArgsAuditMap(keyArgs), - exception, getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("MultipartUpload Commit is successfully for Key:{} in " + - "Volume/Bucket {}/{}", keyName, volumeName, bucketName); - - } else { - LOG.error("MultipartUpload Commit is failed for Key:{} in " + - "Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); - ozoneManager.getMetrics().incNumCommitMultipartUploadPartFails(); - } - return omClientResponse; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java deleted file mode 100644 index ace2dbc4f13..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import com.google.common.base.Optional; -import org.apache.commons.codec.digest.DigestUtils; -import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Handle Multipart upload complete request. 
- */ -public class S3MultipartUploadCompleteRequest extends OMKeyRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); - - public S3MultipartUploadCompleteRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - MultipartUploadCompleteRequest multipartUploadCompleteRequest = - getOmRequest().getCompleteMultiPartUploadRequest(); - - KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs(); - - return getOmRequest().toBuilder() - .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest - .toBuilder().setKeyArgs(keyArgs.toBuilder() - .setModificationTime(Time.now()))) - .setUserInfo(getUserInfo()).build(); - - } - - @Override - @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - MultipartUploadCompleteRequest multipartUploadCompleteRequest = - getOmRequest().getCompleteMultiPartUploadRequest(); - - KeyArgs keyArgs = multipartUploadCompleteRequest.getKeyArgs(); - - List partsList = - multipartUploadCompleteRequest.getPartsListList(); - - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - String uploadID = keyArgs.getMultipartUploadID(); - - ozoneManager.getMetrics().incNumCompleteMultipartUploads(); - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - boolean acquiredLock = false; - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - OMClientResponse omClientResponse = null; - IOException exception = null; - OmMultipartUploadCompleteList multipartUploadList = null; - try { - // TODO to support S3 ACL later. - TreeMap partsMap = new TreeMap<>(); - for (OzoneManagerProtocolProtos.Part part : partsList) { - partsMap.put(part.getPartNumber(), part.getPartName()); - } - - multipartUploadList = new OmMultipartUploadCompleteList(partsMap); - - acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, - volumeName, bucketName); - - validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - OmMultipartKeyInfo multipartKeyInfo = omMetadataManager - .getMultipartInfoTable().get(multipartKey); - - if (multipartKeyInfo == null) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } - TreeMap partKeyInfoMap = - multipartKeyInfo.getPartKeyInfoMap(); - - TreeMap multipartMap = multipartUploadList - .getMultipartMap(); - - // Last key in the map should be having key value as size, as map's - // are sorted. Last entry in both maps should have partNumber as size - // of the map. As we have part entries 1, 2, 3, 4 and then we get - // complete multipart upload request so the map last entry should have 4, - // if it is having value greater or less than map size, then there is - // some thing wrong throw error. 
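The invariant spelled out in the comment above (both maps must list exactly parts 1 through N, where N equals their size) can be restated as a small standalone check. This is an illustrative sketch only, with both maps simplified to part number mapped to part name; the deleted handler enforces the same conditions by throwing OMException with MISMATCH_MULTIPART_LIST or MISSING_UPLOAD_PARTS:

import java.util.TreeMap;

public final class PartListValidationSketch {
  private PartListValidationSketch() { }

  // Client-supplied parts and server-recorded parts are both keyed by part
  // number in sorted maps, so "every part 1..N is present exactly once"
  // reduces to: equal sizes, and the last key of each map equals that size.
  static boolean isCompleteAndConsecutive(TreeMap<Integer, String> provided,
      TreeMap<Integer, String> recorded) {
    if (provided.isEmpty() || provided.size() != recorded.size()) {
      return false;
    }
    int expected = recorded.size();
    return provided.lastKey() == expected && recorded.lastKey() == expected;
  }

  public static void main(String[] args) {
    TreeMap<Integer, String> provided = new TreeMap<>();
    TreeMap<Integer, String> recorded = new TreeMap<>();
    for (int i = 1; i <= 4; i++) {
      provided.put(i, "part-" + i);
      recorded.put(i, "part-" + i);
    }
    System.out.println(isCompleteAndConsecutive(provided, recorded)); // true
    provided.remove(2);
    provided.put(5, "part-5"); // gap at part 2 plus a stray part 5
    System.out.println(isCompleteAndConsecutive(provided, recorded)); // false
  }
}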
- - Map.Entry multipartMapLastEntry = multipartMap - .lastEntry(); - Map.Entry partKeyInfoLastEntry = - partKeyInfoMap.lastEntry(); - if (partKeyInfoMap.size() != multipartMap.size()) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.MISMATCH_MULTIPART_LIST); - } - - // Last entry part Number should be the size of the map, otherwise this - // means we have missing some parts but we got a complete request. - if (multipartMapLastEntry.getKey() != partKeyInfoMap.size() || - partKeyInfoLastEntry.getKey() != partKeyInfoMap.size()) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.MISSING_UPLOAD_PARTS); - } - HddsProtos.ReplicationType type = partKeyInfoLastEntry.getValue() - .getPartKeyInfo().getType(); - HddsProtos.ReplicationFactor factor = partKeyInfoLastEntry.getValue() - .getPartKeyInfo().getFactor(); - List< OmKeyLocationInfo > locations = new ArrayList<>(); - long size = 0; - int partsCount =1; - int partsMapSize = partKeyInfoMap.size(); - for(Map.Entry partKeyInfoEntry : partKeyInfoMap - .entrySet()) { - int partNumber = partKeyInfoEntry.getKey(); - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - // Check we have all parts to complete multipart upload and also - // check partNames provided match with actual part names - String providedPartName = multipartMap.get(partNumber); - String actualPartName = partKeyInfo.getPartName(); - if (partNumber == partsCount) { - if (!actualPartName.equals(providedPartName)) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.MISMATCH_MULTIPART_LIST); - } - OmKeyInfo currentPartKeyInfo = OmKeyInfo - .getFromProtobuf(partKeyInfo.getPartKeyInfo()); - // Check if any part size is less than 5mb, last part can be less - // than 5 mb. - if (partsCount != partsMapSize && - currentPartKeyInfo.getDataSize() < OM_MULTIPART_MIN_SIZE) { - LOG.error("MultipartUpload: " + ozoneKey + "Part number: " + - partKeyInfo.getPartNumber() + "size " + currentPartKeyInfo - .getDataSize() + " is less than minimum part size " + - OzoneConsts.OM_MULTIPART_MIN_SIZE); - throw new OMException("Complete Multipart Upload Failed: Entity " + - "too small: volume: " + volumeName + "bucket: " + bucketName - + "key: " + keyName, OMException.ResultCodes.ENTITY_TOO_SMALL); - } - // As all part keys will have only one version. - OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo - .getKeyLocationVersions().get(0); - locations.addAll(currentKeyInfoGroup.getLocationList()); - size += currentPartKeyInfo.getDataSize(); - } else { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - OMException.ResultCodes.MISSING_UPLOAD_PARTS); - } - partsCount++; - } - if (omKeyInfo == null) { - // This is a newly added key, it does not have any versions. - OmKeyLocationInfoGroup keyLocationInfoGroup = new - OmKeyLocationInfoGroup(0, locations); - // A newly created key, this is the first version. 
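One more rule enforced in the loop above: every part except the last must reach the 5 MB minimum (OM_MULTIPART_MIN_SIZE), otherwise the request fails with ENTITY_TOO_SMALL. A hedged restatement of just that rule, separate from the removed file, with plain long sizes standing in for OmKeyInfo data sizes:

public final class MinPartSizeSketch {
  // 5 MB, mirroring the intent of OzoneConsts.OM_MULTIPART_MIN_SIZE; the real
  // constant lives in the Ozone code base, this literal is only for the demo.
  private static final long MIN_PART_SIZE = 5L * 1024 * 1024;

  private MinPartSizeSketch() { }

  // Every part except the last one must reach the minimum size.
  static boolean partSizesAcceptable(long[] partSizes) {
    for (int i = 0; i < partSizes.length - 1; i++) {
      if (partSizes[i] < MIN_PART_SIZE) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    long fiveMb = 5L * 1024 * 1024;
    System.out.println(partSizesAcceptable(new long[] {fiveMb, fiveMb, 10}));  // true
    System.out.println(partSizesAcceptable(new long[] {10, fiveMb, fiveMb}));  // false
  }
}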
- omKeyInfo = new OmKeyInfo.Builder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName) - .setReplicationFactor(factor).setReplicationType(type) - .setCreationTime(keyArgs.getModificationTime()) - .setModificationTime(keyArgs.getModificationTime()) - .setDataSize(size) - .setOmKeyLocationInfos( - Collections.singletonList(keyLocationInfoGroup)) - .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())) - .build(); - } else { - // Already a version exists, so we should add it as a new version. - // But now as versioning is not supported, just following the commit - // key approach. When versioning support comes, then we can uncomment - // below code keyInfo.addNewVersion(locations); - omKeyInfo.updateLocationInfoList(locations); - omKeyInfo.setModificationTime(keyArgs.getModificationTime()); - } - - updateCache(omMetadataManager, ozoneKey, multipartKey, omKeyInfo, - transactionLogIndex); - - omResponse.setCompleteMultiPartUploadResponse( - MultipartUploadCompleteResponse.newBuilder() - .setVolume(volumeName) - .setBucket(bucketName) - .setKey(keyName) - .setHash(DigestUtils.sha256Hex(keyName))); - - omClientResponse = new S3MultipartUploadCompleteResponse(multipartKey, - omKeyInfo, omResponse.build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3MultipartUploadCompleteResponse(null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - Map auditMap = buildKeyArgsAuditMap(keyArgs); - if (multipartUploadList != null) { - auditMap.put(OzoneConsts.MULTIPART_LIST, multipartUploadList - .getMultipartMap().toString()); - } - - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.COMPLETE_MULTIPART_UPLOAD, auditMap, exception, - getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("MultipartUpload Complete request is successfull for Key: {} " + - "in Volume/Bucket {}/{}", keyName, volumeName, bucketName); - } else { - LOG.error("MultipartUpload Complete request failed for Key: {} " + - "in Volume/Bucket {}/{}", keyName, volumeName, bucketName, exception); - ozoneManager.getMetrics().incNumCompleteMultipartUploadFails(); - } - - return omClientResponse; - } - - private void updateCache(OMMetadataManager omMetadataManager, - String ozoneKey, String multipartKey, OmKeyInfo omKeyInfo, - long transactionLogIndex) { - // Update cache. - // 1. Add key entry to key table. - // 2. Delete multipartKey entry from openKeyTable and multipartInfo table. 
- omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(ozoneKey), - new CacheValue<>(Optional.of(omKeyInfo), transactionLogIndex)); - - omMetadataManager.getOpenKeyTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - omMetadataManager.getMultipartInfoTable().addCacheEntry( - new CacheKey<>(multipartKey), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java deleted file mode 100644 index 42b99200562..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to S3 multipart upload requests. - */ -package org.apache.hadoop.ozone.om.request.s3.multipart; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java deleted file mode 100644 index d8f6478576a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.security; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import com.google.common.base.Optional; -import org.apache.commons.codec.digest.DigestUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.s3.security.S3GetSecretResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateGetS3SecretRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_SECRET_LOCK; - -/** - * Handles GetS3Secret request. - */ -public class S3GetSecretRequest extends OMClientRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(S3GetSecretRequest.class); - - public S3GetSecretRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - GetS3SecretRequest s3GetSecretRequest = - getOmRequest().getGetS3SecretRequest(); - - // Generate S3 Secret to be used by OM quorum. - String kerberosID = s3GetSecretRequest.getKerberosID(); - - UserGroupInformation user = ProtobufRpcEngine.Server.getRemoteUser(); - if (!user.getUserName().equals(kerberosID)) { - throw new OMException("User mismatch. Requested user name is " + - "mismatched " + kerberosID +", with current user " + - user.getUserName(), OMException.ResultCodes.USER_MISMATCH); - } - - String s3Secret = DigestUtils.sha256Hex(OmUtils.getSHADigest()); - - UpdateGetS3SecretRequest updateGetS3SecretRequest = - UpdateGetS3SecretRequest.newBuilder() - .setAwsSecret(s3Secret) - .setKerberosID(kerberosID).build(); - - // Client issues GetS3Secret request, when received by OM leader - // it will generate s3Secret. Original GetS3Secret request is - // converted to UpdateGetS3Secret request with the generated token - // information. This updated request will be submitted to Ratis. 
In this - // way S3Secret created by leader, will be replicated across all - // OMs. With this approach, original GetS3Secret request from - // client does not need any proto changes. - OMRequest.Builder omRequest = OMRequest.newBuilder() - .setUserInfo(getUserInfo()) - .setUpdateGetS3SecretRequest(updateGetS3SecretRequest) - .setCmdType(getOmRequest().getCmdType()) - .setClientId(getOmRequest().getClientId()); - - if (getOmRequest().hasTraceID()) { - omRequest.setTraceID(getOmRequest().getTraceID()); - } - - return omRequest.build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - - OMClientResponse omClientResponse = null; - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.GetS3Secret) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - boolean acquiredLock = false; - IOException exception = null; - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - UpdateGetS3SecretRequest updateGetS3SecretRequest = - getOmRequest().getUpdateGetS3SecretRequest(); - String kerberosID = updateGetS3SecretRequest.getKerberosID(); - try { - String awsSecret = updateGetS3SecretRequest.getAwsSecret(); - acquiredLock = - omMetadataManager.getLock().acquireWriteLock(S3_SECRET_LOCK, - kerberosID); - - S3SecretValue s3SecretValue = - omMetadataManager.getS3SecretTable().get(kerberosID); - - // If s3Secret for user is not in S3Secret table, add the Secret to cache. - if (s3SecretValue == null) { - omMetadataManager.getS3SecretTable().addCacheEntry( - new CacheKey<>(kerberosID), - new CacheValue<>(Optional.of(new S3SecretValue(kerberosID, - awsSecret)), transactionLogIndex)); - } else { - // If it already exists, use the existing one. - awsSecret = s3SecretValue.getAwsSecret(); - } - - GetS3SecretResponse.Builder getS3SecretResponse = GetS3SecretResponse - .newBuilder().setS3Secret(S3Secret.newBuilder() - .setAwsSecret(awsSecret).setKerberosID(kerberosID)); - - if (s3SecretValue == null) { - omClientResponse = - new S3GetSecretResponse(new S3SecretValue(kerberosID, awsSecret), - omResponse.setGetS3SecretResponse(getS3SecretResponse).build()); - } else { - // As when it already exists, we don't need to add to DB again. So - // set the value to null. 
- omClientResponse = new S3GetSecretResponse(null, - omResponse.setGetS3SecretResponse(getS3SecretResponse).build()); - } - - } catch (IOException ex) { - exception = ex; - omClientResponse = new S3GetSecretResponse(null, - createErrorOMResponse(omResponse, ex)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture(ozoneManagerDoubleBufferHelper.add( - omClientResponse, transactionLogIndex)); - } - if (acquiredLock) { - omMetadataManager.getLock().releaseWriteLock(S3_SECRET_LOCK, - kerberosID); - } - } - - - Map auditMap = new HashMap<>(); - auditMap.put(OzoneConsts.S3_GETSECRET_USER, kerberosID); - - // audit log - auditLog(ozoneManager.getAuditLogger(), buildAuditMessage( - OMAction.GET_S3_SECRET, auditMap, - exception, getOmRequest().getUserInfo())); - - if (exception == null) { - LOG.debug("Secret for accessKey:{} is generated Successfully", - kerberosID); - } else { - LOG.error("Secret for accessKey:{} is generation failed", kerberosID, - exception); - } - return omClientResponse; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java deleted file mode 100644 index 94a6b116869..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to S3 security requests. - */ -package org.apache.hadoop.ozone.om.request.s3.security; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java deleted file mode 100644 index 7bf7a0b5b3a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.security; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.security.OMCancelDelegationTokenResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.proto.SecurityProtos; -import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handle CancelDelegationToken Request. - */ -public class OMCancelDelegationTokenRequest extends OMClientRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMGetDelegationTokenRequest.class); - - public OMCancelDelegationTokenRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - - // Call OM to cancel token, this does check whether we can cancel token - // or not. This does not remove token from DB/in-memory. - ozoneManager.cancelDelegationToken(getToken()); - - return super.preExecute(ozoneManager); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - OMClientResponse omClientResponse = null; - OMResponse.Builder omResponse = - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CancelDelegationToken) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - OzoneTokenIdentifier ozoneTokenIdentifier = null; - try { - ozoneTokenIdentifier = - OzoneTokenIdentifier.readProtoBuf(getToken().getIdentifier()); - - // Remove token from in-memory. - ozoneManager.getDelegationTokenMgr().removeToken(ozoneTokenIdentifier); - - // Update Cache. 
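Cancellation above touches two stores: the in-memory token manager, so validation stops accepting the token immediately, and the persisted delegation token table, whose cache entry is marked absent just below. A toy two-store version of that flow, separate from the removed file, with hypothetical names and plain collections standing in for the secret manager and the backing table:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TokenCancelSketch {
  // Fast in-memory view used to answer "is this token still valid?".
  private final Set<String> liveTokens = new HashSet<>();
  // Stand-in for the persisted table that survives a restart.
  private final Map<String, Long> persistedRenewTimes = new HashMap<>();

  void issue(String tokenId, long renewTime) {
    liveTokens.add(tokenId);
    persistedRenewTimes.put(tokenId, renewTime);
  }

  void cancel(String tokenId) {
    liveTokens.remove(tokenId);          // stop accepting it right away
    persistedRenewTimes.remove(tokenId); // and forget it durably
  }

  boolean isValid(String tokenId) {
    return liveTokens.contains(tokenId);
  }

  public static void main(String[] args) {
    TokenCancelSketch sketch = new TokenCancelSketch();
    sketch.issue("token-1", 1000L);
    sketch.cancel("token-1");
    System.out.println(sketch.isValid("token-1")); // false
  }
}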
- omMetadataManager.getDelegationTokenTable().addCacheEntry( - new CacheKey<>(ozoneTokenIdentifier), - new CacheValue<>(Optional.absent(), transactionLogIndex)); - - omClientResponse = - new OMCancelDelegationTokenResponse(ozoneTokenIdentifier, - omResponse.setCancelDelegationTokenResponse( - CancelDelegationTokenResponseProto.newBuilder().setResponse( - SecurityProtos.CancelDelegationTokenResponseProto - .newBuilder())).build()); - } catch (IOException ex) { - LOG.error("Error in cancel DelegationToken {}", ozoneTokenIdentifier, ex); - omClientResponse = new OMCancelDelegationTokenResponse(null, - createErrorOMResponse(omResponse, ex)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Cancelled delegation token: {}", ozoneTokenIdentifier); - } - - return omClientResponse; - } - - - public Token getToken() { - CancelDelegationTokenRequestProto cancelDelegationTokenRequest = - getOmRequest().getCancelDelegationTokenRequest(); - - return OMPBHelper.convertToDelegationToken( - cancelDelegationTokenRequest.getToken()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java deleted file mode 100644 index 9c667e8ba11..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.security; - -import com.google.common.base.Optional; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.security.OMGetDelegationTokenResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateGetDelegationTokenRequest; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.proto.SecurityProtos; -import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Handle GetDelegationToken Request. - */ -public class OMGetDelegationTokenRequest extends OMClientRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMGetDelegationTokenRequest.class); - - public OMGetDelegationTokenRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - GetDelegationTokenRequestProto getDelegationTokenRequest = - getOmRequest().getGetDelegationTokenRequest(); - - // Call OM to create token - Token token = ozoneManager - .getDelegationToken(new Text(getDelegationTokenRequest.getRenewer())); - - - // Client issues GetDelegationToken request, when received by OM leader - // it will generate a token. Original GetDelegationToken request is - // converted to UpdateGetDelegationToken request with the generated token - // information. This updated request will be submitted to Ratis. In this - // way delegation token created by leader, will be replicated across all - // OMs. With this approach, original GetDelegationToken request from - // client does not need any proto changes. - - // Create UpdateGetDelegationTokenRequest with token response. 
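This mirrors the leader-side transformation used by the other security requests: the non-deterministic work (creating the token) happens once in preExecute on the leader, and the rewritten request carries the result so that applying it through Ratis writes identical state on every OM. A schematic sketch of that split, separate from the removed file, with hypothetical request and apply types rather than the OM protobuf messages:

public final class LeaderTransformSketch {
  static final class Request {
    final String renewer;
    final String materializedToken; // null until the leader fills it in
    Request(String renewer, String materializedToken) {
      this.renewer = renewer;
      this.materializedToken = materializedToken;
    }
  }

  private LeaderTransformSketch() { }

  // Runs only on the leader: performs the non-deterministic work once.
  static Request preExecute(Request original) {
    String token = "token-for-" + original.renewer + "-" + System.nanoTime();
    return new Request(original.renewer, token);
  }

  // Runs on every replica: purely deterministic, just records the payload.
  static String apply(Request replicated) {
    return replicated.materializedToken;
  }

  public static void main(String[] args) {
    Request replicated = preExecute(new Request("alice", null));
    // Each replica applies the same payload, so their stores stay identical.
    System.out.println(apply(replicated).equals(apply(replicated))); // true
  }
}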
- - OMRequest.Builder omRequest; - if (token != null) { - omRequest = OMRequest.newBuilder().setUserInfo(getUserInfo()) - .setUpdateGetDelegationTokenRequest( - UpdateGetDelegationTokenRequest.newBuilder() - .setGetDelegationTokenResponse( - GetDelegationTokenResponseProto.newBuilder() - .setResponse( - SecurityProtos.GetDelegationTokenResponseProto - .newBuilder().setToken(OMPBHelper - .convertToTokenProto(token)).build()) - .build())) - .setCmdType(getOmRequest().getCmdType()) - .setClientId(getOmRequest().getClientId()); - - - } else { - // If token is null, do not set GetDelegationTokenResponse with response. - omRequest = OMRequest.newBuilder().setUserInfo(getUserInfo()) - .setUpdateGetDelegationTokenRequest( - UpdateGetDelegationTokenRequest.newBuilder() - .setGetDelegationTokenResponse( - GetDelegationTokenResponseProto.newBuilder())) - .setCmdType(getOmRequest().getCmdType()) - .setClientId(getOmRequest().getClientId()); - } - if (getOmRequest().hasTraceID()) { - omRequest.setTraceID(getOmRequest().getTraceID()); - } - return omRequest.build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest = - getOmRequest().getUpdateGetDelegationTokenRequest(); - - OMResponse.Builder omResponse = - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.GetDelegationToken) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - - OMClientResponse omClientResponse = null; - - - // If security is not enabled and token request is received, leader - // returns token null. So, check here if updatedGetDelegationTokenResponse - // has response set or not. If it is not set, then token is null. - if (!updateGetDelegationTokenRequest.getGetDelegationTokenResponse() - .hasResponse()) { - omClientResponse = new OMGetDelegationTokenResponse(null, -1L, - omResponse.setGetDelegationTokenResponse( - GetDelegationTokenResponseProto.newBuilder()).build()); - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - return omClientResponse; - } - - SecurityProtos.TokenProto tokenProto = updateGetDelegationTokenRequest - .getGetDelegationTokenResponse().getResponse().getToken(); - - Token ozoneTokenIdentifierToken = - OMPBHelper.convertToDelegationToken(tokenProto); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - try { - OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier. - readProtoBuf(ozoneTokenIdentifierToken.getIdentifier()); - - // Update in memory map of token. - long renewTime = ozoneManager.getDelegationTokenMgr() - .updateToken(ozoneTokenIdentifierToken, ozoneTokenIdentifier); - - // Update Cache. 
- omMetadataManager.getDelegationTokenTable().addCacheEntry( - new CacheKey<>(ozoneTokenIdentifier), - new CacheValue<>(Optional.of(renewTime), transactionLogIndex)); - - omClientResponse = - new OMGetDelegationTokenResponse(ozoneTokenIdentifier, renewTime, - omResponse.setGetDelegationTokenResponse( - updateGetDelegationTokenRequest - .getGetDelegationTokenResponse()).build()); - } catch (IOException ex) { - LOG.error("Error in Updating DelegationToken {}", - ozoneTokenIdentifierToken, ex); - omClientResponse = new OMGetDelegationTokenResponse(null, -1L, - createErrorOMResponse(omResponse, ex)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Updated delegation token in-memory map: {}", - ozoneTokenIdentifierToken); - } - - return omClientResponse; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java deleted file mode 100644 index b2c03bbbe34..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.security; - -import java.io.IOException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.security.OMRenewDelegationTokenResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UpdateRenewDelegationTokenRequest; -import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -/** - * Handle RenewDelegationToken Request. - */ -public class OMRenewDelegationTokenRequest extends OMClientRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMRenewDelegationTokenRequest.class); - - public OMRenewDelegationTokenRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - RenewDelegationTokenRequestProto renewDelegationTokenRequest = - getOmRequest().getRenewDelegationTokenRequest(); - - // Call OM to renew token - long renewTime = ozoneManager.renewDelegationToken( - OMPBHelper.convertToDelegationToken( - renewDelegationTokenRequest.getToken())); - - RenewDelegationTokenResponseProto.Builder renewResponse = - RenewDelegationTokenResponseProto.newBuilder(); - - renewResponse.setResponse(org.apache.hadoop.security.proto.SecurityProtos - .RenewDelegationTokenResponseProto.newBuilder() - .setNewExpiryTime(renewTime)); - - - // Client issues RenewDelegationToken request, when received by OM leader - // it will renew the token. Original RenewDelegationToken request is - // converted to UpdateRenewDelegationToken request with the token and renew - // information. This updated request will be submitted to Ratis. In this - // way delegation token renewd by leader, will be replicated across all - // OMs. With this approach, original RenewDelegationToken request from - // client does not need any proto changes. - - // Create UpdateRenewDelegationTokenRequest with original request and - // expiry time. 
- OMRequest.Builder omRequest = OMRequest.newBuilder() - .setUserInfo(getUserInfo()) - .setUpdatedRenewDelegationTokenRequest( - UpdateRenewDelegationTokenRequest.newBuilder() - .setRenewDelegationTokenRequest(renewDelegationTokenRequest) - .setRenewDelegationTokenResponse(renewResponse)) - .setCmdType(getOmRequest().getCmdType()) - .setClientId(getOmRequest().getClientId()); - - if (getOmRequest().hasTraceID()) { - omRequest.setTraceID(getOmRequest().getTraceID()); - } - - return omRequest.build(); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - UpdateRenewDelegationTokenRequest updateRenewDelegationTokenRequest = - getOmRequest().getUpdatedRenewDelegationTokenRequest(); - - Token ozoneTokenIdentifierToken = - OMPBHelper.convertToDelegationToken(updateRenewDelegationTokenRequest - .getRenewDelegationTokenRequest().getToken()); - - long renewTime = updateRenewDelegationTokenRequest - .getRenewDelegationTokenResponse().getResponse().getNewExpiryTime(); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - OMClientResponse omClientResponse = null; - OMResponse.Builder omResponse = - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.RenewDelegationToken) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true); - try { - - OzoneTokenIdentifier ozoneTokenIdentifier = OzoneTokenIdentifier. - readProtoBuf(ozoneTokenIdentifierToken.getIdentifier()); - - // Update in memory map of token. - ozoneManager.getDelegationTokenMgr() - .updateRenewToken(ozoneTokenIdentifierToken, ozoneTokenIdentifier, - renewTime); - - // Update Cache. - omMetadataManager.getDelegationTokenTable().addCacheEntry( - new CacheKey<>(ozoneTokenIdentifier), - new CacheValue<>(Optional.of(renewTime), transactionLogIndex)); - - omClientResponse = - new OMRenewDelegationTokenResponse(ozoneTokenIdentifier, renewTime, - omResponse.setRenewDelegationTokenResponse( - updateRenewDelegationTokenRequest - .getRenewDelegationTokenResponse()).build()); - } catch (IOException ex) { - LOG.error("Error in Updating Renew DelegationToken {}", - ozoneTokenIdentifierToken, ex); - omClientResponse = new OMRenewDelegationTokenResponse(null, -1L, - createErrorOMResponse(omResponse, ex)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Updated renew delegation token in-memory map: {} with expiry" + - " time {}", ozoneTokenIdentifierToken, renewTime); - } - - return omClientResponse; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java deleted file mode 100644 index c7608e88a28..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains classes which handle security requests. - */ -package org.apache.hadoop.ozone.om.request.security; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java deleted file mode 100644 index c12cdac6c86..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/ObjectParser.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.util; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OzoneObj.ObjectType; -import org.apache.hadoop.ozone.security.acl.OzoneObj; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; - -/** - * Utility class to parse {@link OzoneObj#getPath()}. - */ -public class ObjectParser { - - private String volume; - private String bucket; - private String key; - - /** - * Parse the path and extract volume, bucket and key names. - * @param path - */ - public ObjectParser(String path, ObjectType objectType) throws OMException { - Preconditions.checkNotNull(path); - String[] tokens = StringUtils.split(path, OZONE_URI_DELIMITER, 3); - - if (objectType == ObjectType.VOLUME && tokens.length == 1) { - volume = tokens[0]; - } else if (objectType == ObjectType.BUCKET && tokens.length == 2) { - volume = tokens[0]; - bucket = tokens[1]; - } else if (objectType == ObjectType.KEY && tokens.length == 3) { - volume = tokens[0]; - bucket = tokens[1]; - key = tokens[2]; - } else { - throw new OMException("Illegal path " + path, - OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST); - } - } - - public String getVolume() { - return volume; - } - - public String getBucket() { - return bucket; - } - - public String getKey() { - return key; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java deleted file mode 100644 index 72fc09a53a3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains helper/utility classes for requests. - */ -package org.apache.hadoop.ozone.om.request.util; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java deleted file mode 100644 index 69da19f244b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo; -import org.apache.hadoop.util.Time; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; - -/** - * Handles volume create request. 
- */ -public class OMVolumeCreateRequest extends OMVolumeRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeCreateRequest.class); - - public OMVolumeCreateRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { - - VolumeInfo volumeInfo = - getOmRequest().getCreateVolumeRequest().getVolumeInfo(); - - // Set creation time - VolumeInfo updatedVolumeInfo = - volumeInfo.toBuilder().setCreationTime(Time.now()).build(); - - - return getOmRequest().toBuilder().setCreateVolumeRequest( - CreateVolumeRequest.newBuilder().setVolumeInfo(updatedVolumeInfo)) - .setUserInfo(getUserInfo()) - .build(); - - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - CreateVolumeRequest createVolumeRequest = - getOmRequest().getCreateVolumeRequest(); - Preconditions.checkNotNull(createVolumeRequest); - VolumeInfo volumeInfo = createVolumeRequest.getVolumeInfo(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumVolumeCreates(); - - String volume = volumeInfo.getVolume(); - String owner = volumeInfo.getOwnerName(); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.CreateVolume).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - - // Doing this here, so we can do protobuf conversion outside of lock. - boolean acquiredVolumeLock = false; - boolean acquiredUserLock = false; - IOException exception = null; - OMClientResponse omClientResponse = null; - OmVolumeArgs omVolumeArgs = null; - Map auditMap = new HashMap<>(); - Collection ozAdmins = ozoneManager.getOzoneAdmins(); - try { - omVolumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo); - // when you create a volume, we set both Object ID and update ID to the - // same ratis transaction ID. The Object ID will never change, but update - // ID will be set to transactionID each time we update the object. - omVolumeArgs.setUpdateID(transactionLogIndex); - omVolumeArgs.setObjectID(transactionLogIndex); - auditMap = omVolumeArgs.toAuditMap(); - - // check Acl - if (ozoneManager.getAclsEnabled()) { - if (!ozAdmins.contains(OZONE_ADMINISTRATORS_WILDCARD) && - !ozAdmins.contains(getUserInfo().getUserName())) { - throw new OMException("Only admin users are authorized to create " + - "Ozone volumes. User: " + getUserInfo().getUserName(), - OMException.ResultCodes.PERMISSION_DENIED); - } - } - - UserVolumeInfo volumeList = null; - - // acquire lock. 
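
The lock discipline referenced by the comment just above is the same across the volume write handlers: the volume write lock is taken first, then the user write lock, and the two are released in reverse order in the finally block. A self-contained sketch of that ordering using plain JDK locks (illustrative only; the removed code used OzoneManagerLock with named resources):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustrative only: volume lock first, then user lock, released in
    // reverse order, matching the discipline of the create/delete handlers.
    class VolumeThenUserLockSketch {
      private final ReentrantReadWriteLock volumeLock = new ReentrantReadWriteLock();
      private final ReentrantReadWriteLock userLock = new ReentrantReadWriteLock();

      void runVolumeWrite(Runnable update) {
        volumeLock.writeLock().lock();
        try {
          userLock.writeLock().lock();
          try {
            update.run(); // mutate the volume table and the owner's volume list
          } finally {
            userLock.writeLock().unlock();
          }
        } finally {
          volumeLock.writeLock().unlock();
        }
      }
    }

Acquiring the two resources in a fixed order and releasing them in reverse avoids deadlocks between concurrent volume and user updates.
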
- acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( - VOLUME_LOCK, volume); - - acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, - owner); - - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - - OmVolumeArgs dbVolumeArgs = - omMetadataManager.getVolumeTable().get(dbVolumeKey); - - if (dbVolumeArgs == null) { - String dbUserKey = omMetadataManager.getUserKey(owner); - volumeList = omMetadataManager.getUserTable().get(dbUserKey); - volumeList = addVolumeToOwnerList(volumeList, volume, owner, - ozoneManager.getMaxUserVolumeCount(), transactionLogIndex); - createVolume(omMetadataManager, omVolumeArgs, volumeList, dbVolumeKey, - dbUserKey, transactionLogIndex); - - omResponse.setCreateVolumeResponse(CreateVolumeResponse.newBuilder() - .build()); - omClientResponse = new OMVolumeCreateResponse(omVolumeArgs, volumeList, - omResponse.build()); - LOG.debug("volume:{} successfully created", omVolumeArgs.getVolume()); - } else { - LOG.debug("volume:{} already exists", omVolumeArgs.getVolume()); - throw new OMException("Volume already exists", - OMException.ResultCodes.VOLUME_ALREADY_EXISTS); - } - - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMVolumeCreateResponse(null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredUserLock) { - omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner); - } - if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); - } - } - - // Performing audit logging outside of the lock. - auditLog(ozoneManager.getAuditLogger(), - buildAuditMessage(OMAction.CREATE_VOLUME, auditMap, exception, - getOmRequest().getUserInfo())); - - // return response after releasing lock. - if (exception == null) { - LOG.info("created volume:{} for user:{}", volume, owner); - omMetrics.incNumVolumes(); - } else { - LOG.error("Volume creation failed for user:{} volume:{}", owner, - volume, exception); - omMetrics.incNumVolumeCreateFails(); - } - return omClientResponse; - } -} - - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java deleted file mode 100644 index f91b02d21f8..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.io.IOException; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeDeleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.USER_LOCK; -/** - * Handles volume delete request. 
- */ -public class OMVolumeDeleteRequest extends OMVolumeRequest { - - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeDeleteRequest.class); - - public OMVolumeDeleteRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - DeleteVolumeRequest deleteVolumeRequest = - getOmRequest().getDeleteVolumeRequest(); - Preconditions.checkNotNull(deleteVolumeRequest); - - String volume = deleteVolumeRequest.getVolumeName(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumVolumeDeletes(); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.DeleteVolume).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean acquiredUserLock = false; - boolean acquiredVolumeLock = false; - IOException exception = null; - String owner = null; - OMClientResponse omClientResponse = null; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.DELETE, volume, - null, null); - } - - OmVolumeArgs omVolumeArgs = null; - OzoneManagerProtocolProtos.UserVolumeInfo newVolumeList = null; - - acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( - VOLUME_LOCK, volume); - owner = getVolumeInfo(omMetadataManager, volume).getOwnerName(); - acquiredUserLock = omMetadataManager.getLock().acquireWriteLock(USER_LOCK, - owner); - - String dbUserKey = omMetadataManager.getUserKey(owner); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - - if (!omMetadataManager.isVolumeEmpty(volume)) { - LOG.debug("volume:{} is not empty", volume); - throw new OMException(OMException.ResultCodes.VOLUME_NOT_EMPTY); - } - - newVolumeList = omMetadataManager.getUserTable().get(owner); - - // delete the volume from the owner list - // as well as delete the volume entry - newVolumeList = delVolumeFromOwnerList(newVolumeList, volume, owner, - transactionLogIndex); - - omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey), - new CacheValue<>(Optional.of(newVolumeList), transactionLogIndex)); - - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), new CacheValue<>(Optional.absent(), - transactionLogIndex)); - - omResponse.setDeleteVolumeResponse( - DeleteVolumeResponse.newBuilder().build()); - omClientResponse = new OMVolumeDeleteResponse(volume, owner, - newVolumeList, omResponse.build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMVolumeDeleteResponse(null, null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredUserLock) { - omMetadataManager.getLock().releaseWriteLock(USER_LOCK, owner); - } - if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); - } - } - - // Performing audit logging outside of the lock. - auditLog(ozoneManager.getAuditLogger(), - buildAuditMessage(OMAction.DELETE_VOLUME, buildVolumeAuditMap(volume), - exception, getOmRequest().getUserInfo())); - - // return response after releasing lock. 
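
The finally block above is the scaffolding every one of these validateAndUpdateCache methods shares: build a success response, convert an IOException into an error response, and in all cases hand the response to the Ratis double buffer at the same transaction log index before releasing the locks, so the in-memory cache update and the eventual RocksDB flush stay ordered. A condensed sketch of that tail, with illustrative types standing in for the removed Ozone classes:

    import java.io.IOException;
    import java.util.function.Function;

    // Condensed sketch of the common handler tail; names are illustrative,
    // not the removed Ozone types.
    final class HandlerTailSketch<R> {
      interface Action<T> { T run() throws IOException; }
      interface DoubleBuffer<T> { void add(T response, long txIndex); }

      R execute(long txIndex, DoubleBuffer<R> buffer,
                Action<R> action, Function<IOException, R> onError) {
        R response = null;
        try {
          response = action.run();
        } catch (IOException e) {
          response = onError.apply(e);   // error response, success = false
        } finally {
          if (response != null) {
            buffer.add(response, txIndex); // flushed to RocksDB later, in order
          }
        }
        return response;
      }
    }
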
- if (exception == null) { - LOG.debug("Volume deleted for user:{} volume:{}", owner, volume); - omMetrics.decNumVolumes(); - } else { - LOG.error("Volume deletion failed for user:{} volume:{}", - owner, volume, exception); - omMetrics.incNumVolumeDeleteFails(); - } - return omClientResponse; - - } - - /** - * Return volume info for the specified volume. This method should be - * called after acquiring volume lock. - * @param omMetadataManager - * @param volume - * @return OmVolumeArgs - * @throws IOException - */ - private OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager, - String volume) throws IOException { - - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - OmVolumeArgs volumeArgs = - omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (volumeArgs == null) { - throw new OMException("Volume " + volume + " is not found", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - return volumeArgs; - - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java deleted file mode 100644 index 7c38c41320d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeRequest.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Defines common methods required for volume requests. - */ -public abstract class OMVolumeRequest extends OMClientRequest { - - public OMVolumeRequest(OMRequest omRequest) { - super(omRequest); - } - - /** - * Delete volume from user volume list. This method should be called after - * acquiring user lock. - * @param volumeList - current volume list owned by user. - * @param volume - volume which needs to deleted from the volume list. - * @param owner - Name of the Owner. - * @param txID - The transaction ID that is updating this value. - * @return UserVolumeInfo - updated UserVolumeInfo. - * @throws IOException - */ - protected UserVolumeInfo delVolumeFromOwnerList(UserVolumeInfo volumeList, - String volume, String owner, long txID) throws IOException { - - List prevVolList = new ArrayList<>(); - - if (volumeList != null) { - prevVolList.addAll(volumeList.getVolumeNamesList()); - } else { - // No Volumes for this user - throw new OMException("User not found: " + owner, - OMException.ResultCodes.USER_NOT_FOUND); - } - - // Remove the volume from the list - prevVolList.remove(volume); - UserVolumeInfo newVolList = UserVolumeInfo.newBuilder() - .addAllVolumeNames(prevVolList) - .setObjectID(volumeList.getObjectID()) - .setUpdateID(txID) - .build(); - return newVolList; - } - - - /** - * Add volume to user volume list. This method should be called after - * acquiring user lock. - * @param volumeList - current volume list owned by user. - * @param volume - volume which needs to be added to this list. - * @param owner - * @param maxUserVolumeCount - * @return VolumeList - which is updated volume list. - * @throws OMException - if user has volumes greater than - * maxUserVolumeCount, an exception is thrown. 
- */ - protected UserVolumeInfo addVolumeToOwnerList(UserVolumeInfo volumeList, - String volume, String owner, long maxUserVolumeCount, long txID) - throws IOException { - - // Check the volume count - if (volumeList != null && - volumeList.getVolumeNamesList().size() >= maxUserVolumeCount) { - throw new OMException("Too many volumes for user:" + owner, - OMException.ResultCodes.USER_TOO_MANY_VOLUMES); - } - - List prevVolList = new ArrayList<>(); - long objectID = txID; - if (volumeList != null) { - prevVolList.addAll(volumeList.getVolumeNamesList()); - objectID = volumeList.getObjectID(); - } - - - // Add the new volume to the list - prevVolList.add(volume); - UserVolumeInfo newVolList = UserVolumeInfo.newBuilder() - .setObjectID(objectID) - .setUpdateID(txID) - .addAllVolumeNames(prevVolList).build(); - - return newVolList; - } - - /** - * Create Ozone Volume. This method should be called after acquiring user - * and volume Lock. - * @param omMetadataManager - * @param omVolumeArgs - * @param volumeList - * @param dbVolumeKey - * @param dbUserKey - * @param transactionLogIndex - * @throws IOException - */ - protected void createVolume(final OMMetadataManager omMetadataManager, - OmVolumeArgs omVolumeArgs, UserVolumeInfo volumeList, String dbVolumeKey, - String dbUserKey, long transactionLogIndex) { - // Update cache: Update user and volume cache. - omMetadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey), - new CacheValue<>(Optional.of(volumeList), transactionLogIndex)); - - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java deleted file mode 100644 index d1f1e8bfe4b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.io.IOException; -import java.util.Map; - -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; - -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetOwnerResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -/** - * Handle set owner request for volume. - */ -public class OMVolumeSetOwnerRequest extends OMVolumeRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeSetOwnerRequest.class); - - public OMVolumeSetOwnerRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - SetVolumePropertyRequest setVolumePropertyRequest = - getOmRequest().getSetVolumePropertyRequest(); - - Preconditions.checkNotNull(setVolumePropertyRequest); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - // In production this will never happen, this request will be called only - // when we have ownerName in setVolumePropertyRequest. 
- if (!setVolumePropertyRequest.hasOwnerName()) { - omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST) - .setSuccess(false); - return new OMVolumeSetOwnerResponse(null, null, null, null, - omResponse.build()); - } - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumVolumeUpdates(); - String volume = setVolumePropertyRequest.getVolumeName(); - String newOwner = setVolumePropertyRequest.getOwnerName(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - - Map auditMap = buildVolumeAuditMap(volume); - auditMap.put(OzoneConsts.OWNER, newOwner); - - boolean acquiredUserLocks = false; - boolean acquiredVolumeLock = false; - IOException exception = null; - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - String oldOwner = null; - OMClientResponse omClientResponse = null; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, null, null); - } - - - long maxUserVolumeCount = ozoneManager.getMaxUserVolumeCount(); - - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - - OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = null; - OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = null; - OmVolumeArgs omVolumeArgs = null; - - - - acquiredVolumeLock = omMetadataManager.getLock().acquireWriteLock( - VOLUME_LOCK, volume); - - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - - if (omVolumeArgs == null) { - LOG.debug("Changing volume ownership failed for user:{} volume:{}", - newOwner, volume); - throw new OMException("Volume " + volume + " is not found", - OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - oldOwner = omVolumeArgs.getOwnerName(); - - acquiredUserLocks = - omMetadataManager.getLock().acquireMultiUserLock(newOwner, oldOwner); - - oldOwnerVolumeList = - omMetadataManager.getUserTable().get(oldOwner); - - oldOwnerVolumeList = delVolumeFromOwnerList( - oldOwnerVolumeList, volume, oldOwner, transactionLogIndex); - - newOwnerVolumeList = omMetadataManager.getUserTable().get(newOwner); - newOwnerVolumeList = addVolumeToOwnerList( - newOwnerVolumeList, volume, newOwner, - maxUserVolumeCount, transactionLogIndex); - - // Set owner with new owner name. - omVolumeArgs.setOwnerName(newOwner); - omVolumeArgs.setUpdateID(transactionLogIndex); - - // Update cache. 
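
The two helper calls above rebuild the per-user volume lists kept in the user table: the volume is removed from the old owner's UserVolumeInfo, added to the new owner's list subject to the per-user volume limit, and the cache update just below then writes all three affected entries at the same transaction index. A toy version of that list bookkeeping in plain Java (illustrative; the removed code works on the UserVolumeInfo protobuf):

    import java.util.ArrayList;
    import java.util.List;

    // Toy sketch of the owner-list bookkeeping: remove the volume from the
    // old owner's list, add it to the new owner's list, and refuse the
    // transfer if the new owner would exceed the allowed volume count.
    final class OwnerListSketch {
      static List<String> removeVolume(List<String> owned, String volume) {
        List<String> copy = new ArrayList<>(owned);
        copy.remove(volume);
        return copy;
      }

      static List<String> addVolume(List<String> owned, String volume,
                                    long maxUserVolumeCount) {
        if (owned.size() >= maxUserVolumeCount) {
          throw new IllegalStateException("Too many volumes for user");
        }
        List<String> copy = new ArrayList<>(owned);
        copy.add(volume);
        return copy;
      }
    }
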
- omMetadataManager.getUserTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getUserKey(newOwner)), - new CacheValue<>(Optional.of(newOwnerVolumeList), - transactionLogIndex)); - omMetadataManager.getUserTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getUserKey(oldOwner)), - new CacheValue<>(Optional.of(oldOwnerVolumeList), - transactionLogIndex)); - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); - - omResponse.setSetVolumePropertyResponse( - SetVolumePropertyResponse.newBuilder().build()); - omClientResponse = new OMVolumeSetOwnerResponse(oldOwner, - oldOwnerVolumeList, newOwnerVolumeList, omVolumeArgs, - omResponse.build()); - - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMVolumeSetOwnerResponse(null, null, null, null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquiredUserLocks) { - omMetadataManager.getLock().releaseMultiUserLock(newOwner, oldOwner); - } - if (acquiredVolumeLock) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); - } - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, auditMap, - exception, userInfo)); - - // return response after releasing lock. - if (exception == null) { - LOG.debug("Successfully changed Owner of Volume {} from {} -> {}", volume, - oldOwner, newOwner); - } else { - LOG.error("Changing volume ownership failed for user:{} volume:{}", - newOwner, volume, exception); - omMetrics.incNumVolumeUpdateFails(); - } - return omClientResponse; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java deleted file mode 100644 index ef6d8ae0166..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.io.IOException; -import java.util.Map; - -import com.google.common.base.Optional; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeSetQuotaResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - - -/** - * Handles set Quota request for volume. - */ -public class OMVolumeSetQuotaRequest extends OMVolumeRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeSetQuotaRequest.class); - - public OMVolumeSetQuotaRequest(OMRequest omRequest) { - super(omRequest); - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - - SetVolumePropertyRequest setVolumePropertyRequest = - getOmRequest().getSetVolumePropertyRequest(); - - Preconditions.checkNotNull(setVolumePropertyRequest); - - OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.SetVolumeProperty).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - - - - // In production this will never happen, this request will be called only - // when we have quota in bytes is set in setVolumePropertyRequest. 
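
As the comment above notes, a replicated SetVolumeProperty request is only expected here when the quota field is present, so the handler fails fast with an INVALID_REQUEST style response before taking any lock, as the guard just below does. Reduced to a plain-Java shape (names illustrative):

    import java.util.function.Supplier;

    // Illustrative fail-fast guard: if the replicated request is missing the
    // field it is supposed to update, return an error response immediately,
    // before any lock or cache access.
    final class FailFastSketch {
      static <R> R validateOrFail(boolean hasRequiredField,
                                  Supplier<R> errorResponse,
                                  Supplier<R> handler) {
        return hasRequiredField ? handler.get() : errorResponse.get();
      }
    }
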
- if (!setVolumePropertyRequest.hasQuotaInBytes()) { - omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST) - .setSuccess(false); - return new OMVolumeSetQuotaResponse(null, - omResponse.build()); - } - - String volume = setVolumePropertyRequest.getVolumeName(); - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumVolumeUpdates(); - - AuditLogger auditLogger = ozoneManager.getAuditLogger(); - OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); - Map auditMap = buildVolumeAuditMap(volume); - auditMap.put(OzoneConsts.QUOTA, - String.valueOf(setVolumePropertyRequest.getQuotaInBytes())); - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - IOException exception = null; - boolean acquireVolumeLock = false; - OMClientResponse omClientResponse = null; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE, volume, - null, null); - } - - OmVolumeArgs omVolumeArgs = null; - - acquireVolumeLock = omMetadataManager.getLock().acquireWriteLock( - VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - - if (omVolumeArgs == null) { - LOG.debug("volume:{} does not exist", volume); - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - omVolumeArgs.setQuotaInBytes(setVolumePropertyRequest.getQuotaInBytes()); - - // update cache. - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); - - omResponse.setSetVolumePropertyResponse( - SetVolumePropertyResponse.newBuilder().build()); - omClientResponse = new OMVolumeSetQuotaResponse(omVolumeArgs, - omResponse.build()); - } catch (IOException ex) { - exception = ex; - omClientResponse = new OMVolumeSetQuotaResponse(null, - createErrorOMResponse(omResponse, exception)); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (acquireVolumeLock) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); - } - } - - // Performing audit logging outside of the lock. - auditLog(auditLogger, buildAuditMessage(OMAction.SET_QUOTA, auditMap, - exception, userInfo)); - - // return response after releasing lock. - if (exception == null) { - LOG.debug("Changing volume quota is successfully completed for volume: " + - "{} quota:{}", volume, setVolumePropertyRequest.getQuotaInBytes()); - } else { - omMetrics.incNumVolumeUpdateFails(); - LOG.error("Changing volume quota failed for volume:{} quota:{}", volume, - setVolumePropertyRequest.getQuotaInBytes(), exception); - } - return omClientResponse; - } - - -} - - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java deleted file mode 100644 index 6b4dc75e820..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume.acl; - -import com.google.common.base.Optional; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -import java.io.IOException; -import java.util.List; - -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; - -/** - * Base class for OMVolumeAcl Request. - */ -public abstract class OMVolumeAclRequest extends OMClientRequest { - - private CheckedBiFunction, OmVolumeArgs, IOException> - omVolumeAclOp; - - public OMVolumeAclRequest(OzoneManagerProtocolProtos.OMRequest omRequest, - CheckedBiFunction, OmVolumeArgs, IOException> aclOp) { - super(omRequest); - omVolumeAclOp = aclOp; - } - - @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - long transactionLogIndex, - OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { - // protobuf guarantees volume and acls are non-null. - String volume = getVolumeName(); - List ozoneAcls = getAcls(); - - OMMetrics omMetrics = ozoneManager.getMetrics(); - omMetrics.incNumVolumeUpdates(); - OmVolumeArgs omVolumeArgs = null; - - OMResponse.Builder omResponse = onInit(); - OMClientResponse omClientResponse = null; - IOException exception = null; - - OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); - boolean lockAcquired = false; - try { - // check Acl - if (ozoneManager.getAclsEnabled()) { - checkAcls(ozoneManager, OzoneObj.ResourceType.VOLUME, - OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, null, null); - } - lockAcquired = - omMetadataManager.getLock().acquireWriteLock(VOLUME_LOCK, volume); - String dbVolumeKey = omMetadataManager.getVolumeKey(volume); - omVolumeArgs = omMetadataManager.getVolumeTable().get(dbVolumeKey); - if (omVolumeArgs == null) { - throw new OMException(OMException.ResultCodes.VOLUME_NOT_FOUND); - } - - // result is false upon add existing acl or remove non-existing acl - boolean result = true; - try { - omVolumeAclOp.apply(ozoneAcls, omVolumeArgs); - } catch (OMException ex) { - result = false; - } - - if (result) { - // update cache. 
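
The aclOp applied above is the piece the add/remove/set subclasses plug in: a bi-function that may throw, applied to the volume's ACL state under the volume write lock, with the result flag turning false when the operation reports a no-op (adding an existing ACL or removing a missing one). A small sketch of that shape, using Set<String> as a stand-in for the real ACL list (the removed code used org.apache.hadoop.hdds.scm.storage.CheckedBiFunction and OmVolumeArgs):

    import java.io.IOException;
    import java.util.List;
    import java.util.Set;

    // Sketch of the pluggable ACL operation supplied by the subclasses.
    final class VolumeAclOpSketch {
      @FunctionalInterface
      interface CheckedBiFunction<A, B, E extends Exception> {
        void apply(A a, B b) throws E;
      }

      // Add and remove operations over a stand-in ACL set.
      static final CheckedBiFunction<List<String>, Set<String>, IOException> ADD =
          (acls, volumeAcls) -> volumeAcls.add(acls.get(0));

      static final CheckedBiFunction<List<String>, Set<String>, IOException> REMOVE =
          (acls, volumeAcls) -> volumeAcls.remove(acls.get(0));
    }
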
- omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), transactionLogIndex)); - } - - omClientResponse = onSuccess(omResponse, omVolumeArgs, result); - } catch (IOException ex) { - exception = ex; - omMetrics.incNumVolumeUpdateFails(); - omClientResponse = onFailure(omResponse, ex); - } finally { - if (omClientResponse != null) { - omClientResponse.setFlushFuture( - ozoneManagerDoubleBufferHelper.add(omClientResponse, - transactionLogIndex)); - } - if (lockAcquired) { - omMetadataManager.getLock().releaseWriteLock(VOLUME_LOCK, volume); - } - } - - onComplete(exception); - - return omClientResponse; - } - - /** - * Get the Acls from the request. - * @return List of OzoneAcls, for add/remove it is a single element list - * for set it can be non-single element list. - */ - abstract List getAcls(); - - /** - * Get the volume name from the request. - * @return volume name - * This is needed for case where volume does not exist and the omVolumeArgs is - * null. - */ - abstract String getVolumeName(); - - // TODO: Finer grain metrics can be moved to these callbacks. They can also - // be abstracted into separate interfaces in future. - /** - * Get the initial om response builder with lock. - * @return om response builder. - */ - abstract OMResponse.Builder onInit(); - - /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omVolumeArgs - * @param result - * @return OMClientResponse - */ - abstract OMClientResponse onSuccess( - OMResponse.Builder omResponse, OmVolumeArgs omVolumeArgs, boolean result); - - /** - * Get the om client response on failure case with lock. - * @param omResponse - * @param ex - * @return OMClientResponse - */ - abstract OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException ex); - - /** - * Completion hook for final processing before return without lock. - * Usually used for logging without lock. - * @param ex - */ - abstract void onComplete(IOException ex); -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java deleted file mode 100644 index 6bb8564ccf1..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.request.volume.acl; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -/** - * Handles volume add acl request. - */ -public class OMVolumeAddAclRequest extends OMVolumeAclRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeAddAclRequest.class); - - private static CheckedBiFunction, - OmVolumeArgs, IOException> volumeAddAclOp; - - static { - volumeAddAclOp = (acls, volArgs) -> volArgs.addAcl(acls.get(0)); - } - - private List ozoneAcls; - private String volumeName; - - public OMVolumeAddAclRequest(OMRequest omRequest) { - super(omRequest, volumeAddAclOp); - OzoneManagerProtocolProtos.AddAclRequest addAclRequest = - getOmRequest().getAddAclRequest(); - Preconditions.checkNotNull(addAclRequest); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); - volumeName = addAclRequest.getObj().getPath().substring(1); - } - - @Override - public List getAcls() { - return ozoneAcls; - } - - @Override - public String getVolumeName() { - return volumeName; - } - - private OzoneAcl getAcl() { - return ozoneAcls.get(0); - } - - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.AddAcl) - .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean result){ - omResponse.setAddAclResponse(OzoneManagerProtocolProtos.AddAclResponse - .newBuilder().setResponse(result).build()); - return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException ex) { - return new OMVolumeAclOpResponse(null, - createErrorOMResponse(omResponse, ex)); - } - - @Override - void onComplete(IOException ex) { - if (ex == null) { - LOG.debug("Add acl: {} to volume: {} success!", - getAcl(), getVolumeName()); - } else { - LOG.error("Add acl {} to volume {} failed!", - getAcl(), getVolumeName(), ex); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java deleted file mode 100644 index 188e20518f4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ /dev/null @@ -1,109 
+0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.request.volume.acl; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.List; - -/** - * Handles volume remove acl request. - */ -public class OMVolumeRemoveAclRequest extends OMVolumeAclRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeRemoveAclRequest.class); - - private static CheckedBiFunction, - OmVolumeArgs, IOException> volumeRemoveAclOp; - - static { - volumeRemoveAclOp = (acls, volArgs) -> volArgs.removeAcl(acls.get(0)); - } - - private List ozoneAcls; - private String volumeName; - - public OMVolumeRemoveAclRequest(OMRequest omRequest) { - super(omRequest, volumeRemoveAclOp); - OzoneManagerProtocolProtos.RemoveAclRequest removeAclRequest = - getOmRequest().getRemoveAclRequest(); - Preconditions.checkNotNull(removeAclRequest); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(removeAclRequest.getAcl())); - volumeName = removeAclRequest.getObj().getPath().substring(1); - } - - @Override - public List getAcls() { - return ozoneAcls; - } - - @Override - public String getVolumeName() { - return volumeName; - } - - private OzoneAcl getAcl() { - return ozoneAcls.get(0); - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.RemoveAcl) - .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean result){ - omResponse.setRemoveAclResponse(OzoneManagerProtocolProtos.RemoveAclResponse - .newBuilder().setResponse(result).build()); - return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException ex) { - return new OMVolumeAclOpResponse(null, - createErrorOMResponse(omResponse, ex)); - } - - @Override - void onComplete(IOException ex) { - if (ex == null) { - LOG.debug("Remove acl: {} from volume: {} success!", - getAcl(), getVolumeName()); - } else { - LOG.error("Remove acl {} from volume {} failed!", - getAcl(), getVolumeName(), ex); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java deleted file mode 100644 index a5abbcca012..00000000000 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.request.volume.acl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.scm.storage.CheckedBiFunction; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeAclOpResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Handles volume set acl request. - */ -public class OMVolumeSetAclRequest extends OMVolumeAclRequest { - private static final Logger LOG = - LoggerFactory.getLogger(OMVolumeSetAclRequest.class); - - private static CheckedBiFunction, - OmVolumeArgs, IOException> volumeSetAclOp; - - static { - volumeSetAclOp = (acls, volArgs) -> volArgs.setAcls(acls); - } - - private List ozoneAcls; - private String volumeName; - - public OMVolumeSetAclRequest(OMRequest omRequest) { - super(omRequest, volumeSetAclOp); - OzoneManagerProtocolProtos.SetAclRequest setAclRequest = - getOmRequest().getSetAclRequest(); - Preconditions.checkNotNull(setAclRequest); - ozoneAcls = new ArrayList<>(); - setAclRequest.getAclList().forEach(oai -> - ozoneAcls.add(OzoneAcl.fromProtobuf(oai))); - volumeName = setAclRequest.getObj().getPath().substring(1); - } - - @Override - public List getAcls() { - return ozoneAcls; - } - - @Override - public String getVolumeName() { - return volumeName; - } - - @Override - OMResponse.Builder onInit() { - return OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.RemoveAcl) - .setStatus(OzoneManagerProtocolProtos.Status.OK).setSuccess(true); - } - - @Override - OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean result){ - omResponse.setSetAclResponse(OzoneManagerProtocolProtos.SetAclResponse - .newBuilder().setResponse(result).build()); - return new OMVolumeAclOpResponse(omVolumeArgs, omResponse.build()); - } - - @Override - OMClientResponse onFailure(OMResponse.Builder omResponse, - IOException ex) { - return new OMVolumeAclOpResponse(null, - createErrorOMResponse(omResponse, ex)); - } - - @Override - void onComplete(IOException ex) { - if (ex == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Set acls: {} to volume: {} success!", - getAcls(), getVolumeName()); - } - } else { - LOG.error("Set acls {} to volume {} failed!", - getAcls(), getVolumeName(), ex); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java deleted file mode 100644 index 79c4afd1449..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache 
Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to volume acl requests and responses. - */ -package org.apache.hadoop.ozone.om.request.volume.acl; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java deleted file mode 100644 index 708f7083351..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to volume requests. - */ -package org.apache.hadoop.ozone.om.request.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java deleted file mode 100644 index 92d75eb2e51..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/OMClientResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
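OMClientResponse ties each protobuf OMResponse to a deferred database update (addToDBBatch) plus a flush future that is completed only after the batched writes have been persisted. A rough, self-contained sketch of that flow with toy map-backed tables; the real code uses OMMetadataManager, RocksDB batch operations, and a separate double-buffer flush thread, none of which appear here.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;

    final class ResponseFlushSketch {

      // Toy batch: buffered writes applied together to a backing map.
      static final class Batch {
        final Map<String, String> writes = new HashMap<>();
        void put(String key, String value) { writes.put(key, value); }
        void commitTo(Map<String, String> db) { db.putAll(writes); }
      }

      // Mirrors the shape of OMClientResponse: carry a result, stage DB changes
      // into a batch, expose a future completed after the flush.
      abstract static class ClientResponse {
        private final CompletableFuture<Void> flushFuture = new CompletableFuture<>();
        abstract void addToDBBatch(Batch batch);
        CompletableFuture<Void> getFlushFuture() { return flushFuture; }
        void markFlushed() { flushFuture.complete(null); }
      }

      static final class BucketCreateResponse extends ClientResponse {
        private final String bucketKey, bucketInfo;
        BucketCreateResponse(String bucketKey, String bucketInfo) {
          this.bucketKey = bucketKey;
          this.bucketInfo = bucketInfo;
        }
        @Override void addToDBBatch(Batch batch) { batch.put(bucketKey, bucketInfo); }
      }

      public static void main(String[] args) {
        Map<String, String> bucketTable = new HashMap<>();
        ClientResponse response = new BucketCreateResponse("/vol1/bucket1", "{...}");

        // The flush thread would do this after the request is applied:
        Batch batch = new Batch();
        response.addToDBBatch(batch);
        batch.commitTo(bucketTable);
        response.markFlushed();

        System.out.println(response.getFlushFuture().isDone()); // true
        System.out.println(bucketTable);                        // {/vol1/bucket1={...}}
      }
    }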
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response; - -import java.io.IOException; -import java.util.concurrent.CompletableFuture; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Interface for OM Responses, each OM response should implement this interface. - */ -public abstract class OMClientResponse { - - private OMResponse omResponse; - private CompletableFuture flushFuture = null; - - public OMClientResponse(OMResponse omResponse) { - Preconditions.checkNotNull(omResponse); - this.omResponse = omResponse; - } - - /** - * Implement logic to add the response to batch. - * @param omMetadataManager - * @param batchOperation - * @throws IOException - */ - public abstract void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException; - - /** - * Return OMResponse. - * @return OMResponse - */ - public OMResponse getOMResponse() { - return omResponse; - } - - public void setFlushFuture(CompletableFuture flushFuture) { - this.flushFuture = flushFuture; - } - - public CompletableFuture getFlushFuture() { - return flushFuture; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java deleted file mode 100644 index 3f800d395a2..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketCreateResponse.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
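Like most responses in this patch, OMBucketCreateResponse stages its write only when the response status is OK, so a failed request never reaches the bucket table. A tiny sketch of that guard; the key format shown stands in for getBucketKey() and is illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    final class ConditionalBatchWriteSketch {
      enum Status { OK, VOLUME_NOT_FOUND, BUCKET_ALREADY_EXISTS }

      // Only stage the write when the request actually succeeded.
      static void addToDBBatch(Status status, Map<String, String> batch,
          String volumeName, String bucketName, String bucketInfoJson) {
        if (status == Status.OK) {
          String dbBucketKey = "/" + volumeName + "/" + bucketName; // like getBucketKey()
          batch.put(dbBucketKey, bucketInfoJson);
        }
      }

      public static void main(String[] args) {
        Map<String, String> batch = new HashMap<>();
        addToDBBatch(Status.BUCKET_ALREADY_EXISTS, batch, "vol1", "b1", "{}");
        System.out.println(batch.isEmpty()); // true: nothing staged on failure
        addToDBBatch(Status.OK, batch, "vol1", "b1", "{}");
        System.out.println(batch);           // {/vol1/b1={}}
      }
    }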
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for CreateBucket request. - */ -public final class OMBucketCreateResponse extends OMClientResponse { - - private final OmBucketInfo omBucketInfo; - - public OMBucketCreateResponse(@Nullable OmBucketInfo omBucketInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omBucketInfo = omBucketInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String dbBucketKey = - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - dbBucketKey, omBucketInfo); - } - } - - @Nullable - public OmBucketInfo getOmBucketInfo() { - return omBucketInfo; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java deleted file mode 100644 index 0e0b3982bbf..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketDeleteResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; - -/** - * Response for DeleteBucket request. - */ -public final class OMBucketDeleteResponse extends OMClientResponse { - - private String volumeName; - private String bucketName; - - public OMBucketDeleteResponse( - String volumeName, String bucketName, - @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { - super(omResponse); - this.volumeName = volumeName; - this.bucketName = bucketName; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String dbBucketKey = - omMetadataManager.getBucketKey(volumeName, bucketName); - omMetadataManager.getBucketTable().deleteWithBatch(batchOperation, - dbBucketKey); - } - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java deleted file mode 100644 index f9ce2046754..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetPropertyResponse.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for SetBucketProperty request. - */ -public class OMBucketSetPropertyResponse extends OMClientResponse { - private OmBucketInfo omBucketInfo; - - public OMBucketSetPropertyResponse(@Nullable OmBucketInfo omBucketInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omBucketInfo = omBucketInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String dbBucketKey = - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - dbBucketKey, omBucketInfo); - } - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java deleted file mode 100644 index b534a56f370..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/OMBucketAclResponse.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.bucket.acl; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Response for Bucket acl request. - */ -public class OMBucketAclResponse extends OMClientResponse { - - private final OmBucketInfo omBucketInfo; - - public OMBucketAclResponse(@Nullable OmBucketInfo omBucketInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omBucketInfo = omBucketInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // If response status is OK and success is true, add to DB batch. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK && - getOMResponse().getSuccess()) { - String dbBucketKey = - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - dbBucketKey, omBucketInfo); - } - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java deleted file mode 100644 index dd262727e1f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/acl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains classes for handling bucket acl responses. - */ -package org.apache.hadoop.ozone.om.response.bucket.acl; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java deleted file mode 100644 index e70c1c33f7c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to bucket responses. - */ -package org.apache.hadoop.ozone.om.response.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java deleted file mode 100644 index 2690dda1b3b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponse.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
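In OMDirectoryCreateResponse a null dirKeyInfo under an OK status is not an error: it signals that the directory already existed and nothing should be written. A small sketch of that convention, using simplified types and hypothetical names.

    import java.util.HashMap;
    import java.util.Map;

    final class DirectoryCreateSketch {
      enum Status { OK, FAILED }

      // Null keyInfo with an OK status means "already existed, nothing to write".
      static void addToDBBatch(Status status, String dirKey, String dirKeyInfo,
          Map<String, String> keyTableBatch) {
        if (status == Status.OK) {
          if (dirKeyInfo != null) {
            keyTableBatch.put(dirKey, dirKeyInfo);
          } else {
            System.out.println("Status OK but directory already existed: " + dirKey);
          }
        }
      }

      public static void main(String[] args) {
        Map<String, String> batch = new HashMap<>();
        addToDBBatch(Status.OK, "/vol1/b1/dir1/", "dirInfo", batch);
        addToDBBatch(Status.OK, "/vol1/b1/dir1/", null, batch); // second create is a no-op
        System.out.println(batch.size()); // 1
      }
    }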
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.file; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; -import java.io.IOException; - -/** - * Response for create directory request. - */ -public class OMDirectoryCreateResponse extends OMClientResponse { - - public static final Logger LOG = - LoggerFactory.getLogger(OMDirectoryCreateResponse.class); - private OmKeyInfo dirKeyInfo; - - public OMDirectoryCreateResponse(@Nullable OmKeyInfo dirKeyInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.dirKeyInfo = dirKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - if (dirKeyInfo != null) { - String dirKey = - omMetadataManager.getOzoneKey(dirKeyInfo.getVolumeName(), - dirKeyInfo.getBucketName(), dirKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, dirKey, - dirKeyInfo); - } else { - // When directory already exists, we don't add it to cache. And it is - // not an error, in this case dirKeyInfo will be null. - LOG.debug("Response Status is OK, dirKeyInfo is null in " + - "OMDirectoryCreateResponse"); - } - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java deleted file mode 100644 index 8da7313865b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponse.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.file; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - - - -/** - * Response for crate file request. - */ -public class OMFileCreateResponse extends OMKeyCreateResponse { - - public OMFileCreateResponse(@Nullable OmKeyInfo omKeyInfo, - long openKeySessionID, @Nonnull OMResponse omResponse) { - super(omKeyInfo, openKeySessionID, omResponse); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java deleted file mode 100644 index 135eca9d3cb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to file responses. - */ -package org.apache.hadoop.ozone.om.response.file; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java deleted file mode 100644 index c35fa6c89dd..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for AllocateBlock request. - */ -public class OMAllocateBlockResponse extends OMClientResponse { - - private final OmKeyInfo omKeyInfo; - private final long clientID; - - public OMAllocateBlockResponse(@Nullable OmKeyInfo omKeyInfo, - long clientID, @Nonnull OMResponse omResponse) { - super(omResponse); - this.omKeyInfo = omKeyInfo; - this.clientID = clientID; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), clientID); - omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey, - omKeyInfo); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java deleted file mode 100644 index 0eb97f326a2..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
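OMKeyCommitResponse pairs with the create and allocate-block responses to complete the open-key lifecycle: key creation writes the pending key into the open-key table under a session-qualified key, and commit deletes that entry while publishing the final entry in the key table within the same batch. A self-contained sketch of that two-table move with map stand-ins; the key formats are illustrative, not the exact strings produced by getOpenKey()/getOzoneKey().

    import java.util.HashMap;
    import java.util.Map;

    final class KeyCommitSketch {

      // Stand-ins for openKeyTable and keyTable (really RocksDB column families).
      static final Map<String, String> openKeyTable = new HashMap<>();
      static final Map<String, String> keyTable = new HashMap<>();

      static String openKey(String vol, String bucket, String key, long sessionId) {
        return "/" + vol + "/" + bucket + "/" + key + "/" + sessionId;
      }

      static String ozoneKey(String vol, String bucket, String key) {
        return "/" + vol + "/" + bucket + "/" + key;
      }

      public static void main(String[] args) {
        long sessionId = 42L;
        // CreateKey: the key is only visible in the open-key table.
        openKeyTable.put(openKey("vol1", "b1", "k1", sessionId), "keyInfo-v0");

        // CommitKey: drop the open entry, publish the final entry.
        String committedInfo = openKeyTable.remove(openKey("vol1", "b1", "k1", sessionId));
        keyTable.put(ozoneKey("vol1", "b1", "k1"), committedInfo);

        System.out.println(openKeyTable); // {}
        System.out.println(keyTable);     // {/vol1/b1/k1=keyInfo-v0}
      }
    }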
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for CommitKey request. - */ -public class OMKeyCommitResponse extends OMClientResponse { - - private OmKeyInfo omKeyInfo; - private long openKeySessionID; - - public OMKeyCommitResponse(@Nullable OmKeyInfo omKeyInfo, - long openKeySessionID, - @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { - super(omResponse); - this.omKeyInfo = omKeyInfo; - this.openKeySessionID = openKeySessionID; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String volumeName = omKeyInfo.getVolumeName(); - String bucketName = omKeyInfo.getBucketName(); - String keyName = omKeyInfo.getKeyName(); - String openKey = omMetadataManager.getOpenKey(volumeName, - bucketName, keyName, openKeySessionID); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Delete from open key table and add entry to key table. - omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, - openKey); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKey, - omKeyInfo); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java deleted file mode 100644 index fde646cdbfa..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Response for CreateKey request. - */ -public class OMKeyCreateResponse extends OMClientResponse { - - private OmKeyInfo omKeyInfo; - private long openKeySessionID; - - public OMKeyCreateResponse(@Nullable OmKeyInfo omKeyInfo, - long openKeySessionID, @Nonnull OMResponse omResponse) { - super(omResponse); - this.omKeyInfo = omKeyInfo; - this.openKeySessionID = openKeySessionID; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String openKey = omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), - openKeySessionID); - omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, - openKey, omKeyInfo); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java deleted file mode 100644 index 96aedd18106..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for DeleteKey request. - */ -public class OMKeyDeleteResponse extends OMClientResponse { - private OmKeyInfo omKeyInfo; - - public OMKeyDeleteResponse(@Nullable OmKeyInfo omKeyInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omKeyInfo = omKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - ozoneKey); - - // If Key is not empty add this to delete table. - if (!isKeyEmpty(omKeyInfo)) { - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would be - // lost. To avoid this, first check if a key with same name exists. - // deletedTable in OM Metadata stores . - // The RepeatedOmKeyInfo is the structure that allows us to store a - // list of OmKeyInfo that can be tied to same key name. For a keyName - // if RepeatedOMKeyInfo structure is null, we create a new instance, - // if it is not null, then we simply add to the list and store this - // instance in deletedTable. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); - } - } - } - - /** - * Check if the key is empty or not. Key will be empty if it does not have - * blocks. - * - * @param keyInfo - * @return if empty true, else false. 
- */ - private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) { - if (keyInfo == null) { - return true; - } - for (OmKeyLocationInfoGroup keyLocationList : keyInfo - .getKeyLocationVersions()) { - if (keyLocationList.getLocationList().size() != 0) { - return false; - } - } - return true; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java deleted file mode 100644 index 513b94d29ac..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
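The comment in OMKeyDeleteResponse.addToDBBatch explains why the deleted table stores a RepeatedOmKeyInfo, a list of key infos per key name: a later delete of the same key name must append to, rather than overwrite, the earlier entry. A sketch of that read, append, and write-back step, using a plain list as a stand-in for RepeatedOmKeyInfo.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class DeletedTableSketch {

      // Stand-in for deletedTable: key name -> list of deleted key versions
      // (the role RepeatedOmKeyInfo plays in the real code).
      static final Map<String, List<String>> deletedTable = new HashMap<>();

      static void markDeleted(String ozoneKey, String keyInfo) {
        // Read any existing entry first so earlier deletes are not lost,
        // then append and write the combined value back.
        List<String> repeated = deletedTable.get(ozoneKey);
        if (repeated == null) {
          repeated = new ArrayList<>();
        }
        repeated.add(keyInfo);
        deletedTable.put(ozoneKey, repeated);
      }

      public static void main(String[] args) {
        markDeleted("/vol1/b1/k1", "keyInfo-gen1");
        markDeleted("/vol1/b1/k1", "keyInfo-gen2"); // same name deleted again later
        System.out.println(deletedTable.get("/vol1/b1/k1").size()); // 2
      }
    }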
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import java.util.List; -import javax.annotation.Nonnull; - -/** - * Response for {@link OMKeyPurgeRequest} request. - */ -public class OMKeyPurgeResponse extends OMClientResponse { - - private List<String> purgeKeyList; - - public OMKeyPurgeResponse(List<String> keyList, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.purgeKeyList = keyList; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - for (String key : purgeKeyList) { - omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, - key); - } - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java deleted file mode 100644 index 0e9ae17577c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for RenameKey request. - */ -public class OMKeyRenameResponse extends OMClientResponse { - - private final OmKeyInfo renameKeyInfo; - private final String toKeyName; - private final String fromKeyName; - - public OMKeyRenameResponse(@Nullable OmKeyInfo renameKeyInfo, - String toKeyName, String fromKeyName, @Nonnull OMResponse omResponse) { - super(omResponse); - this.renameKeyInfo = renameKeyInfo; - this.toKeyName = toKeyName; - this.fromKeyName = fromKeyName; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - - // If both from and toKeyName are equal do nothing - if (!toKeyName.equals(fromKeyName)) { - String volumeName = renameKeyInfo.getVolumeName(); - String bucketName = renameKeyInfo.getBucketName(); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, fromKeyName)); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName), - renameKeyInfo); - } - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java deleted file mode 100644 index 8c8bc974957..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/OMKeyAclResponse.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
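OMKeyRenameResponse applies a rename as a delete of the old key name plus a put of the key info under the new name, and skips both when source and destination names are equal. A minimal sketch with a map standing in for the key table; reusing the same value for the new entry is a simplification of the real renameKeyInfo handling.

    import java.util.HashMap;
    import java.util.Map;

    final class KeyRenameSketch {

      static void applyRename(Map<String, String> keyTable,
          String fromKey, String toKey) {
        // If both names are equal there is nothing to do.
        if (toKey.equals(fromKey)) {
          return;
        }
        String keyInfo = keyTable.remove(fromKey); // deleteWithBatch on the old name
        keyTable.put(toKey, keyInfo);              // putWithBatch under the new name
      }

      public static void main(String[] args) {
        Map<String, String> keyTable = new HashMap<>();
        keyTable.put("/vol1/b1/old.txt", "keyInfo");
        applyRename(keyTable, "/vol1/b1/old.txt", "/vol1/b1/new.txt");
        System.out.println(keyTable.containsKey("/vol1/b1/old.txt")); // false
        System.out.println(keyTable.containsKey("/vol1/b1/new.txt")); // true
      }
    }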
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key.acl; - -import java.io.IOException; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Response for Bucket acl request. - */ -public class OMKeyAclResponse extends OMClientResponse { - - private final OmKeyInfo omKeyInfo; - - public OMKeyAclResponse(@Nullable OmKeyInfo omKeyInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omKeyInfo = omKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // If response status is OK and success is true, add to DB batch. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK && - getOMResponse().getSuccess()) { - String dbKey = - omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - dbKey, omKeyInfo); - } - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java deleted file mode 100644 index 6a172316ef1..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to bucket acl responses. 
- */ -package org.apache.hadoop.ozone.om.response.key.acl; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java deleted file mode 100644 index 1fa02da4098..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/OMPrefixAclResponse.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key.acl.prefix; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Response for Prefix Acl request. - */ -public class OMPrefixAclResponse extends OMClientResponse { - private final OmPrefixInfo prefixInfo; - - public OMPrefixAclResponse(@Nullable OmPrefixInfo omPrefixInfo, - @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { - super(omResponse); - this.prefixInfo = omPrefixInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // If response status is OK and success is true, add to DB batch. - if (getOMResponse().getSuccess()) { - if ((getOMResponse().hasAddAclResponse() - && getOMResponse().getAddAclResponse().getResponse()) || - (getOMResponse().hasSetAclResponse() - && getOMResponse().getSetAclResponse().getResponse())) { - omMetadataManager.getPrefixTable().putWithBatch(batchOperation, - prefixInfo.getName(), prefixInfo); - } else if ((getOMResponse().hasRemoveAclResponse() - && getOMResponse().getRemoveAclResponse().getResponse())) { - if (prefixInfo.getAcls().size() == 0) { - // if acl list size is zero delete. - omMetadataManager.getPrefixTable().deleteWithBatch(batchOperation, - prefixInfo.getName()); - } else { - omMetadataManager.getPrefixTable().putWithBatch(batchOperation, - prefixInfo.getName(), prefixInfo); - } - } - } - } - -} - - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java deleted file mode 100644 index 4b53e963d24..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to prefix acl response. 
- */ -package org.apache.hadoop.ozone.om.response.key.acl.prefix; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java deleted file mode 100644 index 2097d225f19..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to key responses. - */ -package org.apache.hadoop.ozone.om.response.key; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java deleted file mode 100644 index d66cac7c021..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response; - - -/** - * This package contains classes for the OM Responses. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java deleted file mode 100644 index f91c205991f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.bucket; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -import com.google.common.base.Preconditions; -import com.google.common.annotations.VisibleForTesting; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Response for S3Bucket create request. - */ -public class S3BucketCreateResponse extends OMClientResponse { - - private OMVolumeCreateResponse omVolumeCreateResponse; - private OMBucketCreateResponse omBucketCreateResponse; - private String s3Bucket; - private String s3Mapping; - - public S3BucketCreateResponse( - @Nullable OMVolumeCreateResponse omVolumeCreateResponse, - @Nullable OMBucketCreateResponse omBucketCreateResponse, - @Nullable String s3BucketName, - @Nullable String s3Mapping, @Nonnull OMResponse omResponse) { - super(omResponse); - this.omVolumeCreateResponse = omVolumeCreateResponse; - this.omBucketCreateResponse = omBucketCreateResponse; - this.s3Bucket = s3BucketName; - this.s3Mapping = s3Mapping; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - if (omVolumeCreateResponse != null) { - omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - } - - Preconditions.checkState(omBucketCreateResponse != null); - omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - omMetadataManager.getS3Table().putWithBatch(batchOperation, s3Bucket, - s3Mapping); - } - } - - @VisibleForTesting - public String getS3Mapping() { - return s3Mapping; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java deleted file mode 100644 index 979318d16f6..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.bucket; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Response for S3Bucket Delete request. - */ -public class S3BucketDeleteResponse extends OMClientResponse { - - private String s3BucketName; - private String volumeName; - public S3BucketDeleteResponse(@Nullable String s3BucketName, - @Nullable String volumeName, @Nonnull OMResponse omResponse) { - super(omResponse); - this.s3BucketName = s3BucketName; - this.volumeName = volumeName; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - omMetadataManager.getBucketTable().deleteWithBatch(batchOperation, - omMetadataManager.getBucketKey(volumeName, s3BucketName)); - omMetadataManager.getS3Table().deleteWithBatch(batchOperation, - s3BucketName); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java deleted file mode 100644 index f484ecc9067..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains classes related to s3 bucket responses. 
- */ -package org.apache.hadoop.ozone.om.response.s3.bucket; - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java deleted file mode 100644 index a63edd8fcc7..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3InitiateMultipartUploadResponse.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Response for S3 Initiate Multipart Upload request. - */ -public class S3InitiateMultipartUploadResponse extends OMClientResponse { - - private OmMultipartKeyInfo omMultipartKeyInfo; - private OmKeyInfo omKeyInfo; - - public S3InitiateMultipartUploadResponse( - @Nullable OmMultipartKeyInfo omMultipartKeyInfo, - @Nullable OmKeyInfo omKeyInfo, - @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { - super(omResponse); - this.omMultipartKeyInfo = omMultipartKeyInfo; - this.omKeyInfo = omKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. 
- if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - - String multipartKey = - omMetadataManager.getMultipartKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), - omMultipartKeyInfo.getUploadID()); - - omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, - multipartKey, omKeyInfo); - omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation, - multipartKey, omMultipartKeyInfo); - } - } - - @VisibleForTesting - public OmMultipartKeyInfo getOmMultipartKeyInfo() { - return omMultipartKeyInfo; - } - - @VisibleForTesting - public OmKeyInfo getOmKeyInfo() { - return omKeyInfo; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java deleted file mode 100644 index a9a4024fd3c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .PartKeyInfo; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import java.util.Map; -import java.util.TreeMap; -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for Multipart Abort Request. - */ -public class S3MultipartUploadAbortResponse extends OMClientResponse { - - private String multipartKey; - private OmMultipartKeyInfo omMultipartKeyInfo; - - public S3MultipartUploadAbortResponse(String multipartKey, - @Nullable OmMultipartKeyInfo omMultipartKeyInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.multipartKey = multipartKey; - this.omMultipartKeyInfo = omMultipartKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - - // Delete from openKey table and multipart info table. - omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, - multipartKey); - omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation, - multipartKey); - - // Move all the parts to delete table - TreeMap<Integer, PartKeyInfo> partKeyInfoMap = - omMultipartKeyInfo.getPartKeyInfoMap(); - for (Map.Entry<Integer, PartKeyInfo> partKeyInfoEntry : - partKeyInfoMap.entrySet()) { - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - OmKeyInfo currentKeyPartInfo = - OmKeyInfo.getFromProtobuf(partKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - currentKeyPartInfo, repeatedOmKeyInfo); - - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - partKeyInfo.getPartName(), - repeatedOmKeyInfo); - } - - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java deleted file mode 100644 index fef3698534e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .Status.NO_SUCH_MULTIPART_UPLOAD_ERROR; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .Status.OK; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for S3MultipartUploadCommitPart request. - */ -public class S3MultipartUploadCommitPartResponse extends OMClientResponse { - - private String multipartKey; - private String openKey; - private OmKeyInfo deletePartKeyInfo; - private OmMultipartKeyInfo omMultipartKeyInfo; - private OzoneManagerProtocolProtos.PartKeyInfo oldMultipartKeyInfo; - - - public S3MultipartUploadCommitPartResponse(String multipartKey, - String openKey, @Nullable OmKeyInfo deletePartKeyInfo, - @Nullable OmMultipartKeyInfo omMultipartKeyInfo, - @Nullable OzoneManagerProtocolProtos.PartKeyInfo oldPartKeyInfo, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.multipartKey = multipartKey; - this.openKey = openKey; - this.deletePartKeyInfo = deletePartKeyInfo; - this.omMultipartKeyInfo = omMultipartKeyInfo; - this.oldMultipartKeyInfo = oldPartKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) { - // Means by the time we try to commit part, some one has aborted this - // multipart upload. So, delete this part information. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(openKey); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - deletePartKeyInfo, repeatedOmKeyInfo); - - - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - openKey, - repeatedOmKeyInfo); - } - - if (getOMResponse().getStatus() == OK) { - - // If we have old part info: - // Need to do 3 steps: - // 0. Strip GDPR related metadata from multipart info - // 1. add old part to delete table - // 2. Commit multipart info which has information about this new part. - // 3. delete this new part entry from open key table. - - // This means for this multipart upload part upload, we have an old - // part information, so delete it. 
- if (oldMultipartKeyInfo != null) { - OmKeyInfo partKey = - OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable() - .get(oldMultipartKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKey, - repeatedOmKeyInfo); - - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - oldMultipartKeyInfo.getPartName(), - repeatedOmKeyInfo); - } - - omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation, - multipartKey, omMultipartKeyInfo); - - // This information has been added to multipartKeyInfo. So, we can - // safely delete part key info from open key table. - omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, - openKey); - } - } - - - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java deleted file mode 100644 index b0cc8b56bd3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nullable; -import javax.annotation.Nonnull; - -/** - * Response for Multipart Upload Complete request. - */ -public class S3MultipartUploadCompleteResponse extends OMClientResponse { - private String multipartKey; - private OmKeyInfo omKeyInfo; - - - public S3MultipartUploadCompleteResponse(@Nullable String multipartKey, - @Nullable OmKeyInfo omKeyInfo, @Nonnull OMResponse omResponse) { - super(omResponse); - this.multipartKey = multipartKey; - this.omKeyInfo = omKeyInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - omMetadataManager.getKeyTable().putWithBatch(batchOperation, - omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()), omKeyInfo); - omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, - multipartKey); - omMetadataManager.getMultipartInfoTable().deleteWithBatch(batchOperation, - multipartKey); - } - } -} - - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java deleted file mode 100644 index 2e1474de3e9..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Package contains classes related to S3 multipart upload responses. 
- */ -package org.apache.hadoop.ozone.om.response.s3.multipart; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java deleted file mode 100644 index 6467c72f492..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.security; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Response for GetS3Secret request. - */ -public class S3GetSecretResponse extends OMClientResponse { - - - private S3SecretValue s3SecretValue; - - public S3GetSecretResponse(@Nullable S3SecretValue s3SecretValue, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.s3SecretValue = s3SecretValue; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - if (s3SecretValue != null && - getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - omMetadataManager.getS3SecretTable().putWithBatch(batchOperation, - s3SecretValue.getKerberosID(), s3SecretValue); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java deleted file mode 100644 index d9024d1c85f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to S3 security responses. - */ -package org.apache.hadoop.ozone.om.request.s3.security; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java deleted file mode 100644 index 8f2632d4678..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMCancelDelegationTokenResponse.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.security; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Handle response for CancelDelegationToken request. - */ -public class OMCancelDelegationTokenResponse extends OMClientResponse { - - private OzoneTokenIdentifier ozoneTokenIdentifier; - - public OMCancelDelegationTokenResponse( - @Nullable OzoneTokenIdentifier ozoneTokenIdentifier, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.ozoneTokenIdentifier = ozoneTokenIdentifier; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - Table<OzoneTokenIdentifier, Long> table = omMetadataManager.getDelegationTokenTable(); - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - table.deleteWithBatch(batchOperation, ozoneTokenIdentifier); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java deleted file mode 100644 index 7f902d9b36a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMGetDelegationTokenResponse.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.security; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Handle response for GetDelegationToken request. - */ -public class OMGetDelegationTokenResponse extends OMClientResponse { - - private OzoneTokenIdentifier ozoneTokenIdentifier; - private long renewTime = -1L; - - public OMGetDelegationTokenResponse( - @Nullable OzoneTokenIdentifier ozoneTokenIdentifier, - long renewTime, @Nonnull OMResponse omResponse) { - super(omResponse); - this.ozoneTokenIdentifier = ozoneTokenIdentifier; - this.renewTime = renewTime; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - Table<OzoneTokenIdentifier, Long> table = omMetadataManager.getDelegationTokenTable(); - if (ozoneTokenIdentifier != null && - getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - table.putWithBatch(batchOperation, ozoneTokenIdentifier, renewTime); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java deleted file mode 100644 index f0f1cd3ebe1..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/OMRenewDelegationTokenResponse.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.security; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; - -import javax.annotation.Nonnull; -import javax.annotation.Nullable; -import java.io.IOException; - -/** - * Handle response for RenewDelegationToken request. - */ -public class OMRenewDelegationTokenResponse extends OMClientResponse { - - private OzoneTokenIdentifier ozoneTokenIdentifier; - private long renewTime = -1L; - - public OMRenewDelegationTokenResponse( - @Nullable OzoneTokenIdentifier ozoneTokenIdentifier, - long renewTime, @Nonnull OMResponse omResponse) { - super(omResponse); - this.ozoneTokenIdentifier = ozoneTokenIdentifier; - this.renewTime = renewTime; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - Table<OzoneTokenIdentifier, Long> table = omMetadataManager.getDelegationTokenTable(); - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - table.putWithBatch(batchOperation, ozoneTokenIdentifier, renewTime); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java deleted file mode 100644 index 014bc42b5ee..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/security/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains classes which handle security request responses. - */ -package org.apache.hadoop.ozone.om.response.security; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java deleted file mode 100644 index 2b797d9b7a7..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeAclOpResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; -import java.io.IOException; - -/** - * Response for om volume acl operation request. - */ -public class OMVolumeAclOpResponse extends OMClientResponse { - - private OmVolumeArgs omVolumeArgs; - - public OMVolumeAclOpResponse(OmVolumeArgs omVolumeArgs, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omVolumeArgs = omVolumeArgs; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getSuccess()) { - if ((getOMResponse().hasAddAclResponse() && - getOMResponse().getAddAclResponse().getResponse()) || - (getOMResponse().hasRemoveAclResponse() && - getOMResponse().getRemoveAclResponse().getResponse()) || - (getOMResponse().hasSetAclResponse() && - getOMResponse().getSetAclResponse().getResponse())) { - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); - } - } - } - - @VisibleForTesting - public OmVolumeArgs getOmVolumeArgs() { - return omVolumeArgs; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java deleted file mode 100644 index 1bd3e4fd8c7..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeCreateResponse.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import java.io.IOException; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserVolumeInfo; - -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; - -/** - * Response for CreateBucket request. - */ -public class OMVolumeCreateResponse extends OMClientResponse { - - private UserVolumeInfo userVolumeInfo; - private OmVolumeArgs omVolumeArgs; - - public OMVolumeCreateResponse(OmVolumeArgs omVolumeArgs, - UserVolumeInfo userVolumeInfo, @Nonnull OMResponse omResponse) { - super(omResponse); - this.omVolumeArgs = omVolumeArgs; - this.userVolumeInfo = userVolumeInfo; - } - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String dbVolumeKey = - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()); - String dbUserKey = - omMetadataManager.getUserKey(omVolumeArgs.getOwnerName()); - - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - dbVolumeKey, omVolumeArgs); - omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, - userVolumeInfo); - } - } - - @VisibleForTesting - public OmVolumeArgs getOmVolumeArgs() { - return omVolumeArgs; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java deleted file mode 100644 index 6718ce5be6c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeDeleteResponse.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; - -/** - * Response for CreateVolume request. - */ -public class OMVolumeDeleteResponse extends OMClientResponse { - private String volume; - private String owner; - private UserVolumeInfo updatedVolumeList; - - public OMVolumeDeleteResponse(String volume, String owner, - UserVolumeInfo updatedVolumeList, @Nonnull OMResponse omResponse) { - super(omResponse); - this.volume = volume; - this.owner = owner; - this.updatedVolumeList = updatedVolumeList; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String dbUserKey = omMetadataManager.getUserKey(owner); - UserVolumeInfo volumeList = updatedVolumeList; - if (updatedVolumeList.getVolumeNamesList().size() == 0) { - omMetadataManager.getUserTable().deleteWithBatch(batchOperation, - dbUserKey); - } else { - omMetadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, - volumeList); - } - omMetadataManager.getVolumeTable().deleteWithBatch(batchOperation, - omMetadataManager.getVolumeKey(volume)); - } - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java deleted file mode 100644 index 8e0270215f2..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetOwnerResponse.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import javax.annotation.Nonnull; - -/** - * Response for set owner request. - */ -public class OMVolumeSetOwnerResponse extends OMClientResponse { - - private String oldOwner; - private UserVolumeInfo oldOwnerVolumeList; - private UserVolumeInfo newOwnerVolumeList; - private OmVolumeArgs newOwnerVolumeArgs; - - public OMVolumeSetOwnerResponse(String oldOwner, - UserVolumeInfo oldOwnerVolumeList, UserVolumeInfo newOwnerVolumeList, - OmVolumeArgs newOwnerVolumeArgs, @Nonnull OMResponse omResponse) { - super(omResponse); - this.oldOwner = oldOwner; - this.oldOwnerVolumeList = oldOwnerVolumeList; - this.newOwnerVolumeList = newOwnerVolumeList; - this.newOwnerVolumeArgs = newOwnerVolumeArgs; - } - - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - String oldOwnerKey = omMetadataManager.getUserKey(oldOwner); - String newOwnerKey = - omMetadataManager.getUserKey(newOwnerVolumeArgs.getOwnerName()); - if (oldOwnerVolumeList.getVolumeNamesList().size() == 0) { - omMetadataManager.getUserTable().deleteWithBatch(batchOperation, - oldOwnerKey); - } else { - omMetadataManager.getUserTable().putWithBatch(batchOperation, - oldOwnerKey, oldOwnerVolumeList); - } - omMetadataManager.getUserTable().putWithBatch(batchOperation, newOwnerKey, - newOwnerVolumeList); - - String dbVolumeKey = - omMetadataManager.getVolumeKey(newOwnerVolumeArgs.getVolume()); - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - dbVolumeKey, newOwnerVolumeArgs); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java deleted file mode 100644 index 13e05fa6dd8..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/OMVolumeSetQuotaResponse.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import java.io.IOException; - -import javax.annotation.Nonnull; - -/** - * Response for set quota request. - */ -public class OMVolumeSetQuotaResponse extends OMClientResponse { - private OmVolumeArgs omVolumeArgs; - - public OMVolumeSetQuotaResponse(OmVolumeArgs omVolumeArgs, - @Nonnull OMResponse omResponse) { - super(omResponse); - this.omVolumeArgs = omVolumeArgs; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - - // For OmResponse with failure, this should do nothing. This method is - // not called in failure scenario in OM code. - if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) { - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java deleted file mode 100644 index 478a19de18b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/volume/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Package contains classes related to volume requests. - */ -package org.apache.hadoop.ozone.om.response.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java deleted file mode 100644 index 5bca52dee40..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.snapshot; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.ozone.om.ha.OMNodeDetails; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.client.HttpClient; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_INDEX; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY; - -/** - * OzoneManagerSnapshotProvider downloads the latest checkpoint from the - * leader OM and loads the checkpoint into State Machine. 
- */ -public class OzoneManagerSnapshotProvider { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerSnapshotProvider.class); - - private final File omSnapshotDir; - private Map peerNodesMap; - private final HttpConfig.Policy httpPolicy; - private final RequestConfig httpRequestConfig; - private CloseableHttpClient httpClient; - - private static final String OM_SNAPSHOT_DB = "om.snapshot.db"; - - public OzoneManagerSnapshotProvider(Configuration conf, - File omRatisSnapshotDir, List peerNodes) { - - LOG.info("Initializing OM Snapshot Provider"); - this.omSnapshotDir = omRatisSnapshotDir; - - this.peerNodesMap = new HashMap<>(); - for (OMNodeDetails peerNode : peerNodes) { - this.peerNodesMap.put(peerNode.getOMNodeId(), peerNode); - } - - this.httpPolicy = DFSUtil.getHttpPolicy(conf); - this.httpRequestConfig = getHttpRequestConfig(conf); - } - - private RequestConfig getHttpRequestConfig(Configuration conf) { - TimeUnit socketTimeoutUnit = - OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getUnit(); - int socketTimeoutMS = (int) conf.getTimeDuration( - OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_KEY, - OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getDuration(), - socketTimeoutUnit); - - TimeUnit connectionTimeoutUnit = - OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getUnit(); - int connectionTimeoutMS = (int) conf.getTimeDuration( - OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY, - OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getDuration(), - connectionTimeoutUnit); - - TimeUnit requestTimeoutUnit = - OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getUnit(); - int requestTimeoutMS = (int) conf.getTimeDuration( - OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY, - OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getDuration(), - requestTimeoutUnit); - - RequestConfig requestConfig = RequestConfig.custom() - .setSocketTimeout(socketTimeoutMS) - .setConnectTimeout(connectionTimeoutMS) - .setConnectionRequestTimeout(requestTimeoutMS) - .build(); - - return requestConfig; - } - - /** - * Create and return http client object. - */ - private HttpClient getHttpClient() { - if (httpClient == null) { - httpClient = HttpClientBuilder - .create() - .setDefaultRequestConfig(httpRequestConfig) - .build(); - } - return httpClient; - } - - /** - * Close http client object. - */ - private void closeHttpClient() throws IOException { - if (httpClient != null) { - httpClient.close(); - httpClient = null; - } - } - - /** - * Download the latest checkpoint from OM Leader via HTTP. - * @param leaderOMNodeID leader OM Node ID. - * @return the DB checkpoint (including the ratis snapshot index) - */ - public DBCheckpoint getOzoneManagerDBSnapshot(String leaderOMNodeID) - throws IOException { - String snapshotFileName = OM_SNAPSHOT_DB + "_" + System.currentTimeMillis(); - File targetFile = new File(omSnapshotDir, snapshotFileName + ".tar.gz"); - - String omCheckpointUrl = peerNodesMap.get(leaderOMNodeID) - .getOMDBCheckpointEnpointUrl(httpPolicy); - - LOG.info("Downloading latest checkpoint from Leader OM {}. 
Checkpoint " + - "URL: {}", leaderOMNodeID, omCheckpointUrl); - - try { - HttpGet httpGet = new HttpGet(omCheckpointUrl); - HttpResponse response = getHttpClient().execute(httpGet); - int errorCode = response.getStatusLine().getStatusCode(); - HttpEntity entity = response.getEntity(); - - if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { - - Header header = response.getFirstHeader(OM_RATIS_SNAPSHOT_INDEX); - if (header == null) { - throw new IOException("The HTTP response header " + - OM_RATIS_SNAPSHOT_INDEX + " is missing."); - } - - long snapshotIndex = Long.parseLong(header.getValue()); - - try (InputStream inputStream = entity.getContent()) { - FileUtils.copyInputStreamToFile(inputStream, targetFile); - } - - // Untar the checkpoint file. - Path untarredDbDir = Paths.get(omSnapshotDir.getAbsolutePath(), - snapshotFileName); - FileUtil.unTar(targetFile, untarredDbDir.toFile()); - FileUtils.deleteQuietly(targetFile); - - LOG.info("Sucessfully downloaded latest checkpoint with snapshot " + - "index {} from leader OM: {}", snapshotIndex, leaderOMNodeID); - - RocksDBCheckpoint omCheckpoint = new RocksDBCheckpoint(untarredDbDir); - omCheckpoint.setRatisSnapshotIndex(snapshotIndex); - return omCheckpoint; - } - - if (entity != null) { - throw new IOException("Unexpected exception when trying to reach " + - "OM to download latest checkpoint. Checkpoint URL: " + - omCheckpointUrl + ". Entity: " + EntityUtils.toString(entity)); - } else { - throw new IOException("Unexpected null in http payload, while " + - "processing request to OM to download latest checkpoint. " + - "Checkpoint Url: " + omCheckpointUrl); - } - } finally { - closeHttpClient(); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java deleted file mode 100644 index 3c82a694078..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.snapshot; - -/** - * This package contains OM Ratis Snapshot related classes. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java deleted file mode 100644 index f84e623dd12..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandler.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - -/** - * Handler to handle OM requests in OM HA. - */ -public interface OzoneManagerHARequestHandler extends RequestHandler { - - /** - * Handle Apply Transaction Requests from OzoneManager StateMachine. - * @param omRequest - * @param transactionLogIndex - ratis transaction log index - * @return OMResponse - */ - OMResponse handleApplyTransaction(OMRequest omRequest, - long transactionLogIndex); - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java deleted file mode 100644 index 2d305d7831a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .Type; - -/** - * Command Handler for OM requests. OM State Machine calls this handler for - * deserializing the client request and sending it to OM. - */ -public class OzoneManagerHARequestHandlerImpl - extends OzoneManagerRequestHandler implements OzoneManagerHARequestHandler { - - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; - - public OzoneManagerHARequestHandlerImpl(OzoneManager om, - OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) { - super(om); - this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer; - } - - - @Override - public OMResponse handleApplyTransaction(OMRequest omRequest, - long transactionLogIndex) { - if (LOG.isDebugEnabled()) { - LOG.debug("Received OMRequest: {}, ", omRequest); - } - Type cmdType = omRequest.getCmdType(); - switch (cmdType) { - case CreateVolume: - case SetVolumeProperty: - case DeleteVolume: - case CreateBucket: - case DeleteBucket: - case SetBucketProperty: - case AllocateBlock: - case CreateKey: - case CommitKey: - case DeleteKey: - case RenameKey: - case CreateDirectory: - case CreateFile: - case PurgeKeys: - case CreateS3Bucket: - case DeleteS3Bucket: - case InitiateMultiPartUpload: - case CommitMultiPartUpload: - case AbortMultiPartUpload: - case CompleteMultiPartUpload: - case AddAcl: - case RemoveAcl: - case SetAcl: - case GetDelegationToken: - case CancelDelegationToken: - case RenewDelegationToken: - //TODO: We don't need to pass transactionID, this will be removed when - // complete write requests is changed to new model. And also we can - // return OMClientResponse, then adding to doubleBuffer can be taken - // care by stateMachine. And also integrate both HA and NON HA code - // paths. - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(omRequest); - if (omClientRequest != null) { - OMClientResponse omClientResponse = - omClientRequest.validateAndUpdateCache(getOzoneManager(), - transactionLogIndex, ozoneManagerDoubleBuffer::add); - return omClientResponse.getOMResponse(); - } else { - //TODO: remove this once we have all HA support for all write request. - return handle(omRequest); - } - - default: - // As all request types are not changed so we need to call handle - // here. 
- return handle(omRequest); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java deleted file mode 100644 index ff2c966983f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -import com.google.common.base.Preconditions; - -import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.NotLeaderException; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; -import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; - -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.util.ExitUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicLong; - -/** - * This class is the server-side translator that forwards requests received on - * {@link OzoneManagerProtocolPB} - * to the OzoneManagerService server implementation. - */ -public class OzoneManagerProtocolServerSideTranslatorPB implements - OzoneManagerProtocolPB { - private static final Logger LOG = LoggerFactory - .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class); - private final OzoneManagerRatisServer omRatisServer; - private final RequestHandler handler; - private final boolean isRatisEnabled; - private final OzoneManager ozoneManager; - private final OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; - private final AtomicLong transactionIndex = new AtomicLong(0L); - private final OzoneProtocolMessageDispatcher - dispatcher; - - /** - * Constructs an instance of the server handler. - * - * @param impl OzoneManagerProtocolPB - */ - public OzoneManagerProtocolServerSideTranslatorPB( - OzoneManager impl, - OzoneManagerRatisServer ratisServer, - ProtocolMessageMetrics metrics, - boolean enableRatis) { - this.ozoneManager = impl; - handler = new OzoneManagerRequestHandler(impl); - this.omRatisServer = ratisServer; - this.isRatisEnabled = enableRatis; - this.ozoneManagerDoubleBuffer = - new OzoneManagerDoubleBuffer(ozoneManager.getMetadataManager(), (i) -> { - // Do nothing. - // For OM NON-HA code, there is no need to save transaction index. - // As we wait until the double buffer flushes DB to disk. - }, isRatisEnabled); - - dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol", - metrics, LOG); - - } - - /** - * Submit requests to Ratis server for OM HA implementation. - * TODO: Once HA is implemented fully, we should have only one server side - * translator for OM protocol. 
- */ - @Override - public OMResponse submitRequest(RpcController controller, - OMRequest request) throws ServiceException { - - return dispatcher.processRequest(request, this::processRequest, - request.getCmdType(), request.getTraceID()); - } - - private OMResponse processRequest(OMRequest request) throws - ServiceException { - - if (isRatisEnabled) { - // Check if the request is a read only request - if (OmUtils.isReadOnly(request)) { - return submitReadRequestToOM(request); - } else { - if (omRatisServer.isLeader()) { - try { - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(request); - Preconditions.checkState(omClientRequest != null, - "Unrecognized write command type request" + request.toString()); - request = omClientRequest.preExecute(ozoneManager); - } catch (IOException ex) { - // As some of the preExecute returns error. So handle here. - return createErrorResponse(request, ex); - } - return submitRequestToRatis(request); - } else { - // throw not leader exception. This is being done, so to avoid - // unnecessary execution of preExecute on follower OM's. This - // will be helpful in the case like where we we reduce the - // chance of allocate blocks on follower OM's. Right now our - // leader status is updated every 1 second. - throw createNotLeaderException(); - } - } - } else { - return submitRequestDirectlyToOM(request); - } - } - - /** - * Create OMResponse from the specified OMRequest and exception. - * - * @param omRequest - * @param exception - * @return OMResponse - */ - private OMResponse createErrorResponse( - OMRequest omRequest, IOException exception) { - OzoneManagerProtocolProtos.Type cmdType = omRequest.getCmdType(); - // Added all write command types here, because in future if any of the - // preExecute is changed to return IOException, we can return the error - // OMResponse to the client. - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setStatus( - OzoneManagerRatisUtils.exceptionToResponseStatus(exception)) - .setCmdType(cmdType) - .setSuccess(false); - if (exception.getMessage() != null) { - omResponse.setMessage(exception.getMessage()); - } - return omResponse.build(); - } - - /** - * Submits request to OM's Ratis server. - */ - private OMResponse submitRequestToRatis(OMRequest request) - throws ServiceException { - //TODO: Need to remove OzoneManagerRatisClient, as now we are using - // RatisServer Api's. - return omRatisServer.submitRequest(request); - } - - private OMResponse submitReadRequestToOM(OMRequest request) - throws ServiceException { - // Check if this OM is the leader. - if (omRatisServer.isLeader()) { - return handler.handle(request); - } else { - throw createNotLeaderException(); - } - } - - private ServiceException createNotLeaderException() { - RaftPeerId raftPeerId = omRatisServer.getRaftPeerId(); - Optional leaderRaftPeerId = omRatisServer - .getCachedLeaderPeerId(); - - NotLeaderException notLeaderException; - if (leaderRaftPeerId.isPresent()) { - notLeaderException = new NotLeaderException(raftPeerId.toString()); - } else { - notLeaderException = new NotLeaderException( - raftPeerId.toString(), leaderRaftPeerId.toString()); - } - - if (LOG.isDebugEnabled()) { - LOG.debug(notLeaderException.getMessage()); - } - - return new ServiceException(notLeaderException); - } - - /** - * Submits request directly to OM. 
- */ - private OMResponse submitRequestDirectlyToOM(OMRequest request) { - OMClientResponse omClientResponse = null; - long index = 0L; - try { - if (OmUtils.isReadOnly(request)) { - return handler.handle(request); - } else { - OMClientRequest omClientRequest = - OzoneManagerRatisUtils.createClientRequest(request); - Preconditions.checkState(omClientRequest != null, - "Unrecognized write command type request" + request.toString()); - request = omClientRequest.preExecute(ozoneManager); - index = transactionIndex.incrementAndGet(); - omClientRequest = OzoneManagerRatisUtils.createClientRequest(request); - omClientResponse = omClientRequest.validateAndUpdateCache( - ozoneManager, index, ozoneManagerDoubleBuffer::add); - } - } catch(IOException ex) { - // As some of the preExecute returns error. So handle here. - return createErrorResponse(request, ex); - } - try { - omClientResponse.getFlushFuture().get(); - if (LOG.isTraceEnabled()) { - LOG.trace("Future for {} is completed", request); - } - } catch (ExecutionException | InterruptedException ex) { - // terminate OM. As if we are in this stage means, while getting - // response from flush future, we got an exception. - String errorMessage = "Got error during waiting for flush to be " + - "completed for " + "request" + request.toString(); - ExitUtils.terminate(1, errorMessage, ex, LOG); - } - return omClientResponse.getOMResponse(); - } - - public void stop() { - if (!isRatisEnabled) { - ozoneManagerDoubleBuffer.stop(); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java deleted file mode 100644 index ef96e0cc27e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ /dev/null @@ -1,1132 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.TreeMap; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OmPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.helpers.ServiceInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AddAclResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateVolumeResponse; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LookupKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadAbortResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadCompleteResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadListPartsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyResponse; -import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3BucketInfoResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3ListBucketsResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.security.token.Token; - -import com.google.common.collect.Lists; - -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.hdds.utils.db.SequenceNumberNotFoundException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; - -/** - * Command Handler for OM requests. OM State Machine calls this handler for - * deserializing the client request and sending it to OM. 
- */ -public class OzoneManagerRequestHandler implements RequestHandler { - static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerRequestHandler.class); - private final OzoneManager impl; - - public OzoneManagerRequestHandler(OzoneManager om) { - this.impl = om; - } - - //TODO simplify it to make it shorter - @SuppressWarnings("methodlength") - @Override - public OMResponse handle(OMRequest request) { - if (LOG.isDebugEnabled()) { - LOG.debug("Received OMRequest: {}, ", request); - } - Type cmdType = request.getCmdType(); - OMResponse.Builder responseBuilder = OMResponse.newBuilder() - .setCmdType(cmdType) - .setStatus(Status.OK); - try { - switch (cmdType) { - case CreateVolume: - CreateVolumeResponse createVolumeResponse = createVolume( - request.getCreateVolumeRequest()); - responseBuilder.setCreateVolumeResponse(createVolumeResponse); - break; - case SetVolumeProperty: - SetVolumePropertyResponse setVolumePropertyResponse = setVolumeProperty( - request.getSetVolumePropertyRequest()); - responseBuilder.setSetVolumePropertyResponse(setVolumePropertyResponse); - break; - case CheckVolumeAccess: - CheckVolumeAccessResponse checkVolumeAccessResponse = checkVolumeAccess( - request.getCheckVolumeAccessRequest()); - responseBuilder.setCheckVolumeAccessResponse(checkVolumeAccessResponse); - break; - case InfoVolume: - InfoVolumeResponse infoVolumeResponse = infoVolume( - request.getInfoVolumeRequest()); - responseBuilder.setInfoVolumeResponse(infoVolumeResponse); - break; - case DeleteVolume: - DeleteVolumeResponse deleteVolumeResponse = deleteVolume( - request.getDeleteVolumeRequest()); - responseBuilder.setDeleteVolumeResponse(deleteVolumeResponse); - break; - case ListVolume: - ListVolumeResponse listVolumeResponse = listVolumes( - request.getListVolumeRequest()); - responseBuilder.setListVolumeResponse(listVolumeResponse); - break; - case CreateBucket: - CreateBucketResponse createBucketResponse = createBucket( - request.getCreateBucketRequest()); - responseBuilder.setCreateBucketResponse(createBucketResponse); - break; - case InfoBucket: - InfoBucketResponse infoBucketResponse = infoBucket( - request.getInfoBucketRequest()); - responseBuilder.setInfoBucketResponse(infoBucketResponse); - break; - case SetBucketProperty: - SetBucketPropertyResponse setBucketPropertyResponse = setBucketProperty( - request.getSetBucketPropertyRequest()); - responseBuilder.setSetBucketPropertyResponse(setBucketPropertyResponse); - break; - case DeleteBucket: - DeleteBucketResponse deleteBucketResponse = deleteBucket( - request.getDeleteBucketRequest()); - responseBuilder.setDeleteBucketResponse(deleteBucketResponse); - break; - case ListBuckets: - ListBucketsResponse listBucketsResponse = listBuckets( - request.getListBucketsRequest()); - responseBuilder.setListBucketsResponse(listBucketsResponse); - break; - case CreateKey: - CreateKeyResponse createKeyResponse = createKey( - request.getCreateKeyRequest()); - responseBuilder.setCreateKeyResponse(createKeyResponse); - break; - case LookupKey: - LookupKeyResponse lookupKeyResponse = lookupKey( - request.getLookupKeyRequest()); - responseBuilder.setLookupKeyResponse(lookupKeyResponse); - break; - case RenameKey: - RenameKeyResponse renameKeyResponse = renameKey( - request.getRenameKeyRequest()); - responseBuilder.setRenameKeyResponse(renameKeyResponse); - break; - case DeleteKey: - DeleteKeyResponse deleteKeyResponse = deleteKey( - request.getDeleteKeyRequest()); - responseBuilder.setDeleteKeyResponse(deleteKeyResponse); - break; - case ListKeys: - 
ListKeysResponse listKeysResponse = listKeys( - request.getListKeysRequest()); - responseBuilder.setListKeysResponse(listKeysResponse); - break; - case CommitKey: - CommitKeyResponse commitKeyResponse = commitKey( - request.getCommitKeyRequest()); - responseBuilder.setCommitKeyResponse(commitKeyResponse); - break; - case AllocateBlock: - AllocateBlockResponse allocateBlockResponse = allocateBlock( - request.getAllocateBlockRequest()); - responseBuilder.setAllocateBlockResponse(allocateBlockResponse); - break; - case CreateS3Bucket: - S3CreateBucketResponse s3CreateBucketResponse = createS3Bucket( - request.getCreateS3BucketRequest()); - responseBuilder.setCreateS3BucketResponse(s3CreateBucketResponse); - break; - case DeleteS3Bucket: - S3DeleteBucketResponse s3DeleteBucketResponse = deleteS3Bucket( - request.getDeleteS3BucketRequest()); - responseBuilder.setDeleteS3BucketResponse(s3DeleteBucketResponse); - break; - case InfoS3Bucket: - S3BucketInfoResponse s3BucketInfoResponse = getS3Bucketinfo( - request.getInfoS3BucketRequest()); - responseBuilder.setInfoS3BucketResponse(s3BucketInfoResponse); - break; - case ListS3Buckets: - S3ListBucketsResponse s3ListBucketsResponse = listS3Buckets( - request.getListS3BucketsRequest()); - responseBuilder.setListS3BucketsResponse(s3ListBucketsResponse); - break; - case InitiateMultiPartUpload: - MultipartInfoInitiateResponse multipartInfoInitiateResponse = - initiateMultiPartUpload( - request.getInitiateMultiPartUploadRequest()); - responseBuilder.setInitiateMultiPartUploadResponse( - multipartInfoInitiateResponse); - break; - case CommitMultiPartUpload: - MultipartCommitUploadPartResponse commitUploadPartResponse = - commitMultipartUploadPart( - request.getCommitMultiPartUploadRequest()); - responseBuilder.setCommitMultiPartUploadResponse( - commitUploadPartResponse); - break; - case CompleteMultiPartUpload: - MultipartUploadCompleteResponse completeMultiPartUploadResponse = - completeMultipartUpload( - request.getCompleteMultiPartUploadRequest()); - responseBuilder.setCompleteMultiPartUploadResponse( - completeMultiPartUploadResponse); - break; - case AbortMultiPartUpload: - MultipartUploadAbortResponse abortMultiPartAbortResponse = - abortMultipartUpload(request.getAbortMultiPartUploadRequest()); - responseBuilder.setAbortMultiPartUploadResponse( - abortMultiPartAbortResponse); - break; - case ListMultiPartUploadParts: - MultipartUploadListPartsResponse listPartsResponse = - listParts(request.getListMultipartUploadPartsRequest()); - responseBuilder.setListMultipartUploadPartsResponse(listPartsResponse); - break; - case ListMultipartUploads: - ListMultipartUploadsResponse response = - listMultipartUploads(request.getListMultipartUploadsRequest()); - responseBuilder.setListMultipartUploadsResponse(response); - break; - case ServiceList: - ServiceListResponse serviceListResponse = getServiceList( - request.getServiceListRequest()); - responseBuilder.setServiceListResponse(serviceListResponse); - break; - case DBUpdates: - DBUpdatesResponse dbUpdatesResponse = getOMDBUpdates( - request.getDbUpdatesRequest()); - responseBuilder.setDbUpdatesResponse(dbUpdatesResponse); - break; - case GetDelegationToken: - GetDelegationTokenResponseProto getDtResp = getDelegationToken( - request.getGetDelegationTokenRequest()); - responseBuilder.setGetDelegationTokenResponse(getDtResp); - break; - case RenewDelegationToken: - RenewDelegationTokenResponseProto renewDtResp = renewDelegationToken( - request.getRenewDelegationTokenRequest()); - 
responseBuilder.setRenewDelegationTokenResponse(renewDtResp); - break; - case CancelDelegationToken: - CancelDelegationTokenResponseProto cancelDtResp = cancelDelegationToken( - request.getCancelDelegationTokenRequest()); - responseBuilder.setCancelDelegationTokenResponse(cancelDtResp); - break; - case GetS3Secret: - GetS3SecretResponse getS3SecretResp = getS3Secret(request - .getGetS3SecretRequest()); - responseBuilder.setGetS3SecretResponse(getS3SecretResp); - break; - case GetFileStatus: - GetFileStatusResponse getFileStatusResponse = - getOzoneFileStatus(request.getGetFileStatusRequest()); - responseBuilder.setGetFileStatusResponse(getFileStatusResponse); - break; - case CreateDirectory: - createDirectory(request.getCreateDirectoryRequest()); - break; - case CreateFile: - CreateFileResponse createFileResponse = - createFile(request.getCreateFileRequest()); - responseBuilder.setCreateFileResponse(createFileResponse); - break; - case LookupFile: - LookupFileResponse lookupFileResponse = - lookupFile(request.getLookupFileRequest()); - responseBuilder.setLookupFileResponse(lookupFileResponse); - break; - case ListStatus: - ListStatusResponse listStatusResponse = - listStatus(request.getListStatusRequest()); - responseBuilder.setListStatusResponse(listStatusResponse); - break; - case AddAcl: - AddAclResponse addAclResponse = - addAcl(request.getAddAclRequest()); - responseBuilder.setAddAclResponse(addAclResponse); - break; - case RemoveAcl: - RemoveAclResponse removeAclResponse = - removeAcl(request.getRemoveAclRequest()); - responseBuilder.setRemoveAclResponse(removeAclResponse); - break; - case SetAcl: - SetAclResponse setAclResponse = - setAcl(request.getSetAclRequest()); - responseBuilder.setSetAclResponse(setAclResponse); - break; - case GetAcl: - GetAclResponse getAclResponse = - getAcl(request.getGetAclRequest()); - responseBuilder.setGetAclResponse(getAclResponse); - break; - default: - responseBuilder.setSuccess(false); - responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); - break; - } - responseBuilder.setSuccess(true); - } catch (IOException ex) { - responseBuilder.setSuccess(false); - responseBuilder.setStatus(exceptionToResponseStatus(ex)); - if (ex.getMessage() != null) { - responseBuilder.setMessage(ex.getMessage()); - } - } - return responseBuilder.build(); - } - - private DBUpdatesResponse getOMDBUpdates( - DBUpdatesRequest dbUpdatesRequest) - throws SequenceNumberNotFoundException { - - DBUpdatesResponse.Builder builder = DBUpdatesResponse - .newBuilder(); - DBUpdatesWrapper dbUpdatesWrapper = - impl.getDBUpdates(dbUpdatesRequest); - for (int i = 0; i < dbUpdatesWrapper.getData().size(); i++) { - builder.addData(OMPBHelper.getByteString( - dbUpdatesWrapper.getData().get(i))); - } - builder.setSequenceNumber(dbUpdatesWrapper.getCurrentSequenceNumber()); - return builder.build(); - } - - private GetAclResponse getAcl(GetAclRequest req) throws IOException { - List acls = new ArrayList<>(); - List aclList = - impl.getAcl(OzoneObjInfo.fromProtobuf(req.getObj())); - if (aclList != null) { - aclList.forEach(a -> acls.add(OzoneAcl.toProtobuf(a))); - } - return GetAclResponse.newBuilder().addAllAcls(acls).build(); - } - - private RemoveAclResponse removeAcl(RemoveAclRequest req) - throws IOException { - boolean response = impl.removeAcl(OzoneObjInfo.fromProtobuf(req.getObj()), - OzoneAcl.fromProtobuf(req.getAcl())); - return RemoveAclResponse.newBuilder().setResponse(response).build(); - } - - private SetAclResponse setAcl(SetAclRequest req) throws IOException { - 
boolean response = impl.setAcl(OzoneObjInfo.fromProtobuf(req.getObj()), - req.getAclList().stream().map(a -> OzoneAcl.fromProtobuf(a)). - collect(Collectors.toList())); - return SetAclResponse.newBuilder().setResponse(response).build(); - } - - private AddAclResponse addAcl(AddAclRequest req) throws IOException { - boolean response = impl.addAcl(OzoneObjInfo.fromProtobuf(req.getObj()), - OzoneAcl.fromProtobuf(req.getAcl())); - return AddAclResponse.newBuilder().setResponse(response).build(); - } - - // Convert and exception to corresponding status code - protected Status exceptionToResponseStatus(IOException ex) { - if (ex instanceof OMException) { - return Status.values()[((OMException) ex).getResult().ordinal()]; - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Unknown error occurs", ex); - } - return Status.INTERNAL_ERROR; - } - } - - /** - * Validates that the incoming OM request has required parameters. - * TODO: Add more validation checks before writing the request to Ratis log. - * - * @param omRequest client request to OM - * @throws OMException thrown if required parameters are set to null. - */ - @Override - public void validateRequest(OMRequest omRequest) throws OMException { - Type cmdType = omRequest.getCmdType(); - if (cmdType == null) { - throw new OMException("CmdType is null", - OMException.ResultCodes.INVALID_REQUEST); - } - if (omRequest.getClientId() == null) { - throw new OMException("ClientId is null", - OMException.ResultCodes.INVALID_REQUEST); - } - } - - private CreateVolumeResponse createVolume(CreateVolumeRequest request) - throws IOException { - impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo())); - return - CreateVolumeResponse.newBuilder().build(); - } - - private SetVolumePropertyResponse setVolumeProperty( - SetVolumePropertyRequest request) throws IOException { - SetVolumePropertyResponse.Builder resp = - SetVolumePropertyResponse.newBuilder(); - - String volume = request.getVolumeName(); - - if (request.hasQuotaInBytes()) { - long quota = request.getQuotaInBytes(); - impl.setQuota(volume, quota); - } else { - String owner = request.getOwnerName(); - impl.setOwner(volume, owner); - } - - return resp.build(); - } - - private CheckVolumeAccessResponse checkVolumeAccess( - CheckVolumeAccessRequest request) throws IOException { - CheckVolumeAccessResponse.Builder resp = - CheckVolumeAccessResponse.newBuilder(); - boolean access = impl.checkVolumeAccess(request.getVolumeName(), - request.getUserAcl()); - // if no access, set the response status as access denied - - if (!access) { - throw new OMException(OMException.ResultCodes.ACCESS_DENIED); - } - - return resp.build(); - } - - private InfoVolumeResponse infoVolume(InfoVolumeRequest request) - throws IOException { - InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder(); - String volume = request.getVolumeName(); - - OmVolumeArgs ret = impl.getVolumeInfo(volume); - resp.setVolumeInfo(ret.getProtobuf()); - - return resp.build(); - } - - private DeleteVolumeResponse deleteVolume(DeleteVolumeRequest request) - throws IOException { - DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder(); - - impl.deleteVolume(request.getVolumeName()); - - return resp.build(); - } - - private ListVolumeResponse listVolumes(ListVolumeRequest request) - throws IOException { - ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder(); - List result = Lists.newArrayList(); - - if (request.getScope() - == ListVolumeRequest.Scope.VOLUMES_BY_USER) { - result = 
impl.listVolumeByUser(request.getUserName(), - request.getPrefix(), request.getPrevKey(), request.getMaxKeys()); - } else if (request.getScope() - == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) { - result = - impl.listAllVolumes(request.getPrefix(), request.getPrevKey(), - request.getMaxKeys()); - } - - result.forEach(item -> resp.addVolumeInfo(item.getProtobuf())); - - return resp.build(); - } - - private CreateBucketResponse createBucket(CreateBucketRequest request) - throws IOException { - CreateBucketResponse.Builder resp = - CreateBucketResponse.newBuilder(); - impl.createBucket(OmBucketInfo.getFromProtobuf( - request.getBucketInfo())); - return resp.build(); - } - - private InfoBucketResponse infoBucket(InfoBucketRequest request) - throws IOException { - InfoBucketResponse.Builder resp = - InfoBucketResponse.newBuilder(); - OmBucketInfo omBucketInfo = impl.getBucketInfo( - request.getVolumeName(), request.getBucketName()); - resp.setBucketInfo(omBucketInfo.getProtobuf()); - - return resp.build(); - } - - private CreateKeyResponse createKey(CreateKeyRequest request) - throws IOException { - CreateKeyResponse.Builder resp = - CreateKeyResponse.newBuilder(); - KeyArgs keyArgs = request.getKeyArgs(); - HddsProtos.ReplicationType type = - keyArgs.hasType() ? keyArgs.getType() : null; - HddsProtos.ReplicationFactor factor = - keyArgs.hasFactor() ? keyArgs.getFactor() : null; - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setDataSize(keyArgs.getDataSize()) - .setType(type) - .setFactor(factor) - .setIsMultipartKey(keyArgs.getIsMultipartKey()) - .setMultipartUploadID(keyArgs.getMultipartUploadID()) - .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) - .setAcls(keyArgs.getAclsList().stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) - .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList())) - .build(); - if (keyArgs.hasDataSize()) { - omKeyArgs.setDataSize(keyArgs.getDataSize()); - } else { - omKeyArgs.setDataSize(0); - } - OpenKeySession openKey = impl.openKey(omKeyArgs); - resp.setKeyInfo(openKey.getKeyInfo().getProtobuf()); - resp.setID(openKey.getId()); - resp.setOpenVersion(openKey.getOpenVersion()); - return resp.build(); - } - - private LookupKeyResponse lookupKey(LookupKeyRequest request) - throws IOException { - LookupKeyResponse.Builder resp = - LookupKeyResponse.newBuilder(); - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setRefreshPipeline(true) - .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) - .build(); - OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs); - resp.setKeyInfo(keyInfo.getProtobuf()); - - return resp.build(); - } - - private RenameKeyResponse renameKey(RenameKeyRequest request) - throws IOException { - RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setRefreshPipeline(true) - .build(); - impl.renameKey(omKeyArgs, request.getToKeyName()); - - return resp.build(); - } - - private SetBucketPropertyResponse setBucketProperty( - SetBucketPropertyRequest request) throws IOException { - 
SetBucketPropertyResponse.Builder resp = - SetBucketPropertyResponse.newBuilder(); - impl.setBucketProperty(OmBucketArgs.getFromProtobuf( - request.getBucketArgs())); - - return resp.build(); - } - - private DeleteKeyResponse deleteKey(DeleteKeyRequest request) - throws IOException { - DeleteKeyResponse.Builder resp = - DeleteKeyResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .build(); - impl.deleteKey(omKeyArgs); - - return resp.build(); - } - - private DeleteBucketResponse deleteBucket(DeleteBucketRequest request) - throws IOException { - DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder(); - - impl.deleteBucket(request.getVolumeName(), request.getBucketName()); - - return resp.build(); - } - - private ListBucketsResponse listBuckets(ListBucketsRequest request) - throws IOException { - ListBucketsResponse.Builder resp = - ListBucketsResponse.newBuilder(); - - List buckets = impl.listBuckets( - request.getVolumeName(), - request.getStartKey(), - request.getPrefix(), - request.getCount()); - for (OmBucketInfo bucket : buckets) { - resp.addBucketInfo(bucket.getProtobuf()); - } - - return resp.build(); - } - - private ListKeysResponse listKeys(ListKeysRequest request) - throws IOException { - ListKeysResponse.Builder resp = - ListKeysResponse.newBuilder(); - - List keys = impl.listKeys( - request.getVolumeName(), - request.getBucketName(), - request.getStartKey(), - request.getPrefix(), - request.getCount()); - for (OmKeyInfo key : keys) { - resp.addKeyInfo(key.getProtobuf()); - } - - return resp.build(); - } - - private CommitKeyResponse commitKey(CommitKeyRequest request) - throws IOException { - CommitKeyResponse.Builder resp = - CommitKeyResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - HddsProtos.ReplicationType type = - keyArgs.hasType() ? keyArgs.getType() : null; - HddsProtos.ReplicationFactor factor = - keyArgs.hasFactor() ? 
keyArgs.getFactor() : null; - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setLocationInfoList(keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList())) - .setType(type) - .setFactor(factor) - .setDataSize(keyArgs.getDataSize()) - .build(); - impl.commitKey(omKeyArgs, request.getClientID()); - - return resp.build(); - } - - private AllocateBlockResponse allocateBlock(AllocateBlockRequest request) - throws IOException { - AllocateBlockResponse.Builder resp = - AllocateBlockResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .build(); - - OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs, - request.getClientID(), ExcludeList.getFromProtoBuf( - request.getExcludeList())); - - resp.setKeyLocation(newLocation.getProtobuf()); - - return resp.build(); - } - - private ServiceListResponse getServiceList(ServiceListRequest request) - throws IOException { - ServiceListResponse.Builder resp = ServiceListResponse.newBuilder(); - - resp.addAllServiceInfo(impl.getServiceInfo().getServiceInfoList().stream() - .map(ServiceInfo::getProtobuf) - .collect(Collectors.toList())); - if (impl.getServiceInfo().getCaCertificate() != null) { - resp.setCaCertificate(impl.getServiceInfo().getCaCertificate()); - } - return resp.build(); - } - - private S3CreateBucketResponse createS3Bucket(S3CreateBucketRequest request) - throws IOException { - S3CreateBucketResponse.Builder resp = S3CreateBucketResponse.newBuilder(); - - impl.createS3Bucket(request.getUserName(), request.getS3Bucketname()); - - return resp.build(); - } - - private S3DeleteBucketResponse deleteS3Bucket(S3DeleteBucketRequest request) - throws IOException { - S3DeleteBucketResponse.Builder resp = S3DeleteBucketResponse.newBuilder(); - - impl.deleteS3Bucket(request.getS3BucketName()); - - return resp.build(); - } - - private S3BucketInfoResponse getS3Bucketinfo(S3BucketInfoRequest request) - throws IOException { - S3BucketInfoResponse.Builder resp = S3BucketInfoResponse.newBuilder(); - - resp.setOzoneMapping( - impl.getOzoneBucketMapping(request.getS3BucketName())); - return resp.build(); - } - - private S3ListBucketsResponse listS3Buckets(S3ListBucketsRequest request) - throws IOException { - S3ListBucketsResponse.Builder resp = S3ListBucketsResponse.newBuilder(); - - List buckets = impl.listS3Buckets( - request.getUserName(), - request.getStartKey(), - request.getPrefix(), - request.getCount()); - for (OmBucketInfo bucket : buckets) { - resp.addBucketInfo(bucket.getProtobuf()); - } - - return resp.build(); - } - - private MultipartInfoInitiateResponse initiateMultiPartUpload( - MultipartInfoInitiateRequest request) throws IOException { - MultipartInfoInitiateResponse.Builder resp = MultipartInfoInitiateResponse - .newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setType(keyArgs.getType()) - .setFactor(keyArgs.getFactor()) - .setAcls(keyArgs.getAclsList().stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) - .build(); - OmMultipartInfo multipartInfo = 
impl.initiateMultipartUpload(omKeyArgs); - resp.setVolumeName(multipartInfo.getVolumeName()); - resp.setBucketName(multipartInfo.getBucketName()); - resp.setKeyName(multipartInfo.getKeyName()); - resp.setMultipartUploadID(multipartInfo.getUploadID()); - - return resp.build(); - } - - private MultipartCommitUploadPartResponse commitMultipartUploadPart( - MultipartCommitUploadPartRequest request) throws IOException { - MultipartCommitUploadPartResponse.Builder resp = - MultipartCommitUploadPartResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setMultipartUploadID(keyArgs.getMultipartUploadID()) - .setIsMultipartKey(keyArgs.getIsMultipartKey()) - .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) - .setDataSize(keyArgs.getDataSize()) - .setLocationInfoList(keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList())) - .build(); - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - impl.commitMultipartUploadPart(omKeyArgs, request.getClientID()); - resp.setPartName(commitUploadPartInfo.getPartName()); - - return resp.build(); - } - - private MultipartUploadCompleteResponse completeMultipartUpload( - MultipartUploadCompleteRequest request) throws IOException { - MultipartUploadCompleteResponse.Builder response = - MultipartUploadCompleteResponse.newBuilder(); - - KeyArgs keyArgs = request.getKeyArgs(); - List partsList = request.getPartsListList(); - - TreeMap partsMap = new TreeMap<>(); - for (Part part : partsList) { - partsMap.put(part.getPartNumber(), part.getPartName()); - } - - OmMultipartUploadCompleteList omMultipartUploadCompleteList = - new OmMultipartUploadCompleteList(partsMap); - - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setAcls(keyArgs.getAclsList().stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) - .setMultipartUploadID(keyArgs.getMultipartUploadID()) - .build(); - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = impl - .completeMultipartUpload(omKeyArgs, omMultipartUploadCompleteList); - - response.setVolume(omMultipartUploadCompleteInfo.getVolume()) - .setBucket(omMultipartUploadCompleteInfo.getBucket()) - .setKey(omMultipartUploadCompleteInfo.getKey()) - .setHash(omMultipartUploadCompleteInfo.getHash()); - - return response.build(); - } - - private MultipartUploadAbortResponse abortMultipartUpload( - MultipartUploadAbortRequest multipartUploadAbortRequest) - throws IOException { - MultipartUploadAbortResponse.Builder response = - MultipartUploadAbortResponse.newBuilder(); - - KeyArgs keyArgs = multipartUploadAbortRequest.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setMultipartUploadID(keyArgs.getMultipartUploadID()) - .build(); - impl.abortMultipartUpload(omKeyArgs); - - return response.build(); - } - - private MultipartUploadListPartsResponse listParts( - MultipartUploadListPartsRequest multipartUploadListPartsRequest) - throws IOException { - - MultipartUploadListPartsResponse.Builder response = - MultipartUploadListPartsResponse.newBuilder(); - - OmMultipartUploadListParts omMultipartUploadListParts = - 
impl.listParts(multipartUploadListPartsRequest.getVolume(), - multipartUploadListPartsRequest.getBucket(), - multipartUploadListPartsRequest.getKey(), - multipartUploadListPartsRequest.getUploadID(), - multipartUploadListPartsRequest.getPartNumbermarker(), - multipartUploadListPartsRequest.getMaxParts()); - - List omPartInfoList = - omMultipartUploadListParts.getPartInfoList(); - - List partInfoList = - new ArrayList<>(); - - omPartInfoList.forEach(partInfo -> partInfoList.add(partInfo.getProto())); - - response.setType(omMultipartUploadListParts.getReplicationType()); - response.setFactor(omMultipartUploadListParts.getReplicationFactor()); - response.setNextPartNumberMarker( - omMultipartUploadListParts.getNextPartNumberMarker()); - response.setIsTruncated(omMultipartUploadListParts.isTruncated()); - - return response.addAllPartsList(partInfoList).build(); - - - } - - private ListMultipartUploadsResponse listMultipartUploads( - ListMultipartUploadsRequest request) - throws IOException { - - OmMultipartUploadList omMultipartUploadList = - impl.listMultipartUploads(request.getVolume(), request.getBucket(), - request.getPrefix()); - - List info = omMultipartUploadList - .getUploads() - .stream() - .map(upload -> MultipartUploadInfo.newBuilder() - .setVolumeName(upload.getVolumeName()) - .setBucketName(upload.getBucketName()) - .setKeyName(upload.getKeyName()) - .setUploadId(upload.getUploadId()) - .setType(upload.getReplicationType()) - .setFactor(upload.getReplicationFactor()) - .setCreationTime(upload.getCreationTime().toEpochMilli()) - .build()) - .collect(Collectors.toList()); - - ListMultipartUploadsResponse response = - ListMultipartUploadsResponse.newBuilder() - .addAllUploadsList(info) - .build(); - - return response; - } - - private GetDelegationTokenResponseProto getDelegationToken( - GetDelegationTokenRequestProto request) throws OMException { - GetDelegationTokenResponseProto.Builder rb = - GetDelegationTokenResponseProto.newBuilder(); - - Token token = impl - .getDelegationToken(new Text(request.getRenewer())); - if (token != null) { - rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos - .GetDelegationTokenResponseProto.newBuilder().setToken(OMPBHelper - .convertToTokenProto(token)).build()); - } - - return rb.build(); - } - - private RenewDelegationTokenResponseProto renewDelegationToken( - RenewDelegationTokenRequestProto request) throws OMException { - RenewDelegationTokenResponseProto.Builder rb = - RenewDelegationTokenResponseProto.newBuilder(); - - if (request.hasToken()) { - long expiryTime = impl - .renewDelegationToken( - OMPBHelper.convertToDelegationToken(request.getToken())); - rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos - .RenewDelegationTokenResponseProto.newBuilder() - .setNewExpiryTime(expiryTime).build()); - } - - return rb.build(); - } - - private CancelDelegationTokenResponseProto cancelDelegationToken( - CancelDelegationTokenRequestProto req) throws OMException { - CancelDelegationTokenResponseProto.Builder rb = - CancelDelegationTokenResponseProto.newBuilder(); - - if (req.hasToken()) { - impl.cancelDelegationToken( - OMPBHelper.convertToDelegationToken(req.getToken())); - } - rb.setResponse(org.apache.hadoop.security.proto.SecurityProtos - .CancelDelegationTokenResponseProto.getDefaultInstance()); - - return rb.build(); - } - - private GetS3SecretResponse getS3Secret( - GetS3SecretRequest request) - throws IOException { - GetS3SecretResponse.Builder rb = - GetS3SecretResponse.newBuilder(); - - 
rb.setS3Secret(impl.getS3Secret(request.getKerberosID()).getProtobuf()); - - return rb.build(); - } - - private GetFileStatusResponse getOzoneFileStatus( - GetFileStatusRequest request) throws IOException { - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .build(); - - GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder(); - rb.setStatus(impl.getFileStatus(omKeyArgs).getProtobuf()); - - return rb.build(); - } - - private void createDirectory(CreateDirectoryRequest request) - throws IOException { - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setAcls(keyArgs.getAclsList().stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) - .build(); - impl.createDirectory(omKeyArgs); - } - - private CreateFileResponse createFile( - CreateFileRequest request) throws IOException { - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setDataSize(keyArgs.getDataSize()) - .setType(keyArgs.getType()) - .setFactor(keyArgs.getFactor()) - .setAcls(keyArgs.getAclsList().stream().map(a -> - OzoneAcl.fromProtobuf(a)).collect(Collectors.toList())) - .build(); - OpenKeySession keySession = - impl.createFile(omKeyArgs, request.getIsOverwrite(), - request.getIsRecursive()); - return CreateFileResponse.newBuilder() - .setKeyInfo(keySession.getKeyInfo().getProtobuf()) - .setID(keySession.getId()) - .setOpenVersion(keySession.getOpenVersion()) - .build(); - } - - private LookupFileResponse lookupFile( - LookupFileRequest request) - throws IOException { - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) - .build(); - return LookupFileResponse.newBuilder() - .setKeyInfo(impl.lookupFile(omKeyArgs).getProtobuf()) - .build(); - } - - private ListStatusResponse listStatus( - ListStatusRequest request) throws IOException { - KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .build(); - List statuses = - impl.listStatus(omKeyArgs, request.getRecursive(), - request.getStartKey(), request.getNumEntries()); - ListStatusResponse.Builder - listStatusResponseBuilder = - ListStatusResponse.newBuilder(); - for (OzoneFileStatus status : statuses) { - listStatusResponseBuilder.addStatuses(status.getProtobuf()); - } - return listStatusResponseBuilder.build(); - } - - protected OzoneManager getOzoneManager() { - return impl; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java deleted file mode 100644 index f19dc48023b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache 
Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.protocolPB; - -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMResponse; - -/** - * Handler to handle the OmRequests. - */ -public interface RequestHandler { - - /** - * Handle the OmRequest, and returns OmResponse. - * @param request - * @return OmResponse - */ - OMResponse handle(OMRequest request); - - - /** - * Validates that the incoming OM request has required parameters. - * TODO: Add more validation checks before writing the request to Ratis log. - * - * @param omRequest client request to OM - * @throws OMException thrown if required parameters are set to null. - */ - void validateRequest(OMRequest omRequest) throws OMException; - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java deleted file mode 100644 index 9bc393dd18b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.protocolPB; - -/** - * OM protocol buffer translators. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java deleted file mode 100644 index 0b7c51a4064..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAuthorizer.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.security.acl; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ozone.om.BucketManager; -import org.apache.hadoop.ozone.om.KeyManager; -import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.VolumeManager; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; - -/** - * Public API for Ozone ACLs. Security providers providing support for Ozone - * ACLs should implement this. - */ -@InterfaceAudience.LimitedPrivate({"HDFS", "Yarn", "Ranger", "Hive", "HBase"}) -@InterfaceStability.Evolving -public class OzoneNativeAuthorizer implements IAccessAuthorizer { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneNativeAuthorizer.class); - private VolumeManager volumeManager; - private BucketManager bucketManager; - private KeyManager keyManager; - private PrefixManager prefixManager; - - public OzoneNativeAuthorizer() { - } - - public OzoneNativeAuthorizer(VolumeManager volumeManager, - BucketManager bucketManager, KeyManager keyManager, - PrefixManager prefixManager) { - this.volumeManager = volumeManager; - this.bucketManager = bucketManager; - this.keyManager = keyManager; - this.prefixManager = prefixManager; - } - - /** - * Check access for given ozoneObject. - * - * @param ozObject object for which access needs to be checked. - * @param context Context object encapsulating all user related information. - * @return true if user has access else false. - */ - public boolean checkAccess(IOzoneObj ozObject, RequestContext context) - throws OMException { - Objects.requireNonNull(ozObject); - Objects.requireNonNull(context); - OzoneObjInfo objInfo; - - if (ozObject instanceof OzoneObjInfo) { - objInfo = (OzoneObjInfo) ozObject; - } else { - throw new OMException("Unexpected input received. 
OM native acls are " + - "configured to work with OzoneObjInfo type only.", INVALID_REQUEST); - } - - switch (objInfo.getResourceType()) { - case VOLUME: - LOG.trace("Checking access for volume: {}", objInfo); - return volumeManager.checkAccess(objInfo, context); - case BUCKET: - LOG.trace("Checking access for bucket: {}", objInfo); - return (bucketManager.checkAccess(objInfo, context) - && volumeManager.checkAccess(objInfo, context)); - case KEY: - LOG.trace("Checking access for Key: {}", objInfo); - return (keyManager.checkAccess(objInfo, context) - && prefixManager.checkAccess(objInfo, context) - && bucketManager.checkAccess(objInfo, context) - && volumeManager.checkAccess(objInfo, context)); - case PREFIX: - LOG.trace("Checking access for Prefix: {]", objInfo); - return (prefixManager.checkAccess(objInfo, context) - && bucketManager.checkAccess(objInfo, context) - && volumeManager.checkAccess(objInfo, context)); - default: - throw new OMException("Unexpected object type:" + - objInfo.getResourceType(), INVALID_REQUEST); - } - } - - public void setVolumeManager(VolumeManager volumeManager) { - this.volumeManager = volumeManager; - } - - public void setBucketManager(BucketManager bucketManager) { - this.bucketManager = bucketManager; - } - - public void setKeyManager(KeyManager keyManager) { - this.keyManager = keyManager; - } - - public void setPrefixManager(PrefixManager prefixManager) { - this.prefixManager = prefixManager; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java deleted file mode 100644 index 20e747a1974..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/acl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security.acl; - -/** - * OM native acl implementation. - */ \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java deleted file mode 100644 index 6405eef2a9d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * Common interface for command handling. - */ -@Command(mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public abstract class Handler implements Callable { - - protected static final Logger LOG = LoggerFactory.getLogger(Handler.class); - - @ParentCommand - private GenericParentCommand parent; - - @Override - public Void call() throws Exception { - throw new UnsupportedOperationException(); - } - - public boolean isVerbose() { - return parent.isVerbose(); - } - - public OzoneConfiguration createOzoneConfiguration() { - return parent.createOzoneConfiguration(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java deleted file mode 100644 index 2a17275a566..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/ObjectPrinter.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell; - -import java.io.IOException; - -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -/** - * Utility to print out response object in human readable form. 
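Note: the Handler/ObjectPrinter pair shown above is the template that every removed shell subcommand below follows. A minimal sketch of such a subcommand (the command name and the printed volume are purely illustrative; only the picocli wiring and the isVerbose()/createOzoneConfiguration() hooks mirror the real handlers):

import org.apache.hadoop.ozone.client.OzoneClient;
import picocli.CommandLine.Command;
import picocli.CommandLine.Parameters;

// Hypothetical subcommand, shown only to illustrate the Handler pattern.
@Command(name = "example",
    description = "illustrative subcommand, not part of the removed sources")
public class ExampleHandler extends Handler {

  @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION)
  private String uri;

  @Override
  public Void call() throws Exception {
    OzoneAddress address = new OzoneAddress(uri);
    address.ensureVolumeAddress();
    OzoneClient client = address.createClient(createOzoneConfiguration());
    if (isVerbose()) {
      System.out.printf("Volume Name : %s%n", address.getVolumeName());
    }
    // Print the resolved volume as JSON, the way the info handlers below do.
    ObjectPrinter.printObjectAsJson(
        client.getObjectStore().getVolume(address.getVolumeName()));
    return null;
  }
}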
- */ -public final class ObjectPrinter { - private ObjectPrinter() { - } - - public static String getObjectAsJson(Object o) throws IOException { - return JsonUtils.toJsonStringWithDefaultPrettyPrinter(o); - } - - public static void printObjectAsJson(Object o) throws IOException { - System.out.println(getObjectAsJson(o)); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java deleted file mode 100644 index 4cb283ec99d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.client.OzoneClientFactory; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME; -import org.apache.http.client.utils.URIBuilder; - -/** - * Address of an ozone object for ozone shell. - */ -public class OzoneAddress { - - private static final String EMPTY_HOST = "___DEFAULT___"; - - private URI ozoneURI; - - private String volumeName = ""; - - private String bucketName = ""; - - private String keyName = ""; - - public OzoneAddress() throws OzoneClientException { - this("o3:///"); - } - - public OzoneAddress(String address) - throws OzoneClientException { - if (address == null || address.equals("")) { - address = OZONE_RPC_SCHEME + ":///"; - } - this.ozoneURI = parseURI(address); - String path = this.ozoneURI.getPath(); - - path = path.replaceAll("^/+", ""); - - int sep1 = path.indexOf('/'); - int sep2 = path.indexOf('/', sep1 + 1); - - if (sep1 == -1) { - volumeName = path; - } else { - //we have vol/bucket - volumeName = path.substring(0, sep1); - if (sep2 == -1) { - bucketName = path.substring(sep1 + 1); - } else { - //we have vol/bucket/key/.../... - bucketName = path.substring(sep1 + 1, sep2); - keyName = path.substring(sep2 + 1); - } - } - - } - - public OzoneClient createClient(OzoneConfiguration conf) - throws IOException, OzoneClientException { - OzoneClient client; - String scheme = ozoneURI.getScheme(); - if (ozoneURI.getScheme() == null || scheme.isEmpty()) { - scheme = OZONE_RPC_SCHEME; - } - if (scheme.equals(OZONE_HTTP_SCHEME)) { - throw new UnsupportedOperationException( - "REST schema is not supported any more. 
Please use AWS S3 protocol " - + "if you need REST interface."); - } else if (scheme.equals(OZONE_RPC_SCHEME)) { - if (ozoneURI.getHost() != null && !ozoneURI.getAuthority() - .equals(EMPTY_HOST)) { - if (OmUtils.isOmHAServiceId(conf, ozoneURI.getHost())) { - // When host is an HA service ID - if (ozoneURI.getPort() != -1) { - throw new OzoneClientException( - "Port " + ozoneURI.getPort() + " specified in URI but host '" - + ozoneURI.getHost() + "' is a logical (HA) OzoneManager " - + "and does not use port information."); - } - client = OzoneClientFactory.getRpcClient(ozoneURI.getHost(), conf); - } else if (ozoneURI.getPort() == -1) { - client = OzoneClientFactory.getRpcClient(ozoneURI.getHost()); - } else { - client = OzoneClientFactory - .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf); - } - } else { - // When host is not specified - if (OmUtils.isServiceIdsDefined(conf)) { - throw new OzoneClientException("Service ID or host name must not" - + " be omitted when ozone.om.service.ids is defined."); - } - client = OzoneClientFactory.getRpcClient(conf); - } - } else { - throw new OzoneClientException( - "Invalid URI, unknown protocol scheme: " + scheme); - } - return client; - } - - /** - * verifies user provided URI. - * - * @param uri - UriString - * @return URI - * @throws URISyntaxException - * @throws OzoneException - */ - protected URI parseURI(String uri) - throws OzoneClientException { - if ((uri == null) || uri.isEmpty()) { - throw new OzoneClientException( - "Ozone URI is needed to execute this command."); - } - URIBuilder uriBuilder = new URIBuilder(stringToUri(uri)); - if (uriBuilder.getPort() == 0) { - uriBuilder.setPort(Shell.DEFAULT_OZONE_PORT); - } - - try { - return uriBuilder.build(); - } catch (URISyntaxException e) { - throw new OzoneClientException("Invalid URI: " + ozoneURI, e); - } - } - - /** - * Construct a URI from a String with unescaped special characters - * that have non-standard semantics. e.g. /, ?, #. A custom parsing - * is needed to prevent misbehavior. - * - * @param pathString The input path in string form - * @return URI - */ - private static URI stringToUri(String pathString) { - // parse uri components - String scheme = null; - String authority = null; - int start = 0; - - // parse uri scheme, if any - int colon = pathString.indexOf(':'); - int slash = pathString.indexOf('/'); - if (colon > 0 && (slash == colon + 1)) { - // has a non zero-length scheme - scheme = pathString.substring(0, colon); - start = colon + 1; - } - - // parse uri authority, if any - if (pathString.startsWith("//", start) && - (pathString.length() - start > 2)) { - start += 2; - int nextSlash = pathString.indexOf('/', start); - int authEnd = nextSlash > 0 ? nextSlash : pathString.length(); - authority = pathString.substring(start, authEnd); - start = authEnd; - } - // uri path is the rest of the string. ? or # are not interpreted, - // but any occurrence of them will be quoted by the URI ctor. 
- String path = pathString.substring(start, pathString.length()); - - // add leading slash to the path, if it does not exist - int firstSlash = path.indexOf('/'); - if(firstSlash != 0) { - path = "/" + path; - } - - if (authority == null || authority.equals("")) { - authority = EMPTY_HOST; - } - // Construct the URI - try { - return new URI(scheme, authority, path, null, null); - } catch (URISyntaxException e) { - throw new IllegalArgumentException(e); - } - } - - public String getVolumeName() { - return volumeName; - } - - public String getBucketName() { - return bucketName; - } - - public String getKeyName() { - return keyName; - } - - public void ensureBucketAddress() throws OzoneClientException { - if (keyName.length() > 0) { - throw new OzoneClientException( - "Invalid bucket name. Delimiters (/) not allowed in bucket name"); - } else if (volumeName.length() == 0) { - throw new OzoneClientException( - "Volume name is required."); - } else if (bucketName.length() == 0) { - throw new OzoneClientException( - "Bucket name is required."); - } - } - - public void ensureKeyAddress() throws OzoneClientException { - if (keyName.length() == 0) { - throw new OzoneClientException( - "Key name is missing."); - } else if (volumeName.length() == 0) { - throw new OzoneClientException( - "Volume name is missing"); - } else if (bucketName.length() == 0) { - throw new OzoneClientException( - "Bucket name is missing"); - } - } - - public void ensureVolumeAddress() throws OzoneClientException { - if (keyName.length() != 0) { - throw new OzoneClientException( - "Invalid volume name. Delimiters (/) not allowed in volume name"); - } else if (volumeName.length() == 0) { - throw new OzoneClientException( - "Volume name is required"); - } else if (bucketName.length() != 0) { - throw new OzoneClientException( - "Invalid volume name. Delimiters (/) not allowed in volume name"); - } - } - - public void ensureRootAddress() throws OzoneClientException { - if (keyName.length() != 0 || bucketName.length() != 0 - || volumeName.length() != 0) { - throw new OzoneClientException( - "Invalid URI. Volume/bucket/key elements should not been used"); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java deleted file mode 100644 index 239cee928fb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneShell.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell; - -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.web.ozShell.bucket.BucketCommands; -import org.apache.hadoop.ozone.web.ozShell.keys.KeyCommands; -import org.apache.hadoop.ozone.web.ozShell.token.TokenCommands; -import org.apache.hadoop.ozone.web.ozShell.volume.VolumeCommands; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; -import picocli.CommandLine.Command; - -/** - * Shell commands for native rpc object manipulation. - */ -@Command(name = "ozone sh", - description = "Shell for Ozone object store", - subcommands = { - VolumeCommands.class, - BucketCommands.class, - KeyCommands.class, - TokenCommands.class - }, - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class OzoneShell extends Shell { - - /** - * Main for the ozShell Command handling. - * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - new OzoneShell().run(argv); - } - - @Override - public void execute(String[] argv) { - TracingUtil.initTracing("shell"); - try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) { - super.execute(argv); - } - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java deleted file mode 100644 index 999eede1c7f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.ozone.om.exceptions.OMException; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Ozone user interface commands. - * - * This class uses dispatch method to make calls - * to appropriate handlers that execute the ozone functions. - */ -public abstract class Shell extends GenericCli { - - private static final Logger LOG = LoggerFactory.getLogger(Shell.class); - - public static final String OZONE_URI_DESCRIPTION = "Ozone URI could start " - + "with o3:// or without prefix. URI may contain the host and port " - + "of the OM server. Both are optional. 
" - + "If they are not specified it will be identified from " - + "the config files."; - - public static final String OZONE_VOLUME_URI_DESCRIPTION = - "URI of the volume.\n" + OZONE_URI_DESCRIPTION; - - public static final String OZONE_BUCKET_URI_DESCRIPTION = - "URI of the volume/bucket.\n" + OZONE_URI_DESCRIPTION; - - public static final String OZONE_KEY_URI_DESCRIPTION = - "URI of the volume/bucket/key.\n" + OZONE_URI_DESCRIPTION; - - public static final String OZONE_S3BUCKET_URI_DESCRIPTION = "URI of the " + - "S3Bucket.\n" + OZONE_URI_DESCRIPTION; - - // General options - public static final int DEFAULT_OZONE_PORT = 50070; - - - - @Override - protected void printError(Throwable errorArg) { - if (errorArg instanceof OMException) { - if (isVerbose()) { - errorArg.printStackTrace(System.err); - } else { - OMException omException = (OMException) errorArg; - System.err.println(String - .format("%s %s", omException.getResult().name(), - omException.getMessage())); - } - } else { - super.printError(errorArg); - } - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java deleted file mode 100644 index 112e8f38079..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/AddAclBucketHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Add acl handler for bucket. - */ -@Command(name = "addacl", - description = "Add a new Acl.") -public class AddAclBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "new acl." 
+ - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().addAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl added successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java deleted file mode 100644 index ba1ef8ceab4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * Subcommands for the bucket related operations. 
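The --acl string above combines a user: or group: principal with the rights letters r/w/c/d/l/a/n/x/y. A minimal sketch of the client-side calls the addacl handler issues once the address is resolved (volume, bucket and principal names are placeholders, and an already-created OzoneClient named client is assumed):

// Build the bucket object the ACL applies to (names are hypothetical).
OzoneObj bucketObj = OzoneObjInfo.Builder.newBuilder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setResType(OzoneObj.ResourceType.BUCKET)
    .setStoreType(OzoneObj.StoreType.OZONE)
    .build();

// "user:user1:rw" grants user1 READ and WRITE on the bucket.
OzoneAcl newAcl = OzoneAcl.parseAcl("user:user1:rw");
boolean added = client.getObjectStore().addAcl(bucketObj, newAcl);
System.out.println("Acl added successfully: " + added);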
- */ -@Command(name = "bucket", - description = "Bucket specific operations", - subcommands = { - InfoBucketHandler.class, - ListBucketHandler.class, - CreateBucketHandler.class, - DeleteBucketHandler.class, - AddAclBucketHandler.class, - RemoveAclBucketHandler.class, - GetAclBucketHandler.class, - SetAclBucketHandler.class - }, - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class BucketCommands implements GenericParentCommand, Callable { - - @ParentCommand - private Shell shell; - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("bucket")); - } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java deleted file mode 100644 index b4951e81b96..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.BucketArgs; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; -/** - * create bucket handler. - */ -@Command(name = "create", - description = "creates a bucket in a given volume") -public class CreateBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @Option(names = {"--bucketkey", "-k"}, - description = "bucket encryption key name") - private String bekName; - - @Option(names = {"--enforcegdpr", "-g"}, - description = "if true, indicates GDPR enforced bucket, " + - "false/unspecified indicates otherwise") - private Boolean isGdprEnforced; - - /** - * Executes create bucket. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - BucketArgs.Builder bb = new BucketArgs.Builder() - .setStorageType(StorageType.DEFAULT) - .setVersioning(false); - - if(isGdprEnforced != null) { - if(isGdprEnforced) { - bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(Boolean.TRUE)); - } else { - bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(Boolean.FALSE)); - } - } - - if (bekName != null) { - if (!bekName.isEmpty()) { - bb.setBucketEncryptionKey(bekName); - } else { - throw new IllegalArgumentException("Bucket encryption key name must " + - "be specified to enable bucket encryption!"); - } - } - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - if (bekName != null) { - System.out.printf("Bucket Encryption enabled with Key Name: %s%n", - bekName); - } - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - vol.createBucket(bucketName, bb.build()); - - if (isVerbose()) { - OzoneBucket bucket = vol.getBucket(bucketName); - ObjectPrinter.printObjectAsJson(bucket); - } - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java deleted file mode 100644 index 6ed6ddff1c0..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Delete bucket Handler. - */ -@Command(name = "delete", - description = "deletes an empty bucket") -public class DeleteBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - /** - * Executes the Client Calls. 
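The same bucket creation can be driven programmatically; a rough sketch of what the create handler above does once the address is resolved (volume, bucket and key names are placeholders, and an OzoneClient named client is assumed):

BucketArgs.Builder bucketArgs = new BucketArgs.Builder()
    .setStorageType(StorageType.DEFAULT)
    .setVersioning(false);
// Optional flags, mirroring --enforcegdpr and --bucketkey above.
bucketArgs.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(Boolean.TRUE));
bucketArgs.setBucketEncryptionKey("bucketKey1");   // hypothetical key name

OzoneVolume vol = client.getObjectStore().getVolume("vol1");
vol.createBucket("bucket1", bucketArgs.build());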
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - vol.deleteBucket(bucketName); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java deleted file mode 100644 index ccb5d46fcb1..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/GetAclBucketHandler.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.List; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Get acl handler for bucket. - */ -@Command(name = "getacl", - description = "List all acls.") -public class GetAclBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - List result = client.getObjectStore().getAcl(obj); - - System.out.printf("%s%n", - JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java deleted file mode 100644 index e5677a4b3d4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Executes Info bucket. - */ -@Command(name = "info", - description = "returns information about a bucket") -public class InfoBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - /** - * Executes the Client Calls. 
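Reading ACLs back follows the same shape; a short sketch of the retrieval the getacl handler above performs (bucketObj built as in the addacl sketch earlier, OzoneClient named client assumed):

List<OzoneAcl> acls = client.getObjectStore().getAcl(bucketObj);
// Same pretty-printed JSON output the handler writes to stdout.
System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(acls));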
- */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - - ObjectPrinter.printObjectAsJson(bucket); - - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java deleted file mode 100644 index 746c7274de3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import java.util.Iterator; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Help.Visibility; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Executes List Bucket. - */ -@Command(name = "list", - aliases = "ls", - description = "lists the buckets in a volume.") -public class ListBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION) - private String uri; - - @Option(names = {"--length", "-l"}, - description = "Limit of the max results", - defaultValue = "100", - showDefaultValue = Visibility.ALWAYS) - private int maxBuckets; - - @Option(names = {"--start", "-s"}, - description = "The first bucket to start the listing") - private String startBucket; - - @Option(names = {"--prefix", "-p"}, - description = "Prefix to filter the buckets") - private String prefix; - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - if (maxBuckets < 1) { - throw new IllegalArgumentException( - "the length should be a positive number"); - } - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - } - - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - Iterator bucketIterator = - vol.listBuckets(prefix, startBucket); - - int counter = 0; - while (maxBuckets > 0 && bucketIterator.hasNext()) { - ObjectPrinter.printObjectAsJson(bucketIterator.next()); - - maxBuckets -= 1; - counter++; - } - - if (isVerbose()) { - System.out.printf("Found : %d buckets for volume : %s ", - counter, volumeName); - } - - return null; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java deleted file mode 100644 index 216f66c5629..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/RemoveAclBucketHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Executes Info bucket. - */ -@Command(name = "removeacl", - description = "Remove an acl.") -public class RemoveAclBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "Remove acl." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. 
i.e OZONE or S3") - private String storeType; - - /** - * Remove acl handler for bucket. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "ACL to be removed not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().removeAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl removed successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java deleted file mode 100644 index d147f9b278c..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * S3Bucket mapping handler, which returns volume name and Ozone fs uri for - * that bucket. - */ -@Command(name = "path", - description = "Returns the ozone path for S3Bucket") -public class S3BucketMapping extends Handler { - - @Parameters(arity = "1..1", description = "Name of the s3 bucket.") - private String s3BucketName; - - /** - * Executes create bucket. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress ozoneAddress = new OzoneAddress(); - OzoneClient client = ozoneAddress.createClient(createOzoneConfiguration()); - - String mapping = - client.getObjectStore().getOzoneBucketMapping(s3BucketName); - String volumeName = - client.getObjectStore().getOzoneVolumeName(s3BucketName); - - if (isVerbose()) { - System.out.printf("Mapping created for S3Bucket is : %s%n", mapping); - } - - System.out.printf("Volume name for S3Bucket is : %s%n", volumeName); - - String ozoneFsUri = String.format("%s://%s.%s", OzoneConsts - .OZONE_URI_SCHEME, s3BucketName, volumeName); - - System.out.printf("Ozone FileSystem Uri is : %s%n", ozoneFsUri); - - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java deleted file mode 100644 index e603068198a..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/SetAclBucketHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Set acl handler for bucket. - */ -@Command(name = "setacl", - description = "Set acls.") -public class SetAclBucketHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acls", "-al"}, - required = true, - description = "Comma seperated acls." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw,user:user2:a,group:hadoop:a") - private String acls; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acls, "Acls to be set not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.BUCKET) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().setAcl(obj, - OzoneAcl.parseAcls(acls)); - - System.out.printf("%s%n", "Acl set successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java deleted file mode 100644 index c344c35ae11..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.ozShell.bucket; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java deleted file mode 100644 index b4e81345b84..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/AddAclKeyHandler.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Add acl handler for key. - */ -@Command(name = "addacl", - description = "Add a new Acl.") -public class AddAclKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "Add acl." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(address.getKeyName()) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().addAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl added successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java deleted file mode 100644 index 6f8bdffc201..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Executes Delete Key. - */ -@Command(name = "delete", - description = "deletes an existing key") -public class DeleteKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION) - private String uri; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - bucket.deleteKey(keyName); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java deleted file mode 100644 index 6423dbbb6e4..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetAclKeyHandler.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.List; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Get acl handler for Key. - */ -@Command(name = "getacl", - description = "List all acls.") -public class GetAclKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(keyName) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - List result = client.getObjectStore().getAcl(obj); - - System.out.printf("%s%n", - JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java deleted file mode 100644 index 4e866999356..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.nio.file.Path; -import java.nio.file.Paths; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import org.apache.commons.codec.digest.DigestUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Gets an existing key. - */ -@Command(name = "get", - description = "Gets a specific key from ozone server") -public class GetKeyHandler extends Handler { - - @Parameters(index = "0", arity = "1..1", description = - Shell.OZONE_KEY_URI_DESCRIPTION) - private String uri; - - @Parameters(index = "1", arity = "1..1", - description = "File path to download the key to") - private String fileName; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - Path dataFilePath = Paths.get(fileName); - File dataFile = new File(fileName); - - if (dataFile.exists() && dataFile.isDirectory()) { - dataFile = new File(fileName, keyName); - } - - if (dataFile.exists()) { - throw new OzoneClientException( - fileName + "exists. Download will overwrite an " - + "existing file. 
Aborting."); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - OzoneInputStream keyInputStream = bucket.readKey(keyName); - if (dataFilePath != null) { - FileOutputStream outputStream = new FileOutputStream(dataFile); - IOUtils.copyBytes(keyInputStream, outputStream, - (int) new OzoneConfiguration() - .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, - OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES)); - outputStream.close(); - } else { - throw new OzoneClientException( - "Can not access the file \"" + fileName + "\""); - } - if (isVerbose()) { - FileInputStream stream = new FileInputStream(dataFile); - String hash = DigestUtils.md5Hex(stream); - System.out.printf("Downloaded file hash : %s%n", hash); - stream.close(); - } - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java deleted file mode 100644 index 7cb54f2bf0b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Executes Info Object. - */ -@Command(name = "info", - description = "returns information about an existing key") -public class InfoKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_KEY_URI_DESCRIPTION) - private String uri; - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - OzoneKeyDetails key = bucket.getKey(keyName); - // For compliance/security, GDPR Secret & Algorithm details are removed - // from local copy of metadata before printing. This doesn't remove these - // from Ozone Manager's actual metadata. - key.getMetadata().remove(OzoneConsts.GDPR_SECRET); - key.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM); - - ObjectPrinter.printObjectAsJson(key); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java deleted file mode 100644 index 4de97c57f2f..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * Subcommand to group key related operations. 
- */ -@Command(name = "key", - description = "Key specific operations", - subcommands = { - InfoKeyHandler.class, - ListKeyHandler.class, - GetKeyHandler.class, - PutKeyHandler.class, - RenameKeyHandler.class, - DeleteKeyHandler.class, - AddAclKeyHandler.class, - RemoveAclKeyHandler.class, - SetAclKeyHandler.class, - GetAclKeyHandler.class - }, - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class KeyCommands implements GenericParentCommand, Callable { - - @ParentCommand - private Shell shell; - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("key")); - } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java deleted file mode 100644 index 9829eefed00..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import java.util.Iterator; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Executes List Keys. - */ -@Command(name = "list", - aliases = "ls", - description = "list all keys in a given bucket") -public class ListKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @Option(names = {"--length", "-l"}, - description = "Limit of the max results", - defaultValue = "100") - private int maxKeys; - - @Option(names = {"--start", "-s"}, - description = "The first key to start the listing") - private String startKey; - - @Option(names = {"--prefix", "-p"}, - description = "Prefix to filter the key") - private String prefix; - - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (maxKeys < 1) { - throw new IllegalArgumentException( - "the length should be a positive number"); - } - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("bucket Name : %s%n", bucketName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - Iterator keyIterator = bucket.listKeys(prefix, - startKey); - - int maxKeyLimit = maxKeys; - - int counter = 0; - while (maxKeys > 0 && keyIterator.hasNext()) { - OzoneKey ozoneKey = keyIterator.next(); - ObjectPrinter.printObjectAsJson(ozoneKey); - maxKeys -= 1; - counter++; - } - - // More keys were returned notify about max length - if (keyIterator.hasNext()) { - System.out.println("Listing first " + maxKeyLimit + " entries of the " + - "result. Use --length (-l) to override max returned keys."); - } else if (isVerbose()) { - System.out.printf("Found : %d keys for bucket %s in volume : %s ", - counter, bucketName, volumeName); - } - - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java deleted file mode 100644 index d80f36b34e7..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.ozShell.keys; - -import java.io.File; -import java.io.FileInputStream; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import org.apache.commons.codec.digest.DigestUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT; -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Puts a file into an ozone bucket. - */ -@Command(name = "put", - description = "creates or overwrites an existing key") -public class PutKeyHandler extends Handler { - - @Parameters(index = "0", arity = "1..1", description = - Shell.OZONE_KEY_URI_DESCRIPTION) - private String uri; - - @Parameters(index = "1", arity = "1..1", description = "File to upload") - private String fileName; - - @Option(names = {"-r", "--replication"}, - description = "Replication factor of the new key. (use ONE or THREE) " - + "Default is specified in the cluster-wide config.") - private ReplicationFactor replicationFactor; - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - File dataFile = new File(fileName); - - if (isVerbose()) { - FileInputStream stream = new FileInputStream(dataFile); - String hash = DigestUtils.md5Hex(stream); - System.out.printf("File Hash : %s%n", hash); - stream.close(); - } - - Configuration conf = new OzoneConfiguration(); - if (replicationFactor == null) { - replicationFactor = ReplicationFactor.valueOf( - conf.getInt(OZONE_REPLICATION, OZONE_REPLICATION_DEFAULT)); - } - - ReplicationType replicationType = ReplicationType.valueOf( - conf.get(OZONE_REPLICATION_TYPE, OZONE_REPLICATION_TYPE_DEFAULT)); - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - Map keyMetadata = new HashMap<>(); - if(Boolean.valueOf(bucket.getMetadata().get(OzoneConsts.GDPR_FLAG))){ - keyMetadata.put(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString()); - } - OzoneOutputStream outputStream = bucket - .createKey(keyName, dataFile.length(), replicationType, - replicationFactor, keyMetadata); - FileInputStream fileInputStream = new FileInputStream(dataFile); - IOUtils.copyBytes(fileInputStream, outputStream, (int) conf - .getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY, OZONE_SCM_CHUNK_SIZE_DEFAULT, - StorageUnit.BYTES)); - outputStream.close(); - fileInputStream.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java deleted file mode 100644 index f561aa2aeb6..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RemoveAclKeyHandler.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Remove acl handler for key. - */ -@Command(name = "removeacl", - description = "Remove an acl.") -public class RemoveAclKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "Remove acl." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "ACL to be removed not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - System.out.printf("Key Name : %s%n", keyName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(keyName) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().removeAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl removed successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java deleted file mode 100644 index b2ecbdacfbf..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/RenameKeyHandler.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Renames an existing key. - */ -@Command(name = "rename", - description = "renames an existing key") -public class RenameKeyHandler extends Handler { - - @Parameters(index = "0", arity = "1..1", - description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @Parameters(index = "1", arity = "1..1", - description = "The existing key to be renamed") - private String fromKey; - - @Parameters(index = "2", arity = "1..1", - description = "The new desired name of the key") - private String toKey; - - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(uri); - address.ensureBucketAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = vol.getBucket(bucketName); - bucket.renameKey(fromKey, toKey); - - if (isVerbose()) { - System.out.printf("Renamed Key : %s to %s%n", fromKey, toKey); - } - - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java deleted file mode 100644 index a6a4872f953..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/SetAclKeyHandler.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.ozShell.keys; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Set acl handler for Key. - */ -@Command(name = "setacl", - description = "Set acls.") -public class SetAclKeyHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acls", "-al"}, - required = true, - description = "Comma separated acls." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw,user:user2:a,group:hadoop:a") - private String acls; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acls, "New acls to be added not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureKeyAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - String keyName = address.getKeyName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setKeyName(keyName) - .setResType(OzoneObj.ResourceType.KEY) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().setAcl(obj, - OzoneAcl.parseAcls(acls)); - - System.out.printf("%s%n", "Acl set successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java deleted file mode 100644 index 1deb7ad419b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.ozShell.keys; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java deleted file mode 100644 index e33b6e72124..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/package-info.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * ozShell Class acts as the command line interface to - * the ozone Rest Client. - */ -package org.apache.hadoop.ozone.web.ozShell; - -/** - A simple CLI to work against Ozone. - **/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java deleted file mode 100644 index 1a359e2b8a2..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/GetS3SecretHandler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.s3; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.security.UserGroupInformation; -import picocli.CommandLine.Command; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -/** - * Executes getsecret calls. 
- */ -@Command(name = "getsecret", - description = "Returns s3 secret for current user") -public class GetS3SecretHandler extends Handler { - - public static final String OZONE_GETS3SECRET_ERROR = "This command is not" + - " supported in unsecure clusters."; - /** - * Executes getS3Secret. - */ - @Override - public Void call() throws Exception { - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); - OzoneClient client = - new OzoneAddress().createClient(ozoneConfiguration); - - // getS3Secret works only with secured clusters - if (ozoneConfiguration.getBoolean(OZONE_SECURITY_ENABLED_KEY, false)) { - System.out.println( - client.getObjectStore().getS3Secret( - UserGroupInformation.getCurrentUser().getUserName() - ).toString() - ); - } else { - // log a warning message for unsecured cluster - System.out.println(OZONE_GETS3SECRET_ERROR); - } - - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java deleted file mode 100644 index ebb9d6ecafb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/S3Shell.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.s3; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; -import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.ozShell.bucket.S3BucketMapping; -import picocli.CommandLine.Command; - -/** - * Shell for s3 related operations. - */ -@Command(name = "ozone s3", - description = "Shell for S3 specific operations", - subcommands = { - GetS3SecretHandler.class, - S3BucketMapping.class - }) - -public class S3Shell extends Shell { - - @Override - public void execute(String[] argv) { - TracingUtil.initTracing("s3shell"); - try (Scope scope = GlobalTracer.get().buildSpan("main").startActive(true)) { - super.execute(argv); - } - } - - /** - * Main for the S3Shell Command handling. 
- * - * @param argv - System Args Strings[] - * @throws Exception - */ - public static void main(String[] argv) throws Exception { - new S3Shell().run(argv); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java deleted file mode 100644 index 079ef189c56..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/s3/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * S3 commands for Ozone. - */ -package org.apache.hadoop.ozone.web.ozShell.s3; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java deleted file mode 100644 index a025e248d24..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/CancelTokenHandler.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.token; - -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.security.token.Token; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Paths; - -/** - * Executes cancelDelegationToken api. 
- */ -@Command(name = "cancel", - description = "cancel a delegation token.") -public class CancelTokenHandler extends Handler { - - @CommandLine.Option(names = {"--token", "-t"}, - description = "file containing encoded token", - defaultValue = "/tmp/token.txt", - showDefaultValue = CommandLine.Help.Visibility.ALWAYS) - private String tokenFile; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(""); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - if(!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) { - System.err.println("Error:Token operations work only when security is " + - "enabled. To enable security set ozone.security.enabled to true."); - return null; - } - - if (Files.notExists(Paths.get(tokenFile))) { - System.err.println("Error:Cancel token operation failed as token file: " - + tokenFile + " containing encoded token doesn't exist."); - return null; - } - Token token = new Token(); - token.decodeFromUrlString( - new String(Files.readAllBytes(Paths.get(tokenFile)), - StandardCharsets.UTF_8)); - client.getObjectStore().cancelDelegationToken(token); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java deleted file mode 100644 index 6d1777c7d3b..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/GetTokenHandler.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.token; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.utils.JsonUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.util.Objects; - -/** - * Executes getDelegationToken api. - */ -@Command(name = "get", - description = "get a delegation token.") -public class GetTokenHandler extends Handler { - - - - @CommandLine.Option(names = {"--renewer", "-r"}, - description = "Token renewer", - showDefaultValue = CommandLine.Help.Visibility.ALWAYS) - private String renewer; - - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - if(!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) { - System.err.println("Error:Token operations work only when security is " + - "enabled. To enable security set ozone.security.enabled to true."); - return null; - } - - if(StringUtils.isEmpty(renewer)){ - renewer = UserGroupInformation.getCurrentUser().getShortUserName(); - } - Token token = client.getObjectStore().getDelegationToken(new Text(renewer)); - if(Objects.isNull(token)){ - System.err.println("Error: Get delegation token operation failed. Check" + - " OzoneManager logs for more details."); - return null; - } - - System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - token.encodeToUrlString())); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java deleted file mode 100644 index 24f91008112..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/PrintTokenHandler.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.token; - -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.utils.JsonUtils; -import org.apache.hadoop.security.token.Token; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Paths; - -/** - * Executes getDelegationToken api. - */ -@Command(name = "print", - description = "print a delegation token.") -public class PrintTokenHandler extends Handler { - - @CommandLine.Option(names = {"--token", "-t"}, - description = "file containing encoded token", - defaultValue = "/tmp/token.txt", - showDefaultValue = CommandLine.Help.Visibility.ALWAYS) - private String tokenFile; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - if (!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) { - System.err.println("Error:Token operations work only when security is " + - "enabled. 
To enable security set ozone.security.enabled to true."); - return null; - } - - if (Files.notExists(Paths.get(tokenFile))) { - System.err.println("Error: Print token operation failed as token file: " - + tokenFile + " containing encoded token doesn't exist."); - return null; - } - - String encodedToken = new String(Files.readAllBytes(Paths.get(tokenFile)), - StandardCharsets.UTF_8); - Token token = new Token(); - token.decodeFromUrlString(encodedToken); - - System.out.printf("%s", JsonUtils.toJsonStringWithDefaultPrettyPrinter( - token.toString())); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java deleted file mode 100644 index faf74ae4afb..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/RenewTokenHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.token; - -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.security.token.Token; -import picocli.CommandLine; -import picocli.CommandLine.Command; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Paths; - -/** - * Executes renewDelegationToken api. - */ -@Command(name = "renew", - description = "renew a delegation token.") -public class RenewTokenHandler extends Handler { - - @CommandLine.Option(names = {"--token", "-t"}, - description = "file containing encoded token", - defaultValue = "/tmp/token.txt", - showDefaultValue = CommandLine.Help.Visibility.ALWAYS) - private String tokenFile; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(""); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - if(!OzoneSecurityUtil.isSecurityEnabled(createOzoneConfiguration())) { - System.err.println("Error:Token operations work only when security is " + - "enabled. 
To enable security set ozone.security.enabled to true."); - return null; - } - - if (Files.notExists(Paths.get(tokenFile))) { - System.err.println("Error:Renew token operation failed as token file: " - + tokenFile + " containing encoded token doesn't exist."); - return null; - } - Token token = new Token(); - token.decodeFromUrlString( - new String(Files.readAllBytes(Paths.get(tokenFile)), - StandardCharsets.UTF_8)); - long expiryTime = client.getObjectStore().renewDelegationToken(token); - - System.out.printf("Token renewed successfully, expiry time: %s", - expiryTime); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java deleted file mode 100644 index 2501ad91714..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/TokenCommands.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.token; - -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -import java.util.concurrent.Callable; - -/** - * Sub-command to group token related operations. 
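The four token handlers removed above (get, cancel, renew, print) share one flow: fetch a Token from the ObjectStore, move it around as its URL-safe string encoding in a file, and decode it again before calling back into the ObjectStore. Below is a condensed sketch of that round trip using only the calls visible in the deleted sources; the class name is illustrative, /tmp/token.txt mirrors the handlers' default, a secured cluster is assumed, and the raw Token type matches how the handler code reads in this patch.

```java
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public final class DelegationTokenRoundTrip {
  public static void main(String[] args) throws Exception {
    OzoneClient client = new OzoneAddress().createClient(new OzoneConfiguration());
    try {
      // 1. "token get": fetch a token with the current user as renewer.
      String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
      Token token = client.getObjectStore().getDelegationToken(new Text(renewer));

      // 2. Persist the URL-safe encoding; this is the file format the
      //    --token option of cancel/renew/print expects.
      Path tokenFile = Paths.get("/tmp/token.txt");
      Files.write(tokenFile,
          token.encodeToUrlString().getBytes(StandardCharsets.UTF_8));

      // 3. Read it back and decode, exactly as the handlers do.
      Token restored = new Token();
      restored.decodeFromUrlString(
          new String(Files.readAllBytes(tokenFile), StandardCharsets.UTF_8));

      // 4. "token renew" and "token cancel" against the decoded token.
      long expiryTime = client.getObjectStore().renewDelegationToken(restored);
      System.out.println("Token renewed successfully, expiry time: " + expiryTime);
      client.getObjectStore().cancelDelegationToken(restored);
    } finally {
      client.close();
    }
  }
}
```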
- */ -@Command(name = "token", - description = "Token specific operations", - subcommands = { - GetTokenHandler.class, - CancelTokenHandler.class, - RenewTokenHandler.class, - PrintTokenHandler.class - }, - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class TokenCommands implements GenericParentCommand, Callable { - - @ParentCommand - private Shell shell; - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("token")); - } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java deleted file mode 100644 index 5e03895b1c3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/token/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * ozShell Class acts as the command line interface to the ozone Rest Client. - */ -package org.apache.hadoop.ozone.web.ozShell.token; - -/** - Ozone delegation token commands. - **/ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java deleted file mode 100644 index b9d57436287..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/AddAclVolumeHandler.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Add acl handler for volume. - */ -@Command(name = "addacl", - description = "Add a new Acl.") -public class AddAclVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "Add acl." + - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "New acl to be added not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().addAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl added successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java deleted file mode 100644 index ddd835069a6..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.VolumeArgs; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.security.UserGroupInformation; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Executes the create volume call for the shell. - */ -@Command(name = "create", - description = "Creates a volume for the specified user") -public class CreateVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION) - private String uri; - - @Option(names = {"--user", "-u"}, - description = "Owner of of the volume") - private String userName; - - @Option(names = {"--quota", "-q"}, - description = - "Quota of the newly created volume (eg. 1G)") - private String quota; - - @Option(names = {"--root"}, - description = "Development flag to execute the " - + "command as the admin (hdfs) user.") - private boolean root; - - /** - * Executes the Create Volume. - */ - @Override - public Void call() throws Exception { - if(userName == null) { - userName = UserGroupInformation.getCurrentUser().getUserName(); - } - - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - if (isVerbose()) { - System.out.printf("Volume name : %s%n", volumeName); - } - - String rootName; - if (root) { - rootName = "hdfs"; - } else { - rootName = UserGroupInformation.getCurrentUser().getShortUserName(); - } - - VolumeArgs.Builder volumeArgsBuilder = VolumeArgs.newBuilder() - .setAdmin(rootName) - .setOwner(userName); - if (quota != null) { - volumeArgsBuilder.setQuota(quota); - } - client.getObjectStore().createVolume(volumeName, volumeArgsBuilder.build()); - - if (isVerbose()) { - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - ObjectPrinter.printObjectAsJson(vol); - } - return null; - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java deleted file mode 100644 index 87286d255cf..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Executes deleteVolume call for the shell. - */ -@Command(name = "delete", - description = "deletes a volume if it is empty") -public class DeleteVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION) - private String uri; - - /** - * Executes the delete volume call. - */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - if (isVerbose()) { - System.out.printf("Volume name : %s%n", volumeName); - } - - client.getObjectStore().deleteVolume(volumeName); - System.out.printf("Volume %s is deleted%n", volumeName); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java deleted file mode 100644 index 6c0bb207dd7..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/GetAclVolumeHandler.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.ozone.web.utils.JsonUtils; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.List; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Get acl handler for volume. 
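The addacl/removeacl/getacl volume handlers in this patch all follow the same sequence: build an OzoneObj describing the volume, parse an OzoneAcl from the type:name:rights spec shown in their help text, and hand both to the ObjectStore. A compact sketch of that sequence is below; the volume URI "/vol1" and the ACL spec are example values, the List element type is assumed (the diff text shows raw types), and the removed client classes are assumed to still be available.

```java
import java.util.List;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;

public final class VolumeAclSketch {
  public static void main(String[] args) throws Exception {
    // "/vol1" is an example volume URI, as accepted by the volume subcommands.
    OzoneAddress address = new OzoneAddress("/vol1");
    address.ensureVolumeAddress();
    OzoneClient client = address.createClient(new OzoneConfiguration());
    try {
      // Describe the volume the same way the handlers do.
      OzoneObj volume = OzoneObjInfo.Builder.newBuilder()
          .setVolumeName(address.getVolumeName())
          .setResType(OzoneObj.ResourceType.VOLUME)
          .setStoreType(OzoneObj.StoreType.OZONE)
          .build();

      // ACL spec format from the option help text, e.g. user:user1:rw.
      OzoneAcl acl = OzoneAcl.parseAcl("user:user1:rw");

      System.out.println("added: " + client.getObjectStore().addAcl(volume, acl));
      List<OzoneAcl> acls = client.getObjectStore().getAcl(volume);
      System.out.println("current acls: " + acls);
      System.out.println("removed: " + client.getObjectStore().removeAcl(volume, acl));
    } finally {
      client.close();
    }
  }
}
```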
- */ -@Command(name = "getacl", - description = "List all acls.") -public class GetAclVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - String volumeName = address.getVolumeName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - List result = client.getObjectStore().getAcl(obj); - System.out.printf("%s%n", - JsonUtils.toJsonStringWithDefaultPrettyPrinter(result)); - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java deleted file mode 100644 index 0d8723fc148..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -/** - * Executes volume Info calls. - */ -@Command(name = "info", - description = "returns information about a specific volume") -public class InfoVolumeHandler extends Handler{ - - @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION) - private String uri; - - /** - * Executes volume Info. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - OzoneVolume vol = client.getObjectStore().getVolume(volumeName); - ObjectPrinter.printObjectAsJson(vol); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java deleted file mode 100644 index a486fb1db22..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import java.util.Iterator; - -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import org.apache.hadoop.security.UserGroupInformation; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Executes List Volume call. - */ -@Command(name = "list", - aliases = "ls", - description = "List the volumes of a given user") -public class ListVolumeHandler extends Handler { - - @Parameters(arity = "1..1", - description = Shell.OZONE_VOLUME_URI_DESCRIPTION, - defaultValue = "/") - private String uri; - - @Option(names = {"--length", "-l"}, - description = "Limit of the max results", - defaultValue = "100") - private int maxVolumes; - - @Option(names = {"--start", "-s"}, - description = "The first volume to start the listing") - private String startVolume; - - @Option(names = {"--prefix", "-p"}, - description = "Prefix to filter the volumes") - private String prefix; - - @Option(names = {"--user", "-u"}, - description = "Owner of the volumes to list.") - private String userName; - - /** - * Executes the Client Calls. 
- */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureRootAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - if (userName == null) { - userName = UserGroupInformation.getCurrentUser().getUserName(); - } - - if (maxVolumes < 1) { - throw new IllegalArgumentException( - "the length should be a positive number"); - } - - Iterator volumeIterator; - if(userName != null) { - volumeIterator = client.getObjectStore() - .listVolumesByUser(userName, prefix, startVolume); - } else { - volumeIterator = client.getObjectStore().listVolumes(prefix); - } - - int counter = 0; - while (maxVolumes > 0 && volumeIterator.hasNext()) { - OzoneVolume next = volumeIterator.next(); - ObjectPrinter.printObjectAsJson(next); - maxVolumes -= 1; - counter++; - } - - if (isVerbose()) { - System.out.printf("Found : %d volumes for user : %s ", counter, - userName); - } - - return null; - } -} - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java deleted file mode 100644 index d984f4891f5..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/RemoveAclVolumeHandler.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Remove acl handler for volume. - */ -@Command(name = "removeacl", - description = "Remove an acl.") -public class RemoveAclVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acl", "-a"}, - required = true, - description = "Remove acl." 
+ - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw or group:hadoop:rw") - private String acl; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acl, "ACL to be removed not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - - boolean result = client.getObjectStore().removeAcl(obj, - OzoneAcl.parseAcl(acl)); - - System.out.printf("%s%n", "Acl removed successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java deleted file mode 100644 index 185f862e294..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/SetAclVolumeHandler.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; - -import java.util.Objects; - -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; - -/** - * Set acl handler for volume. - */ -@Command(name = "setacl", - description = "Set acls.") -public class SetAclVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_BUCKET_URI_DESCRIPTION) - private String uri; - - @CommandLine.Option(names = {"--acls", "-al"}, - required = true, - description = "Comma separated acls." 
+ - "r = READ," + - "w = WRITE," + - "c = CREATE," + - "d = DELETE," + - "l = LIST," + - "a = ALL," + - "n = NONE," + - "x = READ_AC," + - "y = WRITE_AC" + - "Ex user:user1:rw,user:user2:a,group:hadoop:a") - private String acls; - - @CommandLine.Option(names = {"--store", "-s"}, - required = false, - description = "store type. i.e OZONE or S3") - private String storeType; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - Objects.requireNonNull(acls, "New acls to be added not specified."); - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - String bucketName = address.getBucketName(); - - if (isVerbose()) { - System.out.printf("Volume Name : %s%n", volumeName); - System.out.printf("Bucket Name : %s%n", bucketName); - } - - OzoneObj obj = OzoneObjInfo.Builder.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setResType(OzoneObj.ResourceType.VOLUME) - .setStoreType(storeType == null ? OZONE : - OzoneObj.StoreType.valueOf(storeType)) - .build(); - System.out.printf(" acls" +acls.length() + " " + acls); - boolean result = client.getObjectStore().setAcl(obj, - OzoneAcl.parseAcls(acls)); - - System.out.printf("%s%n", "Acl set successfully: " + result); - - client.close(); - return null; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java deleted file mode 100644 index 7ddeae90c34..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.web.ozShell.Handler; -import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter; -import org.apache.hadoop.ozone.web.ozShell.OzoneAddress; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.Option; -import picocli.CommandLine.Parameters; - -/** - * Executes update volume calls. 
- */ -@Command(name = "update", - description = "Updates parameter of the volumes") -public class UpdateVolumeHandler extends Handler { - - @Parameters(arity = "1..1", description = Shell.OZONE_VOLUME_URI_DESCRIPTION) - private String uri; - - @Option(names = {"--user"}, - description = "Owner of the volume to set") - private String ownerName; - - @Option(names = {"--quota"}, - description = "Quota of the volume to set" - + "(eg. 1G)") - private String quota; - - /** - * Executes the Client Calls. - */ - @Override - public Void call() throws Exception { - - OzoneAddress address = new OzoneAddress(uri); - address.ensureVolumeAddress(); - OzoneClient client = address.createClient(createOzoneConfiguration()); - - String volumeName = address.getVolumeName(); - - OzoneVolume volume = client.getObjectStore().getVolume(volumeName); - if (quota != null && !quota.isEmpty()) { - volume.setQuota(OzoneQuota.parseQuota(quota)); - } - - if (ownerName != null && !ownerName.isEmpty()) { - volume.setOwner(ownerName); - } - - ObjectPrinter.printObjectAsJson(volume); - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java deleted file mode 100644 index 833457bcbef..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.web.ozShell.volume; - -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.web.ozShell.Shell; - -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; - -/** - * Subcommand to group volume related operations. 
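Taken together, the volume handlers removed above reduce to a handful of ObjectStore and OzoneVolume calls. The following condensed lifecycle sketch (create, update quota and owner, list, delete) uses only APIs visible in the deleted code; the volume name, quota values and iterator element type are assumptions for illustration, and it presumes the removed client classes are still on the classpath.

```java
import java.util.Iterator;
import org.apache.hadoop.hdds.client.OzoneQuota;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.web.ozShell.ObjectPrinter;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
import org.apache.hadoop.security.UserGroupInformation;

public final class VolumeLifecycleSketch {
  public static void main(String[] args) throws Exception {
    OzoneClient client = new OzoneAddress().createClient(new OzoneConfiguration());
    try {
      String owner = UserGroupInformation.getCurrentUser().getUserName();
      String admin = UserGroupInformation.getCurrentUser().getShortUserName();

      // Create: roughly what "volume create --user <owner> --quota 1G /vol1" does.
      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
          .setAdmin(admin)
          .setOwner(owner)
          .setQuota("1G")
          .build();
      client.getObjectStore().createVolume("vol1", volumeArgs);

      // Update: quota and owner changes go through the OzoneVolume handle.
      OzoneVolume vol = client.getObjectStore().getVolume("vol1");
      vol.setQuota(OzoneQuota.parseQuota("10G"));
      vol.setOwner(owner);

      // List the volumes owned by this user (no prefix, start from the beginning).
      Iterator<? extends OzoneVolume> volumes =
          client.getObjectStore().listVolumesByUser(owner, null, null);
      while (volumes.hasNext()) {
        ObjectPrinter.printObjectAsJson(volumes.next());
      }

      // Delete succeeds only for an empty volume.
      client.getObjectStore().deleteVolume("vol1");
    } finally {
      client.close();
    }
  }
}
```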
- */ -@Command(name = "volume", - aliases = "vol", - description = "Volume specific operations", - subcommands = { - InfoVolumeHandler.class, - ListVolumeHandler.class, - CreateVolumeHandler.class, - UpdateVolumeHandler.class, - DeleteVolumeHandler.class, - AddAclVolumeHandler.class, - RemoveAclVolumeHandler.class, - SetAclVolumeHandler.class, - GetAclVolumeHandler.class - }, - mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) -public class VolumeCommands implements GenericParentCommand, Callable { - - @ParentCommand - private Shell shell; - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("volume")); - } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java deleted file mode 100644 index fc192741e8d..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/** - * Netty-based HTTP server implementation for Ozone. - */ -package org.apache.hadoop.ozone.web.ozShell.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java deleted file mode 100644 index 1a7275c3d66..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web; - -/** - * This package contains generic class for the internal http server - * and REST interfaces. - */ diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html deleted file mode 100644 index 1b5e6936743..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/index.html +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - - - Ozone Manager - - - - - - - - - - - -

- - - - - - - - - - - - diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css deleted file mode 100644 index e442adc470e..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.css +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -body { - padding-top: 50px; -} -.starter-template { - padding: 40px 15px; - text-align: center; -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html deleted file mode 100644 index 08218996518..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/main.html +++ /dev/null @@ -1,18 +0,0 @@ - - - \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html deleted file mode 100644 index 839c64c1ea3..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-metrics.html +++ /dev/null @@ -1,44 +0,0 @@ - -

- [om-metrics.html markup not preserved in this text: an "OzoneManager Metrics" page with per-type panels ({{type}}) charting "Requests ({{numbers.ops || numbers.total}} ops)" and "Failures", plus an "Other JMX properties" table of {{metric.key}} / {{metric.value}} rows]
diff --git a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js deleted file mode 100644 index fda6d8fc0b5..00000000000 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/ozoneManager.js +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -(function () { - "use strict"; - - var isIgnoredJmxKeys = function (key) { - return key == 'name' || key == 'modelerType' || key.match(/tag.*/); - }; - - angular.module('ozoneManager', ['ozone', 'nvd3']); - angular.module('ozoneManager').config(function ($routeProvider) { - $routeProvider - .when("/metrics/ozoneManager", { - template: "" - }); - }); - angular.module('ozoneManager').component('omMetrics', { - templateUrl: 'om-metrics.html', - controller: function ($http) { - var ctrl = this; - - ctrl.graphOptions = { - chart: { - type: 'pieChart', - height: 500, - x: function (d) { - return d.key; - }, - y: function (d) { - return d.value; - }, - showLabels: true, - labelType: 'value', - duration: 500, - labelThreshold: 0.01, - valueFormat: function(d) { - return d3.format('d')(d); - }, - legend: { - margin: { - top: 5, - right: 35, - bottom: 5, - left: 0 - } - } - } - }; - - - $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics") - .then(function (result) { - - var groupedMetrics = {others: [], nums: {}}; - var metrics = result.data.beans[0] - for (var key in metrics) { - var numericalStatistic = key.match(/Num([A-Z][a-z]+)([A-Z].+?)(Fails)?$/); - if (numericalStatistic) { - var type = numericalStatistic[1]; - var name = numericalStatistic[2]; - var failed = numericalStatistic[3]; - groupedMetrics.nums[type] = groupedMetrics.nums[type] || { - failures: [], - all: [], - total: 0, - }; - if (failed) { - groupedMetrics.nums[type].failures.push({ - key: name, - value: metrics[key] - }) - } else { - if (name == "Ops") { - groupedMetrics.nums[type].ops = metrics[key] - } else { - groupedMetrics.nums[type].total += metrics[key]; - groupedMetrics.nums[type].all.push({ - key: name, - value: metrics[key] - }) - } - } - } else if (isIgnoredJmxKeys(key)) { - //ignore - } else { - groupedMetrics.others.push({ - 'key': key, - 'value': metrics[key] - }); - } - } - ctrl.metrics = groupedMetrics; - }) - } - }); - -})(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java deleted file mode 100644 index 982e87e56fa..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * 
Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.success; -import static org.apache.hadoop.hdds.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result.unknownFailure; - -/** - * This is a testing client that allows us to intercept calls from OzoneManager - * to SCM. - *

- * TODO: OzoneManager#getScmBlockClient -- so that we can load this class up via - * config setting into OzoneManager. Right now, we just pass this to - * KeyDeletingService only. - *

- * TODO: Move this class to a generic test utils so we can use this class in - * other Ozone Manager tests. - */ -public class ScmBlockLocationTestingClient implements ScmBlockLocationProtocol { - private static final Logger LOG = - LoggerFactory.getLogger(ScmBlockLocationTestingClient.class); - private final String clusterID; - private final String scmId; - - // 0 means no calls will fail, +1 means all calls will fail, +2 means every - // second call will fail, +3 means every third and so on. - private final int failCallsFrequency; - private int currentCall = 0; - - /** - * If ClusterID or SCMID is blank a per instance ID is generated. - * - * @param clusterID - String or blank. - * @param scmId - String or Blank. - * @param failCallsFrequency - Set to 0 for no failures, 1 for always to fail, - * a positive number for that frequency of failure. - */ - public ScmBlockLocationTestingClient(String clusterID, String scmId, - int failCallsFrequency) { - this.clusterID = StringUtils.isNotBlank(clusterID) ? clusterID : - UUID.randomUUID().toString(); - this.scmId = StringUtils.isNotBlank(scmId) ? scmId : - UUID.randomUUID().toString(); - this.failCallsFrequency = Math.abs(failCallsFrequency); - switch (this.failCallsFrequency) { - case 0: - LOG.debug("Set to no failure mode, all delete block calls will " + - "succeed."); - break; - case 1: - LOG.debug("Set to all failure mode. All delete block calls to SCM" + - " will fail."); - break; - default: - LOG.debug("Set to Mix mode, every {} -th call will fail", - this.failCallsFrequency); - } - - } - - /** - * Returns Fake blocks to the BlockManager so we get blocks in the Database. - * @param size - size of the block. - * @param type Replication Type - * @param factor - Replication factor - * @param owner - String owner. 
- * @param excludeList list of dns/pipelines to exclude - * @return - * @throws IOException - */ - @Override - public List allocateBlock(long size, int num, - HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, - String owner, ExcludeList excludeList) throws IOException { - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - Pipeline pipeline = createPipeline(datanodeDetails); - long containerID = Time.monotonicNow(); - long localID = Time.monotonicNow(); - AllocatedBlock.Builder abb = - new AllocatedBlock.Builder() - .setContainerBlockID(new ContainerBlockID(containerID, localID)) - .setPipeline(pipeline); - return Collections.singletonList(abb.build()); - } - - private Pipeline createPipeline(DatanodeDetails datanode) { - List dns = new ArrayList<>(); - dns.add(datanode); - Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setNodes(dns) - .build(); - return pipeline; - } - - @Override - public List deleteKeyBlocks( - List keyBlocksInfoList) throws IOException { - List results = new ArrayList<>(); - List blockResultList = new ArrayList<>(); - Result result; - for (BlockGroup keyBlocks : keyBlocksInfoList) { - for (BlockID blockKey : keyBlocks.getBlockIDList()) { - currentCall++; - switch (this.failCallsFrequency) { - case 0: - result = success; - break; - case 1: - result = unknownFailure; - break; - default: - if (currentCall % this.failCallsFrequency == 0) { - result = unknownFailure; - } else { - result = success; - } - } - blockResultList.add(new DeleteBlockResult(blockKey, result)); - } - results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(), - blockResultList)); - } - return results; - } - - @Override - public ScmInfo getScmInfo() throws IOException { - ScmInfo.Builder builder = - new ScmInfo.Builder() - .setClusterId(clusterID) - .setScmId(scmId); - return builder.build(); - } - - @Override - public List sortDatanodes(List nodes, - String clientMachine) throws IOException { - return null; - } - - @Override - public void close() throws IOException { - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java deleted file mode 100644 index c151afa8937..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ /dev/null @@ -1,344 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
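The javadoc of the deleted ScmBlockLocationTestingClient defines the failure-frequency contract: 0 means no call fails, 1 means every call fails, N means every N-th call fails. A minimal standalone sketch of that counter-and-modulo pattern, with a hypothetical class name and no dependency on the deleted Ozone types:

// Standalone sketch (not part of the patch) of the failure-frequency contract
// documented on the deleted ScmBlockLocationTestingClient. Class name is hypothetical.
public final class FailureInjector {
  private final int failCallsFrequency;
  private int currentCall = 0;

  public FailureInjector(int failCallsFrequency) {
    this.failCallsFrequency = Math.abs(failCallsFrequency);
  }

  public boolean nextCallFails() {
    currentCall++;
    switch (failCallsFrequency) {
    case 0:
      return false;                                    // no-failure mode
    case 1:
      return true;                                     // all calls fail
    default:
      return currentCall % failCallsFrequency == 0;    // mixed mode: every N-th call fails
    }
  }

  public static void main(String[] args) {
    FailureInjector everyThird = new FailureInjector(3);
    for (int i = 1; i <= 6; i++) {
      System.out.println("call " + i + " fails: " + everyThird.nextCallFails());
    }
  }
}
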

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.*; - -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; - -/** - * Tests BucketManagerImpl, mocks OMMetadataManager for testing. - */ -@RunWith(MockitoJUnitRunner.class) -@Ignore("Bucket Manager does not use cache, Disable it for now.") -public class TestBucketManagerImpl { - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration createNewTestPath() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if (!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - return conf; - } - - private OmMetadataManagerImpl createSampleVol() throws IOException { - OzoneConfiguration conf = createNewTestPath(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - String volumeKey = metaMgr.getVolumeKey("sampleVol"); - // This is a simple hack for testing, we just test if the volume via a - // null check, do not parse the value part. So just write some dummy value. 
- OmVolumeArgs args = - OmVolumeArgs.newBuilder() - .setVolume("sampleVol") - .setAdminName("bilbo") - .setOwnerName("bilbo") - .build(); - metaMgr.getVolumeTable().put(volumeKey, args); - return metaMgr; - } - - @Test - public void testCreateBucketWithoutVolume() throws Exception { - thrown.expectMessage("Volume doesn't exist"); - OzoneConfiguration conf = createNewTestPath(); - OmMetadataManagerImpl metaMgr = - new OmMetadataManagerImpl(conf); - try { - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - bucketManager.createBucket(bucketInfo); - } catch (OMException omEx) { - Assert.assertEquals(ResultCodes.VOLUME_NOT_FOUND, - omEx.getResult()); - throw omEx; - } finally { - metaMgr.getStore().close(); - } - } - - @Test - public void testCreateBucket() throws Exception { - OmMetadataManagerImpl metaMgr = createSampleVol(); - - KeyProviderCryptoExtension kmsProvider = Mockito.mock( - KeyProviderCryptoExtension.class); - String testBekName = "key1"; - String testCipherName = "AES/CTR/NoPadding"; - - KeyProvider.Metadata mockMetadata = Mockito.mock(KeyProvider.Metadata - .class); - Mockito.when(kmsProvider.getMetadata(testBekName)).thenReturn(mockMetadata); - Mockito.when(mockMetadata.getCipher()).thenReturn(testCipherName); - - BucketManager bucketManager = new BucketManagerImpl(metaMgr, - kmsProvider); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setBucketEncryptionKey(new - BucketEncryptionKeyInfo.Builder().setKeyName("key1").build()) - .build(); - bucketManager.createBucket(bucketInfo); - Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne")); - - OmBucketInfo bucketInfoRead = - bucketManager.getBucketInfo("sampleVol", "bucketOne"); - - Assert.assertTrue(bucketInfoRead.getEncryptionKeyInfo().getKeyName() - .equals(bucketInfo.getEncryptionKeyInfo().getKeyName())); - metaMgr.getStore().close(); - } - - - @Test - public void testCreateEncryptedBucket() throws Exception { - OmMetadataManagerImpl metaMgr = createSampleVol(); - - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - bucketManager.createBucket(bucketInfo); - Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", - "bucketOne")); - metaMgr.getStore().close(); - } - - @Test - public void testCreateAlreadyExistingBucket() throws Exception { - thrown.expectMessage("Bucket already exist"); - OmMetadataManagerImpl metaMgr = createSampleVol(); - - try { - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - bucketManager.createBucket(bucketInfo); - bucketManager.createBucket(bucketInfo); - } catch (OMException omEx) { - Assert.assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, - omEx.getResult()); - throw omEx; - } finally { - metaMgr.getStore().close(); - } - } - - @Test - public void testGetBucketInfoForInvalidBucket() throws Exception { - thrown.expectMessage("Bucket not found"); - OmMetadataManagerImpl metaMgr = createSampleVol(); - try { - - - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - bucketManager.getBucketInfo("sampleVol", "bucketOne"); - } catch (OMException omEx) { - 
Assert.assertEquals(ResultCodes.BUCKET_NOT_FOUND, - omEx.getResult()); - throw omEx; - } finally { - metaMgr.getStore().close(); - } - } - - @Test - public void testGetBucketInfo() throws Exception { - OmMetadataManagerImpl metaMgr = createSampleVol(); - - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setStorageType(StorageType.DISK) - .setIsVersionEnabled(false) - .build(); - createBucket(metaMgr, bucketInfo); - OmBucketInfo result = bucketManager.getBucketInfo( - "sampleVol", "bucketOne"); - Assert.assertEquals("sampleVol", result.getVolumeName()); - Assert.assertEquals("bucketOne", result.getBucketName()); - Assert.assertEquals(StorageType.DISK, - result.getStorageType()); - Assert.assertEquals(false, result.getIsVersionEnabled()); - metaMgr.getStore().close(); - } - - private void createBucket(OMMetadataManager metadataManager, - OmBucketInfo bucketInfo) throws IOException { - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); - } - - @Test - public void testSetBucketPropertyChangeStorageType() throws Exception { - OmMetadataManagerImpl metaMgr = createSampleVol(); - - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setStorageType(StorageType.DISK) - .build(); - createBucket(metaMgr, bucketInfo); - OmBucketInfo result = bucketManager.getBucketInfo( - "sampleVol", "bucketOne"); - Assert.assertEquals(StorageType.DISK, - result.getStorageType()); - OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setStorageType(StorageType.SSD) - .build(); - bucketManager.setBucketProperty(bucketArgs); - OmBucketInfo updatedResult = bucketManager.getBucketInfo( - "sampleVol", "bucketOne"); - Assert.assertEquals(StorageType.SSD, - updatedResult.getStorageType()); - metaMgr.getStore().close(); - } - - @Test - public void testSetBucketPropertyChangeVersioning() throws Exception { - OmMetadataManagerImpl metaMgr = createSampleVol(); - - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setIsVersionEnabled(false) - .build(); - bucketManager.createBucket(bucketInfo); - OmBucketInfo result = bucketManager.getBucketInfo( - "sampleVol", "bucketOne"); - Assert.assertFalse(result.getIsVersionEnabled()); - OmBucketArgs bucketArgs = OmBucketArgs.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .setIsVersionEnabled(true) - .build(); - bucketManager.setBucketProperty(bucketArgs); - OmBucketInfo updatedResult = bucketManager.getBucketInfo( - "sampleVol", "bucketOne"); - Assert.assertTrue(updatedResult.getIsVersionEnabled()); - metaMgr.getStore().close(); - } - - @Test - public void testDeleteBucket() throws Exception { - thrown.expectMessage("Bucket not found"); - OmMetadataManagerImpl metaMgr = createSampleVol(); - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - for (int i = 0; i < 5; i++) { - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucket_" + i) - .build(); - bucketManager.createBucket(bucketInfo); - } - for (int i = 0; i < 5; i++) { - Assert.assertEquals("bucket_" + i, - bucketManager.getBucketInfo( - "sampleVol", "bucket_" + i).getBucketName()); - } - try 
{ - bucketManager.deleteBucket("sampleVol", "bucket_1"); - Assert.assertNotNull(bucketManager.getBucketInfo( - "sampleVol", "bucket_2")); - } catch (IOException ex) { - Assert.fail(ex.getMessage()); - } - try { - bucketManager.getBucketInfo("sampleVol", "bucket_1"); - } catch (OMException omEx) { - Assert.assertEquals(ResultCodes.BUCKET_NOT_FOUND, - omEx.getResult()); - throw omEx; - } - metaMgr.getStore().close(); - } - - @Test - public void testDeleteNonEmptyBucket() throws Exception { - thrown.expectMessage("Bucket is not empty"); - OmMetadataManagerImpl metaMgr = createSampleVol(); - BucketManager bucketManager = new BucketManagerImpl(metaMgr); - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - bucketManager.createBucket(bucketInfo); - //Create keys in bucket - metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", - new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_one") - .setReplicationFactor(ReplicationFactor.ONE) - .setReplicationType(ReplicationType.STAND_ALONE) - .build()); - metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_two", - new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_two") - .setReplicationFactor(ReplicationFactor.ONE) - .setReplicationType(ReplicationType.STAND_ALONE) - .build()); - try { - bucketManager.deleteBucket("sampleVol", "bucketOne"); - } catch (OMException omEx) { - Assert.assertEquals(ResultCodes.BUCKET_NOT_EMPTY, - omEx.getResult()); - throw omEx; - } - metaMgr.getStore().close(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java deleted file mode 100644 index 78e1c4456ee..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
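Several of the deleted TestBucketManagerImpl cases pair JUnit 4's ExpectedException rule with a catch block that asserts the result code and then rethrows, so a single failing call is checked for both its code and its message. A reduced, self-contained sketch of that pattern; CodedException and failingLookup() are hypothetical stand-ins for the deleted OM types:

// Sketch only (not part of the patch): verify-code-then-rethrow with ExpectedException.
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

public class CodedExceptionPatternTest {
  static class CodedException extends Exception {
    enum Code { VOLUME_NOT_FOUND, BUCKET_NOT_FOUND }
    private final Code code;
    CodedException(Code code, String message) { super(message); this.code = code; }
    Code getCode() { return code; }
  }

  private static void failingLookup() throws CodedException {
    throw new CodedException(CodedException.Code.VOLUME_NOT_FOUND, "Volume doesn't exist");
  }

  @Rule
  public ExpectedException thrown = ExpectedException.none();

  @Test
  public void testMissingVolumeReported() throws Exception {
    thrown.expectMessage("Volume doesn't exist");     // message verified by the rule
    try {
      failingLookup();
    } catch (CodedException ex) {
      // result code verified here, then rethrown so the rule still sees the message
      Assert.assertEquals(CodedException.Code.VOLUME_NOT_FOUND, ex.getCode());
      throw ex;
    }
  }
}
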

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.om; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.ByteArrayInputStream; -import java.io.IOException; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.assertEquals; - -/** - * This class tests KeyInputStream and KeyOutputStream. - */ -public class TestChunkStreams { - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Test - public void testReadGroupInputStream() throws Exception { - try (KeyInputStream groupInputStream = new KeyInputStream()) { - - String dataString = RandomStringUtils.randomAscii(500); - byte[] buf = dataString.getBytes(UTF_8); - int offset = 0; - for (int i = 0; i < 5; i++) { - int tempOffset = offset; - BlockInputStream in = - new BlockInputStream(null, 100, null, null, true, null) { - private long pos = 0; - private ByteArrayInputStream in = - new ByteArrayInputStream(buf, tempOffset, 100); - - @Override - public synchronized void seek(long pos) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public synchronized long getPos() throws IOException { - return pos; - } - - @Override - public boolean seekToNewSource(long targetPos) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public synchronized int read() throws IOException { - return in.read(); - } - - @Override - public synchronized int read(byte[] b, int off, int len) - throws IOException { - int readLen = in.read(b, off, len); - pos += readLen; - return readLen; - } - }; - offset += 100; - groupInputStream.addStream(in); - } - - byte[] resBuf = new byte[500]; - int len = groupInputStream.read(resBuf, 0, 500); - - assertEquals(500, len); - assertEquals(dataString, new String(resBuf, UTF_8)); - } - } - - @Test - public void testErrorReadGroupInputStream() throws Exception { - try (KeyInputStream groupInputStream = new KeyInputStream()) { - - String dataString = RandomStringUtils.randomAscii(500); - byte[] buf = dataString.getBytes(UTF_8); - int offset = 0; - for (int i = 0; i < 5; i++) { - int tempOffset = offset; - BlockInputStream in = - new BlockInputStream(null, 100, null, null, true, null) { - private long pos = 0; - private ByteArrayInputStream in = - new ByteArrayInputStream(buf, tempOffset, 100); - - @Override - public synchronized void seek(long pos) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public synchronized long getPos() throws IOException { - return pos; - } - - @Override - public synchronized boolean seekToNewSource(long targetPos) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public synchronized int read() throws IOException { - return in.read(); - } - - @Override - public synchronized int read(byte[] b, int off, int len) - throws IOException { - int readLen = in.read(b, off, len); - pos += readLen; - return readLen; - } - }; - offset += 100; - groupInputStream.addStream(in); - } - - 
byte[] resBuf = new byte[600]; - // read 300 bytes first - int len = groupInputStream.read(resBuf, 0, 340); - assertEquals(3, groupInputStream.getCurrentStreamIndex()); - assertEquals(60, groupInputStream.getRemainingOfIndex(3)); - assertEquals(340, len); - assertEquals(dataString.substring(0, 340), - new String(resBuf, UTF_8).substring(0, 340)); - - // read following 300 bytes, but only 200 left - len = groupInputStream.read(resBuf, 340, 260); - assertEquals(4, groupInputStream.getCurrentStreamIndex()); - assertEquals(0, groupInputStream.getRemainingOfIndex(4)); - assertEquals(160, len); - assertEquals(dataString, new String(resBuf, UTF_8).substring(0, 500)); - - // further read should get EOF - len = groupInputStream.read(resBuf, 0, 1); - // reached EOF, further read should get -1 - assertEquals(-1, len); - } - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java deleted file mode 100644 index 3c707ba1e18..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; - -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Test Key Deleting Service. - *
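The deleted TestChunkStreams exercises a grouped read: one logical 500-byte read served by five 100-byte block streams. A standalone sketch of the same idea using plain java.io streams; ChunkedReader is a hypothetical stand-in for the deleted KeyInputStream:

// Standalone sketch (not part of the patch): one logical read spanning several
// fixed-size block streams, as exercised by the deleted TestChunkStreams.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class ChunkedReader {
  public static void main(String[] args) throws IOException {
    byte[] data = new byte[500];
    for (int i = 0; i < data.length; i++) {
      data[i] = (byte) ('a' + (i % 26));
    }
    List<InputStream> blocks = new ArrayList<>();
    for (int offset = 0; offset < data.length; offset += 100) {
      blocks.add(new ByteArrayInputStream(data, offset, 100));   // five 100-byte "blocks"
    }

    byte[] result = new byte[data.length];
    int filled = 0;
    for (InputStream block : blocks) {
      int read;
      // drain the current block before moving on to the next one
      while ((read = block.read(result, filled, result.length - filled)) > 0) {
        filled += read;
      }
    }
    System.out.println("read " + filled + " bytes, intact: " + Arrays.equals(data, result));
  }
}
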

- * This test does the following things. - *

- * 1. Creates a bunch of keys. 2. Then executes delete key directly using - * Metadata Manager. 3. Waits for a while for the KeyDeleting Service to pick up - * and call into SCM. 4. Confirms that calls have been successful. - */ -public class TestKeyDeletingService { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration createConfAndInitValues() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if (!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); - ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, - TimeUnit.MILLISECONDS); - conf.setQuietMode(false); - - return conf; - } - - /** - * In this test, we create a bunch of keys and delete them. Then we start the - * KeyDeletingService and pass a SCMClient which does not fail. We make sure - * that all the keys that we deleted is picked up and deleted by - * OzoneManager. - * - * @throws IOException - on Failure. - */ - - @Test(timeout = 30000) - public void checkIfDeleteServiceisDeletingKeys() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 0), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 1); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - GenericTestUtils.waitFor( - () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount, - 1000, 10000); - Assert.assertTrue(keyDeletingService.getRunCount().get() > 1); - Assert.assertEquals( - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), 0); - } - - @Test(timeout = 30000) - public void checkIfDeleteServiceWithFailingSCM() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - //failCallsFrequency = 1 , means all calls fail. - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 1), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 1); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - keyManager.start(conf); - Assert.assertEquals( - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), keyCount); - // Make sure that we have run the background thread 5 times more - GenericTestUtils.waitFor( - () -> keyDeletingService.getRunCount().get() >= 5, - 100, 1000); - // Since SCM calls are failing, deletedKeyCount should be zero. 
- Assert.assertEquals(keyDeletingService.getDeletedKeyCount().get(), 0); - Assert.assertEquals( - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), keyCount); - } - - @Test(timeout = 30000) - public void checkDeletionForEmptyKey() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - //failCallsFrequency = 1 , means all calls fail. - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 1), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); - final int keyCount = 100; - createAndDeleteKeys(keyManager, keyCount, 0); - KeyDeletingService keyDeletingService = - (KeyDeletingService) keyManager.getDeletingService(); - keyManager.start(conf); - - // Since empty keys are directly deleted from db there should be no - // pending deletion keys. Also deletedKeyCount should be zero. - Assert.assertEquals( - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), 0); - // Make sure that we have run the background thread 2 times or more - GenericTestUtils.waitFor( - () -> keyDeletingService.getRunCount().get() >= 2, - 100, 1000); - Assert.assertEquals(keyDeletingService.getDeletedKeyCount().get(), 0); - } - - private void createAndDeleteKeys(KeyManager keyManager, int keyCount, - int numBlocks) throws IOException { - for (int x = 0; x < keyCount; x++) { - String volumeName = String.format("volume%s", - RandomStringUtils.randomAlphanumeric(5)); - String bucketName = String.format("bucket%s", - RandomStringUtils.randomAlphanumeric(5)); - String keyName = String.format("key%s", - RandomStringUtils.randomAlphanumeric(5)); - String volumeBytes = - keyManager.getMetadataManager().getVolumeKey(volumeName); - String bucketBytes = - keyManager.getMetadataManager().getBucketKey(volumeName, bucketName); - // cheat here, just create a volume and bucket entry so that we can - // create the keys, we put the same data for key and value since the - // system does not decode the object - TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(), - OmVolumeArgs.newBuilder() - .setOwnerName("o") - .setAdminName("a") - .setVolume(volumeName) - .build()); - - TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(), - OmBucketInfo.newBuilder().setVolumeName(volumeName) - .setBucketName(bucketName) - .build()); - - OmKeyArgs arg = - new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setAcls(Collections.emptyList()) - .setLocationInfoList(new ArrayList<>()) - .build(); - //Open, Commit and Delete the Keys in the Key Manager. - OpenKeySession session = keyManager.openKey(arg); - for (int i = 0; i < numBlocks; i++) { - arg.addLocationInfo( - keyManager.allocateBlock(arg, session.getId(), new ExcludeList())); - } - keyManager.commitKey(arg, session.getId()); - keyManager.deleteKey(arg); - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java deleted file mode 100644 index b00bf44d7ea..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
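The deleted TestKeyDeletingService leans on GenericTestUtils.waitFor to poll a counter until the background service catches up. A minimal sketch of that poll-until-true idiom, assuming a simple deadline loop rather than the Hadoop helper:

// Standalone sketch (not part of the patch): re-check a condition on an interval
// until it holds or a timeout expires, as the deleted key-deletion tests do.
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BooleanSupplier;

public final class WaitFor {
  static void waitFor(BooleanSupplier condition, long checkEveryMillis, long waitForMillis)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Condition not met within " + waitForMillis + " ms");
      }
      Thread.sleep(checkEveryMillis);                 // back off before re-checking
    }
  }

  public static void main(String[] args) throws Exception {
    AtomicLong deletedKeyCount = new AtomicLong();      // stands in for the service counter
    new Thread(() -> deletedKeyCount.set(100)).start(); // background work finishing later
    waitFor(() -> deletedKeyCount.get() >= 100, 100, 10_000);
    System.out.println("deleted keys reached " + deletedKeyCount.get());
  }
}
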
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om; - -import java.io.IOException; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.test.GenericTestUtils; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Unit test key manager. 
- */ -public class TestKeyManagerUnit { - - private OmMetadataManagerImpl metadataManager; - private KeyManagerImpl keyManager; - - private Instant startDate; - - @Before - public void setup() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, - GenericTestUtils.getRandomizedTestDir().toString()); - metadataManager = new OmMetadataManagerImpl(configuration); - keyManager = new KeyManagerImpl( - Mockito.mock(ScmBlockLocationProtocol.class), - metadataManager, - configuration, - "omtest", - Mockito.mock(OzoneBlockTokenSecretManager.class) - ); - - startDate = Instant.now(); - } - - @Test - public void listMultipartUploadPartsWithZeroUpload() throws IOException { - //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); - - OmMultipartInfo omMultipartInfo = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - - //WHEN - OmMultipartUploadListParts omMultipartUploadListParts = keyManager - .listParts("vol1", "bucket1", "dir/key1", omMultipartInfo.getUploadID(), - 0, 10); - - Assert.assertEquals(0, - omMultipartUploadListParts.getPartInfoList().size()); - - this.startDate = Instant.now(); - } - - @Test - public void listMultipartUploads() throws IOException { - - //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); - createBucket(metadataManager, "vol1", "bucket2"); - - OmMultipartInfo upload1 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - - OmMultipartInfo upload2 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); - - OmMultipartInfo upload3 = - initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); - - //WHEN - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads("vol1", "bucket1", ""); - - //THEN - List uploads = omMultipartUploadList.getUploads(); - Assert.assertEquals(2, uploads.size()); - Assert.assertEquals("dir/key1", uploads.get(0).getKeyName()); - Assert.assertEquals("dir/key2", uploads.get(1).getKeyName()); - - Assert.assertNotNull(uploads.get(1)); - Assert.assertNotNull(uploads.get(1).getCreationTime()); - Assert.assertTrue("Creation date is too old", - uploads.get(1).getCreationTime().compareTo(startDate) > 0); - } - - @Test - public void listMultipartUploadsWithPrefix() throws IOException { - - //GIVEN - createBucket(metadataManager, "vol1", "bucket1"); - createBucket(metadataManager, "vol1", "bucket2"); - - OmMultipartInfo upload1 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1"); - - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); - initMultipartUpload(keyManager, "vol1", "bucket1", "key3"); - - initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); - - //WHEN - OmMultipartUploadList omMultipartUploadList = - keyManager.listMultipartUploads("vol1", "bucket1", "dir"); - - //THEN - List uploads = omMultipartUploadList.getUploads(); - Assert.assertEquals(2, uploads.size()); - Assert.assertEquals("dir/key1", uploads.get(0).getKeyName()); - Assert.assertEquals("dir/key2", uploads.get(1).getKeyName()); - } - - private void createBucket(OmMetadataManagerImpl omMetadataManager, - String volume, String bucket) - throws IOException { - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volume) - .setBucketName(bucket) - .setStorageType(StorageType.DISK) - .setIsVersionEnabled(false) - .setAcls(new ArrayList<>()) - .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, 
omBucketInfo); - } - - private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest, - String volume, String bucket, String key) - throws IOException { - OmKeyArgs key1 = new Builder() - .setVolumeName(volume) - .setBucketName(bucket) - .setKeyName(key) - .setType(ReplicationType.RATIS) - .setFactor(ReplicationFactor.THREE) - .setAcls(new ArrayList<>()) - .build(); - return omtest.initiateMultipartUpload(key1); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java deleted file mode 100644 index e0e4c61d3e5..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ /dev/null @@ -1,417 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; -import com.google.common.base.Optional; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.util.List; -import java.util.TreeSet; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; - -/** - * Tests OzoneManager MetadataManager. - */ -public class TestOmMetadataManager { - - private OMMetadataManager omMetadataManager; - private OzoneConfiguration ozoneConfiguration; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - - @Before - public void setup() throws Exception { - ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OZONE_OM_DB_DIRS, - folder.getRoot().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - } - @Test - public void testListBuckets() throws Exception { - - String volumeName1 = "volumeA"; - String prefixBucketNameWithOzoneOwner = "ozoneBucket"; - String prefixBucketNameWithHadoopOwner = "hadoopBucket"; - - TestOMRequestUtils.addVolumeToDB(volumeName1, omMetadataManager); - - - TreeSet volumeABucketsPrefixWithOzoneOwner = new TreeSet<>(); - TreeSet volumeABucketsPrefixWithHadoopOwner = new TreeSet<>(); - for (int i=1; i<= 100; i++) { - if (i % 2 == 0) { - volumeABucketsPrefixWithOzoneOwner.add( - prefixBucketNameWithOzoneOwner + i); - addBucketsToCache(volumeName1, prefixBucketNameWithOzoneOwner + i); - } else { - volumeABucketsPrefixWithHadoopOwner.add( - prefixBucketNameWithHadoopOwner + i); - addBucketsToCache(volumeName1, prefixBucketNameWithHadoopOwner + i); - } - } - - String volumeName2 = "volumeB"; - TreeSet volumeBBucketsPrefixWithOzoneOwner = new TreeSet<>(); - TreeSet volumeBBucketsPrefixWithHadoopOwner = new TreeSet<>(); - TestOMRequestUtils.addVolumeToDB(volumeName2, omMetadataManager); - for (int i=1; i<= 100; i++) { - if (i % 2 == 0) { - volumeBBucketsPrefixWithOzoneOwner.add( - prefixBucketNameWithOzoneOwner + i); - addBucketsToCache(volumeName2, prefixBucketNameWithOzoneOwner + i); - } else { - volumeBBucketsPrefixWithHadoopOwner.add( - prefixBucketNameWithHadoopOwner + i); - addBucketsToCache(volumeName2, prefixBucketNameWithHadoopOwner + i); - } - } - - // List all buckets which have prefix ozoneBucket - List omBucketInfoList = - omMetadataManager.listBuckets(volumeName1, - null, prefixBucketNameWithOzoneOwner, 100); - - Assert.assertEquals(omBucketInfoList.size(), 50); - - for (OmBucketInfo omBucketInfo : omBucketInfoList) { - Assert.assertTrue(omBucketInfo.getBucketName().startsWith( - prefixBucketNameWithOzoneOwner)); - } - - - String startBucket = prefixBucketNameWithOzoneOwner + 10; - omBucketInfoList = - 
omMetadataManager.listBuckets(volumeName1, - startBucket, prefixBucketNameWithOzoneOwner, - 100); - - Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet( - startBucket).size() - 1, omBucketInfoList.size()); - - startBucket = prefixBucketNameWithOzoneOwner + 38; - omBucketInfoList = - omMetadataManager.listBuckets(volumeName1, - startBucket, prefixBucketNameWithOzoneOwner, - 100); - - Assert.assertEquals(volumeABucketsPrefixWithOzoneOwner.tailSet( - startBucket).size() - 1, omBucketInfoList.size()); - - for (OmBucketInfo omBucketInfo : omBucketInfoList) { - Assert.assertTrue(omBucketInfo.getBucketName().startsWith( - prefixBucketNameWithOzoneOwner)); - Assert.assertFalse(omBucketInfo.getBucketName().equals( - prefixBucketNameWithOzoneOwner + 10)); - } - - - - omBucketInfoList = omMetadataManager.listBuckets(volumeName2, - null, prefixBucketNameWithHadoopOwner, 100); - - Assert.assertEquals(omBucketInfoList.size(), 50); - - for (OmBucketInfo omBucketInfo : omBucketInfoList) { - Assert.assertTrue(omBucketInfo.getBucketName().startsWith( - prefixBucketNameWithHadoopOwner)); - } - - // Try to get buckets by count 10, like that get all buckets in the - // volumeB with prefixBucketNameWithHadoopOwner. - startBucket = null; - TreeSet expectedBuckets = new TreeSet<>(); - for (int i=0; i<5; i++) { - - omBucketInfoList = omMetadataManager.listBuckets(volumeName2, - startBucket, prefixBucketNameWithHadoopOwner, 10); - - Assert.assertEquals(omBucketInfoList.size(), 10); - - for (OmBucketInfo omBucketInfo : omBucketInfoList) { - expectedBuckets.add(omBucketInfo.getBucketName()); - Assert.assertTrue(omBucketInfo.getBucketName().startsWith( - prefixBucketNameWithHadoopOwner)); - startBucket = omBucketInfo.getBucketName(); - } - } - - - Assert.assertEquals(volumeBBucketsPrefixWithHadoopOwner, expectedBuckets); - // As now we have iterated all 50 buckets, calling next time should - // return empty list. - omBucketInfoList = omMetadataManager.listBuckets(volumeName2, - startBucket, prefixBucketNameWithHadoopOwner, 10); - - Assert.assertEquals(omBucketInfoList.size(), 0); - - } - - - private void addBucketsToCache(String volumeName, String bucketName) { - - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setStorageType(StorageType.DISK) - .setIsVersionEnabled(false) - .build(); - - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getBucketKey(volumeName, bucketName)), - new CacheValue<>(Optional.of(omBucketInfo), 1)); - } - - @Test - public void testListKeys() throws Exception { - - String volumeNameA = "volumeA"; - String volumeNameB = "volumeB"; - String ozoneBucket = "ozoneBucket"; - String hadoopBucket = "hadoopBucket"; - - - // Create volumes and buckets. 
- TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeNameB, omMetadataManager); - addBucketsToCache(volumeNameA, ozoneBucket); - addBucketsToCache(volumeNameB, hadoopBucket); - - - String prefixKeyA = "key-a"; - String prefixKeyB = "key-b"; - TreeSet keysASet = new TreeSet<>(); - TreeSet keysBSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { - if (i % 2 == 0) { - keysASet.add( - prefixKeyA + i); - addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); - } else { - keysBSet.add( - prefixKeyB + i); - addKeysToOM(volumeNameA, hadoopBucket, prefixKeyB + i, i); - } - } - - - TreeSet keysAVolumeBSet = new TreeSet<>(); - TreeSet keysBVolumeBSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { - if (i % 2 == 0) { - keysAVolumeBSet.add( - prefixKeyA + i); - addKeysToOM(volumeNameB, ozoneBucket, prefixKeyA + i, i); - } else { - keysBVolumeBSet.add( - prefixKeyB + i); - addKeysToOM(volumeNameB, hadoopBucket, prefixKeyB + i, i); - } - } - - - // List all keys which have prefix "key-a" - List omKeyInfoList = - omMetadataManager.listKeys(volumeNameA, ozoneBucket, - null, prefixKeyA, 100); - - Assert.assertEquals(omKeyInfoList.size(), 50); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - Assert.assertTrue(omKeyInfo.getKeyName().startsWith( - prefixKeyA)); - } - - - String startKey = prefixKeyA + 10; - omKeyInfoList = - omMetadataManager.listKeys(volumeNameA, ozoneBucket, - startKey, prefixKeyA, 100); - - Assert.assertEquals(keysASet.tailSet( - startKey).size() - 1, omKeyInfoList.size()); - - startKey = prefixKeyA + 38; - omKeyInfoList = - omMetadataManager.listKeys(volumeNameA, ozoneBucket, - startKey, prefixKeyA, 100); - - Assert.assertEquals(keysASet.tailSet( - startKey).size() - 1, omKeyInfoList.size()); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - Assert.assertTrue(omKeyInfo.getKeyName().startsWith( - prefixKeyA)); - Assert.assertFalse(omKeyInfo.getBucketName().equals( - prefixKeyA + 38)); - } - - - - omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, - null, prefixKeyB, 100); - - Assert.assertEquals(omKeyInfoList.size(), 50); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - Assert.assertTrue(omKeyInfo.getKeyName().startsWith( - prefixKeyB)); - } - - // Try to get keys by count 10, like that get all keys in the - // volumeB/ozoneBucket with "key-a". - startKey = null; - TreeSet expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { - - omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, - startKey, prefixKeyB, 10); - - Assert.assertEquals(10, omKeyInfoList.size()); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - expectedKeys.add(omKeyInfo.getKeyName()); - Assert.assertTrue(omKeyInfo.getKeyName().startsWith( - prefixKeyB)); - startKey = omKeyInfo.getKeyName(); - } - } - - Assert.assertEquals(expectedKeys, keysBVolumeBSet); - - - // As now we have iterated all 50 buckets, calling next time should - // return empty list. - omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, - startKey, prefixKeyB, 10); - - Assert.assertEquals(omKeyInfoList.size(), 0); - - } - - @Test - public void testListKeysWithFewDeleteEntriesInCache() throws Exception { - String volumeNameA = "volumeA"; - String ozoneBucket = "ozoneBucket"; - - // Create volumes and bucket. 
- TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); - - addBucketsToCache(volumeNameA, ozoneBucket); - - String prefixKeyA = "key-a"; - TreeSet keysASet = new TreeSet<>(); - TreeSet deleteKeySet = new TreeSet<>(); - - - for (int i=1; i<= 100; i++) { - if (i % 2 == 0) { - keysASet.add( - prefixKeyA + i); - addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); - } else { - addKeysToOM(volumeNameA, ozoneBucket, prefixKeyA + i, i); - String key = omMetadataManager.getOzoneKey(volumeNameA, - ozoneBucket, prefixKeyA + i); - // Mark as deleted in cache. - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(key), - new CacheValue<>(Optional.absent(), 100L)); - deleteKeySet.add(key); - } - } - - // Now list keys which match with prefixKeyA. - List omKeyInfoList = - omMetadataManager.listKeys(volumeNameA, ozoneBucket, - null, prefixKeyA, 100); - - // As in total 100, 50 are marked for delete. It should list only 50 keys. - Assert.assertEquals(50, omKeyInfoList.size()); - - TreeSet expectedKeys = new TreeSet<>(); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - expectedKeys.add(omKeyInfo.getKeyName()); - Assert.assertTrue(omKeyInfo.getKeyName().startsWith(prefixKeyA)); - } - - Assert.assertEquals(expectedKeys, keysASet); - - - // Now get key count by 10. - String startKey = null; - expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { - - omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, - startKey, prefixKeyA, 10); - - System.out.println(i); - Assert.assertEquals(10, omKeyInfoList.size()); - - for (OmKeyInfo omKeyInfo : omKeyInfoList) { - expectedKeys.add(omKeyInfo.getKeyName()); - Assert.assertTrue(omKeyInfo.getKeyName().startsWith( - prefixKeyA)); - startKey = omKeyInfo.getKeyName(); - } - } - - Assert.assertEquals(keysASet, expectedKeys); - - - // As now we have iterated all 50 buckets, calling next time should - // return empty list. - omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, - startKey, prefixKeyA, 10); - - Assert.assertEquals(omKeyInfoList.size(), 0); - - - - } - - private void addKeysToOM(String volumeName, String bucketName, - String keyName, int i) throws Exception { - - if (i%2== 0) { - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - 1000L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - } else { - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - omMetadataManager); - } - } - -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java deleted file mode 100644 index b071e27302a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
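The deleted TestOmMetadataManager walks listBuckets/listKeys in pages of ten by feeding the last returned name back as the next startKey, which is excluded from the following page. A standalone sketch of that startKey pagination over an in-memory sorted set:

// Standalone sketch (not part of the patch) of startKey-based pagination.
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

public final class StartKeyPaging {
  // Return up to maxKeys names strictly greater than startKey (from the start if null).
  static List<String> listPage(TreeSet<String> names, String startKey, int maxKeys) {
    SortedSet<String> view = (startKey == null) ? names : names.tailSet(startKey, false);
    List<String> page = new ArrayList<>();
    for (String name : view) {
      if (page.size() == maxKeys) {
        break;
      }
      page.add(name);
    }
    return page;
  }

  public static void main(String[] args) {
    TreeSet<String> keys = new TreeSet<>();
    for (int i = 1; i <= 50; i++) {
      keys.add(String.format("key-a%03d", i));
    }
    String startKey = null;
    int pages = 0;
    List<String> page;
    while (!(page = listPage(keys, startKey, 10)).isEmpty()) {
      startKey = page.get(page.size() - 1);   // resume strictly after the last returned key
      pages++;
    }
    System.out.println("iterated " + keys.size() + " keys in " + pages + " pages");
  }
}
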

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.http.HttpConfig.Policy; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -import java.io.File; -import java.net.InetSocketAddress; -import java.net.URL; -import java.net.URLConnection; -import java.util.Arrays; -import java.util.Collection; - -/** - * Test http server of OM with various HTTP option. - */ -@RunWith(value = Parameterized.class) -public class TestOzoneManagerHttpServer { - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName()); - private static String keystoresDir; - private static String sslConfDir; - private static Configuration conf; - private static URLConnectionFactory connectionFactory; - - @Parameters public static Collection policy() { - Object[][] params = new Object[][] { - {HttpConfig.Policy.HTTP_ONLY}, - {HttpConfig.Policy.HTTPS_ONLY}, - {HttpConfig.Policy.HTTP_AND_HTTPS} }; - return Arrays.asList(params); - } - - private final HttpConfig.Policy policy; - - public TestOzoneManagerHttpServer(Policy policy) { - super(); - this.policy = policy; - } - - @BeforeClass public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - conf = new Configuration(); - keystoresDir = new File(BASEDIR).getAbsolutePath(); - sslConfDir = KeyStoreTestUtil.getClasspathDir( - TestOzoneManagerHttpServer.class); - KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false); - connectionFactory = - URLConnectionFactory.newDefaultURLConnectionFactory(conf); - conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getClientSSLConfigFileName()); - conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, - KeyStoreTestUtil.getServerSSLConfigFileName()); - } - - @AfterClass public static void tearDown() throws Exception { - FileUtil.fullyDelete(new File(BASEDIR)); - KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir); - } - - @Test public void testHttpPolicy() throws Exception { - conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name()); - conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0"); - conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0"); - - OzoneManagerHttpServer server = null; - try { - server = new OzoneManagerHttpServer(conf, null); - server.start(); - - Assert.assertTrue(implies(policy.isHttpEnabled(), - canAccess("http", server.getHttpAddress()))); - Assert.assertTrue(implies(policy.isHttpEnabled() && - !policy.isHttpsEnabled(), - !canAccess("https", server.getHttpsAddress()))); - - 
Assert.assertTrue(implies(policy.isHttpsEnabled(), - canAccess("https", server.getHttpsAddress()))); - Assert.assertTrue(implies(policy.isHttpsEnabled(), - !canAccess("http", server.getHttpsAddress()))); - - } finally { - if (server != null) { - server.stop(); - } - } - } - - private static boolean canAccess(String scheme, InetSocketAddress addr) { - if (addr == null) { - return false; - } - try { - URL url = - new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx"); - URLConnection conn = connectionFactory.openConnection(url); - conn.connect(); - conn.getContent(); - } catch (Exception e) { - return false; - } - return true; - } - - private static boolean implies(boolean a, boolean b) { - return !a || b; - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java deleted file mode 100644 index 80281693c1b..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
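The deleted TestOzoneManagerHttpServer expresses its HTTP/HTTPS expectations as logical implications, implies(a, b) == !a || b: an enabled scheme must answer, while a disabled scheme asserts nothing. A tiny standalone sketch of that helper:

// Standalone sketch (not part of the patch) of the implication helper used to
// assert the HTTP/HTTPS policy matrix in the deleted http-server test.
public final class PolicyImplication {
  static boolean implies(boolean a, boolean b) {
    return !a || b;
  }

  public static void main(String[] args) {
    boolean httpEnabled = true;
    boolean httpReachable = true;
    boolean httpsEnabled = false;
    boolean httpsReachable = false;

    // "If a scheme is enabled it must answer"; a disabled scheme is vacuously fine.
    System.out.println(implies(httpEnabled, httpReachable));    // true
    System.out.println(implies(httpsEnabled, httpsReachable));  // true (vacuously)
    System.out.println(implies(true, false));                   // false: enabled but unreachable
  }
}
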

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.security.authentication.client.AuthenticationException; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.PrintStream; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import static org.junit.Assert.*; - -/** - * This class is used to test the CLI provided by OzoneManagerStarter, which is - * used to start and init the OzoneManager. The calls to the Ozone Manager are - * mocked so the tests only validate the CLI calls the correct methods are - * invoked. - */ -public class TestOzoneManagerStarter { - - private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); - private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); - private final PrintStream originalOut = System.out; - private final PrintStream originalErr = System.err; - - private MockOMStarter mock; - - @Before - public void setUpStreams() { - System.setOut(new PrintStream(outContent)); - System.setErr(new PrintStream(errContent)); - mock = new MockOMStarter(); - } - - @After - public void restoreStreams() { - System.setOut(originalOut); - System.setErr(originalErr); - } - - @Test - public void testCallsStartWhenServerStarted() throws Exception { - executeCommand(); - assertTrue(mock.startCalled); - } - - @Test - public void testExceptionThrownWhenStartFails() throws Exception { - mock.throwOnStart = true; - try { - executeCommand(); - fail("Exception should have been thrown"); - } catch (Exception e) { - assertTrue(true); - } - } - - @Test - public void testStartNotCalledWithInvalidParam() throws Exception { - executeCommand("--invalid"); - assertFalse(mock.startCalled); - } - - @Test - public void testPassingInitSwitchCallsInit() { - executeCommand("--init"); - assertTrue(mock.initCalled); - } - - @Test - public void testInitSwitchWithInvalidParamDoesNotRun() { - executeCommand("--init", "--invalid"); - assertFalse(mock.initCalled); - } - - @Test - public void testUnSuccessfulInitThrowsException() { - mock.throwOnInit = true; - try { - executeCommand("--init"); - fail("Exception show have been thrown"); - } catch (Exception e) { - assertTrue(true); - } - } - - @Test - public void testInitThatReturnsFalseThrowsException() { - mock.initStatus = false; - try { - executeCommand("--init"); - fail("Exception show have been thrown"); - } catch (Exception e) { - assertTrue(true); - } - } - - @Test - public void testUsagePrintedOnInvalidInput() { - executeCommand("--invalid"); - Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); - Matcher m = p.matcher(errContent.toString()); - assertTrue(m.find()); - } - - private void executeCommand(String... 
args) { - new OzoneManagerStarter(mock).execute(args); - } - - static class MockOMStarter implements OMStarterInterface { - - private boolean startCalled = false; - private boolean initCalled = false; - private boolean initStatus = true; - private boolean throwOnStart = false; - private boolean throwOnInit = false; - - public void start(OzoneConfiguration conf) throws IOException, - AuthenticationException { - startCalled = true; - if (throwOnStart) { - throw new IOException("Simulated Exception"); - } - } - - public boolean init(OzoneConfiguration conf) throws IOException, - AuthenticationException { - initCalled = true; - if (throwOnInit) { - throw new IOException("Simulated Exception"); - } - return initStatus; - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java deleted file mode 100644 index ef35d4da8f1..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.om; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; - -import static org.junit.Assert.*; - -/** - * Tests for S3 Bucket Manager. 
- */ -public class TestS3BucketManager { - @Rule - public ExpectedException thrown = ExpectedException.none(); - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration conf; - private OmMetadataManagerImpl metaMgr; - private BucketManager bucketManager; - private VolumeManager volumeManager; - - @Before - public void init() throws IOException { - conf = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if (!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - metaMgr = new OmMetadataManagerImpl(conf); - volumeManager = new VolumeManagerImpl(metaMgr, conf); - bucketManager = new BucketManagerImpl(metaMgr); - } - - @Test - public void testOzoneVolumeNameForUser() throws IOException { - S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr, - volumeManager, bucketManager); - String userName = "ozone"; - String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName); - assertEquals(OzoneConsts.OM_S3_VOLUME_PREFIX + userName, volumeName); - } - - @Test - public void testOzoneVolumeNameForUserFails() throws IOException { - S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr, - volumeManager, bucketManager); - String userName = null; - try { - String volumeName = s3BucketManager.getOzoneVolumeNameForUser(userName); - fail("testOzoneVolumeNameForUserFails failed"); - } catch (NullPointerException ex) { - GenericTestUtils.assertExceptionContains("UserName cannot be null", ex); - } - - } - - @Test - public void testGetS3BucketMapping() throws IOException { - S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr, - volumeManager, bucketManager); - String userName = "bilbo"; - metaMgr.getS3Table().put("newBucket", - s3BucketManager.formatOzoneVolumeName(userName) + "/newBucket"); - String mapping = s3BucketManager.getOzoneBucketMapping("newBucket"); - Assert.assertTrue(mapping.startsWith("s3bilbo/")); - Assert.assertTrue(mapping.endsWith("/newBucket")); - } - - @Test - public void testGetOzoneNames() throws IOException { - S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr, - volumeManager, bucketManager); - String userName = "batman"; - String s3BucketName = "gotham"; - metaMgr.getS3Table().put(s3BucketName, - s3BucketManager.formatOzoneVolumeName(userName) + "/" + s3BucketName); - String volumeName = s3BucketManager.getOzoneVolumeName(s3BucketName); - Assert.assertTrue(volumeName.equalsIgnoreCase("s3"+userName)); - String bucketName =s3BucketManager.getOzoneBucketName(s3BucketName); - Assert.assertTrue(bucketName.equalsIgnoreCase(s3BucketName)); - // try to get a bucket that does not exist. - thrown.expectMessage("No such S3 bucket."); - s3BucketManager.getOzoneBucketMapping("raven"); - - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java deleted file mode 100644 index 12fcf7c2728..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; -/** - * OM tests - */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java deleted file mode 100644 index 56c806a5fb4..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import java.io.IOException; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * This class tests OzoneManagerDoubleBuffer implementation with - * dummy response class. - */ -public class TestOzoneManagerDoubleBufferWithDummyResponse { - - private OMMetadataManager omMetadataManager; - private OzoneManagerDoubleBuffer doubleBuffer; - private AtomicLong trxId = new AtomicLong(0); - private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot; - private long lastAppliedIndex; - - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setup() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_METADATA_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = - new OmMetadataManagerImpl(configuration); - ozoneManagerRatisSnapshot = index -> { - lastAppliedIndex = index; - }; - doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager, - ozoneManagerRatisSnapshot); - } - - @After - public void stop() { - doubleBuffer.stop(); - } - - /** - * This tests add's 100 bucket creation responses to doubleBuffer, and - * check OM DB bucket table has 100 entries or not. In addition checks - * flushed transaction count is matching with expected count or not. - * @throws Exception - */ - @Test(timeout = 300_000) - public void testDoubleBufferWithDummyResponse() throws Exception { - String volumeName = UUID.randomUUID().toString(); - int bucketCount = 100; - OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics = - doubleBuffer.getOzoneManagerDoubleBufferMetrics(); - - // As we have not flushed/added any transactions, all metrics should have - // value zero. 
- Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushOperations() == 0); - Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushedTransactions() == 0); - Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getMaxNumberOfTransactionsFlushedInOneIteration() == 0); - - for (int i=0; i < bucketCount; i++) { - doubleBuffer.add(createDummyBucketResponse(volumeName, - UUID.randomUUID().toString()), trxId.incrementAndGet()); - } - GenericTestUtils.waitFor(() -> - doubleBuffer.getFlushedTransactionCount() == bucketCount, 100, - 60000); - - Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushOperations() > 0); - Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getTotalNumOfFlushedTransactions() == bucketCount); - Assert.assertTrue(ozoneManagerDoubleBufferMetrics - .getMaxNumberOfTransactionsFlushedInOneIteration() > 0); - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getBucketTable()) == (bucketCount)); - Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); - - // Check lastAppliedIndex is updated correctly or not. - Assert.assertEquals(bucketCount, lastAppliedIndex); - } - - /** - * Create DummyBucketCreate response. - * @param volumeName - * @param bucketName - * @return OMDummyCreateBucketResponse - */ - private OMDummyCreateBucketResponse createDummyBucketResponse( - String volumeName, String bucketName) { - OmBucketInfo omBucketInfo = - OmBucketInfo.newBuilder().setVolumeName(volumeName) - .setBucketName(bucketName).setCreationTime(Time.now()).build(); - return new OMDummyCreateBucketResponse(omBucketInfo, - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCreateBucketResponse(CreateBucketResponse.newBuilder().build()) - .build()); - } - - - /** - * DummyCreatedBucket Response class used in testing. - */ - public static class OMDummyCreateBucketResponse extends OMClientResponse { - private final OmBucketInfo omBucketInfo; - - public OMDummyCreateBucketResponse(OmBucketInfo omBucketInfo, - OMResponse omResponse) { - super(omResponse); - this.omBucketInfo = omBucketInfo; - } - - @Override - public void addToDBBatch(OMMetadataManager omMetadataManager, - BatchOperation batchOperation) throws IOException { - String dbBucketKey = - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - dbBucketKey, omBucketInfo); - } - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java deleted file mode 100644 index 441f1c192f7..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ /dev/null @@ -1,496 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import java.io.IOException; -import java.util.Queue; -import java.util.UUID; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; -import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketDeleteResponse; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Daemon; -import org.mockito.Mockito; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_LOCK_MAX_CONCURRENCY; -import static org.junit.Assert.fail; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * This class tests OzoneManagerDouble Buffer with actual OMResponse classes. 
- */ -public class TestOzoneManagerDoubleBufferWithOMResponse { - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private AuditLogger auditLogger; - private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper; - private OMMetadataManager omMetadataManager; - private OzoneManagerDoubleBuffer doubleBuffer; - private final AtomicLong trxId = new AtomicLong(0); - private OzoneManagerRatisSnapshot ozoneManagerRatisSnapshot; - private volatile long lastAppliedIndex; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setup() throws IOException { - ozoneManager = Mockito.mock(OzoneManager.class, - Mockito.withSettings().stubOnly()); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - ozoneConfiguration.setInt(HDDS_LOCK_MAX_CONCURRENCY, 1000); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - ozoneManagerRatisSnapshot = index -> { - lastAppliedIndex = index; - }; - doubleBuffer = new OzoneManagerDoubleBuffer(omMetadataManager, - ozoneManagerRatisSnapshot); - ozoneManagerDoubleBufferHelper = doubleBuffer::add; - } - - @After - public void stop() { - doubleBuffer.stop(); - } - - /** - * This tests OzoneManagerDoubleBuffer implementation. It calls - * testDoubleBuffer with number of iterations to do transactions and - * number of buckets to be created in each iteration. It then - * verifies OM DB entries count is matching with total number of - * transactions or not. - * @throws Exception - */ - @Test(timeout = 500_000) - public void testDoubleBuffer() throws Exception { - // This test checks whether count in tables are correct or not. - testDoubleBuffer(1, 10); - testDoubleBuffer(10, 100); - testDoubleBuffer(100, 100); - testDoubleBuffer(1000, 500); - } - - /** - * This test first creates a volume, and then does a mix of transactions - * like create/delete buckets and add them to double buffer. Then it - * verifies OM DB entries are matching with actual responses added to - * double buffer or not. - * @throws Exception - */ - @Test - public void testDoubleBufferWithMixOfTransactions() throws Exception { - // This test checks count, data in table is correct or not. - Queue< OMBucketCreateResponse > bucketQueue = - new ConcurrentLinkedQueue<>(); - Queue< OMBucketDeleteResponse > deleteBucketQueue = - new ConcurrentLinkedQueue<>(); - - String volumeName = UUID.randomUUID().toString(); - OMVolumeCreateResponse omVolumeCreateResponse = - (OMVolumeCreateResponse) createVolume(volumeName, - trxId.incrementAndGet()); - - int bucketCount = 10; - - doMixTransactions(volumeName, 10, deleteBucketQueue, bucketQueue); - - // As for every 2 transactions of create bucket we add deleted bucket. - final int deleteCount = 5; - - // We are doing +1 for volume transaction. 
-    GenericTestUtils.waitFor(() ->
-        doubleBuffer.getFlushedTransactionCount() ==
-        (bucketCount + deleteCount + 1), 100, 120000);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getVolumeTable()) == 1);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getBucketTable()) == 5);
-
-    // Now after this our DB should have 5 buckets and one volume.
-
-    checkVolume(volumeName, omVolumeCreateResponse);
-
-    checkCreateBuckets(bucketQueue);
-
-    checkDeletedBuckets(deleteBucketQueue);
-
-    // Check that lastAppliedIndex is updated correctly.
-    Assert.assertEquals(bucketCount + deleteCount + 1, lastAppliedIndex);
-  }
-
-  /**
-   * This test first creates two volumes, and then does a mix of transactions
-   * like create/delete buckets in parallel and adds them to the double buffer.
-   * Then it verifies that the OM DB entries match the responses added to the
-   * double buffer.
-   * @throws Exception
-   */
-  @Test
-  public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception {
-    // This test checks that the count and the data in the tables are correct.
-
-    Queue<OMBucketCreateResponse> bucketQueue =
-        new ConcurrentLinkedQueue<>();
-    Queue<OMBucketDeleteResponse> deleteBucketQueue =
-        new ConcurrentLinkedQueue<>();
-
-    String volumeName1 = UUID.randomUUID().toString();
-
-    OMVolumeCreateResponse omVolumeCreateResponse1 =
-        (OMVolumeCreateResponse) createVolume(volumeName1,
-            trxId.incrementAndGet());
-
-    String volumeName2 = UUID.randomUUID().toString();
-    OMVolumeCreateResponse omVolumeCreateResponse2 =
-        (OMVolumeCreateResponse) createVolume(volumeName2,
-            trxId.incrementAndGet());
-
-
-    Daemon daemon1 = new Daemon(() -> doMixTransactions(volumeName1, 10,
-        deleteBucketQueue, bucketQueue));
-    Daemon daemon2 = new Daemon(() -> doMixTransactions(volumeName2, 10,
-        deleteBucketQueue, bucketQueue));
-
-    daemon1.start();
-    daemon2.start();
-
-    int bucketCount = 20;
-
-    // For every two create-bucket transactions we add one delete-bucket
-    // transaction.
-    final int deleteCount = 10;
-
-    // We add 2 for the two volume transactions.
-    GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount()
-        == (bucketCount + deleteCount + 2), 100, 120000);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getVolumeTable()) == 2);
-
-    Assert.assertTrue(omMetadataManager.countRowsInTable(
-        omMetadataManager.getBucketTable()) == 10);
-
-    // Now after this our DB should have 10 buckets and two volumes.
-
-
-    checkVolume(volumeName1, omVolumeCreateResponse1);
-    checkVolume(volumeName2, omVolumeCreateResponse2);
-
-    checkCreateBuckets(bucketQueue);
-
-    checkDeletedBuckets(deleteBucketQueue);
-
-    // Check that lastAppliedIndex is updated correctly.
-    Assert.assertEquals(bucketCount + deleteCount + 2, lastAppliedIndex);
-  }
-
-  /**
-   * This method adds a mix of createBucket/deleteBucket responses to the
-   * double buffer. The total number of responses added is specified by
-   * bucketCount.
-   * @param volumeName
-   * @param bucketCount
-   * @param deleteBucketQueue
-   * @param bucketQueue
-   */
-  private void doMixTransactions(String volumeName, int bucketCount,
-      Queue<OMBucketDeleteResponse> deleteBucketQueue,
-      Queue<OMBucketCreateResponse> bucketQueue) {
-    for (int i = 0; i < bucketCount; i++) {
-      String bucketName = UUID.randomUUID().toString();
-      long transactionID = trxId.incrementAndGet();
-      OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName,
-          bucketName, transactionID);
-      // For every two transactions, one bucket is deleted.
- if (i % 2 == 0) { - OMBucketDeleteResponse omBucketDeleteResponse = - (OMBucketDeleteResponse) deleteBucket(volumeName, bucketName, - trxId.incrementAndGet()); - deleteBucketQueue.add(omBucketDeleteResponse); - } else { - bucketQueue.add(omBucketCreateResponse); - } - } - } - - private OMClientResponse deleteBucket(String volumeName, String bucketName, - long transactionID) { - OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createDeleteBucketRequest(volumeName, bucketName); - - OMBucketDeleteRequest omBucketDeleteRequest = - new OMBucketDeleteRequest(omRequest); - - return omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, - transactionID, ozoneManagerDoubleBufferHelper); - } - - /** - * Verifies volume table data is matching with actual response added to - * double buffer. - * @param volumeName - * @param omVolumeCreateResponse - * @throws Exception - */ - private void checkVolume(String volumeName, - OMVolumeCreateResponse omVolumeCreateResponse) throws Exception { - OmVolumeArgs tableVolumeArgs = omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName)); - Assert.assertTrue(tableVolumeArgs != null); - - OmVolumeArgs omVolumeArgs = omVolumeCreateResponse.getOmVolumeArgs(); - - Assert.assertEquals(omVolumeArgs.getVolume(), tableVolumeArgs.getVolume()); - Assert.assertEquals(omVolumeArgs.getAdminName(), - tableVolumeArgs.getAdminName()); - Assert.assertEquals(omVolumeArgs.getOwnerName(), - tableVolumeArgs.getOwnerName()); - Assert.assertEquals(omVolumeArgs.getCreationTime(), - tableVolumeArgs.getCreationTime()); - } - - /** - * Verifies bucket table data is matching with actual response added to - * double buffer. - * @param bucketQueue - */ - private void checkCreateBuckets(Queue bucketQueue) { - bucketQueue.forEach((omBucketCreateResponse) -> { - OmBucketInfo omBucketInfo = omBucketCreateResponse.getOmBucketInfo(); - String bucket = omBucketInfo.getBucketName(); - OmBucketInfo tableBucketInfo = null; - try { - tableBucketInfo = - omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - bucket)); - } catch (IOException ex) { - fail("testDoubleBufferWithMixOfTransactions failed"); - } - Assert.assertNotNull(tableBucketInfo); - - Assert.assertEquals(omBucketInfo.getVolumeName(), - tableBucketInfo.getVolumeName()); - Assert.assertEquals(omBucketInfo.getBucketName(), - tableBucketInfo.getBucketName()); - Assert.assertEquals(omBucketInfo.getCreationTime(), - tableBucketInfo.getCreationTime()); - }); - } - - /** - * Verifies deleted bucket responses added to double buffer are actually - * removed from the OM DB or not. - * @param deleteBucketQueue - */ - private void checkDeletedBuckets(Queue - deleteBucketQueue) { - deleteBucketQueue.forEach((omBucketDeleteResponse -> { - try { - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey( - omBucketDeleteResponse.getVolumeName(), - omBucketDeleteResponse.getBucketName()))); - } catch (IOException ex) { - fail("testDoubleBufferWithMixOfTransactions failed"); - } - })); - } - - /** - * Create bucketCount number of createBucket responses for each iteration. - * All these iterations are run in parallel. Then verify OM DB has correct - * number of entries or not. - * @param iterations - * @param bucketCount - * @throws Exception - */ - public void testDoubleBuffer(int iterations, int bucketCount) - throws Exception { - try { - // Reset transaction id. 
- trxId.set(0); - // Calling setup and stop here because this method is called from a - // single test multiple times. - setup(); - for (int i = 0; i < iterations; i++) { - Daemon d1 = new Daemon(() -> - doTransactions(RandomStringUtils.randomAlphabetic(5), - bucketCount)); - d1.start(); - } - - // We are doing +1 for volume transaction. - long expectedTransactions = (bucketCount + 1) * iterations; - GenericTestUtils.waitFor(() -> lastAppliedIndex == expectedTransactions, - 100, 500000); - - Assert.assertEquals(expectedTransactions, - doubleBuffer.getFlushedTransactionCount() - ); - - GenericTestUtils.waitFor(() -> { - long count = 0L; - try { - count = - omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable()); - } catch (IOException ex) { - fail("testDoubleBuffer failed"); - } - return count == iterations; - - }, 300, 300000); - - - GenericTestUtils.waitFor(() -> { - long count = 0L; - try { - count = omMetadataManager.countRowsInTable( - omMetadataManager.getBucketTable()); - } catch (IOException ex) { - fail("testDoubleBuffer failed"); - } - return count == bucketCount * iterations; - }, 300, 300000); - - Assert.assertTrue(doubleBuffer.getFlushIterations() > 0); - } finally { - stop(); - } - } - - /** - * This method adds bucketCount number of createBucket responses to double - * buffer. - * @param volumeName - * @param bucketCount - */ - public void doTransactions(String volumeName, int bucketCount) { - createVolume(volumeName, trxId.incrementAndGet()); - for (int i=0; i< bucketCount; i++) { - createBucket(volumeName, UUID.randomUUID().toString(), - trxId.incrementAndGet()); - } - } - - /** - * Create OMVolumeCreateResponse for specified volume. - * @param volumeName - * @return OMVolumeCreateResponse - */ - private OMClientResponse createVolume(String volumeName, - long transactionId) { - - String admin = "ozone"; - String owner = UUID.randomUUID().toString(); - OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createVolumeRequest(volumeName, admin, owner); - - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(omRequest); - - return omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, - transactionId, ozoneManagerDoubleBufferHelper); - } - - /** - * Create OMBucketCreateResponse for specified volume and bucket. - * @param volumeName - * @param bucketName - * @return OMBucketCreateResponse - */ - private OMBucketCreateResponse createBucket(String volumeName, - String bucketName, long transactionID) { - - OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false, - OzoneManagerProtocolProtos.StorageTypeProto.DISK); - - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(omRequest); - - return (OMBucketCreateResponse) omBucketCreateRequest - .validateAndUpdateCache(ozoneManager, transactionID, - ozoneManagerDoubleBufferHelper); - - } - - /** - * Create OMBucketDeleteResponse for specified volume and bucket. 
- * @param volumeName - * @param bucketName - * @return OMBucketDeleteResponse - */ - private OMBucketDeleteResponse deleteBucket(String volumeName, - String bucketName) { - return new OMBucketDeleteResponse(volumeName, bucketName, - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setDeleteBucketResponse(DeleteBucketResponse.newBuilder().build()) - .build()); - } - - -} - diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java deleted file mode 100644 index c04fba24e19..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.ratis; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.ha.OMNodeDetails; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.util.LifeCycle; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; -import org.slf4j.LoggerFactory; - -import static org.junit.Assert.assertFalse; -import static org.mockito.Mockito.when; - -/** - * Test OM Ratis server. 
- */ -public class TestOzoneManagerRatisServer { - - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private OzoneManagerRatisServer omRatisServer; - private OzoneManagerRatisClient omRatisClient; - private String omID; - private String clientId = UUID.randomUUID().toString(); - private static final long LEADER_ELECTION_TIMEOUT = 500L; - private OMMetadataManager omMetadataManager; - private OzoneManager ozoneManager; - private OMNodeDetails omNodeDetails; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - omID = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omID); - Path metaDirPath = Paths.get(path, "om-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.setTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - int ratisPort = conf.getInt( - OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, - OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT); - InetSocketAddress rpcAddress = new InetSocketAddress( - InetAddress.getLocalHost(), 0); - omNodeDetails = new OMNodeDetails.Builder() - .setRpcAddress(rpcAddress) - .setRatisPort(ratisPort) - .setOMNodeId(omID) - .setOMServiceId(OzoneConsts.OM_SERVICE_ID_DEFAULT) - .build(); - // Starts a single node Ratis server - ozoneManager = Mockito.mock(OzoneManager.class); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - OMRatisSnapshotInfo omRatisSnapshotInfo = new OMRatisSnapshotInfo( - folder.newFolder()); - when(ozoneManager.getSnapshotInfo()).thenReturn(omRatisSnapshotInfo); - omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager, - omNodeDetails, Collections.emptyList()); - omRatisServer.start(); - omRatisClient = OzoneManagerRatisClient.newOzoneManagerRatisClient(omID, - omRatisServer.getRaftGroup(), conf); - omRatisClient.connect(); - } - - @After - public void shutdown() { - if (omRatisServer != null) { - omRatisServer.stop(); - } - if (omRatisClient != null) { - omRatisClient.close(); - } - } - - /** - * Start a OM Ratis Server and checks its state. - */ - @Test - public void testStartOMRatisServer() throws Exception { - Assert.assertEquals("Ratis Server should be in running state", - LifeCycle.State.RUNNING, omRatisServer.getServerState()); - } - - @Test - public void testLoadSnapshotInfoOnStart() throws Exception { - // Stop the Ratis server and manually update the snapshotInfo. - long oldSnaphsotIndex = ozoneManager.saveRatisSnapshot(); - ozoneManager.getSnapshotInfo().saveRatisSnapshotToDisk(oldSnaphsotIndex); - omRatisServer.stop(); - long newSnapshotIndex = oldSnaphsotIndex + 100; - ozoneManager.getSnapshotInfo().saveRatisSnapshotToDisk(newSnapshotIndex); - - // Start new Ratis server. 
It should pick up and load the new SnapshotInfo - omRatisServer = OzoneManagerRatisServer.newOMRatisServer(conf, ozoneManager, - omNodeDetails, Collections.emptyList()); - omRatisServer.start(); - long lastAppliedIndex = omRatisServer.getStateMachineLastAppliedIndex(); - - Assert.assertEquals(newSnapshotIndex, lastAppliedIndex); - } - - /** - * Test that all of {@link OzoneManagerProtocolProtos.Type} enum values are - * categorized in {@link OmUtils#isReadOnly(OMRequest)}. - */ - @Test - public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(LoggerFactory.getLogger(OmUtils.class)); - OzoneManagerProtocolProtos.Type[] cmdTypes = - OzoneManagerProtocolProtos.Type.values(); - - for (OzoneManagerProtocolProtos.Type cmdtype : cmdTypes) { - OMRequest request = OMRequest.newBuilder() - .setCmdType(cmdtype) - .setClientId(clientId) - .build(); - OmUtils.isReadOnly(request); - assertFalse(cmdtype + " is not categorized in " + - "OmUtils#isReadyOnly", - logCapturer.getOutput().contains("CmdType " + cmdtype +" is not " + - "categorized as readOnly or not.")); - logCapturer.clearOutput(); - } - } - - @Test - public void verifyRaftGroupIdGenerationWithDefaultOmServiceId() throws - Exception { - UUID uuid = UUID.nameUUIDFromBytes(OzoneConsts.OM_SERVICE_ID_DEFAULT - .getBytes()); - RaftGroupId raftGroupId = omRatisServer.getRaftGroup().getGroupId(); - Assert.assertEquals(uuid, raftGroupId.getUuid()); - Assert.assertEquals(raftGroupId.toByteString().size(), 16); - } - - @Test - public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws - Exception { - String customOmServiceId = "omSIdCustom123"; - OzoneConfiguration newConf = new OzoneConfiguration(); - String newOmId = UUID.randomUUID().toString(); - String path = GenericTestUtils.getTempPath(newOmId); - Path metaDirPath = Paths.get(path, "om-meta"); - newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - newConf.setTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - int ratisPort = 9873; - InetSocketAddress rpcAddress = new InetSocketAddress( - InetAddress.getLocalHost(), 0); - OMNodeDetails nodeDetails = new OMNodeDetails.Builder() - .setRpcAddress(rpcAddress) - .setRatisPort(ratisPort) - .setOMNodeId(newOmId) - .setOMServiceId(customOmServiceId) - .build(); - // Starts a single node Ratis server - omRatisServer.stop(); - OzoneManagerRatisServer newOmRatisServer = OzoneManagerRatisServer - .newOMRatisServer(newConf, ozoneManager, nodeDetails, - Collections.emptyList()); - newOmRatisServer.start(); - OzoneManagerRatisClient newOmRatisClient = OzoneManagerRatisClient - .newOzoneManagerRatisClient( - newOmId, - newOmRatisServer.getRaftGroup(), newConf); - newOmRatisClient.connect(); - - UUID uuid = UUID.nameUUIDFromBytes(customOmServiceId.getBytes()); - RaftGroupId raftGroupId = newOmRatisServer.getRaftGroup().getGroupId(); - Assert.assertEquals(uuid, raftGroupId.getUuid()); - Assert.assertEquals(raftGroupId.toByteString().size(), 16); - newOmRatisServer.stop(); - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java deleted file mode 100644 index bdaee6e0510..00000000000 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request; - -import java.net.InetAddress; -import java.util.UUID; - -import mockit.Mock; -import mockit.MockUp; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; -import org.apache.hadoop.security.UserGroupInformation; - -import static org.mockito.Mockito.when; - -/** - * Test OMClient Request with user information. 
- */ -public class TestOMClientRequestWithUserInfo { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OMMetadataManager omMetadataManager; - private UserGroupInformation userGroupInformation = - UserGroupInformation.createRemoteUser("temp"); - private InetAddress inetAddress; - - @Before - public void setup() throws Exception { - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - inetAddress = InetAddress.getByName("127.0.0.1"); - - new MockUp() { - @Mock - public UserGroupInformation getRemoteUser() { - return userGroupInformation; - } - - public InetAddress getRemoteAddress() { - return inetAddress; - } - }; - } - - @Test - public void testUserInfo() throws Exception { - - String bucketName = UUID.randomUUID().toString(); - String volumeName = UUID.randomUUID().toString(); - OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true, - OzoneManagerProtocolProtos.StorageTypeProto.DISK); - - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(omRequest); - - Assert.assertFalse(omRequest.hasUserInfo()); - - OzoneManagerProtocolProtos.OMRequest modifiedRequest = - omBucketCreateRequest.preExecute(ozoneManager); - - Assert.assertTrue(modifiedRequest.hasUserInfo()); - - // Now pass modified request to OMBucketCreateRequest and check ugi and - // remote Address. - omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - - InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress(); - UserGroupInformation ugi = omBucketCreateRequest.createUGI(); - - - // Now check we have original user info and remote address or not. - // Here from OMRequest user info, converted to UGI and InetAddress. - Assert.assertEquals(inetAddress.getHostAddress(), - remoteAddress.getHostAddress()); - Assert.assertEquals(userGroupInformation.getUserName(), ugi.getUserName()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java deleted file mode 100644 index 472d46a289e..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ /dev/null @@ -1,579 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request; - - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - -import com.google.common.base.Optional; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadAbortRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartCommitUploadPartRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadCompleteRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartInfoInitiateRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetVolumePropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AddAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RemoveAclRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetAclRequest; -import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType; -import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType; - -import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; - -/** - * Helper class to test OMClientRequest classes. - */ -public final class TestOMRequestUtils { - - private TestOMRequestUtils() { - //Do nothing - } - - /** - * Add's volume and bucket creation entries to OM DB. - * @param volumeName - * @param bucketName - * @param omMetadataManager - * @throws Exception - */ - public static void addVolumeAndBucketToDB(String volumeName, - String bucketName, OMMetadataManager omMetadataManager) throws Exception { - - addVolumeToDB(volumeName, omMetadataManager); - - OmBucketInfo omBucketInfo = - OmBucketInfo.newBuilder().setVolumeName(volumeName) - .setBucketName(bucketName).setCreationTime(Time.now()).build(); - - // Add to cache. - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getBucketKey(volumeName, bucketName)), - new CacheValue<>(Optional.of(omBucketInfo), 1L)); - } - - /** - * Add key entry to KeyTable. if openKeyTable flag is true, add's entries - * to openKeyTable, else add's it to keyTable. 
- * @param openKeyTable - * @param volumeName - * @param bucketName - * @param keyName - * @param clientID - * @param replicationType - * @param replicationFactor - * @param omMetadataManager - * @throws Exception - */ - @SuppressWarnings("parameterNumber") - public static void addKeyToTable(boolean openKeyTable, String volumeName, - String bucketName, - String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, - OMMetadataManager omMetadataManager) throws Exception { - - - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); - - if (openKeyTable) { - omMetadataManager.getOpenKeyTable().put( - omMetadataManager.getOpenKey(volumeName, bucketName, keyName, - clientID), omKeyInfo); - } else { - omMetadataManager.getKeyTable().put(omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName), omKeyInfo); - } - - } - - /** - * Add key entry to key table cache. - * @param volumeName - * @param bucketName - * @param keyName - * @param replicationType - * @param replicationFactor - * @param omMetadataManager - */ - @SuppressWarnings("parameterNumber") - public static void addKeyToTableCache(String volumeName, - String bucketName, - String keyName, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, - OMMetadataManager omMetadataManager) { - - - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); - - omMetadataManager.getKeyTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName)), new CacheValue<>(Optional.of(omKeyInfo), - 1L)); - - } - - private OmKeyInfo createKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationType(replicationType) - .setReplicationFactor(replicationFactor).build(); - } - - - /** - * Create OmKeyInfo. - */ - - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationType(replicationType) - .setReplicationFactor(replicationFactor).build(); - } - - - /** - * Add volume creation entry to OM DB. 
- * @param volumeName - * @param omMetadataManager - * @throws Exception - */ - public static void addVolumeToDB(String volumeName, - OMMetadataManager omMetadataManager) throws Exception { - addVolumeToDB(volumeName, UUID.randomUUID().toString(), omMetadataManager); - } - - public static void addS3BucketToDB(String volumeName, String s3BucketName, - OMMetadataManager omMetadataManager) throws Exception { - omMetadataManager.getS3Table().put(s3BucketName, - S3BucketCreateRequest.formatS3MappingName(volumeName, s3BucketName)); - } - - /** - * Add volume creation entry to OM DB. - * @param volumeName - * @param ownerName - * @param omMetadataManager - * @throws Exception - */ - public static void addVolumeToDB(String volumeName, String ownerName, - OMMetadataManager omMetadataManager) throws Exception { - OmVolumeArgs omVolumeArgs = - OmVolumeArgs.newBuilder().setCreationTime(Time.now()) - .setVolume(volumeName).setAdminName(ownerName) - .setOwnerName(ownerName).build(); - omMetadataManager.getVolumeTable().put( - omMetadataManager.getVolumeKey(volumeName), omVolumeArgs); - - // Add to cache. - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)), - new CacheValue<>(Optional.of(omVolumeArgs), 1L)); - } - - - public static OzoneManagerProtocolProtos.OMRequest createBucketRequest( - String bucketName, String volumeName, boolean isVersionEnabled, - OzoneManagerProtocolProtos.StorageTypeProto storageTypeProto) { - OzoneManagerProtocolProtos.BucketInfo bucketInfo = - OzoneManagerProtocolProtos.BucketInfo.newBuilder() - .setBucketName(bucketName) - .setVolumeName(volumeName) - .setIsVersionEnabled(isVersionEnabled) - .setStorageType(storageTypeProto) - .addAllMetadata(getMetadataList()).build(); - OzoneManagerProtocolProtos.CreateBucketRequest.Builder req = - OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder(); - req.setBucketInfo(bucketInfo); - return OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setCreateBucketRequest(req) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) - .setClientId(UUID.randomUUID().toString()).build(); - } - - public static OzoneManagerProtocolProtos.OMRequest createS3BucketRequest( - String userName, String s3BucketName) { - OzoneManagerProtocolProtos.S3CreateBucketRequest request = - OzoneManagerProtocolProtos.S3CreateBucketRequest.newBuilder() - .setUserName(userName) - .setS3Bucketname(s3BucketName).build(); - - return OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setCreateS3BucketRequest(request) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket) - .setClientId(UUID.randomUUID().toString()).build(); - } - - public static OzoneManagerProtocolProtos.OMRequest deleteS3BucketRequest( - String s3BucketName) { - OzoneManagerProtocolProtos.S3DeleteBucketRequest request = - OzoneManagerProtocolProtos.S3DeleteBucketRequest.newBuilder() - .setS3BucketName(s3BucketName).build(); - return OzoneManagerProtocolProtos.OMRequest.newBuilder() - .setDeleteS3BucketRequest(request) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteS3Bucket) - .setClientId(UUID.randomUUID().toString()).build(); - } - - public static List< HddsProtos.KeyValue> getMetadataList() { - List metadataList = new ArrayList<>(); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue( - "value1").build()); - metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key2").setValue( - "value2").build()); - return metadataList; - } - - - /** - * Add user to user table. 
- * @param volumeName - * @param ownerName - * @param omMetadataManager - * @throws Exception - */ - public static void addUserToDB(String volumeName, String ownerName, - OMMetadataManager omMetadataManager) throws Exception { - OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo = - OzoneManagerProtocolProtos.UserVolumeInfo - .newBuilder() - .addVolumeNames(volumeName) - .setObjectID(1) - .setUpdateID(1) - .build(); - omMetadataManager.getUserTable().put( - omMetadataManager.getUserKey(ownerName), userVolumeInfo); - } - - /** - * Create OMRequest for set volume property request with owner set. - * @param volumeName - * @param newOwner - * @return OMRequest - */ - public static OMRequest createSetVolumePropertyRequest(String volumeName, - String newOwner) { - SetVolumePropertyRequest setVolumePropertyRequest = - SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName) - .setOwnerName(newOwner).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) - .setSetVolumePropertyRequest(setVolumePropertyRequest).build(); - } - - - /** - * Create OMRequest for set volume property request with quota set. - * @param volumeName - * @param quota - * @return OMRequest - */ - public static OMRequest createSetVolumePropertyRequest(String volumeName, - long quota) { - SetVolumePropertyRequest setVolumePropertyRequest = - SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName) - .setQuotaInBytes(quota).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) - .setSetVolumePropertyRequest(setVolumePropertyRequest).build(); - } - - public static OMRequest createVolumeAddAclRequest(String volumeName, - OzoneAcl acl) { - AddAclRequest.Builder addAclRequestBuilder = AddAclRequest.newBuilder(); - addAclRequestBuilder.setObj(OzoneObj.toProtobuf(new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(ResourceType.VOLUME) - .setStoreType(StoreType.OZONE) - .build())); - if (acl != null) { - addAclRequestBuilder.setAcl(OzoneAcl.toProtobuf(acl)); - } - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.AddAcl) - .setAddAclRequest(addAclRequestBuilder.build()).build(); - } - - public static OMRequest createVolumeRemoveAclRequest(String volumeName, - OzoneAcl acl) { - RemoveAclRequest.Builder removeAclRequestBuilder = - RemoveAclRequest.newBuilder(); - removeAclRequestBuilder.setObj(OzoneObj.toProtobuf( - new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(ResourceType.VOLUME) - .setStoreType(StoreType.OZONE) - .build())); - if (acl != null) { - removeAclRequestBuilder.setAcl(OzoneAcl.toProtobuf(acl)); - } - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.RemoveAcl) - .setRemoveAclRequest(removeAclRequestBuilder.build()).build(); - } - - public static OMRequest createVolumeSetAclRequest(String volumeName, - List acls) { - SetAclRequest.Builder setAclRequestBuilder = SetAclRequest.newBuilder(); - setAclRequestBuilder.setObj(OzoneObj.toProtobuf(new OzoneObjInfo.Builder() - .setVolumeName(volumeName) - .setResType(ResourceType.VOLUME) - .setStoreType(StoreType.OZONE) - .build())); - if (acls != null) { - acls.forEach( - acl -> setAclRequestBuilder.addAcl(OzoneAcl.toProtobuf(acl))); - } - - return 
OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.SetAcl) - .setSetAclRequest(setAclRequestBuilder.build()).build(); - } - - /** - * Deletes key from Key table and adds it to DeletedKeys table. - * @return the deletedKey name - */ - public static String deleteKey(String ozoneKey, - OMMetadataManager omMetadataManager) throws IOException { - // Retrieve the keyInfo - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - // Delete key from KeyTable and put in DeletedKeyTable - omMetadataManager.getKeyTable().delete(ozoneKey); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo, - repeatedOmKeyInfo); - - omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo); - - return ozoneKey; - } - - /** - * Create OMRequest which encapsulates InitiateMultipartUpload request. - * @param volumeName - * @param bucketName - * @param keyName - */ - public static OMRequest createInitiateMPURequest(String volumeName, - String bucketName, String keyName) { - MultipartInfoInitiateRequest - multipartInfoInitiateRequest = - MultipartInfoInitiateRequest.newBuilder().setKeyArgs( - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName)).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload) - .setInitiateMultiPartUploadRequest(multipartInfoInitiateRequest) - .build(); - } - - /** - * Create OMRequest which encapsulates InitiateMultipartUpload request. - * @param volumeName - * @param bucketName - * @param keyName - */ - public static OMRequest createCommitPartMPURequest(String volumeName, - String bucketName, String keyName, long clientID, long size, - String multipartUploadID, int partNumber) { - - // Just set dummy size. - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); - // Just adding dummy list. As this is for UT only. 
- - MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = - MultipartCommitUploadPartRequest.newBuilder() - .setKeyArgs(keyArgs).setClientID(clientID).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.CommitMultiPartUpload) - .setCommitMultiPartUploadRequest(multipartCommitUploadPartRequest) - .build(); - } - - public static OMRequest createAbortMPURequest(String volumeName, - String bucketName, String keyName, String multipartUploadID) { - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName) - .setKeyName(keyName) - .setBucketName(bucketName) - .setMultipartUploadID(multipartUploadID); - - MultipartUploadAbortRequest multipartUploadAbortRequest = - MultipartUploadAbortRequest.newBuilder().setKeyArgs(keyArgs).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload) - .setAbortMultiPartUploadRequest(multipartUploadAbortRequest).build(); - } - - public static OMRequest createCompleteMPURequest(String volumeName, - String bucketName, String keyName, String multipartUploadID, - List partList) { - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName) - .setKeyName(keyName) - .setBucketName(bucketName) - .setMultipartUploadID(multipartUploadID); - - MultipartUploadCompleteRequest multipartUploadCompleteRequest = - MultipartUploadCompleteRequest.newBuilder().setKeyArgs(keyArgs) - .addAllPartsList(partList).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload) - .setCompleteMultiPartUploadRequest(multipartUploadCompleteRequest) - .build(); - - } - - /** - * Create OMRequest for create volume. - * @param volumeName - * @param adminName - * @param ownerName - * @return OMRequest - */ - public static OMRequest createVolumeRequest(String volumeName, - String adminName, String ownerName) { - OzoneManagerProtocolProtos.VolumeInfo volumeInfo = - OzoneManagerProtocolProtos.VolumeInfo.newBuilder().setVolume(volumeName) - .setAdminName(adminName).setOwnerName(ownerName).build(); - OzoneManagerProtocolProtos.CreateVolumeRequest createVolumeRequest = - OzoneManagerProtocolProtos.CreateVolumeRequest.newBuilder() - .setVolumeInfo(volumeInfo).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setCreateVolumeRequest(createVolumeRequest).build(); - } - - /** - * Create OMRequest for delete bucket. - * @param volumeName - * @param bucketName - */ - public static OMRequest createDeleteBucketRequest(String volumeName, - String bucketName) { - return OMRequest.newBuilder().setDeleteBucketRequest( - OzoneManagerProtocolProtos.DeleteBucketRequest.newBuilder() - .setBucketName(bucketName).setVolumeName(volumeName)) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) - .setClientId(UUID.randomUUID().toString()).build(); - } - - /** - * Add the Bucket information to OzoneManager DB and cache. 
- * @param omMetadataManager - * @param omBucketInfo - * @throws IOException - */ - public static void addBucketToOM(OMMetadataManager omMetadataManager, - OmBucketInfo omBucketInfo) throws IOException { - String dbBucketKey = - omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), - omBucketInfo.getBucketName()); - omMetadataManager.getBucketTable().put(dbBucketKey, omBucketInfo); - omMetadataManager.getBucketTable().addCacheEntry( - new CacheKey<>(dbBucketKey), - new CacheValue<>(Optional.of(omBucketInfo), 1L)); - } - - /** - * Add the Volume information to OzoneManager DB and Cache. - * @param omMetadataManager - * @param omVolumeArgs - * @throws IOException - */ - public static void addVolumeToOM(OMMetadataManager omMetadataManager, - OmVolumeArgs omVolumeArgs) throws IOException { - String dbVolumeKey = - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()); - omMetadataManager.getVolumeTable().put(dbVolumeKey, omVolumeArgs); - omMetadataManager.getVolumeTable().addCacheEntry( - new CacheKey<>(dbVolumeKey), - new CacheValue<>(Optional.of(omVolumeArgs), 1L)); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java deleted file mode 100644 index 1ddd7536f79..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestBucketRequest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; - - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * Base test class for Bucket request. - */ -@SuppressWarnings("visibilityModifier") -public class TestBucketRequest { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OzoneManager ozoneManager; - protected OMMetrics omMetrics; - protected OMMetadataManager omMetadataManager; - protected AuditLogger auditLogger; - - // Just setting ozoneManagerDoubleBuffer which does nothing. 
- protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - - @Before - public void setup() throws Exception { - - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - } - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java deleted file mode 100644 index 552aa1581f7..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .StorageTypeProto; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; - -/** - * Tests OMBucketCreateRequest class, which handles CreateBucket request. 
- */ -public class TestOMBucketCreateRequest extends TestBucketRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - doPreExecute(volumeName, bucketName); - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OMBucketCreateRequest omBucketCreateRequest = doPreExecute(volumeName, - bucketName); - - doValidateAndUpdateCache(volumeName, bucketName, - omBucketCreateRequest.getOmRequest()); - - } - - @Test - public void testValidateAndUpdateCacheWithNoVolume() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OMRequest originalRequest = TestOMRequestUtils.createBucketRequest( - bucketName, volumeName, false, StorageTypeProto.SSD); - - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(originalRequest); - - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - - // As we have not still called validateAndUpdateCache, get() should - // return null. - - Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); - - OMClientResponse omClientResponse = - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateBucketResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - - // As request is invalid bucket table should not have entry. - Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); - } - - - @Test - public void testValidateAndUpdateCacheWithBucketAlreadyExists() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OMBucketCreateRequest omBucketCreateRequest = - doPreExecute(volumeName, bucketName); - - doValidateAndUpdateCache(volumeName, bucketName, - omBucketCreateRequest.getOmRequest()); - - // Try create same bucket again - OMClientResponse omClientResponse = - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateBucketResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_ALREADY_EXISTS, - omResponse.getStatus()); - } - - - private OMBucketCreateRequest doPreExecute(String volumeName, - String bucketName) throws Exception { - addCreateVolumeToTable(volumeName, omMetadataManager); - OMRequest originalRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false, - StorageTypeProto.SSD); - - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(originalRequest); - - OMRequest modifiedRequest = omBucketCreateRequest.preExecute(ozoneManager); - verifyRequest(modifiedRequest, originalRequest); - return new OMBucketCreateRequest(modifiedRequest); - } - - private void doValidateAndUpdateCache(String volumeName, String bucketName, - OMRequest modifiedRequest) throws Exception { - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - - // As we have not still called validateAndUpdateCache, get() should - // return null. 
- - Assert.assertNull(omMetadataManager.getBucketTable().get(bucketKey)); - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(modifiedRequest); - - - OMClientResponse omClientResponse = - omBucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // As now after validateAndUpdateCache it should add entry to cache, get - // should return non null value. - OmBucketInfo omBucketInfo = - omMetadataManager.getBucketTable().get(bucketKey); - Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey)); - - // verify table data with actual request data. - Assert.assertEquals(OmBucketInfo.getFromProtobuf( - modifiedRequest.getCreateBucketRequest().getBucketInfo()), - omBucketInfo); - - // verify OMResponse. - verifySuccessCreateBucketResponse(omClientResponse.getOMResponse()); - - } - - - private void verifyRequest(OMRequest modifiedOmRequest, - OMRequest originalRequest) { - OzoneManagerProtocolProtos.BucketInfo original = - originalRequest.getCreateBucketRequest().getBucketInfo(); - OzoneManagerProtocolProtos.BucketInfo updated = - modifiedOmRequest.getCreateBucketRequest().getBucketInfo(); - - Assert.assertEquals(original.getBucketName(), updated.getBucketName()); - Assert.assertEquals(original.getVolumeName(), updated.getVolumeName()); - Assert.assertEquals(original.getIsVersionEnabled(), - updated.getIsVersionEnabled()); - Assert.assertEquals(original.getStorageType(), updated.getStorageType()); - Assert.assertEquals(original.getMetadataList(), updated.getMetadataList()); - Assert.assertNotEquals(original.getCreationTime(), - updated.getCreationTime()); - } - - public static void verifySuccessCreateBucketResponse(OMResponse omResponse) { - Assert.assertNotNull(omResponse.getCreateBucketResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateBucket, - omResponse.getCmdType()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - } - - public static void addCreateVolumeToTable(String volumeName, - OMMetadataManager omMetadataManager) throws Exception { - OmVolumeArgs omVolumeArgs = - OmVolumeArgs.newBuilder().setCreationTime(Time.now()) - .setVolume(volumeName).setAdminName(UUID.randomUUID().toString()) - .setOwnerName(UUID.randomUUID().toString()).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java deleted file mode 100644 index 6b4bf7a9fa2..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Tests OMBucketDeleteRequest class which handles DeleteBucket request. - */ -public class TestOMBucketDeleteRequest extends TestBucketRequest { - - @Test - public void testPreExecute() throws Exception { - OMRequest omRequest = - createDeleteBucketRequest(UUID.randomUUID().toString(), - UUID.randomUUID().toString()); - - OMBucketDeleteRequest omBucketDeleteRequest = - new OMBucketDeleteRequest(omRequest); - - // As user info gets added. - Assert.assertNotEquals(omRequest, - omBucketDeleteRequest.preExecute(ozoneManager)); - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OMRequest omRequest = - createDeleteBucketRequest(volumeName, bucketName); - - OMBucketDeleteRequest omBucketDeleteRequest = - new OMBucketDeleteRequest(omRequest); - - // Create Volume and bucket entries in DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName))); - } - - - @Test - public void testValidateAndUpdateCacheFailure() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OMRequest omRequest = - createDeleteBucketRequest(volumeName, bucketName); - - OMBucketDeleteRequest omBucketDeleteRequest = - new OMBucketDeleteRequest(omRequest); - - - OMClientResponse omClientResponse = - omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName))); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - } - - - - - private OMRequest createDeleteBucketRequest(String volumeName, - String bucketName) { - return OMRequest.newBuilder().setDeleteBucketRequest( - DeleteBucketRequest.newBuilder() - .setBucketName(bucketName).setVolumeName(volumeName)) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) - .setClientId(UUID.randomUUID().toString()).build(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java deleted file mode 100644 index 7df06672904..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ /dev/null @@ -1,127 +0,0 @@ 
-/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - BucketArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyRequest; - -/** - * Tests OMBucketSetPropertyRequest class which handles OMSetBucketProperty - * request. - */ -public class TestOMBucketSetPropertyRequest extends TestBucketRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - - OMRequest omRequest = createSetBucketPropertyRequest(volumeName, - bucketName, true); - - OMBucketSetPropertyRequest omBucketSetPropertyRequest = - new OMBucketSetPropertyRequest(omRequest); - - // As user info gets added. 
- Assert.assertNotEquals(omRequest, - omBucketSetPropertyRequest.preExecute(ozoneManager)); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - - OMRequest omRequest = createSetBucketPropertyRequest(volumeName, - bucketName, true); - - // Create with default BucketInfo values - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMBucketSetPropertyRequest omBucketSetPropertyRequest = - new OMBucketSetPropertyRequest(omRequest); - - OMClientResponse omClientResponse = - omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(true, - omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName)) - .getIsVersionEnabled()); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheFails() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - - OMRequest omRequest = createSetBucketPropertyRequest(volumeName, - bucketName, true); - - - OMBucketSetPropertyRequest omBucketSetPropertyRequest = - new OMBucketSetPropertyRequest(omRequest); - - OMClientResponse omClientResponse = - omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName))); - - } - - private OMRequest createSetBucketPropertyRequest(String volumeName, - String bucketName, boolean isVersionEnabled) { - return OMRequest.newBuilder().setSetBucketPropertyRequest( - SetBucketPropertyRequest.newBuilder().setBucketArgs( - BucketArgs.newBuilder().setBucketName(bucketName) - .setVolumeName(volumeName) - .setIsVersionEnabled(isVersionEnabled).build())) - .setCmdType(OzoneManagerProtocolProtos.Type.SetBucketProperty) - .setClientId(UUID.randomUUID().toString()).build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java deleted file mode 100644 index b89c65198d7..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -/** - * Package contains test classes for bucket requests. - */ -package org.apache.hadoop.ozone.om.request.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java deleted file mode 100644 index 4e93b13ad49..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ /dev/null @@ -1,337 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.file; - -import java.util.UUID; - -import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateDirectoryRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * Test OM directory create request. - */ -public class TestOMDirectoryCreateRequest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneManager ozoneManager; - private OMMetrics omMetrics; - private OMMetadataManager omMetadataManager; - private AuditLogger auditLogger; - // Just setting ozoneManagerDoubleBuffer which does nothing. 
- private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - @Before - public void setup() throws Exception { - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - } - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } - - @Test - public void testPreExecute() throws Exception { - - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = "a/b/c"; - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - // As in preExecute, we modify original request. - Assert.assertNotEquals(omRequest, modifiedOmRequest); - - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - - // Add volume and bucket entries to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - - OMClientResponse omClientResponse = - omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.OK); - Assert.assertTrue(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName)) != null); - - } - - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - - OMClientResponse omClientResponse = - omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND); - - // Key should not exist in DB - Assert.assertTrue(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName)) == null); - - } - - @Test - public void testValidateAndUpdateCacheWithSubDirectoryInPath() - throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - - // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - - OMClientResponse omClientResponse = - omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.OK); - - // Key should exist in DB and cache. 
- Assert.assertTrue(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName)) != null); - Assert.assertTrue(omMetadataManager.getKeyTable().getCacheValue( - new CacheKey<>(omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName))) != null); - - } - - @Test - public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() - throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - - // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - omMetadataManager); - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - - OMClientResponse omClientResponse = - omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.OK); - - // Key should exist in DB - Assert.assertTrue(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName)) != null); - - // As it already exists, it should not be in cache. - Assert.assertTrue(omMetadataManager.getKeyTable().getCacheValue( - new CacheKey<>(omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName))) == null); - - } - - @Test - public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { - String volumeName = "vol1"; - String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } - - // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - // Add a key with first two levels. - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, - keyName); - OMDirectoryCreateRequest omDirectoryCreateRequest = - new OMDirectoryCreateRequest(omRequest); - - OMRequest modifiedOmRequest = - omDirectoryCreateRequest.preExecute(ozoneManager); - - omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - - OMClientResponse omClientResponse = - omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS); - - // Key should not exist in DB - Assert.assertTrue(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey( - volumeName, bucketName, keyName)) == null); - - } - - /** - * Create OMRequest which encapsulates CreateDirectory request. 
- * @param volumeName - * @param bucketName - * @param keyName - * @return OMRequest - */ - private OMRequest createDirectoryRequest(String volumeName, String bucketName, - String keyName) { - return OMRequest.newBuilder().setCreateDirectoryRequest( - CreateDirectoryRequest.newBuilder().setKeyArgs( - KeyArgs.newBuilder().setVolumeName(volumeName) - .setBucketName(bucketName).setKeyName(keyName))) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory) - .setClientId(UUID.randomUUID().toString()).build(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java deleted file mode 100644 index 9639af03e59..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ /dev/null @@ -1,374 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.file; - -import java.util.List; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateFileRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; - -/** - * Tests OMFileCreateRequest. - */ -public class TestOMFileCreateRequest extends TestOMKeyRequest { - - - @Test - public void testPreExecute() throws Exception{ - OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, - HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, - false, false); - - OMFileCreateRequest omFileCreateRequest = - new OMFileCreateRequest(omRequest); - - OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); - Assert.assertNotEquals(omRequest, modifiedOmRequest); - - - // Check clientID and modification time is set or not. - Assert.assertTrue(modifiedOmRequest.hasCreateFileRequest()); - Assert.assertTrue( - modifiedOmRequest.getCreateFileRequest().getClientID() > 0); - - KeyArgs keyArgs = modifiedOmRequest.getCreateFileRequest().getKeyArgs(); - Assert.assertNotNull(keyArgs); - Assert.assertTrue(keyArgs.getModificationTime() > 0); - - // As our data size is 100, and scmBlockSize is default to 1000, so we - // shall have only one block. - List< OzoneManagerProtocolProtos.KeyLocation> keyLocations = - keyArgs.getKeyLocationsList(); - - // KeyLocation should be set. 
- Assert.assertTrue(keyLocations.size() == 1); - Assert.assertEquals(containerID, - keyLocations.get(0).getBlockID().getContainerBlockID() - .getContainerID()); - Assert.assertEquals(localID, - keyLocations.get(0).getBlockID().getContainerBlockID() - .getLocalID()); - Assert.assertTrue(keyLocations.get(0).hasPipeline()); - - Assert.assertEquals(0, keyLocations.get(0).getOffset()); - - Assert.assertEquals(scmBlockSize, keyLocations.get(0).getLength()); - } - - @Test - public void testPreExecuteWithBlankKey() throws Exception{ - OMRequest omRequest = createFileRequest(volumeName, bucketName, "", - HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, - false, false); - - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - omRequest); - - OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); - Assert.assertNotEquals(omRequest, modifiedOmRequest); - - - // When KeyName is root, nothing will be set. - Assert.assertTrue(modifiedOmRequest.hasCreateFileRequest()); - Assert.assertFalse( - modifiedOmRequest.getCreateFileRequest().getClientID() > 0); - - KeyArgs keyArgs = modifiedOmRequest.getCreateFileRequest().getKeyArgs(); - Assert.assertNotNull(keyArgs); - Assert.assertTrue(keyArgs.getModificationTime() == 0); - Assert.assertTrue(keyArgs.getKeyLocationsList().size() == 0); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, - HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, - false, true); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - omRequest); - - OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); - - - long id = modifiedOmRequest.getCreateFileRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, id); - - // Before calling - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNull(omKeyInfo); - - omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest); - - OMClientResponse omFileCreateResponse = - omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omFileCreateResponse.getOMResponse().getStatus()); - - // Check open table whether key is added or not. 
- - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNotNull(omKeyInfo); - - List< OmKeyLocationInfo > omKeyLocationInfoList = - omKeyInfo.getLatestVersionLocations().getLocationList(); - Assert.assertTrue(omKeyLocationInfoList.size() == 1); - - OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0); - - // Check modification time - Assert.assertEquals(modifiedOmRequest.getCreateFileRequest() - .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); - - Assert.assertEquals(omKeyInfo.getModificationTime(), - omKeyInfo.getCreationTime()); - - - // Check data of the block - OzoneManagerProtocolProtos.KeyLocation keyLocation = - modifiedOmRequest.getCreateFileRequest().getKeyArgs() - .getKeyLocations(0); - - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getContainerID(), omKeyLocationInfo.getContainerID()); - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getLocalID(), omKeyLocationInfo.getLocalID()); - - } - - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, - HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, - false, true); - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - omRequest); - - OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); - - omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest); - - - OMClientResponse omFileCreateResponse = - omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - Assert.assertEquals(BUCKET_NOT_FOUND, - omFileCreateResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { - testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); - testNonRecursivePath("a/b", false, false, true); - - // Create some child keys for the path - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - testNonRecursivePath("a/b/c", false, false, false); - - // Delete child key and add a path "a/b/ to key table - omMetadataManager.getKeyTable().delete(omMetadataManager.getOzoneKey( - volumeName, bucketName, "a/b/c/d")); - - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - testNonRecursivePath("a/b/e", false, false, false); - - } - - @Test - public void testValidateAndUpdateCacheWithRecursive() throws Exception { - // Should be able to create file even if parent directories does not - // exist and key already exist, as this is with overwrite enabled. 
- testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - testNonRecursivePath("c/d/e/f", true, true, false); - // Create some child keys for the path - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - testNonRecursivePath("a/b/c", false, true, false); - - - } - - @Test - public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() - throws Exception { - - String key = "c/d/e/f"; - // Should be able to create file even if parent directories does not exist - testNonRecursivePath(key, false, true, false); - - // Add the key to key table - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - - // Even if key exists, should be able to create file as overwrite is set - // to true - testNonRecursivePath(key, true, true, false); - testNonRecursivePath(key, false, true, true); - } - - @Test - public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() - throws Exception { - - String key = "c/d/e/f"; - // Need to add the path which starts with "c/d/e" to keyTable as this is - // non-recursive parent should exist. - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/h", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - testNonRecursivePath(key, false, false, false); - - // Add the key to key table - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - - // Even if key exists, should be able to create file as overwrite is set - // to true - testNonRecursivePath(key, true, false, false); - testNonRecursivePath(key, false, false, true); - } - - - private void testNonRecursivePath(String key, - boolean overWrite, boolean recursive, boolean fail) throws Exception { - OMRequest omRequest = createFileRequest(volumeName, bucketName, key, - HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, - overWrite, recursive); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest( - omRequest); - - OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); - - omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest); - - OMClientResponse omFileCreateResponse = - omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - if (fail) { - Assert.assertTrue(omFileCreateResponse.getOMResponse() - .getStatus() == NOT_A_FILE || omFileCreateResponse.getOMResponse() - .getStatus() == FILE_ALREADY_EXISTS); - } else { - long id = modifiedOmRequest.getCreateFileRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - key, id); - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - Assert.assertNotNull(omKeyInfo); - - List< OmKeyLocationInfo > omKeyLocationInfoList = - omKeyInfo.getLatestVersionLocations().getLocationList(); - Assert.assertTrue(omKeyLocationInfoList.size() == 1); - - OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0); - - // Check 
modification time - Assert.assertEquals(modifiedOmRequest.getCreateFileRequest() - .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); - - - // Check data of the block - OzoneManagerProtocolProtos.KeyLocation keyLocation = - modifiedOmRequest.getCreateFileRequest().getKeyArgs() - .getKeyLocations(0); - - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getContainerID(), omKeyLocationInfo.getContainerID()); - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getLocalID(), omKeyLocationInfo.getLocalID()); - } - } - - - /** - * Create OMRequest which encapsulates OMFileCreateRequest. - * @param volumeName - * @param bucketName - * @param keyName - * @param replicationFactor - * @param replicationType - * @return OMRequest - */ - private OMRequest createFileRequest( - String volumeName, String bucketName, String keyName, - HddsProtos.ReplicationFactor replicationFactor, - HddsProtos.ReplicationType replicationType, boolean overWrite, - boolean recursive) { - - KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setFactor(replicationFactor) - .setType(replicationType).setDataSize(dataSize); - - CreateFileRequest createFileRequest = CreateFileRequest.newBuilder() - .setKeyArgs(keyArgs) - .setIsOverwrite(overWrite) - .setIsRecursive(recursive).build(); - - return OMRequest.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) - .setClientId(UUID.randomUUID().toString()) - .setCreateFileRequest(createFileRequest).build(); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java deleted file mode 100644 index ab81a7ed421..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for file requests. - */ -package org.apache.hadoop.ozone.om.request.file; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java deleted file mode 100644 index be3e4a76ca7..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ /dev/null @@ -1,245 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.ozone.om.request.key; - - -import java.util.List; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AllocateBlockRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Tests OMAllocateBlockRequest class. - */ -public class TestOMAllocateBlockRequest extends TestOMKeyRequest { - - @Test - public void testPreExecute() throws Exception { - - doPreExecute(createAllocateBlockRequest()); - - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createAllocateBlockRequest()); - - OMAllocateBlockRequest omAllocateBlockRequest = - new OMAllocateBlockRequest(modifiedOmRequest); - - - // Add volume, bucket, key entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - // Check before calling validateAndUpdateCache. As adding DB entry has - // not added any blocks, so size should be zero. - - OmKeyInfo omKeyInfo = - omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID)); - - List omKeyLocationInfo = - omKeyInfo.getLatestVersionLocations().getLocationList(); - - Assert.assertTrue(omKeyLocationInfo.size() == 0); - - OMClientResponse omAllocateBlockResponse = - omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omAllocateBlockResponse.getOMResponse().getStatus()); - - // Check open table whether new block is added or not. 
- - omKeyInfo = - omMetadataManager.getOpenKeyTable().get(omMetadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID)); - - - // Check modification time - Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest() - .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); - Assert.assertNotEquals(omKeyInfo.getCreationTime(), - omKeyInfo.getModificationTime()); - - // Check data of the block - OzoneManagerProtocolProtos.KeyLocation keyLocation = - modifiedOmRequest.getAllocateBlockRequest().getKeyLocation(); - - omKeyLocationInfo = - omKeyInfo.getLatestVersionLocations().getLocationList(); - - Assert.assertTrue(omKeyLocationInfo.size() == 1); - - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getContainerID(), omKeyLocationInfo.get(0).getContainerID()); - - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getLocalID(), omKeyLocationInfo.get(0).getLocalID()); - - } - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createAllocateBlockRequest()); - - OMAllocateBlockRequest omAllocateBlockRequest = - new OMAllocateBlockRequest(modifiedOmRequest); - - - OMClientResponse omAllocateBlockResponse = - omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND); - - } - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createAllocateBlockRequest()); - - OMAllocateBlockRequest omAllocateBlockRequest = - new OMAllocateBlockRequest(modifiedOmRequest); - - - // Added only volume to DB. - TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager); - - OMClientResponse omAllocateBlockResponse = - omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND); - - } - - @Test - public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createAllocateBlockRequest()); - - OMAllocateBlockRequest omAllocateBlockRequest = - new OMAllocateBlockRequest(modifiedOmRequest); - - // Add volume, bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - - OMClientResponse omAllocateBlockResponse = - omAllocateBlockRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omAllocateBlockResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND); - - } - - /** - * This method calls preExecute and verify the modified request. - * @param originalOMRequest - * @return OMRequest - modified request returned from preExecute. 
- * @throws Exception - */ - private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { - - OMAllocateBlockRequest omAllocateBlockRequest = - new OMAllocateBlockRequest(originalOMRequest); - - OMRequest modifiedOmRequest = - omAllocateBlockRequest.preExecute(ozoneManager); - - - Assert.assertEquals(originalOMRequest.getCmdType(), - modifiedOmRequest.getCmdType()); - Assert.assertEquals(originalOMRequest.getClientId(), - modifiedOmRequest.getClientId()); - - Assert.assertTrue(modifiedOmRequest.hasAllocateBlockRequest()); - AllocateBlockRequest allocateBlockRequest = - modifiedOmRequest.getAllocateBlockRequest(); - // Time should be set - Assert.assertTrue(allocateBlockRequest.getKeyArgs() - .getModificationTime() > 0); - - // KeyLocation should be set. - Assert.assertTrue(allocateBlockRequest.hasKeyLocation()); - Assert.assertEquals(containerID, - allocateBlockRequest.getKeyLocation().getBlockID() - .getContainerBlockID().getContainerID()); - Assert.assertEquals(localID, - allocateBlockRequest.getKeyLocation().getBlockID() - .getContainerBlockID().getLocalID()); - Assert.assertTrue(allocateBlockRequest.getKeyLocation().hasPipeline()); - - Assert.assertEquals(allocateBlockRequest.getClientID(), - allocateBlockRequest.getClientID()); - - return modifiedOmRequest; - } - - - private OMRequest createAllocateBlockRequest() { - - KeyArgs keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) - .build(); - - AllocateBlockRequest allocateBlockRequest = - AllocateBlockRequest.newBuilder().setClientID(clientID) - .setKeyArgs(keyArgs).build(); - - return OMRequest.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock) - .setClientId(UUID.randomUUID().toString()) - .setAllocateBlockRequest(allocateBlockRequest).build(); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java deleted file mode 100644 index 9bfac6c4f3c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hadoop.ozone.om.request.key; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CommitKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyLocation; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - - - -/** - * Class tests OMKeyCommitRequest class. - */ -public class TestOMKeyCommitRequest extends TestOMKeyRequest { - - @Test - public void testPreExecute() throws Exception { - doPreExecute(createCommitKeyRequest()); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = - new OMKeyCommitRequest(modifiedOmRequest); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omClientResponse = - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - // Entry should be deleted from openKey Table. - omKeyInfo = omMetadataManager.getOpenKeyTable().get(ozoneKey); - Assert.assertNull(omKeyInfo); - - // Now entry should be created in key Table. - omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNotNull(omKeyInfo); - - // Check modification time - - CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest(); - Assert.assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), - omKeyInfo.getModificationTime()); - - // Check block location. 
- List locationInfoListFromCommitKeyRequest = - commitKeyRequest.getKeyArgs() - .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); - - Assert.assertEquals(locationInfoListFromCommitKeyRequest, - omKeyInfo.getLatestVersionLocations().getLocationList()); - - } - - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = - new OMKeyCommitRequest(modifiedOmRequest); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omClientResponse = - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - } - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = - new OMKeyCommitRequest(modifiedOmRequest); - - - TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager); - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omClientResponse = - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - } - - @Test - public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createCommitKeyRequest()); - - OMKeyCommitRequest omKeyCommitRequest = - new OMKeyCommitRequest(modifiedOmRequest); - - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - // Key should not be there in key table, as validateAndUpdateCache is - // still not called. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omClientResponse = - omKeyCommitRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - } - - /** - * This method calls preExecute and verify the modified request. - * @param originalOMRequest - * @return OMRequest - modified request returned from preExecute. 
- * @throws Exception - */ - private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { - - OMKeyCommitRequest omKeyCommitRequest = - new OMKeyCommitRequest(originalOMRequest); - - OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager); - - Assert.assertTrue(modifiedOmRequest.hasCommitKeyRequest()); - KeyArgs originalKeyArgs = - originalOMRequest.getCommitKeyRequest().getKeyArgs(); - KeyArgs modifiedKeyArgs = - modifiedOmRequest.getCommitKeyRequest().getKeyArgs(); - verifyKeyArgs(originalKeyArgs, modifiedKeyArgs); - return modifiedOmRequest; - } - - /** - * Verify KeyArgs. - * @param originalKeyArgs - * @param modifiedKeyArgs - */ - private void verifyKeyArgs(KeyArgs originalKeyArgs, KeyArgs modifiedKeyArgs) { - - // Check modification time is set or not. - Assert.assertTrue(modifiedKeyArgs.getModificationTime() > 0); - Assert.assertTrue(originalKeyArgs.getModificationTime() == 0); - - Assert.assertEquals(originalKeyArgs.getVolumeName(), - modifiedKeyArgs.getVolumeName()); - Assert.assertEquals(originalKeyArgs.getBucketName(), - modifiedKeyArgs.getBucketName()); - Assert.assertEquals(originalKeyArgs.getKeyName(), - modifiedKeyArgs.getKeyName()); - Assert.assertEquals(originalKeyArgs.getDataSize(), - modifiedKeyArgs.getDataSize()); - Assert.assertEquals(originalKeyArgs.getKeyLocationsList(), - modifiedKeyArgs.getKeyLocationsList()); - Assert.assertEquals(originalKeyArgs.getType(), - modifiedKeyArgs.getType()); - Assert.assertEquals(originalKeyArgs.getFactor(), - modifiedKeyArgs.getFactor()); - } - - /** - * Create OMRequest which encapsulates CommitKeyRequest. - */ - private OMRequest createCommitKeyRequest() { - KeyArgs keyArgs = - KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName) - .setKeyName(keyName).setBucketName(bucketName) - .setType(replicationType).setFactor(replicationFactor) - .addAllKeyLocations(getKeyLocation()).build(); - - CommitKeyRequest commitKeyRequest = - CommitKeyRequest.newBuilder().setKeyArgs(keyArgs) - .setClientID(clientID).build(); - - return OMRequest.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey) - .setCommitKeyRequest(commitKeyRequest) - .setClientId(UUID.randomUUID().toString()).build(); - } - - /** - * Create KeyLocation list. - */ - private List getKeyLocation() { - List keyLocations = new ArrayList<>(); - - for (int i=0; i < 5; i++) { - KeyLocation keyLocation = - KeyLocation.newBuilder() - .setBlockID(HddsProtos.BlockID.newBuilder() - .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder() - .setContainerID(i+1000).setLocalID(i+100).build())) - .setOffset(0).setLength(200).build(); - keyLocations.add(keyLocation); - } - return keyLocations; - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java deleted file mode 100644 index 340cc048ce0..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ /dev/null @@ -1,329 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.util.List; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Tests OMCreateKeyRequest class. - */ -public class TestOMKeyCreateRequest extends TestOMKeyRequest { - - @Test - public void testPreExecuteWithNormalKey() throws Exception { - doPreExecute(createKeyRequest(false, 0)); - } - - @Test - public void testPreExecuteWithMultipartKey() throws Exception { - doPreExecute(createKeyRequest(true, 1)); - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createKeyRequest(false, 0)); - - OMKeyCreateRequest omKeyCreateRequest = - new OMKeyCreateRequest(modifiedOmRequest); - - // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - long id = modifiedOmRequest.getCreateKeyRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, id); - - // Before calling - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omKeyCreateResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omKeyCreateResponse.getOMResponse().getStatus()); - - // Check open table whether key is added or not. 
- - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNotNull(omKeyInfo); - - List omKeyLocationInfoList = - omKeyInfo.getLatestVersionLocations().getLocationList(); - Assert.assertTrue(omKeyLocationInfoList.size() == 1); - - OmKeyLocationInfo omKeyLocationInfo = omKeyLocationInfoList.get(0); - - // Check modification time - Assert.assertEquals(modifiedOmRequest.getCreateKeyRequest() - .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); - - Assert.assertEquals(omKeyInfo.getModificationTime(), - omKeyInfo.getCreationTime()); - - - // Check data of the block - OzoneManagerProtocolProtos.KeyLocation keyLocation = - modifiedOmRequest.getCreateKeyRequest().getKeyArgs().getKeyLocations(0); - - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getContainerID(), omKeyLocationInfo.getContainerID()); - Assert.assertEquals(keyLocation.getBlockID().getContainerBlockID() - .getLocalID(), omKeyLocationInfo.getLocalID()); - - } - - @Test - public void testValidateAndUpdateCacheWithNoSuchMultipartUploadError() - throws Exception { - - - int partNumber = 1; - OMRequest modifiedOmRequest = - doPreExecute(createKeyRequest(true, partNumber)); - - OMKeyCreateRequest omKeyCreateRequest = - new OMKeyCreateRequest(modifiedOmRequest); - - // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - long id = modifiedOmRequest.getCreateKeyRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, id); - - // Before calling - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omKeyCreateResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals( - OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR, - omKeyCreateResponse.getOMResponse().getStatus()); - - // As we got error, no entry should be created in openKeyTable. - - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - } - - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { - - OMRequest modifiedOmRequest = - doPreExecute(createKeyRequest(false, 0)); - - OMKeyCreateRequest omKeyCreateRequest = - new OMKeyCreateRequest(modifiedOmRequest); - - - long id = modifiedOmRequest.getCreateKeyRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, id); - - - // Before calling - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omKeyCreateResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omKeyCreateResponse.getOMResponse().getStatus()); - - - // As We got an error, openKey Table should not have entry. 
- omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - } - - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - - - OMRequest modifiedOmRequest = - doPreExecute(createKeyRequest( - false, 0)); - - OMKeyCreateRequest omKeyCreateRequest = - new OMKeyCreateRequest(modifiedOmRequest); - - - long id = modifiedOmRequest.getCreateKeyRequest().getClientID(); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, id); - - TestOMRequestUtils.addVolumeToDB(volumeName, "ozone", omMetadataManager); - - // Before calling - OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - OMClientResponse omKeyCreateResponse = - omKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omKeyCreateResponse.getOMResponse().getStatus()); - - - // As We got an error, openKey Table should not have entry. - omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); - - Assert.assertNull(omKeyInfo); - - } - - - - /** - * This method calls preExecute and verify the modified request. - * @param originalOMRequest - * @return OMRequest - modified request returned from preExecute. - * @throws Exception - */ - private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { - - OMKeyCreateRequest omKeyCreateRequest = - new OMKeyCreateRequest(originalOMRequest); - - OMRequest modifiedOmRequest = - omKeyCreateRequest.preExecute(ozoneManager); - - Assert.assertEquals(originalOMRequest.getCmdType(), - modifiedOmRequest.getCmdType()); - Assert.assertEquals(originalOMRequest.getClientId(), - modifiedOmRequest.getClientId()); - - Assert.assertTrue(modifiedOmRequest.hasCreateKeyRequest()); - - CreateKeyRequest createKeyRequest = - modifiedOmRequest.getCreateKeyRequest(); - - KeyArgs keyArgs = createKeyRequest.getKeyArgs(); - // Time should be set - Assert.assertTrue(keyArgs.getModificationTime() > 0); - - - // Client ID should be set. - Assert.assertTrue(createKeyRequest.hasClientID()); - Assert.assertTrue(createKeyRequest.getClientID() > 0); - - - if (!originalOMRequest.getCreateKeyRequest().getKeyArgs() - .getIsMultipartKey()) { - - // As our data size is 100, and scmBlockSize is default to 1000, so we - // shall have only one block. - List< OzoneManagerProtocolProtos.KeyLocation> keyLocations = - keyArgs.getKeyLocationsList(); - // KeyLocation should be set. - Assert.assertTrue(keyLocations.size() == 1); - Assert.assertEquals(containerID, - keyLocations.get(0).getBlockID().getContainerBlockID() - .getContainerID()); - Assert.assertEquals(localID, - keyLocations.get(0).getBlockID().getContainerBlockID() - .getLocalID()); - Assert.assertTrue(keyLocations.get(0).hasPipeline()); - - Assert.assertEquals(0, keyLocations.get(0).getOffset()); - - Assert.assertEquals(scmBlockSize, keyLocations.get(0).getLength()); - } else { - // We don't create blocks for multipart key in createKey preExecute. - Assert.assertTrue(keyArgs.getKeyLocationsList().size() == 0); - } - - return modifiedOmRequest; - - } - - /** - * Create OMRequest which encapsulates CreateKeyRequest. - * @param isMultipartKey - * @param partNumber - * @return OMRequest. 
- */
-
-  @SuppressWarnings("parameterNumber")
-  private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) {
-
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(volumeName).setBucketName(bucketName)
-        .setKeyName(keyName).setIsMultipartKey(isMultipartKey)
-        .setFactor(replicationFactor).setType(replicationType);
-
-    if (isMultipartKey) {
-      keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber);
-    }
-
-    OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest =
-        CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build();
-
-    return OMRequest.newBuilder()
-        .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey)
-        .setClientId(UUID.randomUUID().toString())
-        .setCreateKeyRequest(createKeyRequest).build();
-
-  }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
deleted file mode 100644
index e95ecd54396..00000000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteKeyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; - -/** - * Tests OmKeyDelete request. - */ -public class TestOMKeyDeleteRequest extends TestOMKeyRequest { - - @Test - public void testPreExecute() throws Exception { - doPreExecute(createDeleteKeyRequest()); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - - - // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - // As we added manually to key table. - Assert.assertNotNull(omKeyInfo); - - OMClientResponse omClientResponse = - omKeyDeleteRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - // Now after calling validateAndUpdateCache, it should be deleted. - - omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey); - - Assert.assertNull(omKeyInfo); - - } - - - @Test - public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - - // Add only volume and bucket entry to DB. - // In actual implementation we don't check for bucket/volume exists - // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - - OMClientResponse omClientResponse = - omKeyDeleteRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - } - - @Test - public void testValidateAndUpdateCacheWithOutVolumeAndBucket() - throws Exception { - OMRequest modifiedOmRequest = - doPreExecute(createDeleteKeyRequest()); - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(modifiedOmRequest); - - // In actual implementation we don't check for bucket/volume exists - // during delete key. 
So it should still return error KEY_NOT_FOUND - - OMClientResponse omClientResponse = - omKeyDeleteRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - } - - - /** - * This method calls preExecute and verify the modified request. - * @param originalOmRequest - * @return OMRequest - modified request returned from preExecute. - * @throws Exception - */ - private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { - - OMKeyDeleteRequest omKeyDeleteRequest = - new OMKeyDeleteRequest(originalOmRequest); - - OMRequest modifiedOmRequest = omKeyDeleteRequest.preExecute(ozoneManager); - - // Will not be equal, as UserInfo will be set. - Assert.assertNotEquals(originalOmRequest, modifiedOmRequest); - - return modifiedOmRequest; - } - - /** - * Create OMRequest which encapsulates DeleteKeyRequest. - * @return OMRequest - */ - private OMRequest createDeleteKeyRequest() { - KeyArgs keyArgs = KeyArgs.newBuilder().setBucketName(bucketName) - .setVolumeName(volumeName).setKeyName(keyName).build(); - - DeleteKeyRequest deleteKeyRequest = - DeleteKeyRequest.newBuilder().setKeyArgs(keyArgs).build(); - - return OMRequest.newBuilder().setDeleteKeyRequest(deleteKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) - .setClientId(UUID.randomUUID().toString()).build(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java deleted file mode 100644 index df6b1772565..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Tests {@link OMKeyPurgeRequest} and {@link OMKeyPurgeResponse}. - */ -public class TestOMKeyPurgeRequestAndResponse extends TestOMKeyRequest { - - private int numKeys = 10; - - /** - * Creates volume, bucket and key entries and adds to OM DB and then - * deletes these keys to move them to deletedKeys table. - */ - private List createAndDeleteKeys() throws Exception { - // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - List ozoneKeyNames = new ArrayList<>(numKeys); - for (int i = 1; i <= numKeys; i++) { - String key = keyName + "-" + i; - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, key, - clientID, replicationType, replicationFactor, omMetadataManager); - ozoneKeyNames.add( - omMetadataManager.getOzoneKey(volumeName, bucketName, key)); - } - - List deletedKeyNames = new ArrayList<>(numKeys); - for (String ozoneKey : ozoneKeyNames) { - String deletedKeyName = TestOMRequestUtils.deleteKey( - ozoneKey, omMetadataManager); - deletedKeyNames.add(deletedKeyName); - } - - return deletedKeyNames; - } - - /** - * Create OMRequest which encapsulates DeleteKeyRequest. - * @return OMRequest - */ - private OMRequest createPurgeKeysRequest(List deletedKeys) { - PurgeKeysRequest purgeKeysRequest = PurgeKeysRequest.newBuilder() - .addAllKeys(deletedKeys) - .build(); - - return OMRequest.newBuilder() - .setPurgeKeysRequest(purgeKeysRequest) - .setCmdType(Type.PurgeKeys) - .setClientId(UUID.randomUUID().toString()) - .build(); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - // Create and Delete keys. 
The keys should be moved to DeletedKeys table - List deletedKeyNames = createAndDeleteKeys(); - - // The keys should be present in the DeletedKeys table before purging - for (String deletedKey : deletedKeyNames) { - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - - // Create PurgeKeysRequest to purge the deleted keys - OMRequest omRequest = createPurgeKeysRequest(deletedKeyNames); - - OMRequest preExecutedRequest = preExecute(omRequest); - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(preExecutedRequest); - - OMClientResponse omClientResponse = - omKeyPurgeRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = OMResponse.newBuilder() - .setPurgeKeysResponse(PurgeKeysResponse.getDefaultInstance()) - .setCmdType(Type.PurgeKeys) - .setStatus(Status.OK) - .build(); - - BatchOperation batchOperation = - omMetadataManager.getStore().initBatchOperation(); - - OMKeyPurgeResponse omKeyPurgeResponse = - new OMKeyPurgeResponse(deletedKeyNames, omResponse); - omKeyPurgeResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // The keys should not exist in the DeletedKeys table - for (String deletedKey : deletedKeyNames) { - Assert.assertFalse(omMetadataManager.getDeletedTable().isExist( - deletedKey)); - } - } - - private OMRequest preExecute(OMRequest originalOmRequest) throws IOException { - OMKeyPurgeRequest omKeyPurgeRequest = - new OMKeyPurgeRequest(originalOmRequest); - - OMRequest modifiedOmRequest = omKeyPurgeRequest.preExecute(ozoneManager); - - // Will not be equal, as UserInfo will be set. - Assert.assertNotEquals(originalOmRequest, modifiedOmRequest); - - return modifiedOmRequest; - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java deleted file mode 100644 index 864ba06111a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.util.UUID; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .RenameKeyRequest; - -/** - * Tests RenameKey request. - */ -public class TestOMKeyRenameRequest extends TestOMKeyRequest { - - @Test - public void testPreExecute() throws Exception { - doPreExecute(createRenameKeyRequest(UUID.randomUUID().toString())); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = - doPreExecute(createRenameKeyRequest(toKeyName)); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omKeyRenameResponse.getOMResponse().getStatus()); - - String key = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - // Original key should be deleted, toKey should exist. - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(key); - - Assert.assertNull(omKeyInfo); - - omKeyInfo = - omMetadataManager.getKeyTable().get(omMetadataManager.getOzoneKey( - volumeName, bucketName, toKeyName)); - - Assert.assertNotNull(omKeyInfo); - - // For new key modification time should be updated. - - KeyArgs keyArgs = modifiedOmRequest.getRenameKeyRequest().getKeyArgs(); - - Assert.assertEquals(keyArgs.getModificationTime(), - omKeyInfo.getModificationTime()); - - // KeyName should be updated in OmKeyInfo to toKeyName. - Assert.assertEquals(omKeyInfo.getKeyName(), toKeyName); - - } - - - @Test - public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = - doPreExecute(createRenameKeyRequest(toKeyName)); - - // Add only volume and bucket entry to DB. - - // In actual implementation we don't check for bucket/volume exists - // during delete key. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, - omKeyRenameResponse.getOMResponse().getStatus()); - - } - - - @Test - public void testValidateAndUpdateCacheWithOutVolumeAndBucket() - throws Exception { - String toKeyName = UUID.randomUUID().toString(); - OMRequest modifiedOmRequest = - doPreExecute(createRenameKeyRequest(toKeyName)); - - // In actual implementation we don't check for bucket/volume exists - // during delete key. So it should still return error KEY_NOT_FOUND - - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND, - omKeyRenameResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheWithToKeyInvalid() throws Exception { - String toKeyName = ""; - OMRequest modifiedOmRequest = - doPreExecute(createRenameKeyRequest(toKeyName)); - - // Add only volume and bucket entry to DB. - - // In actual implementation we don't check for bucket/volume exists - // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME, - omKeyRenameResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception { - String toKeyName = UUID.randomUUID().toString(); - keyName = ""; - OMRequest modifiedOmRequest = - doPreExecute(createRenameKeyRequest(toKeyName)); - - // Add only volume and bucket entry to DB. - - // In actual implementation we don't check for bucket/volume exists - // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(modifiedOmRequest); - - OMClientResponse omKeyRenameResponse = - omKeyRenameRequest.validateAndUpdateCache(ozoneManager, 100L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME, - omKeyRenameResponse.getOMResponse().getStatus()); - - } - - - /** - * This method calls preExecute and verify the modified request. - * @param originalOmRequest - * @return OMRequest - modified request returned from preExecute. - * @throws Exception - */ - - private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { - OMKeyRenameRequest omKeyRenameRequest = - new OMKeyRenameRequest(originalOmRequest); - - OMRequest modifiedOmRequest = omKeyRenameRequest.preExecute(ozoneManager); - - // Will not be equal, as UserInfo will be set and modification time is - // set in KeyArgs. 
- Assert.assertNotEquals(originalOmRequest, modifiedOmRequest); - - Assert.assertTrue(modifiedOmRequest.getRenameKeyRequest() - .getKeyArgs().getModificationTime() > 0); - - return modifiedOmRequest; - } - - /** - * Create OMRequest which encapsulates RenameKeyRequest. - * @return OMRequest - */ - private OMRequest createRenameKeyRequest(String toKeyName) { - KeyArgs keyArgs = KeyArgs.newBuilder().setKeyName(keyName) - .setVolumeName(volumeName).setBucketName(bucketName).build(); - - RenameKeyRequest renameKeyRequest = RenameKeyRequest.newBuilder() - .setKeyArgs(keyArgs).setToKeyName(toKeyName).build(); - - return OMRequest.newBuilder() - .setClientId(UUID.randomUUID().toString()) - .setRenameKeyRequest(renameKeyRequest) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey).build(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java deleted file mode 100644 index 92d6cdb8098..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.key; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ScmClient; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; -import org.apache.hadoop.util.Time; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.when; - -/** - * Base test class for key request. - */ -@SuppressWarnings("visibilitymodifier") -public class TestOMKeyRequest { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OzoneManager ozoneManager; - protected OMMetrics omMetrics; - protected OMMetadataManager omMetadataManager; - protected AuditLogger auditLogger; - - protected ScmClient scmClient; - protected OzoneBlockTokenSecretManager ozoneBlockTokenSecretManager; - protected ScmBlockLocationProtocol scmBlockLocationProtocol; - - protected final long containerID = 1000L; - protected final long localID = 100L; - - protected String volumeName; - protected String bucketName; - protected String keyName; - protected HddsProtos.ReplicationType replicationType; - protected HddsProtos.ReplicationFactor replicationFactor; - protected long clientID; - protected long scmBlockSize = 1000L; - protected long dataSize; - - // Just setting ozoneManagerDoubleBuffer which does nothing. 
- protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - - @Before - public void setup() throws Exception { - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - - scmClient = Mockito.mock(ScmClient.class); - ozoneBlockTokenSecretManager = - Mockito.mock(OzoneBlockTokenSecretManager.class); - scmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class); - when(ozoneManager.getScmClient()).thenReturn(scmClient); - when(ozoneManager.getBlockTokenSecretManager()) - .thenReturn(ozoneBlockTokenSecretManager); - when(ozoneManager.getScmBlockSize()).thenReturn(scmBlockSize); - when(ozoneManager.getPreallocateBlocksMax()).thenReturn(2); - when(ozoneManager.isGrpcBlockTokenEnabled()).thenReturn(false); - when(ozoneManager.getOMNodeId()).thenReturn(UUID.randomUUID().toString()); - when(scmClient.getBlockClient()).thenReturn(scmBlockLocationProtocol); - - Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setNodes(new ArrayList<>()) - .build(); - - AllocatedBlock allocatedBlock = - new AllocatedBlock.Builder() - .setContainerBlockID(new ContainerBlockID(containerID, localID)) - .setPipeline(pipeline).build(); - - List allocatedBlocks = new ArrayList<>(); - - allocatedBlocks.add(allocatedBlock); - - when(scmBlockLocationProtocol.allocateBlock(anyLong(), anyInt(), - any(HddsProtos.ReplicationType.class), - any(HddsProtos.ReplicationFactor.class), - anyString(), any(ExcludeList.class))).thenReturn(allocatedBlocks); - - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); - keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; - clientID = Time.now(); - dataSize = 1000L; - - } - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java deleted file mode 100644 index 203467098b3..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for key requests. - */ -package org.apache.hadoop.ozone.om.request.key; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java deleted file mode 100644 index 0bdab7d655a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for OM request. - */ -package org.apache.hadoop.ozone.om.request; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java deleted file mode 100644 index cd42ec64d36..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketCreateRequest.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om.request.s3.bucket; - -import java.util.UUID; - -import org.apache.commons.lang.RandomStringUtils; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import static org.junit.Assert.fail; - -/** - * Tests S3BucketCreateRequest class, which handles S3 CreateBucket request. - */ -public class TestS3BucketCreateRequest extends TestS3BucketRequest { - - @Test - public void testPreExecute() throws Exception { - String userName = UUID.randomUUID().toString(); - String s3BucketName = UUID.randomUUID().toString(); - doPreExecute(userName, s3BucketName); - } - - @Test - public void testPreExecuteInvalidBucketLength() throws Exception { - String userName = UUID.randomUUID().toString(); - - // set bucket name which is less than 3 characters length - String s3BucketName = RandomStringUtils.randomAlphabetic(2); - - try { - doPreExecute(userName, s3BucketName); - fail("testPreExecuteInvalidBucketLength failed"); - } catch (OMException ex) { - GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex); - } - - // set bucket name which is greater than 63 characters length - s3BucketName = RandomStringUtils.randomAlphabetic(64); - - try { - doPreExecute(userName, s3BucketName); - fail("testPreExecuteInvalidBucketLength failed"); - } catch (OMException ex) { - GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex); - } - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - String userName = UUID.randomUUID().toString(); - String s3BucketName = UUID.randomUUID().toString(); - - S3BucketCreateRequest s3BucketCreateRequest = doPreExecute(userName, - s3BucketName); - - doValidateAndUpdateCache(userName, s3BucketName, - s3BucketCreateRequest.getOmRequest()); - - } - - - @Test - public void testValidateAndUpdateCacheWithS3BucketAlreadyExists() - throws Exception { - String userName = UUID.randomUUID().toString(); - String s3BucketName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addS3BucketToDB( - S3BucketCreateRequest.formatOzoneVolumeName(userName), s3BucketName, - omMetadataManager); - - S3BucketCreateRequest s3BucketCreateRequest = - doPreExecute(userName, s3BucketName); - - - // Try create same bucket again - OMClientResponse omClientResponse = - s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateBucketResponse()); - Assert.assertEquals( - OzoneManagerProtocolProtos.Status.S3_BUCKET_ALREADY_EXISTS, - omResponse.getStatus()); - } - - @Test - public void testValidateAndUpdateCacheWithBucketAlreadyExists() - throws Exception { - String userName = UUID.randomUUID().toString(); - String s3BucketName = UUID.randomUUID().toString(); - - S3BucketCreateRequest s3BucketCreateRequest = - doPreExecute(userName, s3BucketName); - - TestOMRequestUtils.addVolumeAndBucketToDB( - s3BucketCreateRequest.formatOzoneVolumeName(userName), - s3BucketName, omMetadataManager); - - - // Try create 
same bucket again - OMClientResponse omClientResponse = - s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateBucketResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_ALREADY_EXISTS, - omResponse.getStatus()); - } - - - - private S3BucketCreateRequest doPreExecute(String userName, - String s3BucketName) throws Exception { - OMRequest originalRequest = - TestOMRequestUtils.createS3BucketRequest(userName, s3BucketName); - - S3BucketCreateRequest s3BucketCreateRequest = - new S3BucketCreateRequest(originalRequest); - - OMRequest modifiedRequest = s3BucketCreateRequest.preExecute(ozoneManager); - // Modification time will be set, so requests should not be equal. - Assert.assertNotEquals(originalRequest, modifiedRequest); - return new S3BucketCreateRequest(modifiedRequest); - } - - private void doValidateAndUpdateCache(String userName, String s3BucketName, - OMRequest modifiedRequest) throws Exception { - - // As we have not still called validateAndUpdateCache, get() should - // return null. - - Assert.assertNull(omMetadataManager.getS3Table().get(s3BucketName)); - S3BucketCreateRequest s3BucketCreateRequest = - new S3BucketCreateRequest(modifiedRequest); - - - OMClientResponse omClientResponse = - s3BucketCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - // As now after validateAndUpdateCache it should add entry to cache, get - // should return non null value. - - Assert.assertNotNull(omMetadataManager.getS3Table().get(s3BucketName)); - - String bucketKey = - omMetadataManager.getBucketKey( - s3BucketCreateRequest.formatOzoneVolumeName(userName), - s3BucketName); - - // check ozone bucket entry is created or not. - Assert.assertNotNull(omMetadataManager.getBucketTable().get(bucketKey)); - - String volumeKey = omMetadataManager.getVolumeKey( - s3BucketCreateRequest.formatOzoneVolumeName(userName)); - - // Check volume entry is created or not. - Assert.assertNotNull(omMetadataManager.getVolumeTable().get(volumeKey)); - - // check om response. - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - Assert.assertEquals(OzoneManagerProtocolProtos.Type.CreateS3Bucket, - omClientResponse.getOMResponse().getCmdType()); - - } - -} - diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java deleted file mode 100644 index f5422686df2..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.s3.bucket; - -import java.util.UUID; - -import org.apache.commons.lang.RandomStringUtils; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.test.GenericTestUtils; - -import static org.junit.Assert.fail; - -/** - * Tests S3BucketDelete Request. - */ -public class TestS3BucketDeleteRequest extends TestS3BucketRequest { - - @Test - public void testPreExecute() throws Exception { - String s3BucketName = UUID.randomUUID().toString(); - doPreExecute(s3BucketName); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - String s3BucketName = UUID.randomUUID().toString(); - OMRequest omRequest = doPreExecute(s3BucketName); - - // Add s3Bucket to s3Bucket table. - TestOMRequestUtils.addS3BucketToDB("ozone", s3BucketName, - omMetadataManager); - - S3BucketDeleteRequest s3BucketDeleteRequest = - new S3BucketDeleteRequest(omRequest); - - OMClientResponse s3BucketDeleteResponse = - s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - s3BucketDeleteResponse.getOMResponse().getStatus()); - } - - @Test - public void testValidateAndUpdateCacheWithS3BucketNotFound() - throws Exception { - String s3BucketName = UUID.randomUUID().toString(); - OMRequest omRequest = doPreExecute(s3BucketName); - - S3BucketDeleteRequest s3BucketDeleteRequest = - new S3BucketDeleteRequest(omRequest); - - OMClientResponse s3BucketDeleteResponse = - s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.S3_BUCKET_NOT_FOUND, - s3BucketDeleteResponse.getOMResponse().getStatus()); - } - - @Test - public void testPreExecuteInvalidBucketLength() throws Exception { - // set bucket name which is less than 3 characters length - String s3BucketName = RandomStringUtils.randomAlphabetic(2); - - try { - doPreExecute(s3BucketName); - fail("testPreExecuteInvalidBucketLength failed"); - } catch (OMException ex) { - GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex); - } - - // set bucket name which is less than 3 characters length - s3BucketName = RandomStringUtils.randomAlphabetic(65); - - try { - doPreExecute(s3BucketName); - fail("testPreExecuteInvalidBucketLength failed"); - } catch (OMException ex) { - GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex); - } - } - - private OMRequest doPreExecute(String s3BucketName) throws Exception { - OMRequest omRequest = - TestOMRequestUtils.deleteS3BucketRequest(s3BucketName); - - S3BucketDeleteRequest s3BucketDeleteRequest = - new S3BucketDeleteRequest(omRequest); - - OMRequest modifiedOMRequest 
= - s3BucketDeleteRequest.preExecute(ozoneManager); - - // As user name will be set both should not be equal. - Assert.assertNotEquals(omRequest, modifiedOMRequest); - - return modifiedOMRequest; - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java deleted file mode 100644 index 747efb0d184..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketRequest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.s3.bucket; - -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * Base test class for S3 Bucket request. - */ -@SuppressWarnings("visibilityModifier") -public class TestS3BucketRequest { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OzoneManager ozoneManager; - protected OMMetrics omMetrics; - protected OMMetadataManager omMetadataManager; - protected AuditLogger auditLogger; - - // Just setting ozoneManagerDoubleBuffer which does nothing. 
- protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - - @Before - public void setup() throws Exception { - - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - } - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java deleted file mode 100644 index 8b2e84b77ae..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for s3 bucket requests. - */ -package org.apache.hadoop.ozone.om.request.s3.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java deleted file mode 100644 index 1d785609b56..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; - -/** - * Tests S3 Initiate Multipart Upload request. - */ -public class TestS3InitiateMultipartUploadRequest - extends TestS3MultipartRequest { - - @Test - public void testPreExecute() { - doPreExecuteInitiateMPU(UUID.randomUUID().toString(), - UUID.randomUUID().toString(), UUID.randomUUID().toString()); - } - - - @Test - public void testValidateAndUpdateCache() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - // Add volume and bucket to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(modifiedRequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getMultipartUploadID()); - - Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNotNull(omMetadataManager.getMultipartInfoTable() - .get(multipartKey)); - - Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getMultipartUploadID(), - omMetadataManager.getMultipartInfoTable().get(multipartKey) - .getUploadID()); - - Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getModificationTime(), - omMetadataManager.getOpenKeyTable().get(multipartKey) - .getModificationTime()); - Assert.assertEquals(modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getModificationTime(), - omMetadataManager.getOpenKeyTable().get(multipartKey) - .getCreationTime()); - - } - - - @Test - public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - OMRequest modifiedRequest = doPreExecuteInitiateMPU( - volumeName, bucketName, keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(modifiedRequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, 
modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getMultipartUploadID()); - - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNull(omMetadataManager.getMultipartInfoTable() - .get(multipartKey)); - - } - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - - OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName, - keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(modifiedRequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 100L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getMultipartUploadID()); - - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNull(omMetadataManager.getMultipartInfoTable() - .get(multipartKey)); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java deleted file mode 100644 index 99500274628..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.io.IOException; -import java.util.List; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * Base test class for S3 Multipart upload request. - */ -@SuppressWarnings("visibilitymodifier") -public class TestS3MultipartRequest { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OzoneManager ozoneManager; - protected OMMetrics omMetrics; - protected OMMetadataManager omMetadataManager; - protected AuditLogger auditLogger; - - // Just setting ozoneManagerDoubleBuffer which does nothing. - protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - - @Before - public void setup() throws Exception { - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - } - - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } - - /** - * Perform preExecute of Initiate Multipart upload request for given - * volume, bucket and key name. - * @param volumeName - * @param bucketName - * @param keyName - * @return OMRequest - returned from preExecute. 
- */ - protected OMRequest doPreExecuteInitiateMPU( - String volumeName, String bucketName, String keyName) { - OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, - keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(omRequest); - - OMRequest modifiedRequest = - s3InitiateMultipartUploadRequest.preExecute(ozoneManager); - - Assert.assertNotEquals(omRequest, modifiedRequest); - Assert.assertTrue(modifiedRequest.hasInitiateMultiPartUploadRequest()); - Assert.assertNotNull(modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getMultipartUploadID()); - Assert.assertTrue(modifiedRequest.getInitiateMultiPartUploadRequest() - .getKeyArgs().getModificationTime() > 0); - - return modifiedRequest; - } - - /** - * Perform preExecute of Commit Multipart Upload request for given volume, - * bucket and keyName. - * @param volumeName - * @param bucketName - * @param keyName - * @param clientID - * @param multipartUploadID - * @param partNumber - * @return OMRequest - returned from preExecute. - */ - protected OMRequest doPreExecuteCommitMPU( - String volumeName, String bucketName, String keyName, - long clientID, String multipartUploadID, int partNumber) { - - // Just set dummy size - long dataSize = 100L; - OMRequest omRequest = - TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName, - keyName, clientID, dataSize, multipartUploadID, partNumber); - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(omRequest); - - - OMRequest modifiedRequest = - s3MultipartUploadCommitPartRequest.preExecute(ozoneManager); - - // UserInfo and modification time is set. - Assert.assertNotEquals(omRequest, modifiedRequest); - - return modifiedRequest; - } - - /** - * Perform preExecute of Abort Multipart Upload request for given volume, - * bucket and keyName. - * @param volumeName - * @param bucketName - * @param keyName - * @param multipartUploadID - * @return OMRequest - returned from preExecute. - * @throws IOException - */ - protected OMRequest doPreExecuteAbortMPU( - String volumeName, String bucketName, String keyName, - String multipartUploadID) throws IOException { - - OMRequest omRequest = - TestOMRequestUtils.createAbortMPURequest(volumeName, bucketName, - keyName, multipartUploadID); - - - S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest = - new S3MultipartUploadAbortRequest(omRequest); - - OMRequest modifiedRequest = - s3MultipartUploadAbortRequest.preExecute(ozoneManager); - - // UserInfo and modification time is set. - Assert.assertNotEquals(omRequest, modifiedRequest); - - return modifiedRequest; - - } - - protected OMRequest doPreExecuteCompleteMPU(String volumeName, - String bucketName, String keyName, String multipartUploadID, - List partList) throws IOException { - - OMRequest omRequest = - TestOMRequestUtils.createCompleteMPURequest(volumeName, bucketName, - keyName, multipartUploadID, partList); - - S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = - new S3MultipartUploadCompleteRequest(omRequest); - - OMRequest modifiedRequest = - s3MultipartUploadCompleteRequest.preExecute(ozoneManager); - - // UserInfo and modification time is set. 
- Assert.assertNotEquals(omRequest, modifiedRequest); - - return modifiedRequest; - - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java deleted file mode 100644 index d0b61c72636..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.io.IOException; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Test Multipart upload abort request. - */ -public class TestS3MultipartUploadAbortRequest extends TestS3MultipartRequest { - - - @Test - public void testPreExecute() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - doPreExecuteAbortMPU(volumeName, bucketName, keyName, - UUID.randomUUID().toString()); - } - - @Test - public void testValidateAndUpdateCache() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(initiateMPURequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 1L, ozoneManagerDoubleBufferHelper); - - String multipartUploadID = omClientResponse.getOMResponse() - .getInitiateMultiPartUploadResponse().getMultipartUploadID(); - - OMRequest abortMPURequest = - doPreExecuteAbortMPU(volumeName, bucketName, keyName, - multipartUploadID); - - S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest = - new S3MultipartUploadAbortRequest(abortMPURequest); - - omClientResponse = - s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L, - ozoneManagerDoubleBufferHelper); - - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - // Check table and response. 
- Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - Assert.assertNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - - } - - @Test - public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - String multipartUploadID = "randomMPU"; - - OMRequest abortMPURequest = - doPreExecuteAbortMPU(volumeName, bucketName, keyName, - multipartUploadID); - - S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest = - new S3MultipartUploadAbortRequest(abortMPURequest); - - OMClientResponse omClientResponse = - s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L, - ozoneManagerDoubleBufferHelper); - - // Check table and response. - Assert.assertEquals( - OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR, - omClientResponse.getOMResponse().getStatus()); - - } - - - @Test - public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - - String multipartUploadID = "randomMPU"; - - OMRequest abortMPURequest = - doPreExecuteAbortMPU(volumeName, bucketName, keyName, - multipartUploadID); - - S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest = - new S3MultipartUploadAbortRequest(abortMPURequest); - - OMClientResponse omClientResponse = - s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L, - ozoneManagerDoubleBufferHelper); - - // Check table and response. - Assert.assertEquals( - OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheBucketNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - String multipartUploadID = "randomMPU"; - - OMRequest abortMPURequest = - doPreExecuteAbortMPU(volumeName, bucketName, keyName, - multipartUploadID); - - S3MultipartUploadAbortRequest s3MultipartUploadAbortRequest = - new S3MultipartUploadAbortRequest(abortMPURequest); - - OMClientResponse omClientResponse = - s3MultipartUploadAbortRequest.validateAndUpdateCache(ozoneManager, 2L, - ozoneManagerDoubleBufferHelper); - - // Check table and response. 
- Assert.assertEquals( - OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java deleted file mode 100644 index 5b220bf4c87..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.util.Time; -import org.junit.Assert; -import org.junit.Test; - -import java.util.UUID; - -/** - * Tests S3 Multipart upload commit part request. 
- */ -public class TestS3MultipartUploadCommitPartRequest - extends TestS3MultipartRequest { - - @Test - public void testPreExecute() { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - doPreExecuteCommitMPU(volumeName, bucketName, keyName, Time.now(), - UUID.randomUUID().toString(), 1); - } - - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(initiateMPURequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 1L, ozoneManagerDoubleBufferHelper); - - long clientID = Time.now(); - String multipartUploadID = omClientResponse.getOMResponse() - .getInitiateMultiPartUploadResponse().getMultipartUploadID(); - - OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName, - bucketName, keyName, clientID, multipartUploadID, 1); - - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(commitMultipartRequest); - - // Add key to open key table. - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - - omClientResponse = - s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager, - 2L, ozoneManagerDoubleBufferHelper); - - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.OK); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - Assert.assertNotNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - Assert.assertTrue(omMetadataManager.getMultipartInfoTable() - .get(multipartKey).getPartKeyInfoMap().size() == 1); - Assert.assertNull(omMetadataManager.getOpenKeyTable() - .get(omMetadataManager.getOpenKey(volumeName, bucketName, keyName, - clientID))); - - } - - @Test - public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - - long clientID = Time.now(); - String multipartUploadID = UUID.randomUUID().toString(); - - OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName, - bucketName, keyName, clientID, multipartUploadID, 1); - - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(commitMultipartRequest); - - // Add key to open key table. 
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - - OMClientResponse omClientResponse = - s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager, - 2L, ozoneManagerDoubleBufferHelper); - - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - Assert.assertNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - - } - - @Test - public void testValidateAndUpdateCacheKeyNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - - long clientID = Time.now(); - String multipartUploadID = UUID.randomUUID().toString(); - - OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName, - bucketName, keyName, clientID, multipartUploadID, 1); - - // Don't add key to open table entry, and we are trying to commit this MPU - // part. It will fail with KEY_NOT_FOUND - - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(commitMultipartRequest); - - - OMClientResponse omClientResponse = - s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager, - 2L, ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND); - - } - - - @Test - public void testValidateAndUpdateCacheBucketFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - - long clientID = Time.now(); - String multipartUploadID = UUID.randomUUID().toString(); - - OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName, - bucketName, keyName, clientID, multipartUploadID, 1); - - // Don't add key to open table entry, and we are trying to commit this MPU - // part. It will fail with KEY_NOT_FOUND - - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(commitMultipartRequest); - - - OMClientResponse omClientResponse = - s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager, - 2L, ozoneManagerDoubleBufferHelper); - - Assert.assertTrue(omClientResponse.getOMResponse().getStatus() - == OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java deleted file mode 100644 index a04f51fb1cf..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; - -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; -import org.apache.hadoop.util.Time; - - -/** - * Tests S3 Multipart Upload Complete request. - */ -public class TestS3MultipartUploadCompleteRequest - extends TestS3MultipartRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - doPreExecuteCompleteMPU(volumeName, bucketName, keyName, - UUID.randomUUID().toString(), new ArrayList<>()); - } - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - - OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, - bucketName, keyName); - - S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = - new S3InitiateMultipartUploadRequest(initiateMPURequest); - - OMClientResponse omClientResponse = - s3InitiateMultipartUploadRequest.validateAndUpdateCache(ozoneManager, - 1L, ozoneManagerDoubleBufferHelper); - - long clientID = Time.now(); - String multipartUploadID = omClientResponse.getOMResponse() - .getInitiateMultiPartUploadResponse().getMultipartUploadID(); - - OMRequest commitMultipartRequest = doPreExecuteCommitMPU(volumeName, - bucketName, keyName, clientID, multipartUploadID, 1); - - S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = - new S3MultipartUploadCommitPartRequest(commitMultipartRequest); - - // Add key to open key table. 
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); - - s3MultipartUploadCommitPartRequest.validateAndUpdateCache(ozoneManager, - 2L, ozoneManagerDoubleBufferHelper); - - List partList = new ArrayList<>(); - - partList.add(Part.newBuilder().setPartName( - omMetadataManager.getOzoneKey(volumeName, bucketName, keyName) + - clientID).setPartNumber(1).build()); - - OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, - bucketName, keyName, multipartUploadID, partList); - - S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = - new S3MultipartUploadCompleteRequest(completeMultipartRequest); - - omClientResponse = - s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager, - 3L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - Assert.assertNotNull(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneKey(volumeName, bucketName, keyName))); - } - - @Test - public void testValidateAndUpdateCacheVolumeNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - List partList = new ArrayList<>(); - - OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, - bucketName, keyName, UUID.randomUUID().toString(), partList); - - S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = - new S3MultipartUploadCompleteRequest(completeMultipartRequest); - - OMClientResponse omClientResponse = - s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager, - 3L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheBucketNotFound() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - List partList = new ArrayList<>(); - - OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, - bucketName, keyName, UUID.randomUUID().toString(), partList); - - S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = - new S3MultipartUploadCompleteRequest(completeMultipartRequest); - - OMClientResponse omClientResponse = - s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager, - 3L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, - omClientResponse.getOMResponse().getStatus()); - - } - - @Test - public void testValidateAndUpdateCacheNoSuchMultipartUploadError() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); - List partList = new ArrayList<>(); - - OMRequest 
completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, - bucketName, keyName, UUID.randomUUID().toString(), partList); - - // Doing complete multipart upload request with out initiate. - S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = - new S3MultipartUploadCompleteRequest(completeMultipartRequest); - - OMClientResponse omClientResponse = - s3MultipartUploadCompleteRequest.validateAndUpdateCache(ozoneManager, - 3L, ozoneManagerDoubleBufferHelper); - - Assert.assertEquals( - OzoneManagerProtocolProtos.Status.NO_SUCH_MULTIPART_UPLOAD_ERROR, - omClientResponse.getOMResponse().getStatus()); - - } -} - diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java deleted file mode 100644 index 44554184244..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for S3 MPU requests. - */ - -package org.apache.hadoop.ozone.om.request.s3.multipart; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java deleted file mode 100644 index b685711416a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ /dev/null @@ -1,258 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.util.UUID; - -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .VolumeInfo; - -import static org.mockito.Mockito.when; - -/** - * Tests create volume request. - */ - -public class TestOMVolumeCreateRequest extends TestOMVolumeRequest { - - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String adminName = UUID.randomUUID().toString(); - String ownerName = UUID.randomUUID().toString(); - doPreExecute(volumeName, adminName, ownerName); - } - - - @Test - public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount() - throws Exception { - when(ozoneManager.getMaxUserVolumeCount()).thenReturn(0L); - String volumeName = UUID.randomUUID().toString(); - String adminName = "user1"; - String ownerName = "user1"; - - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - omVolumeCreateRequest.preExecute(ozoneManager); - - try { - OMClientResponse omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - Assert.assertTrue(omClientResponse instanceof OMVolumeCreateResponse); - OMVolumeCreateResponse respone = - (OMVolumeCreateResponse) omClientResponse; - Assert.assertEquals(1, respone.getOmVolumeArgs().getObjectID()); - Assert.assertEquals(1, respone.getOmVolumeArgs().getUpdateID()); - } catch (IllegalArgumentException ex){ - GenericTestUtils.assertExceptionContains("should be greater than zero", - ex); - } - - } - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String adminName = "user1"; - String ownerName = "user1"; - - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - String ownerKey = omMetadataManager.getUserKey(ownerName); - - // As we have not still called validateAndUpdateCache, get() should - // return null. 
- - Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey)); - Assert.assertNull(omMetadataManager.getUserTable().get(ownerKey)); - - omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest); - - OMClientResponse omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - - // Get volumeInfo from request. - VolumeInfo volumeInfo = omVolumeCreateRequest.getOmRequest() - .getCreateVolumeRequest().getVolumeInfo(); - - OmVolumeArgs omVolumeArgs = - omMetadataManager.getVolumeTable().get(volumeKey); - // As request is valid volume table should not have entry. - Assert.assertNotNull(omVolumeArgs); - Assert.assertEquals(2, omVolumeArgs.getObjectID()); - Assert.assertEquals(2, omVolumeArgs.getUpdateID()); - - // Check data from table and request. - Assert.assertEquals(volumeInfo.getVolume(), omVolumeArgs.getVolume()); - Assert.assertEquals(volumeInfo.getOwnerName(), omVolumeArgs.getOwnerName()); - Assert.assertEquals(volumeInfo.getAdminName(), omVolumeArgs.getAdminName()); - Assert.assertEquals(volumeInfo.getCreationTime(), - omVolumeArgs.getCreationTime()); - - OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo = omMetadataManager - .getUserTable().get(ownerKey); - Assert.assertNotNull(userVolumeInfo); - Assert.assertEquals(volumeName, userVolumeInfo.getVolumeNames(0)); - - // Create another volume for the user. - originalRequest = createVolumeRequest("vol1", adminName, - ownerName); - - omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager); - - omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 2L, - ozoneManagerDoubleBufferHelper); - - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); - - Assert.assertTrue(omMetadataManager - .getUserTable().get(ownerKey).getVolumeNamesList().size() == 2); - - - } - - - @Test - public void testValidateAndUpdateCacheWithVolumeAlreadyExists() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String adminName = "user1"; - String ownerName = "user1"; - - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); - - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager); - - omVolumeCreateRequest = new OMVolumeCreateRequest(modifiedRequest); - - OMClientResponse omClientResponse = - omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS, - omResponse.getStatus()); - - // Check really if we have a volume with the specified volume name. 
- Assert.assertNotNull(omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName))); - - } - - - private void doPreExecute(String volumeName, - String adminName, String ownerName) throws Exception { - - OMRequest originalRequest = createVolumeRequest(volumeName, adminName, - ownerName); - - OMVolumeCreateRequest omVolumeCreateRequest = - new OMVolumeCreateRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeCreateRequest.preExecute(ozoneManager); - verifyRequest(modifiedRequest, originalRequest); - } - - /** - * Verify modifiedOmRequest and originalRequest. - * @param modifiedRequest - * @param originalRequest - */ - private void verifyRequest(OMRequest modifiedRequest, - OMRequest originalRequest) { - VolumeInfo original = originalRequest.getCreateVolumeRequest() - .getVolumeInfo(); - VolumeInfo updated = modifiedRequest.getCreateVolumeRequest() - .getVolumeInfo(); - - Assert.assertEquals(original.getAdminName(), updated.getAdminName()); - Assert.assertEquals(original.getVolume(), updated.getVolume()); - Assert.assertEquals(original.getOwnerName(), - updated.getOwnerName()); - Assert.assertNotEquals(original.getCreationTime(), - updated.getCreationTime()); - } - - /** - * Create OMRequest for create volume. - * @param volumeName - * @param adminName - * @param ownerName - * @return OMRequest - */ - private OMRequest createVolumeRequest(String volumeName, String adminName, - String ownerName) { - VolumeInfo volumeInfo = VolumeInfo.newBuilder().setVolume(volumeName) - .setAdminName(adminName).setOwnerName(ownerName).build(); - CreateVolumeRequest createVolumeRequest = - CreateVolumeRequest.newBuilder().setVolumeInfo(volumeInfo).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setCreateVolumeRequest(createVolumeRequest).build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java deleted file mode 100644 index 8b30a234c26..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.util.UUID; - -import org.junit.Assert;; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteVolumeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Tests delete volume request. - */ -public class TestOMVolumeDeleteRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - OMRequest originalRequest = deleteVolumeRequest(volumeName); - - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeDeleteRequest.preExecute(ozoneManager); - Assert.assertNotEquals(originalRequest, modifiedRequest); - } - - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - OMRequest originalRequest = deleteVolumeRequest(volumeName); - - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalRequest); - - omVolumeDeleteRequest.preExecute(ozoneManager); - - // Add volume and user to DB - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - String ownerKey = omMetadataManager.getUserKey(ownerName); - - - Assert.assertNotNull(omMetadataManager.getVolumeTable().get(volumeKey)); - Assert.assertNotNull(omMetadataManager.getUserTable().get(ownerKey)); - - OMClientResponse omClientResponse = - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - - - Assert.assertTrue(omMetadataManager.getUserTable().get(ownerKey) - .getVolumeNamesList().size() == 0); - // As now volume is deleted, table should not have those entries. 
- Assert.assertNull(omMetadataManager.getVolumeTable().get(volumeKey)); - - } - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - OMRequest originalRequest = deleteVolumeRequest(volumeName); - - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalRequest); - - omVolumeDeleteRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - - } - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotEmpty() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - OMRequest originalRequest = deleteVolumeRequest(volumeName); - - OMVolumeDeleteRequest omVolumeDeleteRequest = - new OMVolumeDeleteRequest(originalRequest); - - omVolumeDeleteRequest.preExecute(ozoneManager); - - // Add some bucket to bucket table cache. - String bucketName = UUID.randomUUID().toString(); - String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); - - OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName).build(); - TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo); - - // Add user and volume to DB. - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OMClientResponse omClientResponse = - omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_EMPTY, - omResponse.getStatus()); - } - - /** - * Create OMRequest for delete volume. - * @param volumeName - * @return OMRequest - */ - private OMRequest deleteVolumeRequest(String volumeName) { - DeleteVolumeRequest deleteVolumeRequest = - DeleteVolumeRequest.newBuilder().setVolumeName(volumeName).build(); - - return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString()) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) - .setDeleteVolumeRequest(deleteVolumeRequest).build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java deleted file mode 100644 index cfcdcb72890..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeRequest.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.audit.AuditLogger; -import org.apache.hadoop.ozone.audit.AuditMessage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -/** - * Base test class for Volume request. - */ -@SuppressWarnings("visibilitymodifier") -public class TestOMVolumeRequest { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OzoneManager ozoneManager; - protected OMMetrics omMetrics; - protected OMMetadataManager omMetadataManager; - protected AuditLogger auditLogger; - // Just setting ozoneManagerDoubleBuffer which does nothing. - protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = - ((response, transactionIndex) -> { - return null; - }); - - @Before - public void setup() throws Exception { - ozoneManager = Mockito.mock(OzoneManager.class); - omMetrics = OMMetrics.create(); - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - when(ozoneManager.getMetrics()).thenReturn(omMetrics); - when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - when(ozoneManager.getMaxUserVolumeCount()).thenReturn(10L); - auditLogger = Mockito.mock(AuditLogger.class); - when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); - Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); - } - - @After - public void stop() { - omMetrics.unRegister(); - Mockito.framework().clearInlineMocks(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java deleted file mode 100644 index af38ba03875..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; - -/** - * Tests set volume property request. - */ -public class TestOMVolumeSetOwnerRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String newOwner = "user1"; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); - - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeSetQuotaRequest.preExecute( - ozoneManager); - Assert.assertNotEquals(modifiedRequest, originalRequest); - } - - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - String newOwner = "user2"; - - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); - - OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = - new OMVolumeSetOwnerRequest(originalRequest); - - omVolumeSetOwnerRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - String ownerKey = omMetadataManager.getUserKey(ownerName); - String newOwnerKey = omMetadataManager.getUserKey(newOwner); - - - - OMClientResponse omClientResponse = - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getSetVolumePropertyResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - - String fromDBOwner = omMetadataManager - .getVolumeTable().get(volumeKey).getOwnerName(); - Assert.assertEquals(newOwner, fromDBOwner); - - - OzoneManagerProtocolProtos.UserVolumeInfo newOwnerVolumeList = - omMetadataManager.getUserTable().get(newOwnerKey); - - Assert.assertNotNull(newOwnerVolumeList); - Assert.assertEquals(volumeName, - newOwnerVolumeList.getVolumeNamesList().get(0)); - - OzoneManagerProtocolProtos.UserVolumeInfo oldOwnerVolumeList = - omMetadataManager.getUserTable().get( - omMetadataManager.getUserKey(ownerKey)); - - Assert.assertNotNull(oldOwnerVolumeList); - Assert.assertTrue(oldOwnerVolumeList.getVolumeNamesList().size() == 0); - - } - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, - ownerName); - - OMVolumeSetOwnerRequest 
omVolumeSetOwnerRequest = - new OMVolumeSetOwnerRequest(originalRequest); - - omVolumeSetOwnerRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - - } - - @Test - public void testInvalidRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - - // create request with quota set. - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, - 100L); - - OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = - new OMVolumeSetOwnerRequest(originalRequest); - - omVolumeSetOwnerRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeSetOwnerRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, - omResponse.getStatus()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java deleted file mode 100644 index 963fc333e0e..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume; - -import java.util.UUID; - -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; - -/** - * Tests set volume property request. - */ -public class TestOMVolumeSetQuotaRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - long quota = 100L; - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); - - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeSetQuotaRequest.preExecute( - ozoneManager); - Assert.assertNotEquals(modifiedRequest, originalRequest); - } - - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - long quotaSet = 100L; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quotaSet); - - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - omVolumeSetQuotaRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - - - // Get Quota before validateAndUpdateCache. - OmVolumeArgs omVolumeArgs = - omMetadataManager.getVolumeTable().get(volumeKey); - // As the request is valid, the volume table should have an entry.
- Assert.assertNotNull(omVolumeArgs); - long quotaBeforeSet = omVolumeArgs.getQuotaInBytes(); - - - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getSetVolumePropertyResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - - long quotaAfterSet = omMetadataManager - .getVolumeTable().get(volumeKey).getQuotaInBytes(); - Assert.assertEquals(quotaSet, quotaAfterSet); - Assert.assertNotEquals(quotaBeforeSet, quotaAfterSet); - - } - - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - long quota = 100L; - - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, quota); - - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - omVolumeSetQuotaRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - - } - - @Test - public void testInvalidRequest() throws Exception { - String volumeName = UUID.randomUUID().toString(); - - // create request with owner set. - OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, - "user1"); - - // Creating OMVolumeSetQuotaRequest with SetProperty request set with owner. - OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = - new OMVolumeSetQuotaRequest(originalRequest); - - omVolumeSetQuotaRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OzoneManagerProtocolProtos.OMResponse omResponse = - omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getCreateVolumeResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_REQUEST, - omResponse.getStatus()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java deleted file mode 100644 index 66a122f298d..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume.acl; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; - -import org.junit.Assert; -import org.junit.Test; - -import java.util.UUID; - -/** - * Tests volume addAcl request. - */ -public class TestOMVolumeAddAclRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); - - OMVolumeAddAclRequest omVolumeAddAclRequest = - new OMVolumeAddAclRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeAddAclRequest.preExecute( - ozoneManager); - Assert.assertNotEquals(modifiedRequest, originalRequest); - } - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - - OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); - - OMVolumeAddAclRequest omVolumeAddAclRequest = - new OMVolumeAddAclRequest(originalRequest); - - omVolumeAddAclRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - - // Get Acl before validateAndUpdateCache. - OmVolumeArgs omVolumeArgs = - omMetadataManager.getVolumeTable().get(volumeKey); - // As request is valid volume table should have entry. 
- Assert.assertNotNull(omVolumeArgs); - OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap(); - - OMClientResponse omClientResponse = - omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getAddAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - OmOzoneAclMap aclMapAfterSet = omMetadataManager - .getVolumeTable().get(volumeKey).getAclMap(); - - // acl is added to aclMapAfterSet - Assert.assertEquals(1, aclMapAfterSet.getAcl().size()); - Assert.assertEquals(acl, aclMapAfterSet.getAcl().get(0)); - } - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); - - OMVolumeAddAclRequest omVolumeAddAclRequest = - new OMVolumeAddAclRequest(originalRequest); - - omVolumeAddAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getAddAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java deleted file mode 100644 index dfd0a237a8c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume.acl; - -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.junit.Assert; -import org.junit.Test; - -import java.util.UUID; - -/** - * Tests volume removeAcl request. - */ -public class TestOMVolumeRemoveAclRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); - - OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = - new OMVolumeRemoveAclRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeRemoveAclRequest.preExecute( - ozoneManager); - Assert.assertNotEquals(modifiedRequest, originalRequest); - } - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); - // add acl first - OMRequest addAclRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); - OMVolumeAddAclRequest omVolumeAddAclRequest = - new OMVolumeAddAclRequest(addAclRequest); - omVolumeAddAclRequest.preExecute(ozoneManager); - OMClientResponse omClientAddResponse = - omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - OMResponse omAddAclResponse = omClientAddResponse.getOMResponse(); - Assert.assertNotNull(omAddAclResponse.getAddAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omAddAclResponse.getStatus()); - - - // remove acl - OMRequest removeAclRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); - OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = - new OMVolumeRemoveAclRequest(removeAclRequest); - omVolumeRemoveAclRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - - // Get Acl before Remove. - OmVolumeArgs omVolumeArgs = - omMetadataManager.getVolumeTable().get(volumeKey); - // As request is valid volume table should have entry. 
- Assert.assertNotNull(omVolumeArgs); - OmOzoneAclMap aclMapBeforeRemove = omVolumeArgs.getAclMap(); - Assert.assertEquals(acl, aclMapBeforeRemove.getAcl().get(0)); - - OMClientResponse omClientRemoveResponse = - omVolumeRemoveAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omRemoveAclResponse = omClientRemoveResponse.getOMResponse(); - Assert.assertNotNull(omRemoveAclResponse.getRemoveAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omRemoveAclResponse.getStatus()); - - // acl is removed from aclMapAfterSet - OmOzoneAclMap aclMapAfterRemove = omMetadataManager - .getVolumeTable().get(volumeKey).getAclMap(); - Assert.assertEquals(0, aclMapAfterRemove.getAcl().size()); - } - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); - - OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = - new OMVolumeRemoveAclRequest(originalRequest); - - omVolumeRemoveAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeRemoveAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getRemoveAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java deleted file mode 100644 index 087ba713f6c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.request.volume.acl; - -import com.google.common.collect.Lists; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.helpers.OmOzoneAclMap; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; -import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.junit.Assert; -import org.junit.Test; - -import java.util.List; -import java.util.UUID; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope.ACCESS; -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope.DEFAULT; - -/** - * Tests volume setAcl request. - */ -public class TestOMVolumeSetAclRequest extends TestOMVolumeRequest { - - @Test - public void testPreExecute() throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, - Lists.newArrayList(acl)); - - OMVolumeSetAclRequest omVolumeSetAclRequest = - new OMVolumeSetAclRequest(originalRequest); - - OMRequest modifiedRequest = omVolumeSetAclRequest.preExecute( - ozoneManager); - Assert.assertNotEquals(modifiedRequest, originalRequest); - } - - @Test - public void testValidateAndUpdateCacheSuccess() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String ownerName = "user1"; - - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - - OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]"); - OzoneAcl groupDefaultAcl = - OzoneAcl.parseAcl("group:admin:rwdlncxy[DEFAULT]"); - - List acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl); - - OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, acls); - - OMVolumeSetAclRequest omVolumeSetAclRequest = - new OMVolumeSetAclRequest(originalRequest); - - omVolumeSetAclRequest.preExecute(ozoneManager); - - String volumeKey = omMetadataManager.getVolumeKey(volumeName); - - // Get Acl before validateAndUpdateCache. - OmVolumeArgs omVolumeArgs = - omMetadataManager.getVolumeTable().get(volumeKey); - // As request is valid volume table should have entry. 
- Assert.assertNotNull(omVolumeArgs); - OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap(); - - OMClientResponse omClientResponse = - omVolumeSetAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getSetAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, - omResponse.getStatus()); - - OmOzoneAclMap aclMapAfterSet = omMetadataManager - .getVolumeTable().get(volumeKey).getAclMap(); - - // Acl is added to aclMapAfterSet - Assert.assertEquals(2, aclMapAfterSet.getAcl().size()); - Assert.assertTrue("Default Acl should be set.", - aclMapAfterSet.getAclsByScope(ACCESS).contains(userAccessAcl)); - Assert.assertTrue("Default Acl should be set.", - aclMapAfterSet.getAclsByScope(DEFAULT).contains(groupDefaultAcl)); - } - - @Test - public void testValidateAndUpdateCacheWithVolumeNotFound() - throws Exception { - String volumeName = UUID.randomUUID().toString(); - OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); - OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, - Lists.newArrayList(acl)); - - OMVolumeSetAclRequest omVolumeSetAclRequest = - new OMVolumeSetAclRequest(originalRequest); - - omVolumeSetAclRequest.preExecute(ozoneManager); - - OMClientResponse omClientResponse = - omVolumeSetAclRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); - - OMResponse omResponse = omClientResponse.getOMResponse(); - Assert.assertNotNull(omResponse.getSetAclResponse()); - Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND, - omResponse.getStatus()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java deleted file mode 100644 index 1552af7874f..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Package contains test classes for volume acl requests. - */ -package org.apache.hadoop.ozone.om.request.volume.acl; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java deleted file mode 100644 index cbe3e2d3c71..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Package contains test classes for volume requests. - */ -package org.apache.hadoop.ozone.om.request.volume; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java deleted file mode 100644 index 5e41d2d5134..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response; - -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest; -import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; -import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketCreateResponse; -import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; - -/** - * Helper class to test OMClientResponse classes. - */ -public final class TestOMResponseUtils { - - // No one can instantiate, this is just utility class with all static methods. 
- private TestOMResponseUtils() { - } - - public static OmBucketInfo createBucket(String volume, String bucket) { - return OmBucketInfo.newBuilder().setVolumeName(volume).setBucketName(bucket) - .setCreationTime(Time.now()).setIsVersionEnabled(true).addMetadata( - "key1", "value1").build(); - - } - - public static S3BucketCreateResponse createS3BucketResponse(String userName, - String volumeName, String s3BucketName) { - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateS3BucketResponse( - OzoneManagerProtocolProtos.S3CreateBucketResponse - .getDefaultInstance()) - .build(); - - OzoneManagerProtocolProtos.UserVolumeInfo userVolumeInfo = - OzoneManagerProtocolProtos.UserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); - - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(userName).setAdminName(userName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - - OMVolumeCreateResponse omVolumeCreateResponse = - new OMVolumeCreateResponse(omVolumeArgs, userVolumeInfo, omResponse); - - - OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( - volumeName, s3BucketName); - OMBucketCreateResponse omBucketCreateResponse = - new OMBucketCreateResponse(omBucketInfo, omResponse); - - String s3Mapping = S3BucketCreateRequest.formatS3MappingName(volumeName, - s3BucketName); - return - new S3BucketCreateResponse(omVolumeCreateResponse, - omBucketCreateResponse, s3BucketName, s3Mapping, omResponse); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java deleted file mode 100644 index 20ac2f97df7..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketCreateResponse.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.util.UUID; - -import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * This class tests OMBucketCreateResponse. - */ -public class TestOMBucketCreateResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( - volumeName, bucketName); - Assert.assertEquals(0, - omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable())); - OMBucketCreateResponse omBucketCreateResponse = - new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCreateBucketResponse( - CreateBucketResponse.newBuilder().build()).build()); - - omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable())); - - Table.KeyValue keyValue = - omMetadataManager.getBucketTable().iterator().next(); - - Assert.assertEquals(omMetadataManager.getBucketKey(volumeName, - bucketName), keyValue.getKey()); - Assert.assertEquals(omBucketInfo, keyValue.getValue()); - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java deleted file mode 100644 index e8843eb1a81..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketDeleteResponse.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .DeleteBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * This class tests OMBucketDeleteResponse. - */ -public class TestOMBucketDeleteResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( - volumeName, bucketName); - OMBucketCreateResponse omBucketCreateResponse = - new OMBucketCreateResponse(omBucketInfo, OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCreateBucketResponse( - CreateBucketResponse.newBuilder().build()).build()); - - OMBucketDeleteResponse omBucketDeleteResponse = - new OMBucketDeleteResponse(volumeName, bucketName, - OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setDeleteBucketResponse( - DeleteBucketResponse.getDefaultInstance()).build()); - - omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - omBucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, bucketName))); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java deleted file mode 100644 index b0cafa6a23b..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/TestOMBucketSetPropertyResponse.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response.bucket; - -import java.util.UUID; - -import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateBucketResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * This class tests OMBucketSetPropertyResponse. 
- */ -public class TestOMBucketSetPropertyResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket( - volumeName, bucketName); - OMBucketSetPropertyResponse omBucketCreateResponse = - new OMBucketSetPropertyResponse(omBucketInfo, OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCreateBucketResponse( - CreateBucketResponse.newBuilder().build()).build()); - - omBucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable())); - - Table.KeyValue keyValue = - omMetadataManager.getBucketTable().iterator().next(); - - Assert.assertEquals(omMetadataManager.getBucketKey(volumeName, - bucketName), keyValue.getKey()); - Assert.assertEquals(omBucketInfo, keyValue.getValue()); - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java deleted file mode 100644 index 0980106779c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for bucket responses. 
- */ -package org.apache.hadoop.ozone.om.response.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java deleted file mode 100644 index 1fc36615f2c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.file; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.slf4j.event.Level; - -import java.util.UUID; - -/** - * Tests OMDirectoryCreateResponse. - */ -public class TestOMDirectoryCreateResponse { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); - - OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse( - OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory) - .build(); - - OMDirectoryCreateResponse omDirectoryCreateResponse = - new OMDirectoryCreateResponse(omKeyInfo, omResponse); - - omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNotNull(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName))); - } - - @Test - public void testAddToDBBatchWithNullOmkeyInfo() throws Exception { - - GenericTestUtils.setLogLevel(OMDirectoryCreateResponse.LOG, Level.DEBUG); - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer - .captureLogs(OMDirectoryCreateResponse.LOG); - - - String volumeName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - - OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse( - OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory) - .build(); - - OMDirectoryCreateResponse omDirectoryCreateResponse = - new OMDirectoryCreateResponse(null, omResponse); - - omDirectoryCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNull(omMetadataManager.getKeyTable().get( - omMetadataManager.getOzoneDirKey(volumeName, bucketName, keyName))); - - Assert.assertTrue(logCapturer.getOutput().contains("Response Status is " + - "OK, dirKeyInfo is null")); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java deleted file mode 100644 index 4c6c005294d..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for file responses. - */ -package org.apache.hadoop.ozone.om.response.file; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java deleted file mode 100644 index 5dfc48e419d..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .AllocateBlockResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - -/** - * Tests OMAllocateBlockResponse. - */ -public class TestOMAllocateBlockResponse extends TestOMKeyResponse { - - @Test - public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OMResponse omResponse = OMResponse.newBuilder() - .setAllocateBlockResponse( - AllocateBlockResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock) - .build(); - OMAllocateBlockResponse omAllocateBlockResponse = - new OMAllocateBlockResponse(omKeyInfo, clientID, omResponse); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - - // Not adding key entry before to test whether commit is successful or not. - Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - omAllocateBlockResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); - } - - @Test - public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OMResponse omResponse = OMResponse.newBuilder() - .setAllocateBlockResponse( - AllocateBlockResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.AllocateBlock) - .build(); - OMAllocateBlockResponse omAllocateBlockResponse = - new OMAllocateBlockResponse(omKeyInfo, clientID, omResponse); - - // Before calling addToDBBatch - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - omAllocateBlockResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // As omResponse is error it is a no-op. 
- Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java deleted file mode 100644 index 2b6e6d7e547..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -/** - * Tests OMKeyCommitResponse. - */ -public class TestOMKeyCommitResponse extends TestOMKeyResponse { - - @Test - public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse( - OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey) - .build(); - - // As during commit Key, entry will be already there in openKeyTable. - // Adding it here. - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - OMKeyCommitResponse omKeyCommitResponse = - new OMKeyCommitResponse(omKeyInfo, clientID, omResponse); - - omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // When key commit key is deleted from openKey table and added to keyTable. - Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - Assert.assertTrue(omMetadataManager.getKeyTable().isExist( - omMetadataManager.getOzoneKey(volumeName, bucketName, keyName))); - } - - @Test - public void testAddToDBBatchNoOp() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse( - OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.CommitKey) - .build(); - - - OMKeyCommitResponse omKeyCommitResponse = - new OMKeyCommitResponse(omKeyInfo, clientID, omResponse); - - // As during commit Key, entry will be already there in openKeyTable. - // Adding it here. - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - - omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - - // As omResponse is error it is a no-op. So, entry should still be in - // openKey table. - Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); - Assert.assertFalse(omMetadataManager.getKeyTable().isExist( - omMetadataManager.getOzoneKey(volumeName, bucketName, keyName))); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java deleted file mode 100644 index 77692a7d718..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateKeyResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; - - -/** - * Tests MKeyCreateResponse. - */ -public class TestOMKeyCreateResponse extends TestOMKeyResponse { - - @Test - public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse( - CreateKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) - .build(); - - OMKeyCreateResponse omKeyCreateResponse = - new OMKeyCreateResponse(omKeyInfo, clientID, omResponse); - - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); - } - - @Test - public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse( - CreateKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) - .build(); - - OMKeyCreateResponse omKeyCreateResponse = - new OMKeyCreateResponse(omKeyInfo, clientID, omResponse); - - // Before calling addToDBBatch - String openKey = omMetadataManager.getOpenKey(volumeName, bucketName, - keyName, clientID); - Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // As omResponse is error it is a no-op. 
- Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey)); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java deleted file mode 100644 index ba2b738a3ca..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -import java.util.ArrayList; -import java.util.List; - -/** - * Tests OMKeyDeleteResponse. - */ -public class TestOMKeyDeleteResponse extends TestOMKeyResponse { - - @Test - public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( - OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) - .build(); - - OMKeyDeleteResponse omKeyDeleteResponse = - new OMKeyDeleteResponse(omKeyInfo, omResponse); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey)); - - // As default key entry does not have any blocks, it should not be in - // deletedKeyTable. - Assert.assertFalse(omMetadataManager.getDeletedTable().isExist( - ozoneKey)); - } - - @Test - public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - // Add block to key. 
- List omKeyLocationInfoList = new ArrayList<>(); - - Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setType(replicationType) - .setFactor(replicationFactor) - .setNodes(new ArrayList<>()) - .build(); - - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(100L, 1000L)) - .setOffset(0).setLength(100L).setPipeline(pipeline).build(); - - - omKeyLocationInfoList.add(omKeyLocationInfo); - - omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( - OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) - .build(); - - OMKeyDeleteResponse omKeyDeleteResponse = - new OMKeyDeleteResponse(omKeyInfo, omResponse); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey)); - - // Key has blocks, it should not be in deletedKeyTable. - Assert.assertTrue(omMetadataManager.getDeletedTable().isExist( - ozoneKey)); - } - - - @Test - public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setDeleteKeyResponse( - OzoneManagerProtocolProtos.DeleteKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteKey) - .build(); - - OMKeyDeleteResponse omKeyDeleteResponse = - new OMKeyDeleteResponse(omKeyInfo, omResponse); - - String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - - omKeyDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // As omResponse is error it is a no-op. So, entry should be still in the - // keyTable. - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey)); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java deleted file mode 100644 index 92daa1d6849..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; - -/** - * Tests OMKeyRenameResponse. - */ -public class TestOMKeyRenameResponse extends TestOMKeyResponse { - @Test - public void testAddToDBBatch() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse( - OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) - .build(); - - String toKeyName = UUID.randomUUID().toString(); - - OMKeyRenameResponse omKeyRenameResponse = - new OMKeyRenameResponse(omKeyInfo, toKeyName, keyName, omResponse); - - String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - toKeyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey)); - - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneToKey)); - } - - @Test - public void testAddToDBBatchWithErrorResponse() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse( - OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) - .build(); - - String toKeyName = UUID.randomUUID().toString(); - - OMKeyRenameResponse omKeyRenameResponse = - new OMKeyRenameResponse(omKeyInfo, toKeyName, keyName, omResponse); - - String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - toKeyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey)); - - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // As omResponse has error, it is a no-op. So, no changes should happen. - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneToKey)); - - } - - @Test - public void testAddToDBBatchWithSameKeyName() throws Exception { - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); - - OzoneManagerProtocolProtos.OMResponse omResponse = - OzoneManagerProtocolProtos.OMResponse.newBuilder().setRenameKeyResponse( - OzoneManagerProtocolProtos.RenameKeyResponse.getDefaultInstance()) - .setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND) - .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey) - .build(); - - - // Passing toKeyName also same as KeyName. - OMKeyRenameResponse omKeyRenameResponse = - new OMKeyRenameResponse(omKeyInfo, keyName, keyName, omResponse); - - String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, - keyName); - - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - - omKeyRenameResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneFromKey)); - - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java deleted file mode 100644 index 626a3ded67e..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.key; - -import java.util.UUID; - -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; -import org.mockito.Mockito; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Base test class for key response. - */ -@SuppressWarnings("visibilitymodifier") -public class TestOMKeyResponse { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OMMetadataManager omMetadataManager; - protected BatchOperation batchOperation; - - protected String volumeName; - protected String bucketName; - protected String keyName; - protected HddsProtos.ReplicationFactor replicationFactor; - protected HddsProtos.ReplicationType replicationType; - protected long clientID; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - - volumeName = UUID.randomUUID().toString(); - bucketName = UUID.randomUUID().toString(); - keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; - clientID = 1000L; - } - - @After - public void stop() { - Mockito.framework().clearInlineMocks(); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java deleted file mode 100644 index 1ebf4c21a59..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for key responses. 
- */ -package org.apache.hadoop.ozone.om.response.key; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java deleted file mode 100644 index fd48e14db97..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Tests for OM Response. - */ -package org.apache.hadoop.ozone.om.response; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java deleted file mode 100644 index f4a76e38e67..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response.s3.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest; -import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Class to test S3BucketCreateResponse. 
- */ -public class TestS3BucketCreateResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - - @Test - public void testAddToDBBatch() throws Exception { - String userName = UUID.randomUUID().toString(); - String s3BucketName = UUID.randomUUID().toString(); - String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName); - - S3BucketCreateResponse s3BucketCreateResponse = - TestOMResponseUtils.createS3BucketResponse(userName, volumeName, - s3BucketName); - - s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNotNull(omMetadataManager.getS3Table().get(s3BucketName)); - Assert.assertEquals(s3BucketCreateResponse.getS3Mapping(), - omMetadataManager.getS3Table().get(s3BucketName)); - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getBucketTable())); - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable())); - - Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName), - omMetadataManager.getVolumeTable().iterator().next().getKey()); - Assert.assertNotNull(omMetadataManager.getBucketKey(volumeName, - s3BucketName), omMetadataManager.getBucketTable().iterator().next() - .getKey()); - - } -} - diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java deleted file mode 100644 index 865f4c6b52a..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.bucket; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest; -import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .S3DeleteBucketResponse; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - - - -/** - * Tests S3BucketDeleteResponse. - */ -public class TestS3BucketDeleteResponse { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - String s3BucketName = UUID.randomUUID().toString(); - String userName = "ozone"; - String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName); - S3BucketCreateResponse s3BucketCreateResponse = - TestOMResponseUtils.createS3BucketResponse(userName, volumeName, - s3BucketName); - - s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - OMResponse omResponse = OMResponse.newBuilder().setCmdType( - OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus( - OzoneManagerProtocolProtos.Status.OK).setSuccess(true) - .setDeleteS3BucketResponse(S3DeleteBucketResponse.newBuilder()).build(); - - S3BucketDeleteResponse s3BucketDeleteResponse = - new S3BucketDeleteResponse(s3BucketName, volumeName, omResponse); - - s3BucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - // Check now s3 bucket exists or not. 
- Assert.assertNull(omMetadataManager.getS3Table().get(s3BucketName)); - Assert.assertNull(omMetadataManager.getBucketTable().get( - omMetadataManager.getBucketKey(volumeName, s3BucketName))); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java deleted file mode 100644 index 364396b613f..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for s3 bucket responses. - */ -package org.apache.hadoop.ozone.om.response.s3.bucket; \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java deleted file mode 100644 index 4996bd0903c..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3InitiateMultipartUploadResponse.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import java.util.UUID; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Class tests S3 Initiate MPU response. 
- */ -public class TestS3InitiateMultipartUploadResponse - extends TestS3MultipartResponse { - - @Test - public void addDBToBatch() throws Exception { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String multipartUploadID = UUID.randomUUID().toString(); - - S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = - createS3InitiateMPUResponse(volumeName, bucketName, keyName, - multipartUploadID); - - - s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, - batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - Assert.assertNotNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNotNull(omMetadataManager.getMultipartInfoTable() - .get(multipartKey)); - - Assert.assertEquals(multipartUploadID, - omMetadataManager.getMultipartInfoTable().get(multipartKey) - .getUploadID()); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java deleted file mode 100644 index 09b028bef47..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.UUID; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; - -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .KeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .MultipartUploadAbortResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .PartKeyInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.hdds.utils.db.BatchOperation; - -/** - * Base test class for S3 MPU response. - */ - -@SuppressWarnings("VisibilityModifier") -public class TestS3MultipartResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - protected OMMetadataManager omMetadataManager; - protected BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - - public S3InitiateMultipartUploadResponse createS3InitiateMPUResponse( - String volumeName, String bucketName, String keyName, - String multipartUploadID) { - OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo( - multipartUploadID, new HashMap<>()); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setReplicationType(HddsProtos.ReplicationType.RATIS) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .build(); - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true).setInitiateMultiPartUploadResponse( - OzoneManagerProtocolProtos.MultipartInfoInitiateResponse - .newBuilder().setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setMultipartUploadID(multipartUploadID)).build(); - - return new S3InitiateMultipartUploadResponse(multipartKeyInfo, omKeyInfo, - omResponse); - } - - public S3MultipartUploadAbortResponse createS3AbortMPUResponse( - String multipartKey, long timeStamp, - OmMultipartKeyInfo omMultipartKeyInfo) { - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.AbortMultiPartUpload) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setAbortMultiPartUploadResponse( - 
MultipartUploadAbortResponse.newBuilder().build()).build(); - - return new S3MultipartUploadAbortResponse(multipartKey, omMultipartKeyInfo, - omResponse); - } - - - public void addPart(int partNumber, PartKeyInfo partKeyInfo, - OmMultipartKeyInfo omMultipartKeyInfo) { - omMultipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo); - } - - public PartKeyInfo createPartKeyInfo( - String volumeName, String bucketName, String keyName, int partNumber) { - return PartKeyInfo.newBuilder() - .setPartNumber(partNumber) - .setPartName(omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, UUID.randomUUID().toString())) - .setPartKeyInfo(KeyInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setDataSize(100L) // Just set dummy size for testing - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setType(HddsProtos.ReplicationType.RATIS) - .setFactor(HddsProtos.ReplicationFactor.ONE).build()).build(); - } -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java deleted file mode 100644 index 60aacd5a336..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; - -import java.util.UUID; - -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.junit.Assert; -import org.junit.Test; - -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .PartKeyInfo; -import org.apache.hadoop.util.Time; - -/** - * Test multipart upload abort response. - */ -public class TestS3MultipartUploadAbortResponse - extends TestS3MultipartResponse { - - - @Test - public void testAddDBToBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String multipartUploadID = UUID.randomUUID().toString(); - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = - createS3InitiateMPUResponse(volumeName, bucketName, keyName, - multipartUploadID); - - s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, - batchOperation); - - S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse = - createS3AbortMPUResponse(multipartKey, Time.now(), - s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo()); - - s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager, - batchOperation); - - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - - // As no parts are created, so no entries should be there in delete table. - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getDeletedTable()) == 0); - } - - @Test - public void testAddDBToBatchWithParts() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - String keyName = UUID.randomUUID().toString(); - String multipartUploadID = UUID.randomUUID().toString(); - String multipartKey = omMetadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = - createS3InitiateMPUResponse(volumeName, bucketName, keyName, - multipartUploadID); - - s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, - batchOperation); - - - // Add some dummy parts for testing. - // Not added any key locations, as this just test is to see entries are - // adding to delete table or not. 
- - OmMultipartKeyInfo omMultipartKeyInfo = - s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(); - - PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName, - keyName, 1); - PartKeyInfo part2 = createPartKeyInfo(volumeName, bucketName, - keyName, 2); - - addPart(1, part1, omMultipartKeyInfo); - addPart(2, part2, omMultipartKeyInfo); - - - long timeStamp = Time.now(); - S3MultipartUploadAbortResponse s3MultipartUploadAbortResponse = - createS3AbortMPUResponse(multipartKey, timeStamp, - s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo()); - - s3MultipartUploadAbortResponse.addToDBBatch(omMetadataManager, - batchOperation); - - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey)); - Assert.assertNull( - omMetadataManager.getMultipartInfoTable().get(multipartKey)); - - // As 2 parts are created, so 2 entries should be there in delete table. - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getDeletedTable()) == 2); - - String part1DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(1).getPartName(); - - String part2DeletedKeyName = - omMultipartKeyInfo.getPartKeyInfo(2).getPartName(); - - Assert.assertNotNull(omMetadataManager.getDeletedTable().get( - part1DeletedKeyName)); - Assert.assertNotNull(omMetadataManager.getDeletedTable().get( - part2DeletedKeyName)); - - RepeatedOmKeyInfo ro = - omMetadataManager.getDeletedTable().get(part1DeletedKeyName); - Assert.assertEquals(OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()), - ro.getOmKeyInfoList().get(0)); - - ro = omMetadataManager.getDeletedTable().get(part2DeletedKeyName); - Assert.assertEquals(OmKeyInfo.getFromProtobuf(part2.getPartKeyInfo()), - ro.getOmKeyInfoList().get(0)); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java deleted file mode 100644 index 1fc3a952625..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Package contains test classes for S3 MPU responses. 
- */ - -package org.apache.hadoop.ozone.om.response.s3.multipart; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java deleted file mode 100644 index b69d8b7b07e..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeCreateResponse.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * This class tests OMVolumeCreateResponse. - */ -public class TestOMVolumeCreateResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String userName = "user1"; - UserVolumeInfo volumeList = UserVolumeInfo.newBuilder() - .setObjectID(1).setUpdateID(1) - .addVolumeNames(volumeName).build(); - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(userName).setAdminName(userName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OMVolumeCreateResponse omVolumeCreateResponse = - new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); - - omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
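// Sketch of the write path these response tests rely on: addToDBBatch() only
// stages mutations into the supplied BatchOperation, so nothing is visible in
// the OM tables until the batch is flushed explicitly.
//
//   response.addToDBBatch(omMetadataManager, batchOperation);            // stage writes
//   omMetadataManager.getStore().commitBatchOperation(batchOperation);   // flush
//   // only after the flush do table lookups observe the staged rows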
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable())); - Assert.assertEquals(omVolumeArgs, - omMetadataManager.getVolumeTable().iterator().next().getValue()); - - Assert.assertEquals(volumeList, - omMetadataManager.getUserTable().get( - omMetadataManager.getUserKey(userName))); - } - - @Test - public void testAddToDBBatchNoOp() throws Exception { - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS) - .setSuccess(false) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OMVolumeCreateResponse omVolumeCreateResponse = - new OMVolumeCreateResponse(null, null, omResponse); - - try { - omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable()) == 0); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java deleted file mode 100644 index 5d6b48127ee..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * This class tests OMVolumeCreateResponse. - */ -public class TestOMVolumeDeleteResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String userName = "user1"; - UserVolumeInfo volumeList = UserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(userName).setAdminName(userName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OMVolumeCreateResponse omVolumeCreateResponse = - new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); - - // As we are deleting updated volume list should be empty. - UserVolumeInfo updatedVolumeList = UserVolumeInfo.newBuilder() - .setObjectID(1).setUpdateID(1).build(); - OMVolumeDeleteResponse omVolumeDeleteResponse = - new OMVolumeDeleteResponse(volumeName, userName, updatedVolumeList, - omResponse); - - omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertNull(null, - omMetadataManager.getVolumeTable().get( - omMetadataManager.getVolumeKey(volumeName))); - - Assert.assertEquals(null, - omMetadataManager.getUserTable().get( - omMetadataManager.getUserKey(userName))); - } - - @Test - public void testAddToDBBatchNoOp() throws Exception { - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) - .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) - .setSuccess(false) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OMVolumeDeleteResponse omVolumeDeleteResponse = - new OMVolumeDeleteResponse(null, null, null, omResponse); - - try { - omVolumeDeleteResponse.addToDBBatch(omMetadataManager, batchOperation); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java deleted file mode 100644 index 0951c062994..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetOwnerResponse.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .UserVolumeInfo; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * This class tests OMVolumeCreateResponse. - */ -public class TestOMVolumeSetOwnerResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String oldOwner = "user1"; - UserVolumeInfo volumeList = UserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(oldOwner).setAdminName(oldOwner) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OMVolumeCreateResponse omVolumeCreateResponse = - new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse); - - - - String newOwner = "user2"; - UserVolumeInfo newOwnerVolumeList = UserVolumeInfo.newBuilder() - .setObjectID(1) - .setUpdateID(1) - .addVolumeNames(volumeName).build(); - UserVolumeInfo oldOwnerVolumeList = UserVolumeInfo.newBuilder() - .setObjectID(2) - .setUpdateID(2) - .build(); - OmVolumeArgs newOwnerVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(newOwner).setAdminName(newOwner) - .setVolume(volumeName).setCreationTime(omVolumeArgs.getCreationTime()) - .build(); - - OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = - new OMVolumeSetOwnerResponse(oldOwner, oldOwnerVolumeList, - newOwnerVolumeList, newOwnerVolumeArgs, omResponse); - - 
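// Effect checked by the assertions below: after applying the set-owner
// response, the volume entry is rewritten with newOwnerVolumeArgs and the
// volume name appears under the new owner's user-table entry; the (now empty)
// oldOwnerVolumeList is presumably persisted for the old owner as well,
// though the test does not assert that directly.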
omVolumeCreateResponse.addToDBBatch(omMetadataManager, batchOperation); - omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. - omMetadataManager.getStore().commitBatchOperation(batchOperation); - - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable())); - - Table.KeyValue keyValue = - omMetadataManager.getVolumeTable().iterator().next(); - - Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName), - keyValue.getKey()); - Assert.assertEquals(newOwnerVolumeArgs, keyValue.getValue()); - - Assert.assertEquals(volumeList, - omMetadataManager.getUserTable().get( - omMetadataManager.getUserKey(newOwner))); - } - - @Test - public void testAddToDBBatchNoOp() throws Exception { - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) - .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) - .setSuccess(false) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OMVolumeSetOwnerResponse omVolumeSetOwnerResponse = - new OMVolumeSetOwnerResponse(null, null, null, null, omResponse); - - try { - omVolumeSetOwnerResponse.addToDBBatch(omMetadataManager, batchOperation); - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable()) == 0); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java deleted file mode 100644 index 25250bdf614..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeSetQuotaResponse.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.response.volume; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .CreateVolumeResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.hdds.utils.db.Table; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.util.UUID; - -import static org.junit.Assert.fail; - -/** - * This class tests OMVolumeCreateResponse. - */ -public class TestOMVolumeSetQuotaResponse { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OMMetadataManager omMetadataManager; - private BatchOperation batchOperation; - - @Before - public void setup() throws Exception { - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, - folder.newFolder().getAbsolutePath()); - omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); - batchOperation = omMetadataManager.getStore().initBatchOperation(); - } - - @Test - public void testAddToDBBatch() throws Exception { - - String volumeName = UUID.randomUUID().toString(); - String userName = "user1"; - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty) - .setStatus(OzoneManagerProtocolProtos.Status.OK) - .setSuccess(true) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(userName).setAdminName(userName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); - OMVolumeSetQuotaResponse omVolumeSetQuotaResponse = - new OMVolumeSetQuotaResponse(omVolumeArgs, omResponse); - - omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation); - - // Do manual commit and see whether addToBatch is successful or not. 
- omMetadataManager.getStore().commitBatchOperation(batchOperation); - - Assert.assertEquals(1, - omMetadataManager.countRowsInTable(omMetadataManager.getVolumeTable())); - - Table.KeyValue keyValue = - omMetadataManager.getVolumeTable().iterator().next(); - - Assert.assertEquals(omMetadataManager.getVolumeKey(volumeName), - keyValue.getKey()); - Assert.assertEquals(omVolumeArgs, keyValue.getValue()); - - } - - @Test - public void testAddToDBBatchNoOp() throws Exception { - - OMResponse omResponse = OMResponse.newBuilder() - .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume) - .setStatus(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND) - .setSuccess(false) - .setCreateVolumeResponse(CreateVolumeResponse.getDefaultInstance()) - .build(); - - OMVolumeSetQuotaResponse omVolumeSetQuotaResponse = - new OMVolumeSetQuotaResponse(null, omResponse); - - try { - omVolumeSetQuotaResponse.addToDBBatch(omMetadataManager, batchOperation); - Assert.assertTrue(omMetadataManager.countRowsInTable( - omMetadataManager.getVolumeTable()) == 0); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - } - - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java deleted file mode 100644 index 98788cd722b..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Test Volume functions. - */ -package org.apache.hadoop.ozone.om.response.volume; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java deleted file mode 100644 index ea2d46a66ab..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneBlockTokenSecretManager.java +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.cert.X509Certificate; -import java.util.EnumSet; - -/** - * Test class for {@link OzoneBlockTokenSecretManager}. - */ -public class TestOzoneBlockTokenSecretManager { - - private OzoneBlockTokenSecretManager secretManager; - private KeyPair keyPair; - private X509Certificate x509Certificate; - private long expiryTime; - private String omCertSerialId; - private CertificateClient client; - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneBlockTokenSecretManager.class.getSimpleName()); - - - @Before - public void setUp() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, BASEDIR); - // Create Ozone Master key pair. 
- keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - // Create Ozone Master certificate (SCM CA issued cert) and key store. - SecurityConfig securityConfig = new SecurityConfig(conf); - x509Certificate = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - omCertSerialId = x509Certificate.getSerialNumber().toString(); - secretManager = new OzoneBlockTokenSecretManager(securityConfig, - expiryTime, omCertSerialId); - client = getCertificateClient(securityConfig); - client.init(); - secretManager.start(client); - } - - private CertificateClient getCertificateClient(SecurityConfig secConf) - throws Exception { - return new OMCertificateClient(secConf){ - @Override - public X509Certificate getCertificate() { - return x509Certificate; - } - - @Override - public PrivateKey getPrivateKey() { - return keyPair.getPrivate(); - } - - @Override - public PublicKey getPublicKey() { - return keyPair.getPublic(); - } - }; - } - - @After - public void tearDown() throws Exception { - secretManager = null; - } - - @Test - public void testGenerateToken() throws Exception { - Token token = secretManager.generateToken( - "101", EnumSet.allOf(AccessModeProto.class), 100); - OzoneBlockTokenIdentifier identifier = - OzoneBlockTokenIdentifier.readFieldsProtobuf(new DataInputStream( - new ByteArrayInputStream(token.getIdentifier()))); - // Check basic details. - Assert.assertTrue(identifier.getBlockId().equals("101")); - Assert.assertTrue(identifier.getAccessModes().equals(EnumSet - .allOf(AccessModeProto.class))); - Assert.assertTrue(identifier.getOmCertSerialId().equals(omCertSerialId)); - - validateHash(token.getPassword(), token.getIdentifier()); - } - - @Test - public void testCreateIdentifierSuccess() throws Exception { - OzoneBlockTokenIdentifier btIdentifier = secretManager.createIdentifier( - "testUser", "101", EnumSet.allOf(AccessModeProto.class), 100); - - // Check basic details. - Assert.assertTrue(btIdentifier.getOwnerId().equals("testUser")); - Assert.assertTrue(btIdentifier.getBlockId().equals("101")); - Assert.assertTrue(btIdentifier.getAccessModes().equals(EnumSet - .allOf(AccessModeProto.class))); - Assert.assertTrue(btIdentifier.getOmCertSerialId().equals(omCertSerialId)); - - byte[] hash = secretManager.createPassword(btIdentifier); - validateHash(hash, btIdentifier.getBytes()); - } - - /** - * Validate hash using public key of KeyPair. 
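// Unlike Hadoop's HMAC-based delegation token secret managers, the block
// token password here appears to be an asymmetric signature: it is produced
// with the OM private key and verified below against the matching public key.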
- * */ - private void validateHash(byte[] hash, byte[] identifier) throws Exception { - Signature rsaSignature = - Signature.getInstance(secretManager.getDefaultSignatureAlgorithm()); - rsaSignature.initVerify(client.getPublicKey()); - rsaSignature.update(identifier); - Assert.assertTrue(rsaSignature.verify(hash)); - } - - @Test - public void testCreateIdentifierFailure() throws Exception { - LambdaTestUtils.intercept(SecurityException.class, - "Ozone block token can't be created without owner and access mode " - + "information.", () -> { - secretManager.createIdentifier(); - }); - } - - @Test - public void testRenewToken() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Renew token operation is not supported for ozone block" + - " tokens.", () -> { - secretManager.renewToken(null, null); - }); - } - - @Test - public void testCancelToken() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Cancel token operation is not supported for ozone block" + - " tokens.", () -> { - secretManager.cancelToken(null, null); - }); - } - - @Test - public void testVerifySignatureFailure() throws Exception { - OzoneBlockTokenIdentifier id = new OzoneBlockTokenIdentifier( - "testUser", "4234", EnumSet.allOf(AccessModeProto.class), - Time.now() + 60 * 60 * 24, "123444", 1024); - LambdaTestUtils.intercept(UnsupportedOperationException.class, "operation" + - " is not supported for block tokens", - () -> secretManager.verifySignature(id, - client.signData(id.getBytes()))); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java deleted file mode 100644 index 874252d171f..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements.  See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.  The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.  You may obtain a copy of the License at - * - *      http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.security; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.S3SecretManager; -import org.apache.hadoop.ozone.om.S3SecretManagerImpl; -import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.security.KeyPair; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.Signature; -import java.security.cert.X509Certificate; -import java.util.HashMap; -import java.util.Map; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN; - - -/** - * Test class for {@link OzoneDelegationTokenSecretManager}. - */ -public class TestOzoneDelegationTokenSecretManager { - - private OzoneDelegationTokenSecretManager secretManager; - private SecurityConfig securityConfig; - private CertificateClient certificateClient; - private long expiryTime; - private Text serviceRpcAdd; - private OzoneConfiguration conf; - private final static Text TEST_USER = new Text("testUser"); - private long tokenMaxLifetime = 1000 * 20; - private long tokenRemoverScanInterval = 1000 * 20; - private S3SecretManager s3SecretManager; - private String s3Secret = "dbaksbzljandlkandlsd"; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Before - public void setUp() throws Exception { - conf = createNewTestPath(); - securityConfig = new SecurityConfig(conf); - certificateClient = setupCertificateClient(); - certificateClient.init(); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - serviceRpcAdd = new Text("localhost"); - final Map s3Secrets = new HashMap<>(); - s3Secrets.put("testuser1", s3Secret); - s3Secrets.put("abc", "djakjahkd"); - OMMetadataManager metadataManager = new OmMetadataManagerImpl(conf); - s3SecretManager = new S3SecretManagerImpl(conf, metadataManager) { - @Override - public S3SecretValue getS3Secret(String kerberosID) { - if(s3Secrets.containsKey(kerberosID)) { - return new S3SecretValue(kerberosID, s3Secrets.get(kerberosID)); - } - return null; - } - - @Override - public String getS3UserSecretString(String awsAccessKey) { - if(s3Secrets.containsKey(awsAccessKey)) { - return s3Secrets.get(awsAccessKey); - } - return null; - } - }; - } - - private OzoneConfiguration createNewTestPath() throws IOException { - OzoneConfiguration config = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if (!newFolder.exists()) { - Assert.assertTrue(newFolder.mkdirs()); - } - ServerUtils.setOzoneMetaDirPath(config, newFolder.toString()); - return config; - } - - /** - * Helper function to 
create certificate client. - * */ - private CertificateClient setupCertificateClient() throws Exception { - KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - X509Certificate cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - - return new OMCertificateClient(securityConfig) { - @Override - public X509Certificate getCertificate() { - return cert; - } - - @Override - public PrivateKey getPrivateKey() { - return keyPair.getPrivate(); - } - - @Override - public PublicKey getPublicKey() { - return keyPair.getPublic(); - } - - @Override - public X509Certificate getCertificate(String serialId) { - return cert; - } - }; - } - - @After - public void tearDown() throws IOException { - secretManager.stop(); - } - - @Test - public void testCreateToken() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, TEST_USER); - OzoneTokenIdentifier identifier = - OzoneTokenIdentifier.readProtoBuf(token.getIdentifier()); - // Check basic details. - Assert.assertTrue(identifier.getRealUser().equals(TEST_USER)); - Assert.assertTrue(identifier.getRenewer().equals(TEST_USER)); - Assert.assertTrue(identifier.getOwner().equals(TEST_USER)); - - validateHash(token.getPassword(), token.getIdentifier()); - } - - private void restartSecretManager() throws IOException { - secretManager.stop(); - secretManager = null; - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - } - - private void testRenewTokenSuccessHelper(boolean restartSecretManager) - throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, - TEST_USER); - Thread.sleep(10 * 5); - - if (restartSecretManager) { - restartSecretManager(); - } - - long renewalTime = secretManager.renewToken(token, TEST_USER.toString()); - Assert.assertTrue(renewalTime > 0); - } - - @Test - public void testReloadAndRenewToken() throws Exception { - testRenewTokenSuccessHelper(true); - } - - @Test - public void testRenewTokenSuccess() throws Exception { - testRenewTokenSuccessHelper(false); - } - - /** - * Tests failure for mismatch in renewer. - */ - @Test - public void testRenewTokenFailure() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, TEST_USER); - LambdaTestUtils.intercept(AccessControlException.class, - "rougeUser tries to renew a token", () -> { - secretManager.renewToken(token, "rougeUser"); - }); - } - - /** - * Tests token renew failure due to max time. - */ - @Test - public void testRenewTokenFailureMaxTime() throws Exception { - secretManager = createSecretManager(conf, 100, - 100, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, - TEST_USER); - Thread.sleep(101); - LambdaTestUtils.intercept(IOException.class, - "testUser tried to renew an expired token", () -> { - secretManager.renewToken(token, TEST_USER.toString()); - }); - } - - /** - * Tests token renew failure due to renewal time. 
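// Here the renew window (10 ms) elapses before renewToken() is called while
// the max lifetime (10 s) has not, so the expected failure is the generic
// "is expired" message rather than the max-lifetime error used in the
// previous test.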
- */ - @Test - public void testRenewTokenFailureRenewalTime() throws Exception { - secretManager = createSecretManager(conf, 1000 * 10, - 10, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, - TEST_USER); - Thread.sleep(15); - LambdaTestUtils.intercept(IOException.class, "is expired", () -> { - secretManager.renewToken(token, TEST_USER.toString()); - }); - } - - @Test - public void testCreateIdentifier() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - OzoneTokenIdentifier identifier = secretManager.createIdentifier(); - // Check basic details. - Assert.assertTrue(identifier.getOwner().equals(new Text(""))); - Assert.assertTrue(identifier.getRealUser().equals(new Text(""))); - Assert.assertTrue(identifier.getRenewer().equals(new Text(""))); - } - - @Test - public void testCancelTokenSuccess() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, TEST_USER); - secretManager.cancelToken(token, TEST_USER.toString()); - } - - @Test - public void testCancelTokenFailure() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - Token token = secretManager.createToken(TEST_USER, - TEST_USER, - TEST_USER); - LambdaTestUtils.intercept(AccessControlException.class, - "rougeUser is not authorized to cancel the token", () -> { - secretManager.cancelToken(token, "rougeUser"); - }); - } - - @Test - public void testVerifySignatureSuccess() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - OzoneTokenIdentifier id = new OzoneTokenIdentifier(); - id.setOmCertSerialId(certificateClient.getCertificate() - .getSerialNumber().toString()); - id.setMaxDate(Time.now() + 60 * 60 * 24); - id.setOwner(new Text("test")); - Assert.assertTrue(secretManager.verifySignature(id, - certificateClient.signData(id.getBytes()))); - } - - @Test - public void testVerifySignatureFailure() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - OzoneTokenIdentifier id = new OzoneTokenIdentifier(); - // set invalid om cert serial id - id.setOmCertSerialId("1927393"); - id.setMaxDate(Time.now() + 60*60*24); - id.setOwner(new Text("test")); - Assert.assertFalse(secretManager.verifySignature(id, id.getBytes())); - } - - @Test - public void testValidateS3TOKENSuccess() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - - OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); - identifier.setTokenType(S3TOKEN); - identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" + - "5f9baccf4765f46a14cd745ad"); - identifier.setStrToSign("AWS4-HMAC-SHA256\n" + - "20190221T002037Z\n" + - "20190221/us-west-1/s3/aws4_request\n" + - "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d"); - identifier.setAwsAccessId("testuser1"); - secretManager.retrievePassword(identifier); - } - - @Test - public void 
testValidateS3TOKENFailure() throws Exception { - secretManager = createSecretManager(conf, tokenMaxLifetime, - expiryTime, tokenRemoverScanInterval); - secretManager.start(certificateClient); - - OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); - identifier.setTokenType(S3TOKEN); - identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" + - "5f9baccf4765f46a14cd745ad"); - identifier.setStrToSign("AWS4-HMAC-SHA256\n" + - "20190221T002037Z\n" + - "20190221/us-west-1/s3/aws4_request\n" + - "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d"); - identifier.setAwsAccessId("testuser2"); - // Case 1: User don't have aws secret set. - LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " + - "secret found for S3 identifier", - () -> secretManager.retrievePassword(identifier)); - - // Case 2: Invalid hash in string to sign. - identifier.setStrToSign("AWS4-HMAC-SHA256\n" + - "20190221T002037Z\n" + - "20190221/us-west-1/s3/aws4_request\n" + - "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d" + - "+invalidhash"); - LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " + - "secret found for S3 identifier", - () -> secretManager.retrievePassword(identifier)); - - // Case 3: Invalid hash in authorization hmac. - identifier.setSignature("56ec73ba1974f8feda8365c3caef89c5d4a688d" + - "+invalidhash" + "5f9baccf4765f46a14cd745ad"); - identifier.setStrToSign("AWS4-HMAC-SHA256\n" + - "20190221T002037Z\n" + - "20190221/us-west-1/s3/aws4_request\n" + - "c297c080cce4e0927779823d3fd1f5cae71481a8f7dfc7e18d91851294efc47d"); - LambdaTestUtils.intercept(SecretManager.InvalidToken.class, " No S3 " + - "secret found for S3 identifier", - () -> secretManager.retrievePassword(identifier)); - } - - /** - * Validate hash using public key of KeyPair. - */ - private void validateHash(byte[] hash, byte[] identifier) throws Exception { - Signature rsaSignature = - Signature.getInstance(securityConfig.getSignatureAlgo(), - securityConfig.getProvider()); - rsaSignature.initVerify(certificateClient.getPublicKey()); - rsaSignature.update(identifier); - Assert.assertTrue(rsaSignature.verify(hash)); - } - - /** - * Create instance of {@link OzoneDelegationTokenSecretManager}. - */ - private OzoneDelegationTokenSecretManager - createSecretManager(OzoneConfiguration config, long tokenMaxLife, - long expiry, long tokenRemoverScanTime) throws IOException { - return new OzoneDelegationTokenSecretManager(config, tokenMaxLife, - expiry, tokenRemoverScanTime, serviceRpcAdd, s3SecretManager, - certificateClient); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java deleted file mode 100644 index cb7caf31695..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; - -/** - * Test class for OzoneManagerDelegationToken. - */ -public class TestOzoneManagerBlockToken { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneManagerBlockToken.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneManagerBlockToken.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static long expiryTime; - private static KeyPair keyPair; - private static X509Certificate cert; - private static final long MAX_LEN = 1000; - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. 
- cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - } - - @After - public void cleanUp() { - } - - @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneBlockTokenIdentifier("", "", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - byte[] signature = rsaSignature.sign(); - return signature; - } - - public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - boolean isValid = rsaSignature.verify(signature); - return isValid; - } - - private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - OzoneBlockTokenIdentifier generateTestToken() { - return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6), - RandomStringUtils.randomAlphabetic(5), - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - } - - @Test - public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - 
List tokenPasswordAsym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate omCert; - omCert = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster", - kp, 30, "SHA256withRSA"); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordAsym.add( - signTokenAsymmetric(tokenIds.get(i), kp.getPrivate())); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration / testTokenCount); - - startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), omCert); - } - duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration / testTokenCount); - } - - @Test - public void testSymmetricTokenPerf() { - String hmacSHA1 = "HmacSHA1"; - String hmacSHA256 = "HmacSHA256"; - - testSymmetricTokenPerfHelper(hmacSHA1, 64); - testSymmetricTokenPerfHelper(hmacSHA256, 1024); - } - - public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordSym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyGenerator keyGen; - try { - keyGen = KeyGenerator.getInstance(hmacAlgorithm); - keyGen.init(keyLen); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - Mac mac; - try { - mac = Mac.getInstance(hmacAlgorithm); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - SecretKey secretKey = keyGen.generateKey(); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordSym.add( - signTokenSymmetric(tokenIds.get(i), mac, secretKey)); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration / testTokenCount); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java deleted file mode 100644 index f26869d18de..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.ssl.TestSSLFactory; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test class for {@link OzoneTokenIdentifier}. 
- */ -public class TestOzoneTokenIdentifier { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneTokenIdentifier.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneTokenIdentifier.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static File base; - private static String sslConfsDir; - private static final String EXCLUDE_CIPHERS = - "TLS_ECDHE_RSA_WITH_RC4_128_SHA," - + "SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, \n" - + "SSL_RSA_WITH_DES_CBC_SHA," - + "SSL_DHE_RSA_WITH_DES_CBC_SHA, " - + "SSL_RSA_EXPORT_WITH_RC4_40_MD5,\t \n" - + "SSL_RSA_EXPORT_WITH_DES40_CBC_SHA," - + "SSL_RSA_WITH_RC4_128_MD5"; - - @BeforeClass - public static void setUp() throws Exception { - base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - } - - private Configuration createConfiguration(boolean clientCert, - boolean trustStore) - throws Exception { - Configuration conf = new Configuration(); - KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf, - clientCert, trustStore, EXCLUDE_CIPHERS); - sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class); - return conf; - } - - @AfterClass - static public void cleanUp() throws Exception { - FileUtil.fullyDelete(base); - KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir); - } - - @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - // Create Ozone Master key pair - KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneTokenIdentifier tokenId = new OzoneTokenIdentifier(); - tokenId.setOmCertSerialId("123"); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." 
: "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneTokenIdentifier(new Text("oozie"), - new Text("rm"), new Text("client")); - tokenId.setOmCertSerialId("123"); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - public byte[] signTokenAsymmetric(OzoneTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - byte[] signature = rsaSignature.sign(); - return signature; - } - - public boolean verifyTokenAsymmetric(OzoneTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - boolean isValide = rsaSignature.verify(signature); - return isValide; - } - - private byte[] signTokenSymmetric(OzoneTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - OzoneTokenIdentifier generateTestToken() { - OzoneTokenIdentifier tokenIdentifier = new OzoneTokenIdentifier( - new Text(RandomStringUtils.randomAlphabetic(6)), - new Text(RandomStringUtils.randomAlphabetic(5)), - new Text(RandomStringUtils.randomAlphabetic(4))); - tokenIdentifier.setOmCertSerialId("123"); - return tokenIdentifier; - } - - @Test - public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordAsym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate cert; - cert = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster", - keyPair, 30, "SHA256withRSA"); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordAsym.add( - signTokenAsymmetric(tokenIds.get(i), keyPair.getPrivate())); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration/testTokenCount); - - startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), cert); - } - duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration/testTokenCount); - } - - @Test - public void testSymmetricTokenPerf() { - String hmacSHA1 = "HmacSHA1"; - String hmacSHA256 = "HmacSHA256"; - - testSymmetricTokenPerfHelper(hmacSHA1, 64); - testSymmetricTokenPerfHelper(hmacSHA256, 1024); - } - - - public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordSym = new ArrayList<>(); - for 
(int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyGenerator keyGen; - try { - keyGen = KeyGenerator.getInstance(hmacAlgorithm); - keyGen.init(keyLen); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - Mac mac; - try { - mac = Mac.getInstance(hmacAlgorithm); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - SecretKey secretKey = keyGen.generateKey(); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordSym.add( - signTokenSymmetric(tokenIds.get(i), mac, secretKey)); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration/testTokenCount); - } - - /* - * Test serialization/deserialization of OzoneTokenIdentifier. - */ - @Test - public void testReadWriteInProtobuf() throws IOException { - OzoneTokenIdentifier id = getIdentifierInst(); - File idFile = new File(BASEDIR + "/tokenFile"); - - FileOutputStream fop = new FileOutputStream(idFile); - DataOutputStream dataOutputStream = new DataOutputStream(fop); - id.write(dataOutputStream); - fop.close(); - - FileInputStream fis = new FileInputStream(idFile); - DataInputStream dis = new DataInputStream(fis); - OzoneTokenIdentifier id2 = new OzoneTokenIdentifier(); - - id2.readFields(dis); - Assert.assertEquals(id, id2); - } - - - public OzoneTokenIdentifier getIdentifierInst() { - OzoneTokenIdentifier id = new OzoneTokenIdentifier(); - id.setOwner(new Text("User1")); - id.setRenewer(new Text("yarn")); - id.setIssueDate(Time.now()); - id.setMaxDate(Time.now() + 5000); - id.setSequenceNumber(1); - id.setOmCertSerialId("123"); - return id; - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java deleted file mode 100644 index a36f325231b..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; -/** - * Ozone security tests. 
- */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java deleted file mode 100644 index 82e755ccf7e..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestObjectPrinter.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell; - -import java.io.IOException; -import java.util.ArrayList; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; - -import org.junit.Assert; -import static org.junit.Assert.*; -import org.junit.Test; -import org.mockito.Mockito; - -/** - * Test the json object printer. - */ -public class TestObjectPrinter { - - @Test - public void printObjectAsJson() throws IOException { - - OzoneConfiguration conf = new OzoneConfiguration(); - OzoneVolume volume = - new OzoneVolume(conf, Mockito.mock(ClientProtocol.class), "name", - "admin", "owner", 1L, 0L, - new ArrayList<>()); - - String result = ObjectPrinter.getObjectAsJson(volume); - Assert.assertTrue("Result is not a proper json", - result.contains("\"owner\"")); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java deleted file mode 100644 index 7ae052059be..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.web.ozShell; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; - -import org.apache.hadoop.ozone.client.OzoneClientException; - -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -/** - * Test ozone URL parsing. - */ -@RunWith(Parameterized.class) -public class TestOzoneAddress { - - @Parameters - public static Collection data() { - return Arrays.asList(new Object[][] { - {"o3fs://localhost:9878/"}, - {"o3fs://localhost/"}, - {"o3fs:///"}, - {"/"}, - {""} - }); - } - - private String prefix; - - public TestOzoneAddress(String prefix) { - this.prefix = prefix; - } - - @Test - public void checkUrlTypes() throws OzoneClientException, IOException { - OzoneAddress address; - - address = new OzoneAddress(""); - address.ensureRootAddress(); - - address = new OzoneAddress(prefix + ""); - address.ensureRootAddress(); - - address = new OzoneAddress(prefix + "vol1"); - address.ensureVolumeAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - - address = new OzoneAddress(prefix + "vol1/bucket"); - address.ensureBucketAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - Assert.assertEquals("bucket", address.getBucketName()); - - address = new OzoneAddress(prefix + "vol1/bucket/"); - address.ensureBucketAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - Assert.assertEquals("bucket", address.getBucketName()); - - address = new OzoneAddress(prefix + "vol1/bucket/key"); - address.ensureKeyAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - Assert.assertEquals("bucket", address.getBucketName()); - Assert.assertEquals("key", address.getKeyName()); - - address = new OzoneAddress(prefix + "vol1/bucket/key/"); - address.ensureKeyAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - Assert.assertEquals("bucket", address.getBucketName()); - Assert.assertEquals("key/", address.getKeyName()); - - address = new OzoneAddress(prefix + "vol1/bucket/key1/key3/key"); - address.ensureKeyAddress(); - Assert.assertEquals("vol1", address.getVolumeName()); - Assert.assertEquals("bucket", address.getBucketName()); - Assert.assertEquals("key1/key3/key", address.getKeyName()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java deleted file mode 100644 index 80c19858549..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.web.ozShell; -/** - * Tests for ozone shell.. - */ diff --git a/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker deleted file mode 100644 index 3c9e1c8a697..00000000000 --- a/hadoop-ozone/ozone-manager/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -mock-maker-inline \ No newline at end of file diff --git a/hadoop-ozone/ozonefs-lib-current/pom.xml b/hadoop-ozone/ozonefs-lib-current/pom.xml deleted file mode 100644 index 1645ccc82b2..00000000000 --- a/hadoop-ozone/ozonefs-lib-current/pom.xml +++ /dev/null @@ -1,214 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-filesystem-lib-current - Apache Hadoop Ozone FileSystem Single Jar Library - jar - This projects creates an uber jar from ozonefs with all the - dependencies. 
- - 0.5.0-SNAPSHOT - - UTF-8 - true - org.apache.hadoop.ozone.shaded - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.codehaus.mojo - animal-sniffer-maven-plugin - - - signature-check - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - true - - - - org.apache.maven.plugins - maven-shade-plugin - - - package - - shade - - - - - - META-INF/BC1024KE.DSA - META-INF/BC2048KE.DSA - META-INF/BC1024KE.SF - META-INF/BC2048KE.SF - - - - - ozone-default-generated.xml - - - - - org - - ${shaded.prefix}.org - - - org.yaml.**.* - org.sqlite.**.* - org.tukaani.**.* - org.bouncycastle.**.* - org.fusesource.leveldbjni.**.* - org.rocksdb.**.* - org.apache.commons.cli.**.* - org.apache.commons.compress.**.* - org.apache.commons.codec.**.* - org.apache.commons.beanutils.**.* - org.apache.commons.collections.**.* - org.apache.commons.digester.**.* - org.apache.commons.logging.**.* - org.apache.commons.pool2.**.* - org.apache.commons.validator.**.* - org.sqlite.**.* - org.apache.thrift.**.* - - org.iq80.**.* - org.fusesource.**.* - - org.apache.http.**.* - - - - com - - ${shaded.prefix}.com - - - com.google.common.**.* - com.google.gson.**.* - com.codahale.**.* - com.lmax.**.* - com.github.joshelser.**.* - com.twitter.**.* - - - - picocli - - ${shaded.prefix}.picocli - - - - info - - ${shaded.prefix}.info - - - - io - - ${shaded.prefix}.io - - - - - - okio - - ${shaded.prefix}.okio - - - - okhttp3 - - ${shaded.prefix}.okhttp3 - - - - - - - - - - - - - org.apache.hadoop - hadoop-ozone-filesystem - compile - - - org.apache.hadoop - hadoop-common - - - org.apache.hadoop - hadoop-hdfs - - - org.apache.hadoop - hadoop-hdfs-client - - - org.slf4j - slf4j-api - - - org.apache.logging.log4j - log4j-api - - - org.apache.logging.log4j - log4j-core - - - com.google.code.findbugs - jsr305 - - - - - diff --git a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem deleted file mode 100644 index 03680027d53..00000000000 --- a/hadoop-ozone/ozonefs-lib-current/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -org.apache.hadoop.fs.ozone.OzoneFileSystem diff --git a/hadoop-ozone/ozonefs-lib-legacy/pom.xml b/hadoop-ozone/ozonefs-lib-legacy/pom.xml deleted file mode 100644 index c2483088d8c..00000000000 --- a/hadoop-ozone/ozonefs-lib-legacy/pom.xml +++ /dev/null @@ -1,139 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-filesystem-lib-legacy - Apache Hadoop Ozone FileSystem Legacy Jar Library - This projects creates an uberjar from ozonefs with all the - dependencies, but the dependencies are located in an isolated subdir - and loaded by a custom class loader. Can be used together with Hadoop 2.x - - jar - 0.5.0-SNAPSHOT - - UTF-8 - true - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - include-dependencies - - unpack-dependencies - - prepare-package - - target/classes/libs - compile - META-INF/*.SF - - slf4j-api,slf4j-log4j12,log4j-api,log4j-core,log4j,hadoop-ozone-filesystem - - - ${project.build.directory}/dependency-maven-plugin-markers-lib - - - - - - include-ozonefs - - unpack-dependencies - - prepare-package - - target/classes - hadoop-ozone-filesystem,hadoop-ozone-common - - compile - META-INF/*.SF - - ${project.build.directory}/dependency-maven-plugin-markers-direct - - - - - - include-token - - unpack-dependencies - - prepare-package - - target/classes - hadoop-ozone-common,hadoop-hdds-common - compile - - org/apache/hadoop/ozone/security/OzoneTokenIdentifier.class,org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.class,org/apache/hadoop/ozone/protocol/proto/OzoneManagerProtocolProtos*,org/apache/hadoop/hdds/protocol/proto/HddsProtos* - - META-INF/*.SF - - ${project.build.directory}/dependency-maven-plugin-markers-token - - - - - - - org.codehaus.mojo - animal-sniffer-maven-plugin - - - signature-check - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - true - - - - - - - - org.apache.hadoop - hadoop-ozone-filesystem - compile - - - diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem deleted file mode 100644 index 39ca3489c62..00000000000 --- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -org.apache.hadoop.fs.ozone.BasicOzoneFileSystem diff --git a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt b/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt deleted file mode 100644 index 85c13074a3b..00000000000 --- a/hadoop-ozone/ozonefs-lib-legacy/src/main/resources/ozonefs.txt +++ /dev/null @@ -1,21 +0,0 @@ - - -Apache Hadoop Ozone placeholder file. - -The usage of the legacy version of the uber jar can be detected based on -the existence of this file. diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml deleted file mode 100644 index 4f850701437..00000000000 --- a/hadoop-ozone/ozonefs/pom.xml +++ /dev/null @@ -1,219 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-filesystem - Apache Hadoop Ozone FileSystem - jar - 0.5.0-SNAPSHOT - - UTF-8 - true - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - deplist - compile - - list - - - - - ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - - ITestOzoneContract*.java - **/Test*.java - **/*Test.java - **/*Tests.java - **/*TestCase.java - - - - - org.codehaus.mojo - animal-sniffer-maven-plugin - - - signature-check - - - - - - - - - - org.apache.hadoop - hadoop-common - compile - - - org.apache.hadoop - hadoop-hdfs - compile - - - org.apache.hadoop - hadoop-hdds-common - - - org.apache.hadoop - hadoop-hdds-server-scm - test - - - org.apache.hadoop - hadoop-hdds-server-framework - test - - - org.apache.hadoop - hadoop-ozone-ozone-manager - test - - - org.apache.hadoop - hadoop-hdds-container-service - test - - - org.apache.hadoop - hadoop-hdds-client - - - org.apache.hadoop - hadoop-ozone-common - - - org.apache.httpcomponents - httpclient - - - com.github.spotbugs - spotbugs - provided - - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-ozone-client - - - org.apache.hadoop - hadoop-hdfs - test - test-jar - - - org.apache.hadoop - hadoop-ozone-integration-test - test - test-jar - - - - org.mockito - mockito-all - 1.10.19 - test - - - junit - junit - test - - - org.apache.hadoop - hadoop-distcp - test - - - org.apache.hadoop - hadoop-distcp - test - test-jar - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - test - - - org.powermock - powermock-module-junit4 - 1.6.5 - test - - - org.javassist - javassist - - - - - org.powermock - powermock-api-mockito - 1.6.5 - test - - - org.hamcrest - hamcrest-core - - - - - diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java deleted file mode 100644 index 06ebc152953..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicKeyInfo.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -/** - * Minimum set of Ozone key information attributes. - *

- * This class doesn't depend on any other ozone class just on primitive - * java types. It could be used easily in the signature of OzoneClientAdapter - * as even if a separate class loader is loaded it won't cause any - * dependency problem. - */ -public class BasicKeyInfo { - - private String name; - - private long modificationTime; - - private long dataSize; - - public BasicKeyInfo(String name, long modificationTime, long size) { - this.name = name; - this.modificationTime = modificationTime; - this.dataSize = size; - } - - public String getName() { - return name; - } - - public long getModificationTime() { - return modificationTime; - } - - public long getDataSize() { - return dataSize; - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java deleted file mode 100644 index 52a8ede1b6d..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; - -/** - * ozone implementation of AbstractFileSystem. - * This impl delegates to the OzoneFileSystem - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class BasicOzFs extends DelegateToFileSystem { - - public BasicOzFs(URI theUri, Configuration conf) - throws IOException, URISyntaxException { - super(theUri, new BasicOzoneFileSystem(), conf, - OzoneConsts.OZONE_URI_SCHEME, false); - } - -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java deleted file mode 100644 index 9ea03b545f3..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ /dev/null @@ -1,446 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenRenewer; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Basic Implementation of the OzoneFileSystem calls. - *

- * This is the minimal version which doesn't include any statistics. - *

- * For full featured version use OzoneClientAdapterImpl. - */ -public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter { - - static final Logger LOG = - LoggerFactory.getLogger(BasicOzoneClientAdapterImpl.class); - - private OzoneClient ozoneClient; - private ObjectStore objectStore; - private OzoneVolume volume; - private OzoneBucket bucket; - private ReplicationType replicationType; - private ReplicationFactor replicationFactor; - private boolean securityEnabled; - - /** - * Create new OzoneClientAdapter implementation. - * - * @param volumeStr Name of the volume to use. - * @param bucketStr Name of the bucket to use - * @throws IOException In case of a problem. - */ - public BasicOzoneClientAdapterImpl(String volumeStr, String bucketStr) - throws IOException { - this(createConf(), volumeStr, bucketStr); - } - - private static OzoneConfiguration createConf() { - ClassLoader contextClassLoader = - Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(null); - try { - return new OzoneConfiguration(); - } finally { - Thread.currentThread().setContextClassLoader(contextClassLoader); - } - } - - public BasicOzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr, - String bucketStr) - throws IOException { - this(null, -1, conf, volumeStr, bucketStr); - } - - public BasicOzoneClientAdapterImpl(String omHost, int omPort, - Configuration hadoopConf, String volumeStr, String bucketStr) - throws IOException { - - ClassLoader contextClassLoader = - Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(null); - - try { - OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf); - - if (omHost == null && OmUtils.isServiceIdsDefined(conf)) { - // When the host name or service id isn't given - // but ozone.om.service.ids is defined, declare failure. - - // This is a safety precaution that prevents the client from - // accidentally failing over to an unintended OM. 
- throw new IllegalArgumentException("Service ID or host name must not" - + " be omitted when ozone.om.service.ids is defined."); - } - - if (omPort != -1) { - // When the port number is specified, perform the following check - if (OmUtils.isOmHAServiceId(conf, omHost)) { - // If omHost is a service id, it shouldn't use a port - throw new IllegalArgumentException("Port " + omPort + - " specified in URI but host '" + omHost + "' is a " - + "logical (HA) OzoneManager and does not use port information."); - } - } else { - // When port number is not specified, read it from config - omPort = OmUtils.getOmRpcPort(conf); - } - - SecurityConfig secConfig = new SecurityConfig(conf); - - if (secConfig.isSecurityEnabled()) { - this.securityEnabled = true; - } - - String replicationTypeConf = - conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE, - OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT); - - int replicationCountConf = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION, - OzoneConfigKeys.OZONE_REPLICATION_DEFAULT); - - if (OmUtils.isOmHAServiceId(conf, omHost)) { - // omHost is listed as one of the service ids in the config, - // thus we should treat omHost as omServiceId - this.ozoneClient = - OzoneClientFactory.getRpcClient(omHost, conf); - } else if (StringUtils.isNotEmpty(omHost) && omPort != -1) { - this.ozoneClient = - OzoneClientFactory.getRpcClient(omHost, omPort, conf); - } else { - this.ozoneClient = - OzoneClientFactory.getRpcClient(conf); - } - objectStore = ozoneClient.getObjectStore(); - this.volume = objectStore.getVolume(volumeStr); - this.bucket = volume.getBucket(bucketStr); - this.replicationType = ReplicationType.valueOf(replicationTypeConf); - this.replicationFactor = ReplicationFactor.valueOf(replicationCountConf); - } finally { - Thread.currentThread().setContextClassLoader(contextClassLoader); - } - - } - - @Override - public void close() throws IOException { - ozoneClient.close(); - } - - @Override - public InputStream readFile(String key) throws IOException { - incrementCounter(Statistic.OBJECTS_READ); - try { - return bucket.readFile(key).getInputStream(); - } catch (OMException ex) { - if (ex.getResult() == OMException.ResultCodes.FILE_NOT_FOUND - || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) { - throw new FileNotFoundException( - ex.getResult().name() + ": " + ex.getMessage()); - } else { - throw ex; - } - } - } - - protected void incrementCounter(Statistic objectsRead) { - //noop: Use OzoneClientAdapterImpl which supports statistics. - } - - @Override - public OzoneFSOutputStream createFile(String key, boolean overWrite, - boolean recursive) throws IOException { - incrementCounter(Statistic.OBJECTS_CREATED); - try { - OzoneOutputStream ozoneOutputStream = bucket - .createFile(key, 0, replicationType, replicationFactor, overWrite, - recursive); - return new OzoneFSOutputStream(ozoneOutputStream.getOutputStream()); - } catch (OMException ex) { - if (ex.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS - || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) { - throw new FileAlreadyExistsException( - ex.getResult().name() + ": " + ex.getMessage()); - } else { - throw ex; - } - } - } - - @Override - public void renameKey(String key, String newKeyName) throws IOException { - incrementCounter(Statistic.OBJECTS_RENAMED); - bucket.renameKey(key, newKeyName); - } - - /** - * Helper method to create an directory specified by key name in bucket. 
- * - * @param keyName key name to be created as directory - * @return true if the key is created, false otherwise - */ - @Override - public boolean createDirectory(String keyName) throws IOException { - LOG.trace("creating dir for key:{}", keyName); - incrementCounter(Statistic.OBJECTS_CREATED); - try { - bucket.createDirectory(keyName); - } catch (OMException e) { - if (e.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS) { - throw new FileAlreadyExistsException(e.getMessage()); - } - throw e; - } - return true; - } - - /** - * Helper method to delete an object specified by key name in bucket. - * - * @param keyName key name to be deleted - * @return true if the key is deleted, false otherwise - */ - @Override - public boolean deleteObject(String keyName) { - LOG.trace("issuing delete for key" + keyName); - try { - incrementCounter(Statistic.OBJECTS_DELETED); - bucket.deleteKey(keyName); - return true; - } catch (IOException ioe) { - LOG.error("delete key failed " + ioe.getMessage()); - return false; - } - } - - public FileStatusAdapter getFileStatus(String key, URI uri, - Path qualifiedPath, String userName) - throws IOException { - try { - incrementCounter(Statistic.OBJECTS_QUERY); - OzoneFileStatus status = bucket.getFileStatus(key); - makeQualified(status, uri, qualifiedPath, userName); - return toFileStatusAdapter(status); - - } catch (OMException e) { - if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) { - throw new - FileNotFoundException(key + ": No such file or directory!"); - } - throw e; - } - } - - public void makeQualified(FileStatus status, URI uri, Path path, - String username) { - if (status instanceof OzoneFileStatus) { - ((OzoneFileStatus) status) - .makeQualified(uri, path, - username, username); - } - - } - - @Override - public Iterator listKeys(String pathKey) { - incrementCounter(Statistic.OBJECTS_LIST); - return new IteratorAdapter(bucket.listKeys(pathKey)); - } - - public List listStatus(String keyName, boolean recursive, - String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException { - try { - incrementCounter(Statistic.OBJECTS_LIST); - List statuses = bucket - .listStatus(keyName, recursive, startKey, numEntries); - - List result = new ArrayList<>(); - for (OzoneFileStatus status : statuses) { - Path qualifiedPath = status.getPath().makeQualified(uri, workingDir); - makeQualified(status, uri, qualifiedPath, username); - result.add(toFileStatusAdapter(status)); - } - return result; - } catch (OMException e) { - if (e.getResult() == OMException.ResultCodes.FILE_NOT_FOUND) { - throw new FileNotFoundException(e.getMessage()); - } - throw e; - } - } - - @Override - public Token getDelegationToken(String renewer) - throws IOException { - if (!securityEnabled) { - return null; - } - Token token = ozoneClient.getObjectStore() - .getDelegationToken(renewer == null ? null : new Text(renewer)); - token.setKind(OzoneTokenIdentifier.KIND_NAME); - return token; - - } - - @Override - public KeyProvider getKeyProvider() throws IOException { - return objectStore.getKeyProvider(); - } - - @Override - public URI getKeyProviderUri() throws IOException { - return objectStore.getKeyProviderUri(); - } - - @Override - public String getCanonicalServiceName() { - return objectStore.getCanonicalServiceName(); - } - - /** - * Ozone Delegation Token Renewer. 
- */ - @InterfaceAudience.Private - public static class Renewer extends TokenRenewer { - - //Ensure that OzoneConfiguration files are loaded before trying to use - // the renewer. - static { - OzoneConfiguration.activate(); - } - - public Text getKind() { - return OzoneTokenIdentifier.KIND_NAME; - } - - @Override - public boolean handleKind(Text kind) { - return getKind().equals(kind); - } - - @Override - public boolean isManaged(Token token) throws IOException { - return true; - } - - @Override - public long renew(Token token, Configuration conf) - throws IOException, InterruptedException { - Token ozoneDt = - (Token) token; - OzoneClient ozoneClient = - OzoneClientFactory.getRpcClient(conf); - return ozoneClient.getObjectStore().renewDelegationToken(ozoneDt); - } - - @Override - public void cancel(Token token, Configuration conf) - throws IOException, InterruptedException { - Token ozoneDt = - (Token) token; - OzoneClient ozoneClient = - OzoneClientFactory.getRpcClient(conf); - ozoneClient.getObjectStore().cancelDelegationToken(ozoneDt); - } - } - - /** - * Adapter to convert OzoneKey to a safe and simple Key implementation. - */ - public static class IteratorAdapter implements Iterator { - - private Iterator original; - - public IteratorAdapter(Iterator listKeys) { - this.original = listKeys; - } - - @Override - public boolean hasNext() { - return original.hasNext(); - } - - @Override - public BasicKeyInfo next() { - OzoneKey next = original.next(); - if (next == null) { - return null; - } else { - return new BasicKeyInfo( - next.getName(), - next.getModificationTime(), - next.getDataSize() - ); - } - } - } - - private FileStatusAdapter toFileStatusAdapter(OzoneFileStatus status) { - return new FileStatusAdapter( - status.getLen(), - status.getPath(), - status.isDirectory(), - status.getReplication(), - status.getBlockSize(), - status.getModificationTime(), - status.getAccessTime(), - status.getPermission().toShort(), - status.getOwner(), - status.getGroup(), - status.getPath() - ); - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java deleted file mode 100644 index 298fd2e6937..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ /dev/null @@ -1,787 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.ozone; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.EnumSet; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CreateFlag; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Progressable; - -import com.google.common.base.Preconditions; -import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; -import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; -import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; - -import org.apache.http.client.utils.URIBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * The minimal Ozone Filesystem implementation. - *

- * This is a basic version which doesn't extend - * KeyProviderTokenIssuer and doesn't include statistics. It can be used - * from older hadoop version. For newer hadoop version use the full featured - * OzoneFileSystem. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BasicOzoneFileSystem extends FileSystem { - static final Logger LOG = - LoggerFactory.getLogger(BasicOzoneFileSystem.class); - - /** - * The Ozone client for connecting to Ozone server. - */ - - private URI uri; - private String userName; - private Path workingDir; - - private OzoneClientAdapter adapter; - - private static final Pattern URL_SCHEMA_PATTERN = - Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)"); - - private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " + - "should be one of the following formats: " + - "o3fs://bucket.volume/key OR " + - "o3fs://bucket.volume.om-host.example.com/key OR " + - "o3fs://bucket.volume.om-host.example.com:5678/key"; - - @Override - public void initialize(URI name, Configuration conf) throws IOException { - super.initialize(name, conf); - setConf(conf); - Objects.requireNonNull(name.getScheme(), "No scheme provided in " + name); - Preconditions.checkArgument(getScheme().equals(name.getScheme()), - "Invalid scheme provided in " + name); - - String authority = name.getAuthority(); - if (authority == null) { - // authority is null when fs.defaultFS is not a qualified o3fs URI and - // o3fs:/// is passed to the client. matcher will NPE if authority is null - throw new IllegalArgumentException(URI_EXCEPTION_TEXT); - } - - Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority); - - if (!matcher.matches()) { - throw new IllegalArgumentException(URI_EXCEPTION_TEXT); - } - String bucketStr = matcher.group(1); - String volumeStr = matcher.group(2); - String remaining = matcher.groupCount() == 3 ? matcher.group(3) : null; - - String omHost = null; - int omPort = -1; - if (!isEmpty(remaining)) { - String[] parts = remaining.split(":"); - // Array length should be either 1(hostname or service id) or 2(host:port) - if (parts.length > 2) { - throw new IllegalArgumentException(URI_EXCEPTION_TEXT); - } - omHost = parts[0]; - if (parts.length == 2) { - try { - omPort = Integer.parseInt(parts[1]); - } catch (NumberFormatException e) { - throw new IllegalArgumentException(URI_EXCEPTION_TEXT); - } - } - } - - try { - uri = new URIBuilder().setScheme(OZONE_URI_SCHEME) - .setHost(authority) - .build(); - LOG.trace("Ozone URI for ozfs initialization is " + uri); - - //isolated is the default for ozonefs-lib-legacy which includes the - // /ozonefs.txt, otherwise the default is false. It could be overridden. 
- boolean defaultValue = - BasicOzoneFileSystem.class.getClassLoader() - .getResource("ozonefs.txt") - != null; - - //Use string here instead of the constant as constant may not be available - //on the classpath of a hadoop 2.7 - boolean isolatedClassloader = - conf.getBoolean("ozone.fs.isolated-classloader", defaultValue); - - this.adapter = createAdapter(conf, bucketStr, volumeStr, omHost, omPort, - isolatedClassloader); - - try { - this.userName = - UserGroupInformation.getCurrentUser().getShortUserName(); - } catch (IOException e) { - this.userName = OZONE_DEFAULT_USER; - } - this.workingDir = new Path(OZONE_USER_DIR, this.userName) - .makeQualified(this.uri, this.workingDir); - } catch (URISyntaxException ue) { - final String msg = "Invalid Ozone endpoint " + name; - LOG.error(msg, ue); - throw new IOException(msg, ue); - } - } - - protected OzoneClientAdapter createAdapter(Configuration conf, - String bucketStr, - String volumeStr, String omHost, int omPort, - boolean isolatedClassloader) throws IOException { - - if (isolatedClassloader) { - - return OzoneClientAdapterFactory - .createAdapter(volumeStr, bucketStr); - - } else { - - return new BasicOzoneClientAdapterImpl(omHost, omPort, conf, - volumeStr, bucketStr); - } - } - - @Override - public void close() throws IOException { - try { - adapter.close(); - } finally { - super.close(); - } - } - - @Override - public URI getUri() { - return uri; - } - - @Override - public String getScheme() { - return OZONE_URI_SCHEME; - } - - @Override - public FSDataInputStream open(Path f, int bufferSize) throws IOException { - incrementCounter(Statistic.INVOCATION_OPEN); - statistics.incrementWriteOps(1); - LOG.trace("open() path:{}", f); - final String key = pathToKey(f); - return new FSDataInputStream(new OzoneFSInputStream(adapter.readFile(key))); - } - - protected void incrementCounter(Statistic statistic) { - //don't do anything in this default implementation.
- } - - @Override - public FSDataOutputStream create(Path f, FsPermission permission, - boolean overwrite, int bufferSize, - short replication, long blockSize, - Progressable progress) throws IOException { - LOG.trace("create() path:{}", f); - incrementCounter(Statistic.INVOCATION_CREATE); - statistics.incrementWriteOps(1); - final String key = pathToKey(f); - return createOutputStream(key, overwrite, true); - } - - @Override - public FSDataOutputStream createNonRecursive(Path path, - FsPermission permission, - EnumSet flags, - int bufferSize, - short replication, - long blockSize, - Progressable progress) throws IOException { - incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE); - statistics.incrementWriteOps(1); - final String key = pathToKey(path); - return createOutputStream(key, flags.contains(CreateFlag.OVERWRITE), false); - } - - private FSDataOutputStream createOutputStream(String key, boolean overwrite, - boolean recursive) throws IOException { - return new FSDataOutputStream(adapter.createFile(key, overwrite, recursive), - statistics); - } - - @Override - public FSDataOutputStream append(Path f, int bufferSize, - Progressable progress) throws IOException { - throw new UnsupportedOperationException("append() Not implemented by the " - + getClass().getSimpleName() + " FileSystem implementation"); - } - - private class RenameIterator extends OzoneListingIterator { - private final String srcKey; - private final String dstKey; - - RenameIterator(Path srcPath, Path dstPath) - throws IOException { - super(srcPath); - srcKey = pathToKey(srcPath); - dstKey = pathToKey(dstPath); - LOG.trace("rename from:{} to:{}", srcKey, dstKey); - } - - @Override - boolean processKey(String key) throws IOException { - String newKeyName = dstKey.concat(key.substring(srcKey.length())); - adapter.renameKey(key, newKeyName); - return true; - } - } - - /** - * Check whether the source and destination path are valid and then perform - * rename from source path to destination path. - *

- * The rename operation is performed by renaming the keys with src as prefix. - * For such keys the prefix is changed from src to dst. - * - * @param src source path for rename - * @param dst destination path for rename - * @return true if rename operation succeeded or - * if the src and dst have the same path and are of the same type - * @throws IOException on I/O errors or if the src/dst paths are invalid. - */ - @Override - public boolean rename(Path src, Path dst) throws IOException { - incrementCounter(Statistic.INVOCATION_RENAME); - statistics.incrementWriteOps(1); - if (src.equals(dst)) { - return true; - } - - LOG.trace("rename() from:{} to:{}", src, dst); - if (src.isRoot()) { - // Cannot rename root of file system - LOG.trace("Cannot rename the root of a filesystem"); - return false; - } - - // Cannot rename a directory to its own subdirectory - Path dstParent = dst.getParent(); - while (dstParent != null && !src.equals(dstParent)) { - dstParent = dstParent.getParent(); - } - Preconditions.checkArgument(dstParent == null, - "Cannot rename a directory to its own subdirectory"); - // Check if the source exists - FileStatus srcStatus; - try { - srcStatus = getFileStatus(src); - } catch (FileNotFoundException fnfe) { - // source doesn't exist, return - return false; - } - - // Check if the destination exists - FileStatus dstStatus; - try { - dstStatus = getFileStatus(dst); - } catch (FileNotFoundException fnde) { - dstStatus = null; - } - - if (dstStatus == null) { - // If dst doesn't exist, check whether dst parent dir exists or not - // if the parent exists, the source can still be renamed to dst path - dstStatus = getFileStatus(dst.getParent()); - if (!dstStatus.isDirectory()) { - throw new IOException(String.format( - "Failed to rename %s to %s, %s is a file", src, dst, - dst.getParent())); - } - } else { - // if dst exists and source and destination are same, - // check both the src and dst are of same type - if (srcStatus.getPath().equals(dstStatus.getPath())) { - return !srcStatus.isDirectory(); - } else if (dstStatus.isDirectory()) { - // If dst is a directory, rename source as subpath of it. 
- // for example rename /source to /dst will lead to /dst/source - dst = new Path(dst, src.getName()); - FileStatus[] statuses; - try { - statuses = listStatus(dst); - } catch (FileNotFoundException fnde) { - statuses = null; - } - - if (statuses != null && statuses.length > 0) { - // If dst exists and not a directory not empty - throw new FileAlreadyExistsException(String.format( - "Failed to rename %s to %s, file already exists or not empty!", - src, dst)); - } - } else { - // If dst is not a directory - throw new FileAlreadyExistsException(String.format( - "Failed to rename %s to %s, file already exists!", src, dst)); - } - } - - if (srcStatus.isDirectory()) { - if (dst.toString().startsWith(src.toString() + OZONE_URI_DELIMITER)) { - LOG.trace("Cannot rename a directory to a subdirectory of self"); - return false; - } - } - RenameIterator iterator = new RenameIterator(src, dst); - boolean result = iterator.iterate(); - if (result) { - createFakeParentDirectory(src); - } - return result; - } - - private class DeleteIterator extends OzoneListingIterator { - private boolean recursive; - - DeleteIterator(Path f, boolean recursive) - throws IOException { - super(f); - this.recursive = recursive; - if (getStatus().isDirectory() - && !this.recursive - && listStatus(f).length != 0) { - throw new PathIsNotEmptyDirectoryException(f.toString()); - } - } - - @Override - boolean processKey(String key) throws IOException { - if (key.equals("")) { - LOG.trace("Skipping deleting root directory"); - return true; - } else { - LOG.trace("deleting key:" + key); - boolean succeed = adapter.deleteObject(key); - // if recursive delete is requested ignore the return value of - // deleteObject and issue deletes for other keys. - return recursive || succeed; - } - } - } - - /** - * Deletes the children of the input dir path by iterating though the - * DeleteIterator. - * - * @param f directory path to be deleted - * @return true if successfully deletes all required keys, false otherwise - * @throws IOException - */ - private boolean innerDelete(Path f, boolean recursive) throws IOException { - LOG.trace("delete() path:{} recursive:{}", f, recursive); - try { - DeleteIterator iterator = new DeleteIterator(f, recursive); - return iterator.iterate(); - } catch (FileNotFoundException e) { - if (LOG.isDebugEnabled()) { - LOG.debug("Couldn't delete {} - does not exist", f); - } - return false; - } - } - - @Override - public boolean delete(Path f, boolean recursive) throws IOException { - incrementCounter(Statistic.INVOCATION_DELETE); - statistics.incrementWriteOps(1); - LOG.debug("Delete path {} - recursive {}", f, recursive); - FileStatus status; - try { - status = getFileStatus(f); - } catch (FileNotFoundException ex) { - LOG.warn("delete: Path does not exist: {}", f); - return false; - } - - String key = pathToKey(f); - boolean result; - - if (status.isDirectory()) { - LOG.debug("delete: Path is a directory: {}", f); - key = addTrailingSlashIfNeeded(key); - - if (key.equals("/")) { - LOG.warn("Cannot delete root directory."); - return false; - } - - result = innerDelete(f, recursive); - } else { - LOG.debug("delete: Path is a file: {}", f); - result = adapter.deleteObject(key); - } - - if (result) { - // If this delete operation removes all files/directories from the - // parent direcotry, then an empty parent directory must be created. 
- createFakeParentDirectory(f); - } - - return result; - } - - /** - * Create a fake parent directory key if it does not already exist and no - * other child of this parent directory exists. - * - * @param f path to the fake parent directory - * @throws IOException - */ - private void createFakeParentDirectory(Path f) throws IOException { - Path parent = f.getParent(); - if (parent != null && !parent.isRoot()) { - createFakeDirectoryIfNecessary(parent); - } - } - - /** - * Create a fake directory key if it does not already exist. - * - * @param f path to the fake directory - * @throws IOException - */ - private void createFakeDirectoryIfNecessary(Path f) throws IOException { - String key = pathToKey(f); - if (!key.isEmpty() && !o3Exists(f)) { - LOG.debug("Creating new fake directory at {}", f); - String dirKey = addTrailingSlashIfNeeded(key); - adapter.createDirectory(dirKey); - } - } - - /** - * Check if a file or directory exists corresponding to given path. - * - * @param f path to file/directory. - * @return true if it exists, false otherwise. - * @throws IOException - */ - private boolean o3Exists(final Path f) throws IOException { - Path path = makeQualified(f); - try { - getFileStatus(path); - return true; - } catch (FileNotFoundException ex) { - return false; - } - } - - @Override - public FileStatus[] listStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_LIST_STATUS); - statistics.incrementReadOps(1); - LOG.trace("listStatus() path:{}", f); - int numEntries = LISTING_PAGE_SIZE; - LinkedList statuses = new LinkedList<>(); - List tmpStatusList; - String startKey = ""; - - do { - tmpStatusList = - adapter.listStatus(pathToKey(f), false, startKey, numEntries, uri, - workingDir, getUsername()) - .stream() - .map(this::convertFileStatus) - .collect(Collectors.toList()); - - if (!tmpStatusList.isEmpty()) { - if (startKey.isEmpty()) { - statuses.addAll(tmpStatusList); - } else { - statuses.addAll(tmpStatusList.subList(1, tmpStatusList.size())); - } - startKey = pathToKey(statuses.getLast().getPath()); - } - // listStatus returns entries numEntries in size if available. - // Any lesser number of entries indicate that the required entries have - // exhausted. - } while (tmpStatusList.size() == numEntries); - - - return statuses.toArray(new FileStatus[0]); - } - - @Override - public void setWorkingDirectory(Path newDir) { - workingDir = newDir; - } - - @Override - public Path getWorkingDirectory() { - return workingDir; - } - - @Override - public Token getDelegationToken(String renewer) throws IOException { - return adapter.getDelegationToken(renewer); - } - - /** - * Get a canonical service name for this file system. If the URI is logical, - * the hostname part of the URI will be returned. - * - * @return a service string that uniquely identifies this file system. - */ - @Override - public String getCanonicalServiceName() { - return adapter.getCanonicalServiceName(); - } - - /** - * Get the username of the FS. - * - * @return the short name of the user who instantiated the FS - */ - public String getUsername() { - return userName; - } - - /** - * Creates a directory. Directory is represented using a key with no value. - * - * @param path directory path to be created - * @return true if directory exists or created successfully. 
- * @throws IOException - */ - private boolean mkdir(Path path) throws IOException { - return adapter.createDirectory(pathToKey(path)); - } - - @Override - public boolean mkdirs(Path f, FsPermission permission) throws IOException { - LOG.trace("mkdir() path:{} ", f); - String key = pathToKey(f); - if (isEmpty(key)) { - return false; - } - return mkdir(f); - } - - @Override - public FileStatus getFileStatus(Path f) throws IOException { - incrementCounter(Statistic.INVOCATION_GET_FILE_STATUS); - statistics.incrementReadOps(1); - LOG.trace("getFileStatus() path:{}", f); - Path qualifiedPath = f.makeQualified(uri, workingDir); - String key = pathToKey(qualifiedPath); - FileStatus fileStatus = null; - try { - fileStatus = convertFileStatus( - adapter.getFileStatus(key, uri, qualifiedPath, getUsername())); - } catch (OMException ex) { - if (ex.getResult().equals(OMException.ResultCodes.KEY_NOT_FOUND)) { - throw new FileNotFoundException("File not found. path:" + f); - } - } - return fileStatus; - } - - /** - * Turn a path (relative or otherwise) into an Ozone key. - * - * @param path the path of the file. - * @return the key of the object that represents the file. - */ - public String pathToKey(Path path) { - Objects.requireNonNull(path, "Path canf not be null!"); - if (!path.isAbsolute()) { - path = new Path(workingDir, path); - } - // removing leading '/' char - String key = path.toUri().getPath().substring(1); - LOG.trace("path for key:{} is:{}", key, path); - return key; - } - - /** - * Add trailing delimiter to path if it is already not present. - * - * @param key the ozone Key which needs to be appended - * @return delimiter appended key - */ - private String addTrailingSlashIfNeeded(String key) { - if (!isEmpty(key) && !key.endsWith(OZONE_URI_DELIMITER)) { - return key + OZONE_URI_DELIMITER; - } else { - return key; - } - } - - @Override - public String toString() { - return "OzoneFileSystem{URI=" + uri + ", " - + "workingDir=" + workingDir + ", " - + "userName=" + userName + ", " - + "statistics=" + statistics - + "}"; - } - - /** - * This class provides an interface to iterate through all the keys in the - * bucket prefixed with the input path key and process them. - *

- * Each implementing class should define how the keys should be processed - * through the processKey() function. - */ - private abstract class OzoneListingIterator { - private final Path path; - private final FileStatus status; - private String pathKey; - private Iterator keyIterator; - - OzoneListingIterator(Path path) - throws IOException { - this.path = path; - this.status = getFileStatus(path); - this.pathKey = pathToKey(path); - if (status.isDirectory()) { - this.pathKey = addTrailingSlashIfNeeded(pathKey); - } - keyIterator = adapter.listKeys(pathKey); - } - - /** - * The output of processKey determines if further iteration through the - * keys should be done or not. - * - * @return true if we should continue iteration of keys, false otherwise. - * @throws IOException - */ - abstract boolean processKey(String key) throws IOException; - - /** - * Iterates thorugh all the keys prefixed with the input path's key and - * processes the key though processKey(). - * If for any key, the processKey() returns false, then the iteration is - * stopped and returned with false indicating that all the keys could not - * be processed successfully. - * - * @return true if all keys are processed successfully, false otherwise. - * @throws IOException - */ - boolean iterate() throws IOException { - LOG.trace("Iterating path {}", path); - if (status.isDirectory()) { - LOG.trace("Iterating directory:{}", pathKey); - while (keyIterator.hasNext()) { - BasicKeyInfo key = keyIterator.next(); - LOG.trace("iterating key:{}", key.getName()); - if (!processKey(key.getName())) { - return false; - } - } - return true; - } else { - LOG.trace("iterating file:{}", path); - return processKey(pathKey); - } - } - - String getPathKey() { - return pathKey; - } - - boolean pathIsDirectory() { - return status.isDirectory(); - } - - FileStatus getStatus() { - return status; - } - } - - public OzoneClientAdapter getAdapter() { - return adapter; - } - - public boolean isEmpty(CharSequence cs) { - return cs == null || cs.length() == 0; - } - - public boolean isNumber(String number) { - try { - Integer.parseInt(number); - } catch (NumberFormatException ex) { - return false; - } - return true; - } - - private FileStatus convertFileStatus( - FileStatusAdapter fileStatusAdapter) { - - Path symLink = null; - try { - fileStatusAdapter.getSymlink(); - } catch (Exception ex) { - //NOOP: If not symlink symlink remains null. - } - - return new FileStatus( - fileStatusAdapter.getLength(), - fileStatusAdapter.isDir(), - fileStatusAdapter.getBlockReplication(), - fileStatusAdapter.getBlocksize(), - fileStatusAdapter.getModificationTime(), - fileStatusAdapter.getAccessTime(), - new FsPermission(fileStatusAdapter.getPermission()), - fileStatusAdapter.getOwner(), - fileStatusAdapter.getGroup(), - symLink, - fileStatusAdapter.getPath() - ); - - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java deleted file mode 100644 index 832a0cb4051..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Constants.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
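To make the prefix-rewrite idea used by RenameIterator and OzoneListingIterator above concrete, the following is a minimal, self-contained sketch over an in-memory map. The bucket contents, key names and class name are invented for illustration and are not part of the deleted code; the real implementation delegates each per-key rename to adapter.renameKey().

import java.util.Map;
import java.util.TreeMap;

public final class PrefixRenameSketch {
  public static void main(String[] args) {
    // A flat key space standing in for a bucket: there are no real directories,
    // only keys that happen to share a prefix ending with '/'.
    Map<String, String> bucket = new TreeMap<>();
    bucket.put("dir1/", "");            // fake directory marker key
    bucket.put("dir1/file1", "data1");
    bucket.put("dir1/sub/file2", "data2");

    String srcKey = "dir1/";
    String dstKey = "renamed/";

    // Rename every key that starts with the source prefix by rewriting the
    // prefix, mirroring what RenameIterator.processKey() does per key.
    for (String key : new TreeMap<>(bucket).keySet()) {
      if (key.startsWith(srcKey)) {
        String newKey = dstKey.concat(key.substring(srcKey.length()));
        bucket.put(newKey, bucket.remove(key));
      }
    }
    System.out.println(bucket.keySet()); // [renamed/, renamed/file1, renamed/sub/file2]
  }
}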
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -/** - * Constants for Ozone FileSystem implementation. - */ -public final class Constants { - - public static final String OZONE_DEFAULT_USER = "hdfs"; - - public static final String OZONE_USER_DIR = "/user"; - - /** Local buffer directory. */ - public static final String BUFFER_DIR_KEY = "fs.ozone.buffer.dir"; - - /** Temporary directory. */ - public static final String BUFFER_TMP_KEY = "hadoop.tmp.dir"; - - /** Page size for Ozone listing operation. */ - public static final int LISTING_PAGE_SIZE = 1024; - - private Constants() { - - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java deleted file mode 100644 index 91597839340..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FileStatusAdapter.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.fs.Path; - -/** - * Class to hold the internal information of a FileStatus. - *

- * As the FileStatus class is not compatible between Hadoop 3.x and 2.x, we can - * use this adapter to hold all the required information. Hadoop 3.x FileStatus - * information can be converted to this class, and this class can be used to - * create a Hadoop 2.x FileStatus. - *

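A hedged sketch of the bridging direction described above: Hadoop 3.x FileStatus fields are copied into the adapter, and a FileStatus can later be rebuilt from it on the other side. The helper class name and all field values are invented for the example; it only roughly mirrors the convertFileStatus() method shown earlier.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public final class FileStatusBridgeSketch {
  // Rebuild a FileStatus from the adapter's plain fields.
  public static FileStatus toFileStatus(FileStatusAdapter a) {
    return new FileStatus(a.getLength(), a.isDir(), a.getBlockReplication(),
        a.getBlocksize(), a.getModificationTime(), a.getAccessTime(),
        new FsPermission(a.getPermission()), a.getOwner(), a.getGroup(),
        a.getSymlink(), a.getPath());
  }

  public static void main(String[] args) {
    // Example values only; in real use these come from the Ozone client.
    FileStatusAdapter adapter = new FileStatusAdapter(
        1024L, new Path("o3fs://bucket.volume/key"), false,
        (short) 3, 128L * 1024 * 1024, 0L, 0L, (short) 0644,
        "hadoop", "hadoop", null);
    FileStatus status = toFileStatus(adapter);
    System.out.println(status.getPath() + " len=" + status.getLen());
  }
}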
- * FileStatus (Hadoop 3.x) --> FileStatusAdapter --> FileStatus (Hadoop 2.x) - */ -public final class FileStatusAdapter { - - private final long length; - private final Path path; - private final boolean isdir; - private final short blockReplication; - private final long blocksize; - private final long modificationTime; - private final long accessTime; - private final short permission; - private final String owner; - private final String group; - private final Path symlink; - - @SuppressWarnings("checkstyle:ParameterNumber") - public FileStatusAdapter(long length, Path path, boolean isdir, - short blockReplication, long blocksize, long modificationTime, - long accessTime, short permission, String owner, - String group, Path symlink) { - this.length = length; - this.path = path; - this.isdir = isdir; - this.blockReplication = blockReplication; - this.blocksize = blocksize; - this.modificationTime = modificationTime; - this.accessTime = accessTime; - this.permission = permission; - this.owner = owner; - this.group = group; - this.symlink = symlink; - } - - public Path getPath() { - return path; - } - - public boolean isDir() { - return isdir; - } - - public short getBlockReplication() { - return blockReplication; - } - - public long getBlocksize() { - return blocksize; - } - - public long getModificationTime() { - return modificationTime; - } - - public long getAccessTime() { - return accessTime; - } - - public short getPermission() { - return permission; - } - - public String getOwner() { - return owner; - } - - public String getGroup() { - return group; - } - - public Path getSymlink() { - return symlink; - } - - public long getLength() { - return length; - } - -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java deleted file mode 100644 index a90797efdd8..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/FilteredClassLoader.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import java.net.URL; -import java.net.URLClassLoader; -import java.util.HashSet; -import java.util.Set; - -import org.apache.hadoop.util.StringUtils; - -/** - * Class loader which delegates the loading only for the selected class. - * - *

- * By default a Java classloader first delegates all class loading to the - * parent, and loads the class itself only if the parent cannot find it. - *

- * This simple class loader does the opposite. Everything is loaded with this - * class loader without delegation _except_ the few classes which are defined - * in the constructor. - *

- * With this method we can use two separate class loaders (the original main - * classloader and an instance of this one, which loads classes separately), but the - * few selected classes are shared between the two class loaders. - *

- * With this approach it's possible to use any older hadoop version - * (main classloader) together with ozonefs (instance of this classloader) as - * only the selected classes are selected between the class loaders. - */ -public class FilteredClassLoader extends URLClassLoader { - - private final ClassLoader systemClassLoader; - - private final ClassLoader delegate; - private Set delegatedClasses = new HashSet<>(); - - public FilteredClassLoader(URL[] urls, ClassLoader parent) { - super(urls, null); - delegatedClasses.add("org.apache.hadoop.crypto.key.KeyProvider"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneClientAdapter"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.FileStatusAdapter"); - delegatedClasses.add("org.apache.hadoop.security.token.Token"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.BasicKeyInfo"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSOutputStream"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.OzoneFSStorageStatistics"); - delegatedClasses.add("org.apache.hadoop.fs.ozone.Statistic"); - delegatedClasses.add("org.apache.hadoop.fs.Seekable"); - delegatedClasses.add("org.apache.hadoop.io.Text"); - delegatedClasses.add("org.apache.hadoop.fs.Path"); - delegatedClasses.addAll(StringUtils.getTrimmedStringCollection( - System.getenv("HADOOP_OZONE_DELEGATED_CLASSES"))); - this.delegate = parent; - systemClassLoader = getSystemClassLoader(); - - } - - @Override - public Class loadClass(String name) throws ClassNotFoundException { - if (delegatedClasses.contains(name) || - name.startsWith("org.apache.log4j") || - name.startsWith("org.slf4j")) { - return delegate.loadClass(name); - } - return super.loadClass(name); - } - - private Class loadFromSystem(String name) { - if (systemClassLoader != null) { - try { - return systemClassLoader.loadClass(name); - } catch (ClassNotFoundException ex) { - //no problem - return null; - } - } else { - return null; - } - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java deleted file mode 100644 index a0ec01f6f67..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
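The sketch below is a minimal, standalone illustration of the same "child-first except for a shared allow-list" idea used by FilteredClassLoader above. The class name and allow-list contents are invented; it is not the deleted implementation itself.

import java.net.URL;
import java.net.URLClassLoader;
import java.util.Set;

// Child-first classloader that delegates to the parent only for an allow-list,
// so shared interface types remain compatible across the two loaders.
public class AllowListClassLoader extends URLClassLoader {
  private final ClassLoader delegate;
  private final Set<String> shared;

  public AllowListClassLoader(URL[] urls, ClassLoader parent, Set<String> shared) {
    super(urls, null);            // parent == null: do not delegate by default
    this.delegate = parent;
    this.shared = shared;
  }

  @Override
  public Class<?> loadClass(String name) throws ClassNotFoundException {
    // Shared types (and logging) come from the application classloader so
    // instances can cross the boundary without ClassCastExceptions.
    if (shared.contains(name) || name.startsWith("org.slf4j")) {
      return delegate.loadClass(name);
    }
    return super.loadClass(name); // everything else is loaded in isolation
  }
}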
- */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.net.URI; - -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.DtFetcher; -import org.apache.hadoop.security.token.Token; - - -/** - * A DT fetcher for OzoneFileSystem. - * It is only needed for the `hadoop dtutil` command. - */ -public class O3fsDtFetcher implements DtFetcher { - private static final Logger LOG = - LoggerFactory.getLogger(O3fsDtFetcher.class); - - private static final String SERVICE_NAME = OzoneConsts.OZONE_URI_SCHEME; - - private static final String FETCH_FAILED = - "Fetch ozone delegation token failed"; - - /** - * Returns the service name for O3fs, which is also a valid URL prefix. - */ - public Text getServiceName() { - return new Text(SERVICE_NAME); - } - - public boolean isTokenRequired() { - return UserGroupInformation.isSecurityEnabled(); - } - - /** - * Returns Token object via FileSystem, null if bad argument. - * @param conf - a Configuration object used with FileSystem.get() - * @param creds - a Credentials object to which token(s) will be added - * @param renewer - the renewer to send with the token request - * @param url - the URL to which the request is sent - * @return a Token, or null if fetch fails. - */ - public Token addDelegationTokens(Configuration conf, Credentials creds, - String renewer, String url) throws Exception { - if (!url.startsWith(getServiceName().toString())) { - url = getServiceName().toString() + "://" + url; - } - LOG.debug("addDelegationTokens from {} renewer {}.", url, renewer); - FileSystem fs = FileSystem.get(URI.create(url), conf); - Token token = fs.getDelegationToken(renewer); - if (token == null) { - LOG.error(FETCH_FAILED); - throw new IOException(FETCH_FAILED); - } - creds.addToken(token.getService(), token); - return token; - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java deleted file mode 100644 index 4163c135da0..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
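To make the role of a DtFetcher concrete, here is a hedged sketch of the equivalent direct call path: obtaining a delegation token from a FileSystem and adding it to a Credentials object, which is essentially what addDelegationTokens() above does on behalf of `hadoop dtutil`. The o3fs URI, host, port and renewer are placeholders.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public final class FetchTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder o3fs URI: bucket "b", volume "v", Ozone Manager "om-host".
    FileSystem fs = FileSystem.get(URI.create("o3fs://b.v.om-host:9862/"), conf);

    Credentials creds = new Credentials();
    Token<?> token = fs.getDelegationToken("yarn"); // renewer is a placeholder
    if (token != null) {
      creds.addToken(token.getService(), token);    // same step addDelegationTokens() performs
    }
    System.out.println("tokens: " + creds.numberOfTokens());
  }
}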
- */ - -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.ozone.OzoneConsts; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; - -/** - * ozone implementation of AbstractFileSystem. - * This impl delegates to the OzoneFileSystem - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public class OzFs extends DelegateToFileSystem { - - public OzFs(URI theUri, Configuration conf) - throws IOException, URISyntaxException { - super(theUri, new OzoneFileSystem(), conf, - OzoneConsts.OZONE_URI_SCHEME, false); - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java deleted file mode 100644 index 0ae8c8ff1d8..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.util.Iterator; -import java.util.List; - -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.token.Token; - -/** - * Lightweight adapter to separate hadoop/ozone classes. - *

- * This class contains only the bare minimum Ozone classes in the signature. - * It could be loaded by a different classloader because only the objects in - * the method signatures should be shared between the classloader. - */ -public interface OzoneClientAdapter { - - void close() throws IOException; - - InputStream readFile(String key) throws IOException; - - OzoneFSOutputStream createFile(String key, boolean overWrite, - boolean recursive) throws IOException; - - void renameKey(String key, String newKeyName) throws IOException; - - boolean createDirectory(String keyName) throws IOException; - - boolean deleteObject(String keyName); - - Iterator listKeys(String pathKey); - - List listStatus(String keyName, boolean recursive, - String startKey, long numEntries, URI uri, - Path workingDir, String username) throws IOException; - - Token getDelegationToken(String renewer) - throws IOException; - - KeyProvider getKeyProvider() throws IOException; - - URI getKeyProviderUri() throws IOException; - - String getCanonicalServiceName(); - - FileStatusAdapter getFileStatus(String key, URI uri, - Path qualifiedPath, String userName) throws IOException; - -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java deleted file mode 100644 index fee4298fed6..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterFactory.java +++ /dev/null @@ -1,170 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.lang.reflect.InvocationTargetException; -import java.net.MalformedURLException; -import java.net.URL; -import java.util.ArrayList; -import java.util.Enumeration; -import java.util.List; - -import org.apache.hadoop.fs.StorageStatistics; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Creates OzoneClientAdapter with classloader separation. - */ -public final class OzoneClientAdapterFactory { - - static final Logger LOG = - LoggerFactory.getLogger(OzoneClientAdapterFactory.class); - - private OzoneClientAdapterFactory() { - } - - @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED") - public static OzoneClientAdapter createAdapter( - String volumeStr, - String bucketStr) throws IOException { - return createAdapter(volumeStr, bucketStr, true, - (aClass) -> (OzoneClientAdapter) aClass - .getConstructor(String.class, String.class) - .newInstance( - volumeStr, - bucketStr)); - } - - @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED") - public static OzoneClientAdapter createAdapter( - String volumeStr, - String bucketStr, - StorageStatistics storageStatistics) throws IOException { - return createAdapter(volumeStr, bucketStr, false, - (aClass) -> (OzoneClientAdapter) aClass - .getConstructor(String.class, String.class, - OzoneFSStorageStatistics.class) - .newInstance( - volumeStr, - bucketStr, - storageStatistics)); - } - - @SuppressFBWarnings("DP_CREATE_CLASSLOADER_INSIDE_DO_PRIVILEGED") - public static OzoneClientAdapter createAdapter( - String volumeStr, - String bucketStr, - boolean basic, - OzoneClientAdapterCreator creator) throws IOException { - - ClassLoader currentClassLoader = - OzoneClientAdapterFactory.class.getClassLoader(); - List urls = new ArrayList<>(); - - findEmbeddedLibsUrl(urls, currentClassLoader); - - findConfigDirUrl(urls, currentClassLoader); - - ClassLoader classLoader = - new FilteredClassLoader(urls.toArray(new URL[0]), currentClassLoader); - - try { - - ClassLoader contextClassLoader = - Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(classLoader); - - //this class caches the context classloader during the first load - //call it here when the context class loader is set to the isoloated - //loader to make sure the grpc class will be loaded by the right - //loader - Class reflectionUtils = - classLoader.loadClass("org.apache.ratis.util.ReflectionUtils"); - reflectionUtils.getMethod("getClassByName", String.class) - .invoke(null, "org.apache.ratis.grpc.GrpcFactory"); - - Class adapterClass = null; - if (basic) { - adapterClass = classLoader - .loadClass( - "org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl"); - } else { - adapterClass = classLoader - .loadClass( - "org.apache.hadoop.fs.ozone.OzoneClientAdapterImpl"); - } - OzoneClientAdapter ozoneClientAdapter = - creator.createOzoneClientAdapter(adapterClass); - - Thread.currentThread().setContextClassLoader(contextClassLoader); - - return ozoneClientAdapter; - } catch (Exception e) { - LOG.error("Can't initialize the ozoneClientAdapter", e); - throw new 
IOException( - "Can't initialize the OzoneClientAdapter implementation", e); - } - - } - - private static void findConfigDirUrl(List urls, - ClassLoader currentClassLoader) throws IOException { - Enumeration conf = - currentClassLoader.getResources("ozone-site.xml"); - while (conf.hasMoreElements()) { - urls.add( - new URL( - conf.nextElement().toString().replace("ozone-site.xml", ""))); - - } - } - - private static void findEmbeddedLibsUrl(List urls, - ClassLoader currentClassloader) - throws MalformedURLException { - - //marker file is added to the jar to make it easier to find the URL - // for the current jar. - String markerFile = "ozonefs.txt"; - ClassLoader currentClassLoader = - OzoneClientAdapterFactory.class.getClassLoader(); - - URL ozFs = currentClassLoader - .getResource(markerFile); - String rootPath = ozFs.toString().replace(markerFile, ""); - urls.add(new URL(rootPath)); - - urls.add(new URL(rootPath + "libs/")); - - } - - /** - * Interface to create OzoneClientAdapter implementation with reflection. - */ - @FunctionalInterface - interface OzoneClientAdapterCreator { - OzoneClientAdapter createOzoneClientAdapter(Class clientAdapter) - throws NoSuchMethodException, IllegalAccessException, - InvocationTargetException, InstantiationException; - } - -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java deleted file mode 100644 index 975bbf7f4ff..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -/** - * Implementation of the OzoneFileSystem calls. - */ -public class OzoneClientAdapterImpl extends BasicOzoneClientAdapterImpl { - - private OzoneFSStorageStatistics storageStatistics; - - public OzoneClientAdapterImpl(String volumeStr, String bucketStr, - OzoneFSStorageStatistics storageStatistics) - throws IOException { - super(volumeStr, bucketStr); - this.storageStatistics = storageStatistics; - } - - public OzoneClientAdapterImpl( - OzoneConfiguration conf, String volumeStr, String bucketStr, - OzoneFSStorageStatistics storageStatistics) - throws IOException { - super(conf, volumeStr, bucketStr); - this.storageStatistics = storageStatistics; - } - - public OzoneClientAdapterImpl(String omHost, int omPort, - Configuration hadoopConf, String volumeStr, String bucketStr, - OzoneFSStorageStatistics storageStatistics) - throws IOException { - super(omHost, omPort, hadoopConf, volumeStr, bucketStr); - this.storageStatistics = storageStatistics; - } - - @Override - protected void incrementCounter(Statistic objectsRead) { - if (storageStatistics != null) { - storageStatistics.incrementCounter(objectsRead, 1); - } - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java deleted file mode 100644 index 909b2aff9b2..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.io.InputStream; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FSInputStream; -import org.apache.hadoop.fs.Seekable; - -/** - * The input stream for Ozone file system. - * - * TODO: Make inputStream generic for both rest and rpc clients - * This class is not thread safe. 
- */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class OzoneFSInputStream extends FSInputStream { - - private final InputStream inputStream; - - public OzoneFSInputStream(InputStream inputStream) { - this.inputStream = inputStream; - } - - @Override - public int read() throws IOException { - return inputStream.read(); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return inputStream.read(b, off, len); - } - - @Override - public synchronized void close() throws IOException { - inputStream.close(); - } - - @Override - public void seek(long pos) throws IOException { - ((Seekable) inputStream).seek(pos); - } - - @Override - public long getPos() throws IOException { - return ((Seekable) inputStream).getPos(); - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } - - @Override - public int available() throws IOException { - return inputStream.available(); - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java deleted file mode 100644 index efbf93beb5a..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.io.OutputStream; - - -/** - * The output stream for Ozone file system. - * - * TODO: Make outputStream generic for both rest and rpc clients - * This class is not thread safe. - */ -public class OzoneFSOutputStream extends OutputStream { - - private final OutputStream outputStream; - - public OzoneFSOutputStream(OutputStream outputStream) { - this.outputStream = outputStream; - } - - @Override - public void write(int b) throws IOException { - outputStream.write(b); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); - } - - @Override - public synchronized void flush() throws IOException { - outputStream.flush(); - } - - @Override - public synchronized void close() throws IOException { - outputStream.close(); - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java deleted file mode 100644 index 56c95df9c69..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSStorageStatistics.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.StorageStatistics; -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Iterator; -import java.util.Map; -import java.util.EnumMap; -import java.util.Collections; -import java.util.NoSuchElementException; -import java.util.concurrent.atomic.AtomicLong; - -/** - * Storage statistics for OzoneFileSystem. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class OzoneFSStorageStatistics extends StorageStatistics - implements Iterable { - private static final Logger LOG = - LoggerFactory.getLogger(OzoneFSStorageStatistics.class); - - public static final String NAME = "OzoneFSStorageStatistics"; - private final Map opsCount = - new EnumMap<>(Statistic.class); - - public OzoneFSStorageStatistics() { - super(NAME); - for (Statistic opType : Statistic.values()) { - opsCount.put(opType, new AtomicLong(0)); - } - } - - /** - * Increment a specific counter. - * @param op operation - * @param count increment value - * @return the new value - */ - public long incrementCounter(Statistic op, long count) { - long updated = opsCount.get(op).addAndGet(count); - LOG.debug("{} += {} -> {}", op, count, updated); - return updated; - } - - private class LongIterator implements Iterator { - private Iterator> iterator = - Collections.unmodifiableSet(opsCount.entrySet()).iterator(); - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public LongStatistic next() { - if (!iterator.hasNext()) { - throw new NoSuchElementException(); - } - final Map.Entry entry = iterator.next(); - return new LongStatistic(entry.getKey().getSymbol(), - entry.getValue().get()); - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - } - - @Override - public String getScheme() { - return OzoneConsts.OZONE_URI_SCHEME; - } - - @Override - public Iterator getLongStatistics() { - return new LongIterator(); - } - - @Override - public Iterator iterator() { - return getLongStatistics(); - } - - @Override - public Long getLong(String key) { - final Statistic type = Statistic.fromSymbol(key); - return type == null ? 
null : opsCount.get(type).get(); - } - - @Override - public boolean isTracked(String key) { - return Statistic.fromSymbol(key) != null; - } - - @Override - public void reset() { - for (AtomicLong value : opsCount.values()) { - value.set(0); - } - } - -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java deleted file mode 100644 index 0514bd728b7..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.net.URI; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.GlobalStorageStatistics; -import org.apache.hadoop.fs.StorageStatistics; -import org.apache.hadoop.security.token.DelegationTokenIssuer; - -/** - * The Ozone Filesystem implementation. - *

- * This subclass is marked as private as code should not be creating it - * directly; use {@link FileSystem#get(Configuration)} and variants to create - * one. If cast to {@link OzoneFileSystem}, extra methods and features may be - * accessed. Consider those private and unstable. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class OzoneFileSystem extends BasicOzoneFileSystem - implements KeyProviderTokenIssuer { - - private OzoneFSStorageStatistics storageStatistics; - - @Override - public KeyProvider getKeyProvider() throws IOException { - return getAdapter().getKeyProvider(); - } - - @Override - public URI getKeyProviderUri() throws IOException { - return getAdapter().getKeyProviderUri(); - } - - @Override - public DelegationTokenIssuer[] getAdditionalTokenIssuers() - throws IOException { - KeyProvider keyProvider; - try { - keyProvider = getKeyProvider(); - } catch (IOException ioe) { - LOG.debug("Error retrieving KeyProvider.", ioe); - return null; - } - if (keyProvider instanceof DelegationTokenIssuer) { - return new DelegationTokenIssuer[]{(DelegationTokenIssuer)keyProvider}; - } - return null; - } - - StorageStatistics getOzoneFSOpsCountStatistics() { - return storageStatistics; - } - - @Override - protected void incrementCounter(Statistic statistic) { - if (storageStatistics != null) { - storageStatistics.incrementCounter(statistic, 1); - } - } - - @Override - protected OzoneClientAdapter createAdapter(Configuration conf, - String bucketStr, - String volumeStr, String omHost, int omPort, - boolean isolatedClassloader) throws IOException { - - this.storageStatistics = - (OzoneFSStorageStatistics) GlobalStorageStatistics.INSTANCE - .put(OzoneFSStorageStatistics.NAME, - OzoneFSStorageStatistics::new); - - if (isolatedClassloader) { - return OzoneClientAdapterFactory.createAdapter(volumeStr, bucketStr, - storageStatistics); - - } else { - return new OzoneClientAdapterImpl(omHost, omPort, conf, - volumeStr, bucketStr, storageStatistics); - } - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java deleted file mode 100644 index e3d87421212..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
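As the class comment above notes, OzoneFileSystem is normally obtained through FileSystem.get() rather than constructed directly. A hedged usage sketch follows; the volume, bucket and host names are placeholders, and it assumes the o3fs implementation is registered on the classpath (e.g. via the usual fs.<scheme>.impl configuration).

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class OzoneFsUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder bucket "b", volume "v", Ozone Manager host "om-host".
    URI uri = URI.create("o3fs://b.v.om-host/");

    try (FileSystem fs = FileSystem.get(uri, conf)) {
      Path file = new Path("/dir/key1");
      try (FSDataOutputStream out = fs.create(file, true)) { // overwrite = true
        out.writeBytes("hello ozone\n");
      }
      System.out.println(fs.getFileStatus(file).getLen());
    }
  }
}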
- */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.shell.CommandFactory; -import org.apache.hadoop.fs.shell.FsCommand; -import org.apache.hadoop.util.ToolRunner; - -/** Provide command line access to a Ozone FileSystem. */ -@InterfaceAudience.Private -public class OzoneFsShell extends FsShell { - - private final String ozoneUsagePrefix = "Usage: ozone fs [generic options]"; - - /** - * Default ctor with no configuration. Be sure to invoke - * {@link #setConf(Configuration)} with a valid configuration prior - * to running commands. - */ - public OzoneFsShell() { - this(null); - } - - /** - * Construct a OzoneFsShell with the given configuration. - * - * Commands can be executed via {@link #run(String[])} - * @param conf the hadoop configuration - */ - public OzoneFsShell(Configuration conf) { - super(conf); - } - - protected void registerCommands(CommandFactory factory) { - // TODO: DFSAdmin subclasses FsShell so need to protect the command - // registration. This class should morph into a base class for - // commands, and then this method can be abstract - if (this.getClass().equals(OzoneFsShell.class)) { - factory.registerCommands(FsCommand.class); - } - } - - @Override - protected String getUsagePrefix() { - return ozoneUsagePrefix; - } - - /** - * Main entry point to execute fs commands. - * - * @param argv the command and its arguments - * @throws Exception upon error - */ - public static void main(String[] argv) throws Exception { - OzoneFsShell shell = newShellInstance(); - Configuration conf = new Configuration(); - conf.setQuietMode(false); - shell.setConf(conf); - int res; - try { - res = ToolRunner.run(shell, argv); - } finally { - shell.close(); - } - System.exit(res); - } - - // TODO: this should be abstract in a base class - protected static OzoneFsShell newShellInstance() { - return new OzoneFsShell(); - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java deleted file mode 100644 index 136d999859e..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/Statistic.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames; - -import java.util.HashMap; -import java.util.Map; - -/** - * Statistic which are collected in OzoneFileSystem. 
- * These statistics are available at a low level in - * {@link OzoneFSStorageStatistics} - */ -public enum Statistic { - OBJECTS_RENAMED("objects_renamed", - "Total number of objects renamed within the object store."), - OBJECTS_CREATED("objects_created", - "Total number of objects created through the object store."), - OBJECTS_DELETED("objects_deleted", - "Total number of objects deleted from the object store."), - OBJECTS_READ("objects_read", - "Total number of objects read from the object store."), - OBJECTS_QUERY("objects_query", - "Total number of objects queried from the object store."), - OBJECTS_LIST("objects_list", - "Total number of object list query from the object store."), - INVOCATION_COPY_FROM_LOCAL_FILE(CommonStatisticNames.OP_COPY_FROM_LOCAL_FILE, - "Calls of copyFromLocalFile()"), - INVOCATION_CREATE(CommonStatisticNames.OP_CREATE, - "Calls of create()"), - INVOCATION_CREATE_NON_RECURSIVE(CommonStatisticNames.OP_CREATE_NON_RECURSIVE, - "Calls of createNonRecursive()"), - INVOCATION_DELETE(CommonStatisticNames.OP_DELETE, - "Calls of delete()"), - INVOCATION_EXISTS(CommonStatisticNames.OP_EXISTS, - "Calls of exists()"), - INVOCATION_GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM, - "Calls of getFileChecksum()"), - INVOCATION_GET_FILE_STATUS(CommonStatisticNames.OP_GET_FILE_STATUS, - "Calls of getFileStatus()"), - INVOCATION_GLOB_STATUS(CommonStatisticNames.OP_GLOB_STATUS, - "Calls of globStatus()"), - INVOCATION_IS_DIRECTORY(CommonStatisticNames.OP_IS_DIRECTORY, - "Calls of isDirectory()"), - INVOCATION_IS_FILE(CommonStatisticNames.OP_IS_FILE, - "Calls of isFile()"), - INVOCATION_LIST_FILES(CommonStatisticNames.OP_LIST_FILES, - "Calls of listFiles()"), - INVOCATION_LIST_LOCATED_STATUS(CommonStatisticNames.OP_LIST_LOCATED_STATUS, - "Calls of listLocatedStatus()"), - INVOCATION_LIST_STATUS(CommonStatisticNames.OP_LIST_STATUS, - "Calls of listStatus()"), - INVOCATION_MKDIRS(CommonStatisticNames.OP_MKDIRS, - "Calls of mkdirs()"), - INVOCATION_OPEN(CommonStatisticNames.OP_OPEN, - "Calls of open()"), - INVOCATION_RENAME(CommonStatisticNames.OP_RENAME, - "Calls of rename()"); - - private static final Map SYMBOL_MAP = - new HashMap<>(Statistic.values().length); - static { - for (Statistic stat : values()) { - SYMBOL_MAP.put(stat.getSymbol(), stat); - } - } - - Statistic(String symbol, String description) { - this.symbol = symbol; - this.description = description; - } - - private final String symbol; - private final String description; - - public String getSymbol() { - return symbol; - } - - /** - * Get a statistic from a symbol. - * @param symbol statistic to look up - * @return the value or null. - */ - public static Statistic fromSymbol(String symbol) { - return SYMBOL_MAP.get(symbol); - } - - public String getDescription() { - return description; - } - - /** - * The string value is simply the symbol. - * This makes this operation very low cost. - * @return the symbol of this statistic. - */ - @Override - public String toString() { - return symbol; - } -} diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java deleted file mode 100644 index 93e82c30229..00000000000 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/package-info.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
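A small hedged sketch of how these symbols are typically consumed: incrementing a counter on the statistics object and reading it back by symbol, mirroring the incrementCounter() and getLong() methods shown earlier. The class name of the sketch is invented.

public final class StatisticSketch {
  public static void main(String[] args) {
    OzoneFSStorageStatistics stats = new OzoneFSStorageStatistics();

    // Record two rename invocations, as BasicOzoneFileSystem.rename() would.
    stats.incrementCounter(Statistic.INVOCATION_RENAME, 1);
    stats.incrementCounter(Statistic.INVOCATION_RENAME, 1);

    // Look the value up again by its common statistic symbol.
    String symbol = Statistic.INVOCATION_RENAME.getSymbol();
    System.out.println(symbol + " = " + stats.getLong(symbol)); // prints 2
  }
}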
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Ozone Filesystem. - * - * Except for the exceptions, it should all be hidden as implementation details. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; \ No newline at end of file diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher deleted file mode 100644 index 6e867319c17..00000000000 --- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.DtFetcher +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -org.apache.hadoop.fs.ozone.O3fsDtFetcher diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier deleted file mode 100644 index e0292bc2d80..00000000000 --- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -org.apache.hadoop.ozone.security.OzoneTokenIdentifier -org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier \ No newline at end of file diff --git a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer deleted file mode 100644 index bbb82219aca..00000000000 --- a/hadoop-ozone/ozonefs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ /dev/null @@ -1,19 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -org.apache.hadoop.fs.ozone.BasicOzoneClientAdapterImpl$Renewer diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java deleted file mode 100644 index 26a77eb2e1c..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestFilteredClassLoader.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.net.URL; -import java.util.ArrayList; -import java.util.List; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.mockito.PowerMockito; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.when; - -/** - * FilteredClassLoader test using mocks. 
- */ -@RunWith(PowerMockRunner.class) -@PrepareForTest({ FilteredClassLoader.class, OzoneFSInputStream.class}) -public class TestFilteredClassLoader { - @Test - public void testFilteredClassLoader() { - PowerMockito.mockStatic(System.class); - when(System.getenv("HADOOP_OZONE_DELEGATED_CLASSES")) - .thenReturn("org.apache.hadoop.fs.ozone.OzoneFSInputStream"); - - ClassLoader currentClassLoader = - TestFilteredClassLoader.class.getClassLoader(); - - List urls = new ArrayList<>(); - ClassLoader classLoader = new FilteredClassLoader( - urls.toArray(new URL[0]), currentClassLoader); - - try { - classLoader.loadClass( - "org.apache.hadoop.fs.ozone.OzoneFSInputStream"); - ClassLoader expectedClassLoader = - OzoneFSInputStream.class.getClassLoader(); - assertEquals(expectedClassLoader, currentClassLoader); - } catch (ClassNotFoundException e) { - e.printStackTrace(); - } - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java deleted file mode 100644 index 2e9e3a4d41e..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.util.Arrays; - -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; - -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -/** - * Test OzoneFSInputStream by reading through multiple interfaces. - */ -public class TestOzoneFSInputStream { - private static MiniOzoneCluster cluster = null; - private static FileSystem fs; - private static Path filePath = null; - private static byte[] data = null; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeClass - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 10, - StorageUnit.MB); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10) - .build(); - cluster.waitForClusterToBeReady(); - - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - - // Fetch the host and port for File System init - DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails(); - - // Set the fs.defaultFS and start the filesystem - String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri); - fs = FileSystem.get(conf); - int fileLen = 100 * 1024 * 1024; - data = DFSUtil.string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen)); - filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5)); - try (FSDataOutputStream stream = fs.create(filePath)) { - stream.write(data); - } - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterClass - public static void shutdown() throws IOException { - fs.close(); - cluster.shutdown(); - } - - @Test - public void testO3FSSingleByteRead() throws IOException { - FSDataInputStream inputStream = fs.open(filePath); - byte[] value = new byte[data.length]; - int i = 0; - while(true) { - int val = inputStream.read(); - if (val == -1) { - break; - } - value[i] = (byte)val; - Assert.assertEquals("value mismatch at:" + i, value[i], data[i]); - i++; - } - Assert.assertEquals(i, data.length); - Assert.assertTrue(Arrays.equals(value, data)); - inputStream.close(); - } - - @Test - public void testO3FSMultiByteRead() throws IOException { - FSDataInputStream inputStream = fs.open(filePath); - byte[] value = new byte[data.length]; - byte[] tmp = new byte[1* 1024 *1024]; - int i = 0; - while(true) { - int val = inputStream.read(tmp); - if (val == -1) { - break; - } - System.arraycopy(tmp, 0, value, i * tmp.length, tmp.length); - i++; - } - Assert.assertEquals(i * tmp.length, data.length); - Assert.assertTrue(Arrays.equals(value, data)); - inputStream.close(); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java deleted file mode 100644 index 2a7210103f6..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.net.URI; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; - -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.GlobalStorageStatistics; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.StorageStatistics; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Time; - -import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; -import org.junit.After; -import org.junit.Assert; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; - -/** - * Test OzoneFileSystem Interfaces. - * - * This test will test the various interfaces i.e. - * create, read, write, getFileStatus - */ -@RunWith(Parameterized.class) -public class TestOzoneFileInterfaces { - - private String rootPath; - private String userName; - - /** - * Parameter class to set absolute url/defaultFS handling. - *

- * Hadoop file systems could be used in multiple ways: Using the defaultfs - * and file path without the schema, or use absolute url-s even with - * different defaultFS. This parameter matrix would test both the use cases. - */ - @Parameters - public static Collection data() { - return Arrays.asList(new Object[][] {{false, true}, {true, false}}); - } - - private boolean setDefaultFs; - - private boolean useAbsolutePath; - - private MiniOzoneCluster cluster = null; - - private FileSystem fs; - - private OzoneFileSystem o3fs; - - private String volumeName; - - private String bucketName; - - private OzoneFSStorageStatistics statistics; - - private OMMetrics omMetrics; - - public TestOzoneFileInterfaces(boolean setDefaultFs, - boolean useAbsolutePath) { - this.setDefaultFs = setDefaultFs; - this.useAbsolutePath = useAbsolutePath; - GlobalStorageStatistics.INSTANCE.reset(); - } - - @Before - public void init() throws Exception { - volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); - bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); - - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = - TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); - - rootPath = String - .format("%s://%s.%s/", OzoneConsts.OZONE_URI_SCHEME, bucketName, - volumeName); - if (setDefaultFs) { - // Set the fs.defaultFS and start the filesystem - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - fs = FileSystem.get(conf); - } else { - fs = FileSystem.get(new URI(rootPath + "/test.txt"), conf); - } - o3fs = (OzoneFileSystem) fs; - statistics = (OzoneFSStorageStatistics) o3fs.getOzoneFSOpsCountStatistics(); - omMetrics = cluster.getOzoneManager().getMetrics(); - } - - @After - public void teardown() throws IOException { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.closeQuietly(fs); - } - - @Test - public void testFileSystemInit() throws IOException { - if (setDefaultFs) { - assertTrue( - "The initialized file system is not OzoneFileSystem but " + - fs.getClass(), - fs instanceof OzoneFileSystem); - assertEquals(OzoneConsts.OZONE_URI_SCHEME, fs.getUri().getScheme()); - assertEquals(OzoneConsts.OZONE_URI_SCHEME, statistics.getScheme()); - } - } - - @Test - public void testOzFsReadWrite() throws IOException { - long currentTime = Time.now(); - int stringLen = 20; - String data = RandomStringUtils.randomAlphanumeric(stringLen); - String filePath = RandomStringUtils.randomAlphanumeric(5); - Path path = createPath("/" + filePath); - try (FSDataOutputStream stream = fs.create(path)) { - stream.writeBytes(data); - } - - assertEquals(statistics.getLong( - StorageStatistics.CommonStatisticNames.OP_CREATE).longValue(), 1); - assertEquals(statistics.getLong("objects_created").longValue(), 1); - - FileStatus status = fs.getFileStatus(path); - assertEquals(statistics.getLong( - StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS).longValue(), - 1); - assertEquals(statistics.getLong("objects_query").longValue(), 1); - // The timestamp of the newly created file should always be greater than - // the time when the test was started - assertTrue("Modification time has not been recorded: " + status, - status.getModificationTime() > currentTime); - - assertFalse(status.isDirectory()); - assertEquals(FsPermission.getFileDefault(), 
status.getPermission()); - verifyOwnerGroup(status); - - try (FSDataInputStream inputStream = fs.open(path)) { - byte[] buffer = new byte[stringLen]; - // This read will not change the offset inside the file - int readBytes = inputStream.read(0, buffer, 0, buffer.length); - String out = new String(buffer, 0, buffer.length, UTF_8); - assertEquals(data, out); - assertEquals(readBytes, buffer.length); - assertEquals(0, inputStream.getPos()); - - // The following read will change the internal offset - readBytes = inputStream.read(buffer, 0, buffer.length); - assertEquals(data, out); - assertEquals(readBytes, buffer.length); - assertEquals(buffer.length, inputStream.getPos()); - } - assertEquals(statistics.getLong( - StorageStatistics.CommonStatisticNames.OP_OPEN).longValue(), 1); - assertEquals(statistics.getLong("objects_read").longValue(), 1); - } - - private void verifyOwnerGroup(FileStatus fileStatus) { - String owner = getCurrentUser(); - assertEquals(owner, fileStatus.getOwner()); - assertEquals(owner, fileStatus.getGroup()); - } - - - @Test - public void testDirectory() throws IOException { - String dirPath = RandomStringUtils.randomAlphanumeric(5); - Path path = createPath("/" + dirPath); - assertTrue("Makedirs returned with false for the path " + path, - fs.mkdirs(path)); - - FileStatus status = fs.getFileStatus(path); - assertTrue("The created path is not directory.", status.isDirectory()); - - assertTrue(status.isDirectory()); - assertEquals(FsPermission.getDirDefault(), status.getPermission()); - verifyOwnerGroup(status); - - assertEquals(0, status.getLen()); - - FileStatus[] statusList = fs.listStatus(createPath("/")); - assertEquals(1, statusList.length); - assertEquals(status, statusList[0]); - - fs.getFileStatus(createPath("/")); - assertTrue("Root dir (/) is not a directory.", status.isDirectory()); - assertEquals(0, status.getLen()); - } - - @Test - public void testListStatus() throws IOException { - List paths = new ArrayList<>(); - String dirPath = RandomStringUtils.randomAlphanumeric(5); - Path path = createPath("/" + dirPath); - paths.add(path); - assertTrue("Makedirs returned with false for the path " + path, - fs.mkdirs(path)); - - long listObjects = statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()); - long omListStatus = omMetrics.getNumListStatus(); - FileStatus[] statusList = fs.listStatus(createPath("/")); - assertEquals(1, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); - assertEquals(++omListStatus, omMetrics.getNumListStatus()); - assertEquals(fs.getFileStatus(path), statusList[0]); - - dirPath = RandomStringUtils.randomAlphanumeric(5); - path = createPath("/" + dirPath); - paths.add(path); - assertTrue("Makedirs returned with false for the path " + path, - fs.mkdirs(path)); - - statusList = fs.listStatus(createPath("/")); - assertEquals(2, statusList.length); - assertEquals(++listObjects, - statistics.getLong(Statistic.OBJECTS_LIST.getSymbol()).longValue()); - assertEquals(++omListStatus, omMetrics.getNumListStatus()); - for (Path p : paths) { - assertTrue(Arrays.asList(statusList).contains(fs.getFileStatus(p))); - } - } - - @Test - public void testOzoneManagerFileSystemInterface() throws IOException { - String dirPath = RandomStringUtils.randomAlphanumeric(5); - - Path path = createPath("/" + dirPath); - assertTrue("Makedirs returned with false for the path " + path, - fs.mkdirs(path)); - - long numFileStatus = - cluster.getOzoneManager().getMetrics().getNumGetFileStatus(); - 
FileStatus status = fs.getFileStatus(path); - - Assert.assertEquals(numFileStatus + 1, - cluster.getOzoneManager().getMetrics().getNumGetFileStatus()); - assertTrue(status.isDirectory()); - assertEquals(FsPermission.getDirDefault(), status.getPermission()); - verifyOwnerGroup(status); - - long currentTime = System.currentTimeMillis(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(o3fs.pathToKey(path)) - .build(); - OzoneFileStatus omStatus = - cluster.getOzoneManager().getFileStatus(keyArgs); - //Another get file status here, incremented the counter. - Assert.assertEquals(numFileStatus + 2, - cluster.getOzoneManager().getMetrics().getNumGetFileStatus()); - - assertTrue("The created path is not directory.", omStatus.isDirectory()); - - // For directories, the time returned is the current time. - assertEquals(0, omStatus.getLen()); - assertTrue(omStatus.getModificationTime() >= currentTime); - assertEquals(omStatus.getPath().getName(), o3fs.pathToKey(path)); - } - - @Test - public void testPathToKey() throws Exception { - - assertEquals("a/b/1", o3fs.pathToKey(new Path("/a/b/1"))); - - assertEquals("user/" + getCurrentUser() + "/key1/key2", - o3fs.pathToKey(new Path("key1/key2"))); - - assertEquals("key1/key2", - o3fs.pathToKey(new Path("o3fs://test1/key1/key2"))); - } - - private String getCurrentUser() { - try { - return UserGroupInformation.getCurrentUser().getShortUserName(); - } catch (IOException e) { - return OZONE_DEFAULT_USER; - } - } - - private Path createPath(String relativePath) { - if (useAbsolutePath) { - return new Path( - rootPath + (relativePath.startsWith("/") ? "" : "/") + relativePath); - } else { - return new Path(relativePath); - } - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java deleted file mode 100644 index 0dc7c994086..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.test.GenericTestUtils; - -import org.apache.commons.io.IOUtils; -import org.junit.After; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -/** - * Ozone file system tests that are not covered by contract tests. - */ -public class TestOzoneFileSystem { - - @Rule - public Timeout globalTimeout = new Timeout(300_000); - - private static MiniOzoneCluster cluster = null; - - private static FileSystem fs; - private static OzoneFileSystem o3fs; - - private String volumeName; - private String bucketName; - - private String rootPath; - - @Before - public void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - volumeName = bucket.getVolumeName(); - bucketName = bucket.getName(); - - rootPath = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); - - // Set the fs.defaultFS and start the filesystem - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - fs = FileSystem.get(conf); - o3fs = (OzoneFileSystem) fs; - } - - @After - public void teardown() throws IOException { - if (cluster != null) { - cluster.shutdown(); - } - IOUtils.closeQuietly(fs); - } - - @Test - public void testOzoneFsServiceLoader() throws IOException { - assertEquals( - FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null), - OzoneFileSystem.class); - } - - @Test - public void testCreateDoesNotAddParentDirKeys() throws Exception { - Path grandparent = new Path("/testCreateDoesNotAddParentDirKeys"); - Path parent = new Path(grandparent, "parent"); - Path child = new Path(parent, "child"); - ContractTestUtils.touch(fs, child); - - OzoneKeyDetails key = getKey(child, false); - assertEquals(key.getName(), o3fs.pathToKey(child)); - - // Creating a child should not add parent keys to the bucket - try { - getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); - } - - // List status on the parent should show the child file - assertEquals("List status of 
parent should include the 1 child file", 1L, - (long)fs.listStatus(parent).length); - assertTrue("Parent directory does not appear to be a directory", - fs.getFileStatus(parent).isDirectory()); - } - - @Test - public void testDeleteCreatesFakeParentDir() throws Exception { - Path grandparent = new Path("/testDeleteCreatesFakeParentDir"); - Path parent = new Path(grandparent, "parent"); - Path child = new Path(parent, "child"); - ContractTestUtils.touch(fs, child); - - // Verify that parent dir key does not exist - // Creating a child should not add parent keys to the bucket - try { - getKey(parent, true); - } catch (IOException ex) { - assertKeyNotFoundException(ex); - } - - // Delete the child key - fs.delete(child, false); - - // Deleting the only child should create the parent dir key if it does - // not exist - String parentKey = o3fs.pathToKey(parent) + "/"; - OzoneKeyDetails parentKeyInfo = getKey(parent, true); - assertEquals(parentKey, parentKeyInfo.getName()); - } - - @Test - public void testListStatus() throws Exception { - Path parent = new Path("/testListStatus"); - Path file1 = new Path(parent, "key1"); - Path file2 = new Path(parent, "key2"); - ContractTestUtils.touch(fs, file1); - ContractTestUtils.touch(fs, file2); - - - // ListStatus on a directory should return all subdirs along with - // files, even if there exists a file and sub-dir with the same name. - FileStatus[] fileStatuses = o3fs.listStatus(parent); - assertEquals("FileStatus did not return all children of the directory", - 2, fileStatuses.length); - - // ListStatus should return only the immediate children of a directory. - Path file3 = new Path(parent, "dir1/key3"); - Path file4 = new Path(parent, "dir1/key4"); - ContractTestUtils.touch(fs, file3); - ContractTestUtils.touch(fs, file4); - fileStatuses = o3fs.listStatus(parent); - assertEquals("FileStatus did not return all children of the directory", - 3, fileStatuses.length); - } - - /** - * Tests listStatus operation on root directory. - */ - @Test - public void testListStatusOnRoot() throws Exception { - Path root = new Path("/"); - Path dir1 = new Path(root, "dir1"); - Path dir12 = new Path(dir1, "dir12"); - Path dir2 = new Path(root, "dir2"); - fs.mkdirs(dir12); - fs.mkdirs(dir2); - - // ListStatus on root should return dir1 (even though /dir1 key does not - // exist) and dir2 only. dir12 is not an immediate child of root and - // hence should not be listed. - FileStatus[] fileStatuses = o3fs.listStatus(root); - assertEquals("FileStatus should return only the immediate children", 2, - fileStatuses.length); - - // Verify that dir12 is not included in the result of the listStatus on root - String fileStatus1 = fileStatuses[0].getPath().toUri().getPath(); - String fileStatus2 = fileStatuses[1].getPath().toUri().getPath(); - assertFalse(fileStatus1.equals(dir12.toString())); - assertFalse(fileStatus2.equals(dir12.toString())); - } - - /** - * Tests listStatus operation on root directory. 
- */ - @Test - public void testListStatusOnLargeDirectory() throws Exception { - Path root = new Path("/"); - Set paths = new TreeSet<>(); - int numDirs = 5111; - for(int i = 0; i < numDirs; i++) { - Path p = new Path(root, String.valueOf(i)); - fs.mkdirs(p); - paths.add(p.getName()); - } - - FileStatus[] fileStatuses = o3fs.listStatus(root); - assertEquals( - "Total directories listed do not match the existing directories", - numDirs, fileStatuses.length); - - for (int i=0; i < numDirs; i++) { - assertTrue(paths.contains(fileStatuses[i].getPath().getName())); - } - } - - /** - * Tests listStatus on a path with subdirs. - */ - @Test - public void testListStatusOnSubDirs() throws Exception { - // Create the following key structure - // /dir1/dir11/dir111 - // /dir1/dir12 - // /dir1/dir12/file121 - // /dir2 - // ListStatus on /dir1 should return all its immediated subdirs only - // which are /dir1/dir11 and /dir1/dir12. Super child files/dirs - // (/dir1/dir12/file121 and /dir1/dir11/dir111) should not be returned by - // listStatus. - Path dir1 = new Path("/dir1"); - Path dir11 = new Path(dir1, "dir11"); - Path dir111 = new Path(dir11, "dir111"); - Path dir12 = new Path(dir1, "dir12"); - Path file121 = new Path(dir12, "file121"); - Path dir2 = new Path("/dir2"); - fs.mkdirs(dir111); - fs.mkdirs(dir12); - ContractTestUtils.touch(fs, file121); - fs.mkdirs(dir2); - - FileStatus[] fileStatuses = o3fs.listStatus(dir1); - assertEquals("FileStatus should return only the immediate children", 2, - fileStatuses.length); - - // Verify that the two children of /dir1 returned by listStatus operation - // are /dir1/dir11 and /dir1/dir12. - String fileStatus1 = fileStatuses[0].getPath().toUri().getPath(); - String fileStatus2 = fileStatuses[1].getPath().toUri().getPath(); - assertTrue(fileStatus1.equals(dir11.toString()) || - fileStatus1.equals(dir12.toString())); - assertTrue(fileStatus2.equals(dir11.toString()) || - fileStatus2.equals(dir12.toString())); - } - - @Test - public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved() - throws Exception { - Path source = new Path("/source"); - Path interimPath = new Path(source, "interimPath"); - Path leafInsideInterimPath = new Path(interimPath, "leaf"); - Path target = new Path("/target"); - Path leafInTarget = new Path(target, "leaf"); - - fs.mkdirs(source); - fs.mkdirs(target); - fs.mkdirs(leafInsideInterimPath); - assertTrue(fs.rename(leafInsideInterimPath, leafInTarget)); - - // after rename listStatus for interimPath should succeed and - // interimPath should have no children - FileStatus[] statuses = fs.listStatus(interimPath); - assertNotNull("liststatus returns a null array", statuses); - assertEquals("Statuses array is not empty", 0, statuses.length); - FileStatus fileStatus = fs.getFileStatus(interimPath); - assertEquals("FileStatus does not point to interimPath", - interimPath.getName(), fileStatus.getPath().getName()); - } - - private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory) - throws IOException, OzoneClientException { - String key = o3fs.pathToKey(keyPath); - if (isDirectory) { - key = key + "/"; - } - return cluster.getClient().getObjectStore().getVolume(volumeName) - .getBucket(bucketName).getKey(key); - } - - private void assertKeyNotFoundException(IOException ex) { - GenericTestUtils.assertExceptionContains("KEY_NOT_FOUND", ex); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java 
b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java deleted file mode 100644 index 51fd3c8d835..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.net.URI; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.mockito.PowerMockito; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -/** - * Ozone File system tests that are light weight and use mocks. - */ -@RunWith(PowerMockRunner.class) -@PrepareForTest({ OzoneClientFactory.class, UserGroupInformation.class }) -public class TestOzoneFileSystemWithMocks { - - @Test - public void testFSUriWithHostPortOverrides() throws Exception { - Configuration conf = new OzoneConfiguration(); - OzoneClient ozoneClient = mock(OzoneClient.class); - ObjectStore objectStore = mock(ObjectStore.class); - OzoneVolume volume = mock(OzoneVolume.class); - OzoneBucket bucket = mock(OzoneBucket.class); - - when(ozoneClient.getObjectStore()).thenReturn(objectStore); - when(objectStore.getVolume(eq("volume1"))).thenReturn(volume); - when(volume.getBucket("bucket1")).thenReturn(bucket); - - PowerMockito.mockStatic(OzoneClientFactory.class); - PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"), - eq(5899), eq(conf))).thenReturn(ozoneClient); - - UserGroupInformation ugi = mock(UserGroupInformation.class); - PowerMockito.mockStatic(UserGroupInformation.class); - PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi); - when(ugi.getShortUserName()).thenReturn("user1"); - - URI uri = new URI("o3fs://bucket1.volume1.local.host:5899"); - - FileSystem fileSystem = FileSystem.get(uri, conf); - OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem; - - assertEquals(ozfs.getUri().getAuthority(), - "bucket1.volume1.local.host:5899"); - PowerMockito.verifyStatic(); - OzoneClientFactory.getRpcClient("local.host", 5899, conf); - } - - @Test - public void testFSUriWithHostPortUnspecified() throws Exception { - Configuration conf = new OzoneConfiguration(); - final int omPort = OmUtils.getOmRpcPort(conf); - - OzoneClient ozoneClient = mock(OzoneClient.class); - ObjectStore objectStore = mock(ObjectStore.class); - OzoneVolume volume = mock(OzoneVolume.class); - OzoneBucket bucket = mock(OzoneBucket.class); - - when(ozoneClient.getObjectStore()).thenReturn(objectStore); - when(objectStore.getVolume(eq("volume1"))).thenReturn(volume); - when(volume.getBucket("bucket1")).thenReturn(bucket); - - PowerMockito.mockStatic(OzoneClientFactory.class); - PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"), - eq(omPort), 
eq(conf))).thenReturn(ozoneClient); - - UserGroupInformation ugi = mock(UserGroupInformation.class); - PowerMockito.mockStatic(UserGroupInformation.class); - PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi); - when(ugi.getShortUserName()).thenReturn("user1"); - - URI uri = new URI("o3fs://bucket1.volume1.local.host"); - - FileSystem fileSystem = FileSystem.get(uri, conf); - OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem; - - assertEquals(ozfs.getUri().getHost(), "bucket1.volume1.local.host"); - // The URI doesn't contain a port number, expect -1 from getPort() - assertEquals(ozfs.getUri().getPort(), -1); - PowerMockito.verifyStatic(); - // Check the actual port number in use - OzoneClientFactory.getRpcClient("local.host", omPort, conf); - } - - @Test - public void testFSUriHostVersionDefault() throws Exception { - Configuration conf = new OzoneConfiguration(); - OzoneClient ozoneClient = mock(OzoneClient.class); - ObjectStore objectStore = mock(ObjectStore.class); - OzoneVolume volume = mock(OzoneVolume.class); - OzoneBucket bucket = mock(OzoneBucket.class); - - when(ozoneClient.getObjectStore()).thenReturn(objectStore); - when(objectStore.getVolume(eq("volume1"))).thenReturn(volume); - when(volume.getBucket("bucket1")).thenReturn(bucket); - - PowerMockito.mockStatic(OzoneClientFactory.class); - PowerMockito.when(OzoneClientFactory.getRpcClient(eq(conf))) - .thenReturn(ozoneClient); - - UserGroupInformation ugi = mock(UserGroupInformation.class); - PowerMockito.mockStatic(UserGroupInformation.class); - PowerMockito.when(UserGroupInformation.getCurrentUser()).thenReturn(ugi); - when(ugi.getShortUserName()).thenReturn("user1"); - - URI uri = new URI("o3fs://bucket1.volume1/key"); - - FileSystem fileSystem = FileSystem.get(uri, conf); - OzoneFileSystem ozfs = (OzoneFileSystem) fileSystem; - - assertEquals(ozfs.getUri().getAuthority(), "bucket1.volume1"); - PowerMockito.verifyStatic(); - OzoneClientFactory.getRpcClient(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java deleted file mode 100644 index ab351913d7a..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.fs.ozone; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.util.ToolRunner; -import org.apache.ratis.util.LifeCycle; -import org.hamcrest.core.StringContains; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.Collection; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsUtils.getHostName; -import static org.apache.hadoop.hdds.HddsUtils.getHostPort; - -/** - * Test client-side URI handling with Ozone Manager HA. - */ -public class TestOzoneFsHAURLs { - public static final Logger LOG = LoggerFactory.getLogger( - TestOzoneFsHAURLs.class); - - private OzoneConfiguration conf; - private MiniOzoneCluster cluster; - private String omId; - private String omServiceId; - private String clusterId; - private String scmId; - private OzoneManager om; - private int numOfOMs; - - private String volumeName; - private String bucketName; - private String rootPath; - - private final String o3fsImplKey = - "fs." 
+ OzoneConsts.OZONE_URI_SCHEME + ".impl"; - private final String o3fsImplValue = - "org.apache.hadoop.fs.ozone.OzoneFileSystem"; - - private static final long LEADER_ELECTION_TIMEOUT = 500L; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - omId = UUID.randomUUID().toString(); - omServiceId = "om-service-test1"; - numOfOMs = 3; - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - final String path = GenericTestUtils.getTempPath(omId); - java.nio.file.Path metaDirPath = java.nio.file.Paths.get(path, "om-meta"); - conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setTimeDuration( - OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - - OMStorage omStore = new OMStorage(conf); - omStore.setClusterId(clusterId); - omStore.setScmId(scmId); - // writes the version file properties - omStore.initialize(); - - // Start the cluster - cluster = MiniOzoneCluster.newHABuilder(conf) - .setClusterId(clusterId) - .setScmId(scmId) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - - om = cluster.getOzoneManager(); - Assert.assertEquals(LifeCycle.State.RUNNING, om.getOmRatisServerState()); - - volumeName = "volume" + RandomStringUtils.randomNumeric(5); - ObjectStore objectStore = - OzoneClientFactory.getRpcClient(omServiceId, conf).getObjectStore(); - objectStore.createVolume(volumeName); - - OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); - bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - retVolumeinfo.createBucket(bucketName); - - rootPath = String.format("%s://%s.%s.%s/", OzoneConsts.OZONE_URI_SCHEME, - bucketName, volumeName, omServiceId); - // Set fs.defaultFS - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - FileSystem fs = FileSystem.get(conf); - // Create some dirs - Path root = new Path("/"); - Path dir1 = new Path(root, "dir1"); - Path dir12 = new Path(dir1, "dir12"); - Path dir2 = new Path(root, "dir2"); - fs.mkdirs(dir12); - fs.mkdirs(dir2); - } - - @After - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - /** - * @return the leader OM's RPC address in the MiniOzoneHACluster - */ - private String getLeaderOMNodeAddr() { - String leaderOMNodeAddr = null; - Collection omNodeIds = OmUtils.getOMNodeIds(conf, omServiceId); - assert(omNodeIds.size() == numOfOMs); - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - // Note: this loop may be implemented inside MiniOzoneHAClusterImpl - for (String omNodeId : omNodeIds) { - // Find the leader OM - if (!haCluster.getOzoneManager(omNodeId).isLeader()) { - continue; - } - // ozone.om.address.omServiceId.omNode - String leaderOMNodeAddrKey = OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); - leaderOMNodeAddr = conf.get(leaderOMNodeAddrKey); - LOG.info("Found leader OM: nodeId=" + omNodeId + ", " + - leaderOMNodeAddrKey + "=" + leaderOMNodeAddr); - // Leader found, no need to continue loop - break; - } - // There has to be a leader - assert(leaderOMNodeAddr != null); - return leaderOMNodeAddr; - } - - /** - * Get host name from an address. This uses getHostName() internally. 
- * @param addr Address with port number - * @return Host name - */ - private String getHostFromAddress(String addr) { - Optional hostOptional = getHostName(addr); - assert(hostOptional.isPresent()); - return hostOptional.get(); - } - - /** - * Get port number from an address. This uses getHostPort() internally. - * @param addr Address with port - * @return Port number - */ - private int getPortFromAddress(String addr) { - Optional portOptional = getHostPort(addr); - assert(portOptional.isPresent()); - return portOptional.get(); - } - - /** - * Test OM HA URLs with qualified fs.defaultFS. - * @throws Exception - */ - @Test - public void testWithQualifiedDefaultFS() throws Exception { - OzoneConfiguration clientConf = new OzoneConfiguration(conf); - clientConf.setQuietMode(false); - clientConf.set(o3fsImplKey, o3fsImplValue); - // fs.defaultFS = o3fs://bucketName.volumeName.omServiceId/ - clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); - - // Pick leader OM's RPC address and assign it to ozone.om.address for - // the test case: ozone fs -ls o3fs://bucket.volume.om1/ - String leaderOMNodeAddr = getLeaderOMNodeAddr(); - // ozone.om.address was set to service id in MiniOzoneHAClusterImpl - clientConf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, leaderOMNodeAddr); - - FsShell shell = new FsShell(clientConf); - int res; - try { - // Test case 1: ozone fs -ls / - // Expectation: Success. - res = ToolRunner.run(shell, new String[] {"-ls", "/"}); - // Check return value, should be 0 (success) - Assert.assertEquals(res, 0); - - // Test case 2: ozone fs -ls o3fs:/// - // Expectation: Success. fs.defaultFS is a fully qualified path. - res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"}); - Assert.assertEquals(res, 0); - - // Test case 3: ozone fs -ls o3fs://bucket.volume/ - // Expectation: Fail. Must have service id or host name when HA is enabled - String unqualifiedPath1 = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); - try (GenericTestUtils.SystemErrCapturer capture = - new GenericTestUtils.SystemErrCapturer()) { - res = ToolRunner.run(shell, new String[] {"-ls", unqualifiedPath1}); - // Check stderr, inspired by testDFSWithInvalidCommmand - Assert.assertThat("Command did not print the error message " + - "correctly for test case: ozone fs -ls o3fs://bucket.volume/", - capture.getOutput(), StringContains.containsString( - "-ls: Service ID or host name must not" - + " be omitted when ozone.om.service.ids is defined.")); - } - // Check return value, should be -1 (failure) - Assert.assertEquals(res, -1); - - // Test case 4: ozone fs -ls o3fs://bucket.volume.om1/ - // Expectation: Success. The client should use the port number - // set in ozone.om.address. - String qualifiedPath1 = String.format("%s://%s.%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName, - getHostFromAddress(leaderOMNodeAddr)); - res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath1}); - // Note: this test case will fail if the port is not from the leader node - Assert.assertEquals(res, 0); - - // Test case 5: ozone fs -ls o3fs://bucket.volume.om1:port/ - // Expectation: Success. - String qualifiedPath2 = String.format("%s://%s.%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName, - leaderOMNodeAddr); - res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath2}); - Assert.assertEquals(res, 0); - - // Test case 6: ozone fs -ls o3fs://bucket.volume.id1/ - // Expectation: Success. 
- String qualifiedPath3 = String.format("%s://%s.%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName, omServiceId); - res = ToolRunner.run(shell, new String[] {"-ls", qualifiedPath3}); - Assert.assertEquals(res, 0); - - // Test case 7: ozone fs -ls o3fs://bucket.volume.id1:port/ - // Expectation: Fail. Service ID does not use port information. - // Use the port number from leader OM (doesn't really matter) - String unqualifiedPath2 = String.format("%s://%s.%s.%s:%d/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName, - omServiceId, getPortFromAddress(leaderOMNodeAddr)); - try (GenericTestUtils.SystemErrCapturer capture = - new GenericTestUtils.SystemErrCapturer()) { - res = ToolRunner.run(shell, new String[] {"-ls", unqualifiedPath2}); - // Check stderr - Assert.assertThat("Command did not print the error message " + - "correctly for test case: " - + "ozone fs -ls o3fs://bucket.volume.id1:port/", - capture.getOutput(), StringContains.containsString( - "does not use port information")); - } - // Check return value, should be -1 (failure) - Assert.assertEquals(res, -1); - } finally { - shell.close(); - } - } - - /** - * Helper function for testOtherDefaultFS(), - * run fs -ls o3fs:/// against different fs.defaultFS input. - * - * @param defaultFS Desired fs.defaultFS to be used in the test - * @throws Exception - */ - private void testWithDefaultFS(String defaultFS) throws Exception { - OzoneConfiguration clientConf = new OzoneConfiguration(conf); - clientConf.setQuietMode(false); - clientConf.set(o3fsImplKey, o3fsImplValue); - // fs.defaultFS = file:/// - clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, - defaultFS); - - FsShell shell = new FsShell(clientConf); - try { - // Test case: ozone fs -ls o3fs:/// - // Expectation: Fail. fs.defaultFS is not a qualified o3fs URI. - int res = ToolRunner.run(shell, new String[] {"-ls", "o3fs:///"}); - Assert.assertEquals(res, -1); - } finally { - shell.close(); - } - } - - /** - * Test OM HA URLs with some unqualified fs.defaultFS. - * @throws Exception - */ - @Test - public void testOtherDefaultFS() throws Exception { - // Test scenarios where fs.defaultFS isn't a fully qualified o3fs - - // fs.defaultFS = file:/// - testWithDefaultFS(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT); - - // fs.defaultFS = hdfs://ns1/ - testWithDefaultFS("hdfs://ns1/"); - - // fs.defaultFS = o3fs:/// - String unqualifiedFs1 = String.format( - "%s:///", OzoneConsts.OZONE_URI_SCHEME); - testWithDefaultFS(unqualifiedFs1); - - // fs.defaultFS = o3fs://bucketName.volumeName/ - String unqualifiedFs2 = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); - testWithDefaultFS(unqualifiedFs2); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java deleted file mode 100644 index 1d584651bf9..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsRenameDir.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone; - -import java.io.IOException; - -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; - -import org.junit.After; -import static org.junit.Assert.assertTrue; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Unit Test for verifying directory rename operation through OzoneFS. - */ -public class TestOzoneFsRenameDir { - public static final Logger LOG = LoggerFactory.getLogger( - TestOzoneFsRenameDir.class); - - private MiniOzoneCluster cluster = null; - private OzoneConfiguration conf = null; - private static FileSystem fs; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - - // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - - // Fetch the host and port for File System init - DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0) - .getDatanodeDetails(); - - // Set the fs.defaultFS and start the filesystem - String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri); - fs = FileSystem.get(conf); - LOG.info("fs.defaultFS=" + fs.getUri()); - } - - @After - public void teardown() { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } - - /** - * Tests directory rename opertion through OzoneFS. - */ - @Test(timeout=300_000) - public void testRenameDir() throws IOException { - final String dir = "/root_dir/dir1"; - final Path source = new Path(fs.getUri().toString() + dir); - final Path dest = new Path(source.toString() + ".renamed"); - // Add a sub-dir to the directory to be moved. - final Path subdir = new Path(source, "sub_dir1"); - fs.mkdirs(subdir); - LOG.info("Created dir {}", subdir); - LOG.info("Will move {} to {}", source, dest); - fs.rename(source, dest); - assertTrue("Directory rename failed", fs.exists(dest)); - // Verify that the subdir is also renamed i.e. keys corresponding to the - // sub-directories of the renamed directory have also been renamed. 
- assertTrue("Keys under the renamed direcotry not renamed", - fs.exists(new Path(dest, "sub_dir1"))); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java deleted file mode 100644 index dd5431584cc..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractCreateTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Ozone contract tests creating files. - */ -public class ITestOzoneContractCreate extends AbstractContractCreateTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java deleted file mode 100644 index f0a3d8d83eb..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Ozone contract tests covering deletes. - */ -public class ITestOzoneContractDelete extends AbstractContractDeleteTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java deleted file mode 100644 index 134a9adf316..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - - -/** - * Contract test suite covering S3A integration with DistCp. - * Uses the block output stream, buffered to disk. This is the - * recommended output mechanism for DistCP due to its scalability. - */ -public class ITestOzoneContractDistCp extends AbstractContractDistCpTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected OzoneContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java deleted file mode 100644 index 362b22f2831..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; - -/** - * Ozone contract tests covering getFileStatus. - */ -public class ITestOzoneContractGetFileStatus - extends AbstractContractGetFileStatusTest { - - private static final Logger LOG = - LoggerFactory.getLogger(ITestOzoneContractGetFileStatus.class); - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - - @Override - public void teardown() throws Exception { - LOG.info("FS details {}", getFileSystem()); - super.teardown(); - } - - @Override - protected Configuration createConfiguration() { - return super.createConfiguration(); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java deleted file mode 100644 index bc0de5dfb79..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Test dir operations on Ozone. 
- */ -public class ITestOzoneContractMkdir extends AbstractContractMkdirTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java deleted file mode 100644 index 0bc57d49a9c..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractOpenTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Ozone contract tests opening files. - */ -public class ITestOzoneContractOpen extends AbstractContractOpenTest { - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java deleted file mode 100644 index 8ce1d1b618d..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRenameTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Ozone contract tests covering rename. - */ -public class ITestOzoneContractRename extends AbstractContractRenameTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java deleted file mode 100644 index 3156eb2f888..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; - -import java.io.IOException; - -/** - * Ozone contract test for ROOT directory operations. - */ -public class ITestOzoneContractRootDir extends - AbstractContractRootDirectoryTest { - - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } - -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java deleted file mode 100644 index c4bc0ff119a..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.contract.AbstractContractSeekTest; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.io.IOException; - -/** - * Ozone contract tests covering file seek. - */ -public class ITestOzoneContractSeek extends AbstractContractSeekTest { - @BeforeClass - public static void createCluster() throws IOException { - OzoneContract.createCluster(); - } - - @AfterClass - public static void teardownCluster() throws IOException { - OzoneContract.destroyCluster(); - } - - @Override - protected AbstractFSContract createContract(Configuration conf) { - return new OzoneContract(conf); - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java deleted file mode 100644 index 56d63ac2f25..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.ozone.contract; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.contract.AbstractFSContract; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.om.OMConfigKeys; - -import org.junit.Assert; - -/** - * The contract of Ozone: only enabled if the test bucket is provided. 
- */ -class OzoneContract extends AbstractFSContract { - - private static MiniOzoneCluster cluster; - private static final String CONTRACT_XML = "contract/ozone.xml"; - - OzoneContract(Configuration conf) { - super(conf); - //insert the base features - addConfResource(CONTRACT_XML); - } - - @Override - public String getScheme() { - return OzoneConsts.OZONE_URI_SCHEME; - } - - @Override - public Path getTestPath() { - Path path = new Path("/test"); - return path; - } - - public static void createCluster() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.addResource(CONTRACT_XML); - - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); - try { - cluster.waitForClusterToBeReady(); - } catch (Exception e) { - throw new IOException(e); - } - } - - private void copyClusterConfigs(String configKey) { - getConf().set(configKey, cluster.getConf().get(configKey)); - } - - @Override - public FileSystem getTestFileSystem() throws IOException { - //assumes cluster is not null - Assert.assertNotNull("cluster not created", cluster); - - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster); - - String uri = String.format("%s://%s.%s/", - OzoneConsts.OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName()); - getConf().set("fs.defaultFS", uri); - copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY); - copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); - return FileSystem.get(getConf()); - } - - public static void destroyCluster() throws IOException { - if (cluster != null) { - cluster.shutdown(); - cluster = null; - } - } -} diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java deleted file mode 100644 index 51284c2db4a..00000000000 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Ozone FS Contract tests. - */ -package org.apache.hadoop.fs.ozone; \ No newline at end of file diff --git a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem deleted file mode 100644 index 03680027d53..00000000000 --- a/hadoop-ozone/ozonefs/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -org.apache.hadoop.fs.ozone.OzoneFileSystem diff --git a/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml b/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml deleted file mode 100644 index fe2075c30ae..00000000000 --- a/hadoop-ozone/ozonefs/src/test/resources/contract/ozone.xml +++ /dev/null @@ -1,113 +0,0 @@ - - - - - - - fs.contract.test.root-tests-enabled - true - - - - fs.contract.test.random-seek-count - 10 - - - - fs.contract.is-blobstore - true - - - - fs.contract.create-visibility-delayed - true - - - - fs.contract.is-case-sensitive - true - - - - fs.contract.rename-returns-false-if-source-missing - true - - - - fs.contract.rename-remove-dest-if-empty-dir - false - - - - fs.contract.supports-append - false - - - - fs.contract.supports-atomic-directory-delete - false - - - - fs.contract.supports-atomic-rename - false - - - - fs.contract.supports-block-locality - false - - - - fs.contract.supports-concat - false - - - - fs.contract.supports-getfilestatus - true - - - - fs.contract.supports-seek - true - - - - fs.contract.supports-seek-on-closed-file - true - - - - fs.contract.rejects-seek-past-eof - true - - - - fs.contract.supports-strict-exceptions - true - - - - fs.contract.supports-unix-permissions - false - - diff --git a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties deleted file mode 100644 index 8666dcf499d..00000000000 --- a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# log4j configuration used during build and unit tests - -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n - -log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR - -# Suppress info messages on every put key from Ratis -log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN - -# for debugging low level Ozone operations, uncomment this line -# log4j.logger.org.apache.hadoop.ozone=DEBUG diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml deleted file mode 100644 index 825e65ccf93..00000000000 --- a/hadoop-ozone/pom.xml +++ /dev/null @@ -1,414 +0,0 @@ - - - 4.0.0 - - org.apache.hadoop - hadoop-main-ozone - 0.5.0-SNAPSHOT - ../pom.ozone.xml - - hadoop-ozone - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Project - Apache Hadoop Ozone - pom - - - 0.5.0-SNAPSHOT - 0.5.0-SNAPSHOT - 0.5.0-201fc85-SNAPSHOT - 1.60 - Crater Lake - ${ozone.version} - 3.0.0-M1 - 4.0 - apache/ozone:${project.version} - - - common - client - ozone-manager - ozonefs - ozonefs-lib-current - ozonefs-lib-legacy - tools - integration-test - datanode - s3gateway - dist - recon - recon-codegen - upgrade - csi - fault-injection-test - insight - - - - - apache.snapshots.https - https://repository.apache.org/content/repositories/snapshots - - - - - - - - org.apache.hadoop - hadoop-ozone-common - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-client - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-ozone-manager - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-s3gateway - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-csi - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-datanode - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-tools - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-filesystem - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-filesystem-lib-current - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-filesystem-lib-legacy - ${ozone.version} - - - org.apache.hadoop - hadoop-hdds-config - ${hdds.version} - - - org.apache.hadoop - hadoop-ozone-integration-test - ${ozone.version} - test-jar - - - org.apache.hadoop - hadoop-ozone-ozone-manager - ${ozone.version} - test-jar - - - org.apache.hadoop - hadoop-hdds-common - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-server-framework - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-server-scm - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-docs - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-container-service - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-client - ${hdds.version} - - - org.apache.hadoop - hadoop-hdds-tools - ${hdds.version} - - - org.apache.hadoop - hadoop-ozone-insight - ${hdds.version} - - - org.apache.hadoop - hadoop-ozone-recon - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-upgrade - ${ozone.version} - - - 
org.apache.hadoop - hadoop-hdds-container-service - ${hdds.version} - test-jar - - - org.apache.hadoop - hadoop-hdds-server-scm - test-jar - ${hdds.version} - - - com.sun.xml.bind - jaxb-impl - 2.3.0.1 - - - com.sun.xml.bind - jaxb-core - 2.3.0.1 - - - javax.xml.bind - jaxb-api - 2.3.0 - - - javax.activation - activation - 1.1.1 - - - org.bouncycastle - bcprov-jdk15on - ${bouncycastle.version} - - - commons-lang - commons-lang - 2.6 - - - - - - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - depcheck - - - - false - - - - - - - - org.apache.rat - apache-rat-plugin - - - **/*.json - **/hs_err*.log - **/target/** - .gitattributes - .idea/** - dev-support/*tests - dev-support/checkstyle* - dev-support/jdiff/** - src/contrib/** - src/main/webapps/datanode/robots.txt - src/main/webapps/hdfs/robots.txt - src/main/webapps/journal/robots.txt - src/main/webapps/router/robots.txt - src/main/webapps/secondary/robots.txt - src/site/resources/images/* - src/test/all-tests - src/test/empty-file - src/test/resources/*.log - src/test/resources/*.tgz - src/test/resources/data* - src/test/resources/empty-file - src/test/resources/ssl/* - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-CLI.txt - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-ANT.txt - webapps/static/angular-1.6.4.min.js - webapps/static/angular-nvd3-1.0.9.min.js - webapps/static/angular-route-1.6.4.min.js - webapps/static/bootstrap-3.4.1/** - webapps/static/d3-3.5.17.min.js - webapps/static/jquery-3.4.1.min.js - webapps/static/jquery.dataTables.min.js - webapps/static/nvd3-1.8.5.min.css.map - webapps/static/nvd3-1.8.5.min.css - webapps/static/nvd3-1.8.5.min.js.map - webapps/static/nvd3-1.8.5.min.js - **/dependency-reduced-pom.xml - **/node_modules/** - **/yarn.lock - **/ozone-recon-web/build/** - src/main/license/** - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - add-classpath-descriptor - package - - build-classpath - - - ${project.build.directory}/classpath - $HDDS_LIB_JARS_DIR - true - runtime - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - attach-classpath-artifact - package - - attach-artifact - - - - - ${project.build.directory}/classpath - cp - classpath - - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - **/node_modules/* - **/ozone-recon-web/** - - - - - - test-jar - - - - - - - - - - docker-build - - ${user.name}/ozone:${project.version} - - - - parallel-tests - - - - org.apache.hadoop - hadoop-maven-plugins - - - parallel-tests-createdir - - parallel-tests-createdir - - - - - - org.apache.maven.plugins - maven-surefire-plugin - - ${testsThreadCount} - false - ${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true - - ${testsThreadCount} - ${test.build.data}/${surefire.forkNumber} - ${test.build.dir}/${surefire.forkNumber} - ${hadoop.tmp.dir}/${surefire.forkNumber} - - - - - - ${test.build.data} - - - - - - fork-${surefire.forkNumber} - - - - - - - - diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml deleted file mode 100644 index 6abc5ef8598..00000000000 --- a/hadoop-ozone/recon-codegen/pom.xml +++ /dev/null @@ -1,70 +0,0 @@ - - - - - hadoop-ozone - org.apache.hadoop - 0.5.0-SNAPSHOT - - 4.0.0 - hadoop-ozone-reconcodegen - Apache Hadoop Ozone Recon CodeGen - - 3.11.10 - - - - org.apache.hadoop - hadoop-ozone-common - - - org.xerial - sqlite-jdbc - 3.25.2 - - - com.google.inject.extensions - guice-multibindings - ${guice.version} - - - org.springframework - 
spring-jdbc - 5.1.3.RELEASE - - - org.jooq - jooq-codegen - ${jooq.version} - - - org.jooq - jooq-meta - ${jooq.version} - - - org.jooq - jooq - ${jooq.version} - - - com.google.inject - guice - ${guice.version} - - - diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java deleted file mode 100644 index fce4e0ba5c0..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/JooqCodeGenerator.java +++ /dev/null @@ -1,170 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.hadoop.ozone.recon.codegen; - -import java.io.File; -import java.sql.SQLException; -import java.util.Set; - -import javax.sql.DataSource; - -import org.apache.commons.io.FileUtils; -import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; -import org.jooq.codegen.GenerationTool; -import org.jooq.meta.jaxb.Configuration; -import org.jooq.meta.jaxb.Database; -import org.jooq.meta.jaxb.Generate; -import org.jooq.meta.jaxb.Generator; -import org.jooq.meta.jaxb.Jdbc; -import org.jooq.meta.jaxb.Strategy; -import org.jooq.meta.jaxb.Target; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.sqlite.SQLiteDataSource; - -import com.google.inject.AbstractModule; -import com.google.inject.Guice; -import com.google.inject.Inject; -import com.google.inject.Injector; -import com.google.inject.Provider; - -/** - * Utility class that generates the Dao and Pojos for Recon schema. The - * implementations of {@link ReconSchemaDefinition} are discovered through - * Guice bindings in order to avoid ugly reflection code, and invoked to - * generate the schema over an embedded database. The jooq code generator then - * runs over the embedded database to generate classes for recon. - */ -public class JooqCodeGenerator { - - private static final Logger LOG = - LoggerFactory.getLogger(JooqCodeGenerator.class); - - private static final String SQLITE_DB = - System.getProperty("java.io.tmpdir") + "/recon-generated-schema"; - private static final String JDBC_URL = "jdbc:sqlite:" + SQLITE_DB; - - private final Set allDefinitions; - - @Inject - public JooqCodeGenerator(Set allDefinitions) { - this.allDefinitions = allDefinitions; - } - - /** - * Create schema. - */ - private void initializeSchema() throws SQLException { - for (ReconSchemaDefinition definition : allDefinitions) { - definition.initializeSchema(); - } - } - - /** - * Generate entity and DAO classes. - */ - private void generateSourceCode(String outputDir) throws Exception { - Configuration configuration = - new Configuration() - .withJdbc(new Jdbc() - .withDriver("org.sqlite.JDBC") - .withUrl(JDBC_URL) - .withUser("sa") - .withPassword("sa")) - .withGenerator(new Generator() - .withDatabase(new Database() - .withName("org.jooq.meta.sqlite.SQLiteDatabase") - .withOutputSchemaToDefault(true) - .withIncludeTables(true) - .withIncludePrimaryKeys(true)) - .withGenerate(new Generate() - .withDaos(true) - .withEmptyCatalogs(true) - .withEmptySchemas(true)) - .withStrategy(new Strategy().withName( - "org.hadoop.ozone.recon.codegen.TableNamingStrategy")) - .withTarget(new Target() - .withPackageName("org.hadoop.ozone.recon.schema") - .withClean(true) - .withDirectory(outputDir))); - GenerationTool.generate(configuration); - } - - /** - * Provider for embedded datasource. 
- */ - static class LocalDataSourceProvider implements Provider { - private static SQLiteDataSource db; - - static { - db = new SQLiteDataSource(); - db.setUrl(JDBC_URL); - } - - @Override - public DataSource get() { - return db; - } - - static void cleanup() { - FileUtils.deleteQuietly(new File(SQLITE_DB)); - } - } - - public static void main(String[] args) { - if (args.length < 1) { - throw new IllegalArgumentException("Missing required arguments: " + - "Need a ouput directory for generated code.\nUsage: " + - "org.apache.hadoop.ozone.recon.persistence.JooqCodeGenerator " + - "."); - } - - String outputDir = args[0]; - Injector injector = Guice.createInjector( - new ReconSchemaGenerationModule(), - new AbstractModule() { - @Override - protected void configure() { - bind(DataSource.class).toProvider(new LocalDataSourceProvider()); - bind(JooqCodeGenerator.class); - } - }); - - JooqCodeGenerator codeGenerator = - injector.getInstance(JooqCodeGenerator.class); - - // Create tables - try { - codeGenerator.initializeSchema(); - } catch (SQLException e) { - LOG.error("Unable to initialize schema.", e); - throw new ExceptionInInitializerError(e); - } - - // Generate Pojos and Daos - try { - codeGenerator.generateSourceCode(outputDir); - } catch (Exception e) { - LOG.error("Code generation failed. Aborting build.", e); - throw new ExceptionInInitializerError(e); - } - - // Cleanup after - LocalDataSourceProvider.cleanup(); - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java deleted file mode 100644 index c393cc24dee..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/ReconSchemaGenerationModule.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.hadoop.ozone.recon.codegen; - -import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; -import org.hadoop.ozone.recon.schema.ReconSchemaDefinition; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; - -import com.google.inject.AbstractModule; -import com.google.inject.multibindings.Multibinder; - -/** - * Bindings for DDL generation and used by - * {@link org.hadoop.ozone.recon.codegen.JooqCodeGenerator}. - */ -public class ReconSchemaGenerationModule extends AbstractModule { - @Override - protected void configure() { - // SQL schema creation and related bindings - Multibinder schemaBinder = - Multibinder.newSetBinder(binder(), ReconSchemaDefinition.class); - schemaBinder.addBinding().to(UtilizationSchemaDefinition.class); - schemaBinder.addBinding().to(ReconInternalSchemaDefinition.class); - schemaBinder.addBinding().to(StatsSchemaDefinition.class); - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java deleted file mode 100644 index 93c23c4a908..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/TableNamingStrategy.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.hadoop.ozone.recon.codegen; - -import org.jooq.codegen.DefaultGeneratorStrategy; -import org.jooq.meta.Definition; -import org.jooq.meta.TableDefinition; -import org.jooq.tools.StringUtils; - -/** - * Generate Table classes with a different name from POJOS to improve - * readability, loaded at runtime. - */ -public class TableNamingStrategy extends DefaultGeneratorStrategy { - @Override - public String getJavaClassName(Definition definition, Mode mode) { - if (definition instanceof TableDefinition && mode == Mode.DEFAULT) { - StringBuilder result = new StringBuilder(); - - result.append(StringUtils.toCamelCase( - definition.getOutputName() - .replace(' ', '_') - .replace('-', '_') - .replace('.', '_') - )); - - result.append("Table"); - return result.toString(); - } else { - return super.getJavaClassName(definition, mode); - } - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java deleted file mode 100644 index 2e5cf0f5afe..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Recon code generation support for entities and daos. - */ -package org.hadoop.ozone.recon.codegen; \ No newline at end of file diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java deleted file mode 100644 index 9ab9e38e95f..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconInternalSchemaDefinition.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.hadoop.ozone.recon.schema; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; - -import com.google.inject.Inject; - -/** - * Class used to create tables that are required for Recon's internal - * management. - */ -public class ReconInternalSchemaDefinition implements ReconSchemaDefinition { - - public static final String RECON_TASK_STATUS_TABLE_NAME = - "recon_task_status"; - private final DataSource dataSource; - - @Inject - ReconInternalSchemaDefinition(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Override - public void initializeSchema() throws SQLException { - Connection conn = dataSource.getConnection(); - createReconTaskStatus(conn); - } - - /** - * Create the Recon Task Status table. - * @param conn connection - */ - private void createReconTaskStatus(Connection conn) { - DSL.using(conn).createTableIfNotExists(RECON_TASK_STATUS_TABLE_NAME) - .column("task_name", SQLDataType.VARCHAR(1024)) - .column("last_updated_timestamp", SQLDataType.BIGINT) - .column("last_updated_seq_number", SQLDataType.BIGINT) - .constraint(DSL.constraint("pk_task_name") - .primaryKey("task_name")) - .execute(); - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java deleted file mode 100644 index 72a105e5fff..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconSchemaDefinition.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.hadoop.ozone.recon.schema; - -import java.sql.SQLException; - -/** - * Classes meant to initialize the SQL schema for Recon. The implementations of - * this class will be used to create the SQL schema programmatically. - * Note: Make sure add a binding for your implementation to the Guice module, - * otherwise code-generator will not pick up the schema changes. - */ -public interface ReconSchemaDefinition { - - /** - * Execute DDL that will create Recon schema. - */ - void initializeSchema() throws SQLException; -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java deleted file mode 100644 index 6763bc8dc73..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/StatsSchemaDefinition.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.hadoop.ozone.recon.schema; - -import com.google.inject.Inject; -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; - -import javax.sql.DataSource; -import java.sql.Connection; -import java.sql.SQLException; - -/** - * Class used to create tables that are required for storing Ozone statistics. - */ -public class StatsSchemaDefinition implements ReconSchemaDefinition { - - public static final String GLOBAL_STATS_TABLE_NAME = "global_stats"; - private final DataSource dataSource; - - @Inject - StatsSchemaDefinition(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Override - public void initializeSchema() throws SQLException { - Connection conn = dataSource.getConnection(); - createGlobalStatsTable(conn); - } - - /** - * Create the Ozone Global Stats table. - * @param conn connection - */ - private void createGlobalStatsTable(Connection conn) { - DSL.using(conn).createTableIfNotExists(GLOBAL_STATS_TABLE_NAME) - .column("key", SQLDataType.VARCHAR(255)) - .column("value", SQLDataType.BIGINT) - .column("last_updated_timestamp", SQLDataType.TIMESTAMP) - .constraint(DSL.constraint("pk_key") - .primaryKey("key")) - .execute(); - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java deleted file mode 100644 index b8e656090ce..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.hadoop.ozone.recon.schema; - -import java.sql.Connection; -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.jooq.impl.DSL; -import org.jooq.impl.SQLDataType; -import org.springframework.transaction.annotation.Transactional; - -import com.google.inject.Inject; - -/** - * Programmatic definition of Recon DDL. - */ -public class UtilizationSchemaDefinition implements ReconSchemaDefinition { - - private final DataSource dataSource; - - public static final String CLUSTER_GROWTH_DAILY_TABLE_NAME = - "cluster_growth_daily"; - - public static final String FILE_COUNT_BY_SIZE_TABLE_NAME = - "file_count_by_size"; - - @Inject - UtilizationSchemaDefinition(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Override - @Transactional - public void initializeSchema() throws SQLException { - Connection conn = dataSource.getConnection(); - createClusterGrowthTable(conn); - createFileSizeCount(conn); - } - - void createClusterGrowthTable(Connection conn) { - DSL.using(conn).createTableIfNotExists(CLUSTER_GROWTH_DAILY_TABLE_NAME) - .column("timestamp", SQLDataType.TIMESTAMP) - .column("datanode_id", SQLDataType.INTEGER) - .column("datanode_host", SQLDataType.VARCHAR(1024)) - .column("rack_id", SQLDataType.VARCHAR(1024)) - .column("available_size", SQLDataType.BIGINT) - .column("used_size", SQLDataType.BIGINT) - .column("container_count", SQLDataType.INTEGER) - .column("block_count", SQLDataType.INTEGER) - .constraint(DSL.constraint("pk_timestamp_datanode_id") - .primaryKey("timestamp", "datanode_id")) - .execute(); - } - - void createFileSizeCount(Connection conn) { - DSL.using(conn).createTableIfNotExists(FILE_COUNT_BY_SIZE_TABLE_NAME) - .column("file_size", SQLDataType.BIGINT) - .column("count", SQLDataType.BIGINT) - .constraint(DSL.constraint("pk_file_size") - .primaryKey("file_size")) - .execute(); - } -} diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java deleted file mode 100644 index 3c701f989d3..00000000000 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Classes in this package define the schema for Recon Sql database. - */ -package org.hadoop.ozone.recon.schema; \ No newline at end of file diff --git a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index 7c0ba4dd4e9..00000000000 --- a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - - - - - - - - - - diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml deleted file mode 100644 index 55318a543c5..00000000000 --- a/hadoop-ozone/recon/pom.xml +++ /dev/null @@ -1,311 +0,0 @@ - - - - - hadoop-ozone - org.apache.hadoop - 0.5.0-SNAPSHOT - - Apache Hadoop Ozone Recon - 4.0.0 - hadoop-ozone-recon - - 3.11.10 - 5.1.3.RELEASE - - - - - src/main/resources - - **/node_modules/** - - - - - - org.codehaus.mojo - exec-maven-plugin - ${exec-maven-plugin.version} - - - generate-resources - - java - - - - - java - compile - org.hadoop.ozone.recon.codegen.JooqCodeGenerator - - ${project.build.directory}/generated-sources/java - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - com.github.eirslett - frontend-maven-plugin - 1.6 - - target - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web - - - - Install node and yarn locally to the project - - install-node-and-yarn - - - v12.1.0 - v1.9.2 - - - - yarn install - - yarn - - - install - - - - Build frontend - - yarn - - - run build - - - - - - org.apache.maven.plugins - maven-resources-plugin - - - Copy frontend build to target - process-resources - - copy-resources - - - ${project.build.outputDirectory}/webapps/recon - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build - true - - - - - - Copy frontend static files to target - process-resources - - copy-resources - - - ${project.build.outputDirectory}/webapps/static - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build/static - true - - - - - - - - - - - org.apache.hadoop - hadoop-ozone-common - - - jersey-server - com.sun.jersey - - - jersey-core - com.sun.jersey - - - jersey-servlet - com.sun.jersey - - - - - org.apache.hadoop - hadoop-ozone-reconcodegen - ${ozone.version} - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - com.google.inject - guice - ${guice.version} - - - com.google.inject.extensions - guice-servlet - ${guice.version} - - - org.glassfish.jersey.containers - jersey-container-servlet - 2.27 - - - org.glassfish.hk2 - hk2-api - - - compile - - - org.glassfish.jersey.containers - jersey-container-servlet-core - 2.27 - - - org.glassfish.hk2 - guice-bridge - 2.5.0 - - - org.glassfish.jersey.core - jersey-server - 2.27 - - - org.glassfish.jersey.media - jersey-media-json-jackson - 2.27 - - - com.google.inject.extensions - guice-assistedinject - ${guice.version} - - - org.glassfish.jersey.inject - jersey-hk2 - 2.27 - - - hk2-api - org.glassfish.hk2 - - - org.glassfish.hk2.external - aopalliance-repackaged - - - org.glassfish.hk2 - 
hk2-utils - - - - - junit - junit - test - - - org.mockito - mockito-core - 2.8.9 - test - - - org.jooq - jooq - ${jooq.version} - - - org.jooq - jooq-meta - ${jooq.version} - - - org.jooq - jooq-codegen - ${jooq.version} - - - com.jolbox - bonecp - 0.8.0.RELEASE - - - org.xerial - sqlite-jdbc - 3.25.2 - - - org.springframework - spring-jdbc - ${spring.version} - - - javax.activation - activation - 1.1.1 - - - diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java deleted file mode 100644 index 5b0195811aa..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import com.google.inject.Provider; -import org.apache.hadoop.conf.Configuration; - - -/** - * Ozone Configuration Provider. - *

- * As the OzoneConfiguration is created by the CLI application here we inject - * it via a singleton instance to the Jax-RS/CDI instances. - */ -public class ConfigurationProvider implements - Provider { - - private static Configuration configuration; - - static void setConfiguration(Configuration conf) { - ConfigurationProvider.configuration = conf; - } - - @Override - public Configuration get() { - return configuration; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java deleted file mode 100644 index 86c8a32fbf1..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
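ConfigurationProvider, removed above, is a plain Guice Provider: the CLI entry point builds the OzoneConfiguration, hands it to the provider through a static setter, and every injection point asking for Configuration then sees that same instance. A minimal, self-contained sketch of the pattern follows; ProviderFlowSketch and ConfProvider are hypothetical stand-ins, and only the toProvider binding mirrors ReconControllerModule.

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Provider;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    /** Sketch only; ConfProvider stands in for the removed ConfigurationProvider. */
    public final class ProviderFlowSketch {
      static class ConfProvider implements Provider<Configuration> {
        private static Configuration configuration;
        static void set(Configuration conf) { configuration = conf; }
        @Override public Configuration get() { return configuration; }
      }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        ConfProvider.set(conf);                 // what ReconServer.call() does before building the injector
        Configuration injected = Guice.createInjector(new AbstractModule() {
          @Override protected void configure() {
            // Same shape as the Configuration binding in ReconControllerModule.
            bind(Configuration.class).toProvider(ConfProvider.class);
          }
        }).getInstance(Configuration.class);
        System.out.println(injected == conf);   // true: consumers receive the CLI-created instance
      }
    }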

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; - -/** - * Recon Server constants file. - */ -public final class ReconConstants { - - private ReconConstants() { - // Never Constructed - } - - public static final String RECON_CONTAINER_DB = "recon-" + - CONTAINER_DB_SUFFIX; - - public static final String CONTAINER_COUNT_KEY = "totalCount"; - - public static final String RECON_OM_SNAPSHOT_DB = - "om.snapshot.db"; - - public static final String CONTAINER_KEY_TABLE = - "containerKeyTable"; - - public static final String CONTAINER_KEY_COUNT_TABLE = - "containerKeyCountTable"; - - public static final String FETCH_ALL = "-1"; - public static final String RECON_QUERY_PREVKEY = "prevKey"; - public static final String PREV_CONTAINER_ID_DEFAULT_VALUE = "0"; - public static final String RECON_QUERY_LIMIT = "limit"; - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java deleted file mode 100644 index c9e870e0d06..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_AUTO_COMMIT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_CONNECTION_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_DRIVER; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_JDBC_URL; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_PASSWORD; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_DB_USER; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_CONNECTION_AGE; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration; -import org.apache.hadoop.ozone.recon.persistence.JooqPersistenceModule; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; -import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider; -import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl; -import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; -import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; -import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; -import org.apache.hadoop.ozone.recon.tasks.ReconTaskControllerImpl; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.ratis.protocol.ClientId; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.AbstractModule; -import com.google.inject.Provides; -import com.google.inject.Singleton; - -/** - * Guice controller that defines concrete bindings. 
- */ -public class ReconControllerModule extends AbstractModule { - private static final Logger LOG = - LoggerFactory.getLogger(ReconControllerModule.class); - - @Override - protected void configure() { - bind(Configuration.class).toProvider(ConfigurationProvider.class); - bind(ReconHttpServer.class).in(Singleton.class); - bind(DBStore.class) - .toProvider(ReconContainerDBProvider.class).in(Singleton.class); - bind(ReconOMMetadataManager.class) - .to(ReconOmMetadataManagerImpl.class).in(Singleton.class); - bind(OMMetadataManager.class).to(ReconOmMetadataManagerImpl.class) - .in(Singleton.class); - bind(ContainerDBServiceProvider.class) - .to(ContainerDBServiceProviderImpl.class).in(Singleton.class); - bind(OzoneManagerServiceProvider.class) - .to(OzoneManagerServiceProviderImpl.class).in(Singleton.class); - bind(ReconUtils.class).in(Singleton.class); - // Persistence - inject configuration provider - install(new JooqPersistenceModule( - getProvider(DataSourceConfiguration.class))); - - bind(ReconTaskController.class) - .to(ReconTaskControllerImpl.class).in(Singleton.class); - bind(ContainerKeyMapperTask.class); - bind(FileSizeCountTask.class); - } - - @Provides - OzoneManagerProtocol getOzoneManagerProtocol( - final OzoneConfiguration ozoneConfiguration) { - OzoneManagerProtocol ozoneManagerClient = null; - try { - ClientId clientId = ClientId.randomId(); - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - ozoneManagerClient = new - OzoneManagerProtocolClientSideTranslatorPB( - ozoneConfiguration, clientId.toString(), null, ugi); - } catch (IOException ioEx) { - LOG.error("Error in provisioning OzoneManagerProtocol ", ioEx); - } - return ozoneManagerClient; - } - - @Provides - DataSourceConfiguration getDataSourceConfiguration( - final OzoneConfiguration ozoneConfiguration) { - - return new DataSourceConfiguration() { - @Override - public String getDriverClass() { - return ozoneConfiguration.get(OZONE_RECON_SQL_DB_DRIVER, - "org.sqlite.JDBC"); - } - - @Override - public String getJdbcUrl() { - return ozoneConfiguration.get(OZONE_RECON_SQL_DB_JDBC_URL); - } - - @Override - public String getUserName() { - return ozoneConfiguration.get(OZONE_RECON_SQL_DB_USER); - } - - @Override - public String getPassword() { - return ozoneConfiguration.get(OZONE_RECON_SQL_DB_PASSWORD); - } - - @Override - public boolean setAutoCommit() { - return ozoneConfiguration.getBoolean( - OZONE_RECON_SQL_AUTO_COMMIT, false); - } - - @Override - public long getConnectionTimeout() { - return ozoneConfiguration.getLong( - OZONE_RECON_SQL_CONNECTION_TIMEOUT, 30000); - } - - @Override - public String getSqlDialect() { - return JooqPersistenceModule.DEFAULT_DIALECT.toString(); - } - - @Override - public Integer getMaxActiveConnections() { - return ozoneConfiguration.getInt( - OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS, 10); - } - - @Override - public Integer getMaxConnectionAge() { - return ozoneConfiguration.getInt( - OZONE_RECON_SQL_MAX_CONNECTION_AGE, 1800); - } - - @Override - public Integer getMaxIdleConnectionAge() { - return ozoneConfiguration.getInt( - OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE, 3600); - } - - @Override - public String getConnectionTestStatement() { - return ozoneConfiguration.get( - OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT, "SELECT 1"); - } - - @Override - public Integer getIdleConnectionTestPeriod() { - return ozoneConfiguration.getInt( - OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD, 60); - } - }; - - } -} diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java deleted file mode 100644 index ab11f0e3441..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import com.google.inject.Injector; -import com.google.inject.servlet.GuiceServletContextListener; - -/** - * Servlet Context Listener that provides the Guice injector. - */ -public class ReconGuiceServletContextListener - extends GuiceServletContextListener { - - private static Injector injector; - - @Override - public Injector getInjector() { - return injector; - } - - static void setInjector(Injector inj) { - injector = inj; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java deleted file mode 100644 index e7dcb0cc4d6..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -import com.google.inject.Inject; - -/** - * Recon http server with recon supplied config defaults. - */ - -public class ReconHttpServer extends BaseHttpServer { - - @Inject - ReconHttpServer(Configuration conf) throws IOException { - super(conf, "recon"); - } - - @Override - protected String getHttpAddressKey() { - return ReconServerConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; - } - - @Override - protected String getHttpsAddressKey() { - return ReconServerConfigKeys.OZONE_RECON_HTTPS_ADDRESS_KEY; - } - - @Override - protected String getHttpBindHostKey() { - return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_KEY; - } - - @Override - protected String getHttpsBindHostKey() { - return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_HOST_KEY; - } - - @Override - protected String getBindHostDefault() { - return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_DEFAULT; - } - - @Override - protected int getHttpBindPortDefault() { - return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_PORT_DEFAULT; - } - - @Override - protected int getHttpsBindPortDefault() { - return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT; - } - - @Override - protected String getKeytabFile() { - return ReconServerConfigKeys.OZONE_RECON_KEYTAB_FILE; - } - - @Override - protected String getSpnegoPrincipal() { - return ReconServerConfigKeys - .OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; - } - - @Override - protected String getEnabledKey() { - return ReconServerConfigKeys.OZONE_RECON_HTTP_ENABLED_KEY; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java deleted file mode 100644 index 5a69e66dabd..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import java.net.URL; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.glassfish.hk2.api.ServiceLocator; -import org.glassfish.jersey.internal.inject.InjectionManager; -import org.glassfish.jersey.server.ResourceConfig; -import org.glassfish.jersey.server.spi.Container; -import org.glassfish.jersey.server.spi.ContainerLifecycleListener; -import org.glassfish.jersey.servlet.ServletContainer; -import org.jvnet.hk2.guice.bridge.api.GuiceBridge; -import org.jvnet.hk2.guice.bridge.api.GuiceIntoHK2Bridge; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.Injector; -import com.google.inject.Scopes; -import com.google.inject.servlet.ServletModule; - -/** - * Class to scan API Service classes and bind them to the injector. - */ -public abstract class ReconRestServletModule extends ServletModule { - - private static final Logger LOG = - LoggerFactory.getLogger(ReconRestServletModule.class); - - @Override - abstract protected void configureServlets(); - - /** - * Interface to provide packages for scanning. - */ - public interface RestKeyBindingBuilder { - void packages(String... packages); - } - - protected RestKeyBindingBuilder rest(String... urlPatterns) { - return new RestKeyBindingBuilderImpl(Arrays.asList(urlPatterns)); - } - - private class RestKeyBindingBuilderImpl implements RestKeyBindingBuilder { - private List paths; - - RestKeyBindingBuilderImpl(List paths) { - this.paths = paths; - } - - private void checkIfPackageExistsAndLog(String pkg) { - String resourcePath = pkg.replace(".", "/"); - URL resource = getClass().getClassLoader().getResource(resourcePath); - if (resource != null) { - LOG.info("rest(" + paths + ").packages(" + pkg + ")"); - } else { - LOG.info("No Beans in '" + pkg + "' found. Requests " + paths - + " will fail."); - } - } - - @Override - public void packages(String... packages) { - StringBuilder sb = new StringBuilder(); - - for (String pkg : packages) { - if (sb.length() > 0) { - sb.append(','); - } - checkIfPackageExistsAndLog(pkg); - sb.append(pkg); - } - Map params = new HashMap<>(); - params.put("javax.ws.rs.Application", - GuiceResourceConfig.class.getCanonicalName()); - if (sb.length() > 0) { - params.put("jersey.config.server.provider.packages", sb.toString()); - } - bind(ServletContainer.class).in(Scopes.SINGLETON); - for (String path : paths) { - serve(path).with(ServletContainer.class, params); - } - } - } -} - -/** - * Class to bridge Guice bindings to Jersey hk2 bindings. 
- */ -class GuiceResourceConfig extends ResourceConfig { - GuiceResourceConfig() { - register(new ContainerLifecycleListener() { - public void onStartup(Container container) { - ServletContainer servletContainer = (ServletContainer) container; - InjectionManager injectionManager = container.getApplicationHandler() - .getInjectionManager(); - ServiceLocator serviceLocator = injectionManager - .getInstance(ServiceLocator.class); - GuiceBridge.getGuiceBridge().initializeGuiceBridge(serviceLocator); - GuiceIntoHK2Bridge guiceBridge = serviceLocator - .getService(GuiceIntoHK2Bridge.class); - Injector injector = (Injector) servletContainer.getServletContext() - .getAttribute(Injector.class.getName()); - guiceBridge.bridgeGuiceInjector(injector); - } - - public void onReload(Container container) { - } - - public void onShutdown(Container container) { - } - }); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java deleted file mode 100644 index 1aaf8873d58..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; -import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.Guice; -import com.google.inject.Inject; -import com.google.inject.Injector; - - -/** - * Recon server main class that stops and starts recon services. - */ -public class ReconServer extends GenericCli { - - private static final Logger LOG = LoggerFactory.getLogger(ReconServer.class); - private final ScheduledExecutorService scheduler = - Executors.newScheduledThreadPool(1); - private Injector injector; - - @Inject - private ReconHttpServer httpServer; - - public static void main(String[] args) { - new ReconServer().run(args); - } - - @Override - public Void call() throws Exception { - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); - ConfigurationProvider.setConfiguration(ozoneConfiguration); - - injector = Guice.createInjector(new - ReconControllerModule(), - new ReconRestServletModule() { - @Override - protected void configureServlets() { - rest("/api/*") - .packages("org.apache.hadoop.ozone.recon.api"); - } - }, - new ReconTaskBindingModule()); - - //Pass on injector to listener that does the Guice - Jersey HK2 bridging. - ReconGuiceServletContextListener.setInjector(injector); - - LOG.info("Initializing Recon server..."); - try { - StatsSchemaDefinition statsSchemaDefinition = injector.getInstance( - StatsSchemaDefinition.class); - statsSchemaDefinition.initializeSchema(); - - UtilizationSchemaDefinition utilizationSchemaDefinition = - injector.getInstance(UtilizationSchemaDefinition.class); - utilizationSchemaDefinition.initializeSchema(); - - ReconInternalSchemaDefinition reconInternalSchemaDefinition = - injector.getInstance(ReconInternalSchemaDefinition.class); - reconInternalSchemaDefinition.initializeSchema(); - - LOG.info("Recon server initialized successfully!"); - - httpServer = injector.getInstance(ReconHttpServer.class); - LOG.info("Starting Recon server"); - httpServer.start(); - - //Start Ozone Manager Service that pulls data from OM. 
- OzoneManagerServiceProvider ozoneManagerServiceProvider = injector - .getInstance(OzoneManagerServiceProvider.class); - ozoneManagerServiceProvider.start(); - } catch (Exception e) { - LOG.error("Error during initializing Recon server.", e); - stop(); - } - - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - try { - stop(); - } catch (Exception e) { - LOG.error("Error during stop Recon server", e); - } - })); - return null; - } - - void stop() throws Exception { - LOG.info("Stopping Recon server"); - httpServer.stop(); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java deleted file mode 100644 index 034af4a5277..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This class contains constants for Recon configuration keys. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class ReconServerConfigKeys { - - public static final String OZONE_RECON_HTTP_ENABLED_KEY = - "ozone.recon.http.enabled"; - public static final String OZONE_RECON_HTTP_BIND_HOST_KEY = - "ozone.recon.http-bind-host"; - public static final String OZONE_RECON_HTTPS_BIND_HOST_KEY = - "ozone.recon.https-bind-host"; - public static final String OZONE_RECON_HTTP_ADDRESS_KEY = - "ozone.recon.http-address"; - public static final String OZONE_RECON_HTTPS_ADDRESS_KEY = - "ozone.recon.https-address"; - public static final String OZONE_RECON_KEYTAB_FILE = - "ozone.recon.keytab.file"; - public static final String OZONE_RECON_HTTP_BIND_HOST_DEFAULT = - "0.0.0.0"; - public static final int OZONE_RECON_HTTP_BIND_PORT_DEFAULT = 9888; - public static final int OZONE_RECON_HTTPS_BIND_PORT_DEFAULT = 9889; - public static final String OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL = - "ozone.recon.authentication.kerberos.principal"; - - public static final String OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB = - "ozone.recon.container.db.cache.size.mb"; - public static final int OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT = 128; - - public static final String OZONE_RECON_DB_DIR = "ozone.recon.db.dir"; - - public static final String OZONE_RECON_OM_SNAPSHOT_DB_DIR = - "ozone.recon.om.db.dir"; - - public static final String RECON_OM_SOCKET_TIMEOUT = - "recon.om.socket.timeout"; - public static final String RECON_OM_SOCKET_TIMEOUT_DEFAULT = "5s"; - - public static final String RECON_OM_CONNECTION_TIMEOUT = - "recon.om.connection.timeout"; - public static final String RECON_OM_CONNECTION_TIMEOUT_DEFAULT = "5s"; - - public static final String RECON_OM_CONNECTION_REQUEST_TIMEOUT = - "recon.om.connection.request.timeout"; - - public static final String RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "5s"; - - public static final String RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY = - "recon.om.snapshot.task.initial.delay"; - public static final String - RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT = "1m"; - - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL = - "ozone.recon.container.db.impl"; - public static final String OZONE_RECON_CONTAINER_DB_STORE_IMPL_DEFAULT = - OZONE_METADATA_STORE_IMPL_ROCKSDB; - - public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL = - "recon.om.snapshot.task.interval.delay"; - public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT - = "10m"; - - public static final String RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM = - "recon.om.snapshot.task.flush.param"; - - // Persistence properties - public static final String OZONE_RECON_SQL_DB_DRIVER = - "ozone.recon.sql.db.driver"; - public static final String OZONE_RECON_SQL_DB_JDBC_URL = - "ozone.recon.sql.db.jdbc.url"; - public static final String OZONE_RECON_SQL_DB_USER = - "ozone.recon.sql.db.username"; - public 
static final String OZONE_RECON_SQL_DB_PASSWORD = - "ozone.recon.sql.db.password"; - public static final String OZONE_RECON_SQL_AUTO_COMMIT = - "ozone.recon.sql.db.auto.commit"; - public static final String OZONE_RECON_SQL_CONNECTION_TIMEOUT = - "ozone.recon.sql.db.conn.timeout"; - public static final String OZONE_RECON_SQL_MAX_ACTIVE_CONNECTIONS = - "ozone.recon.sql.db.conn.max.active"; - public static final String OZONE_RECON_SQL_MAX_CONNECTION_AGE = - "ozone.recon.sql.db.conn.max.age"; - public static final String OZONE_RECON_SQL_MAX_IDLE_CONNECTION_AGE = - "ozone.recon.sql.db.conn.idle.max.age"; - public static final String OZONE_RECON_SQL_IDLE_CONNECTION_TEST_PERIOD = - "ozone.recon.sql.db.conn.idle.test.period"; - public static final String OZONE_RECON_SQL_MAX_IDLE_CONNECTION_TEST_STMT = - "ozone.recon.sql.db.conn.idle.test"; - - public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY = - "ozone.recon.task.thread.count"; - public static final int OZONE_RECON_TASK_THREAD_COUNT_DEFAULT = 5; - - /** - * Private constructor for utility class. - */ - private ReconServerConfigKeys() { - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java deleted file mode 100644 index 19cc0da23e6..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconTaskBindingModule.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
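For context, the SQL-related keys above feed the anonymous DataSourceConfiguration built in ReconControllerModule earlier in this patch. A hedged sketch of assembling such a configuration programmatically; only the key names and the noted defaults come from the source, while every value and the class name are placeholders.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.*;

    /** Sketch only; compiles only with the removed recon module on the classpath. */
    public final class ReconSqlConfigSketch {
      public static OzoneConfiguration build() {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set(OZONE_RECON_DB_DIR, "/var/lib/ozone/recon/db");                  // placeholder path
        conf.set(OZONE_RECON_SQL_DB_DRIVER, "org.sqlite.JDBC");                   // fallback used by ReconControllerModule
        conf.set(OZONE_RECON_SQL_DB_JDBC_URL,
            "jdbc:sqlite:/var/lib/ozone/recon/sqlite.db");                        // placeholder URL
        conf.setBoolean(OZONE_RECON_SQL_AUTO_COMMIT, false);                      // module default
        conf.setLong(OZONE_RECON_SQL_CONNECTION_TIMEOUT, 30000);                  // module default, milliseconds
        conf.setInt(OZONE_RECON_TASK_THREAD_COUNT_KEY, OZONE_RECON_TASK_THREAD_COUNT_DEFAULT);
        return conf;
      }
    }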

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; -import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask; -import org.apache.hadoop.ozone.recon.tasks.ReconDBUpdateTask; - -import com.google.inject.AbstractModule; -import com.google.inject.multibindings.Multibinder; - -/** - * Binds the various Recon Tasks. - */ -public class ReconTaskBindingModule extends AbstractModule { - - @Override - protected void configure() { - Multibinder taskBinder = - Multibinder.newSetBinder(binder(), ReconDBUpdateTask.class); - taskBinder.addBinding().to(ContainerKeyMapperTask.class); - taskBinder.addBinding().to(FileSizeCountTask.class); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java deleted file mode 100644 index 2d29d3f9404..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ /dev/null @@ -1,274 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
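ReconTaskBindingModule above relies on Guice multibindings: each addBinding() contributes one task to an injectable Set of ReconDBUpdateTask, which is how the task controller can discover every registered task without naming them individually. A self-contained sketch of the same pattern with stand-in types; all names below are hypothetical and not from the Recon source.

    import java.util.Set;
    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Key;
    import com.google.inject.TypeLiteral;
    import com.google.inject.multibindings.Multibinder;

    public final class MultibinderSketch {
      interface UpdateTask { String name(); }                  // stand-in for ReconDBUpdateTask
      static class KeyMapperTask implements UpdateTask {       // stand-in for ContainerKeyMapperTask
        @Override public String name() { return "containerKeyMapper"; }
      }
      static class FileCountTask implements UpdateTask {       // stand-in for FileSizeCountTask
        @Override public String name() { return "fileSizeCount"; }
      }

      public static void main(String[] args) {
        Set<UpdateTask> tasks = Guice.createInjector(new AbstractModule() {
          @Override protected void configure() {
            Multibinder<UpdateTask> taskBinder =
                Multibinder.newSetBinder(binder(), UpdateTask.class);
            taskBinder.addBinding().to(KeyMapperTask.class);   // mirrors ReconTaskBindingModule
            taskBinder.addBinding().to(FileCountTask.class);
          }
        }).getInstance(Key.get(new TypeLiteral<Set<UpdateTask>>() { }));
        tasks.forEach(t -> System.out.println(t.name()));      // both registered tasks are in the set
      }
    }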

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import static java.net.HttpURLConnection.HTTP_CREATED; -import static java.net.HttpURLConnection.HTTP_OK; -import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.zip.GZIPOutputStream; - -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.io.IOUtils; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.CloseableHttpClient; - -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Recon Utility class. - */ -public class ReconUtils { - - private final static int WRITE_BUFFER = 1048576; //1MB - - public ReconUtils() { - } - - private static final Logger LOG = LoggerFactory.getLogger( - ReconUtils.class); - - /** - * Get configured Recon DB directory value based on config. If not present, - * fallback to ozone.metadata.dirs - * - * @param conf configuration bag - * @param dirConfigKey key to check - * @return Return File based on configured or fallback value. - */ - public File getReconDbDir(Configuration conf, String dirConfigKey) { - - File metadataDir = getDirectoryFromConfig(conf, dirConfigKey, - "Recon"); - if (metadataDir != null) { - return metadataDir; - } - - LOG.warn("{} is not configured. We recommend adding this setting. " + - "Falling back to {} instead.", - dirConfigKey, HddsConfigKeys.OZONE_METADATA_DIRS); - return getOzoneMetaDirPath(conf); - } - - /** - * Given a source directory, create a tar.gz file from it. - * - * @param sourcePath the path to the directory to be archived. 
- * @return tar.gz file - * @throws IOException - */ - public static File createTarFile(Path sourcePath) throws IOException { - TarArchiveOutputStream tarOs = null; - try { - String sourceDir = sourcePath.toString(); - String fileName = sourceDir.concat(".tar.gz"); - FileOutputStream fileOutputStream = new FileOutputStream(fileName); - GZIPOutputStream gzipOutputStream = - new GZIPOutputStream(new BufferedOutputStream(fileOutputStream)); - tarOs = new TarArchiveOutputStream(gzipOutputStream); - File folder = new File(sourceDir); - File[] filesInDir = folder.listFiles(); - if (filesInDir != null) { - for (File file : filesInDir) { - addFilesToArchive(file.getName(), file, tarOs); - } - } - return new File(fileName); - } finally { - try { - org.apache.hadoop.io.IOUtils.closeStream(tarOs); - } catch (Exception e) { - LOG.error("Exception encountered when closing " + - "TAR file output stream: " + e); - } - } - } - - private static void addFilesToArchive(String source, File file, - TarArchiveOutputStream - tarFileOutputStream) - throws IOException { - tarFileOutputStream.putArchiveEntry(new TarArchiveEntry(file, source)); - if (file.isFile()) { - FileInputStream fileInputStream = new FileInputStream(file); - BufferedInputStream bufferedInputStream = - new BufferedInputStream(fileInputStream); - org.apache.commons.compress.utils.IOUtils.copy(bufferedInputStream, - tarFileOutputStream); - tarFileOutputStream.closeArchiveEntry(); - fileInputStream.close(); - } else if (file.isDirectory()) { - tarFileOutputStream.closeArchiveEntry(); - File[] filesInDir = file.listFiles(); - if (filesInDir != null) { - for (File cFile : filesInDir) { - addFilesToArchive(cFile.getAbsolutePath(), cFile, - tarFileOutputStream); - } - } - } - } - - /** - * Untar DB snapshot tar file to recon OM snapshot directory. - * - * @param tarFile source tar file - * @param destPath destination path to untar to. - * @throws IOException ioException - */ - public void untarCheckpointFile(File tarFile, Path destPath) - throws IOException { - - FileInputStream fileInputStream = null; - BufferedInputStream buffIn = null; - GzipCompressorInputStream gzIn = null; - try { - fileInputStream = new FileInputStream(tarFile); - buffIn = new BufferedInputStream(fileInputStream); - gzIn = new GzipCompressorInputStream(buffIn); - - //Create Destination directory if it does not exist. - if (!destPath.toFile().exists()) { - boolean success = destPath.toFile().mkdirs(); - if (!success) { - throw new IOException("Unable to create Destination directory."); - } - } - - try (TarArchiveInputStream tarInStream = - new TarArchiveInputStream(gzIn)) { - TarArchiveEntry entry = null; - - while ((entry = (TarArchiveEntry) tarInStream.getNextEntry()) != null) { - //If directory, create a directory. - if (entry.isDirectory()) { - File f = new File(Paths.get(destPath.toString(), - entry.getName()).toString()); - boolean success = f.mkdirs(); - if (!success) { - LOG.error("Unable to create directory found in tar."); - } - } else { - //Write contents of file in archive to a new file. 
- int count; - byte[] data = new byte[WRITE_BUFFER]; - - FileOutputStream fos = new FileOutputStream( - Paths.get(destPath.toString(), entry.getName()).toString()); - try (BufferedOutputStream dest = - new BufferedOutputStream(fos, WRITE_BUFFER)) { - while ((count = - tarInStream.read(data, 0, WRITE_BUFFER)) != -1) { - dest.write(data, 0, count); - } - } - } - } - } - } finally { - IOUtils.closeStream(gzIn); - IOUtils.closeStream(buffIn); - IOUtils.closeStream(fileInputStream); - } - } - - /** - * Make HTTP GET call on the URL and return inputstream to the response. - * @param httpClient HttpClient to use. - * @param url url to call - * @return Inputstream to the response of the HTTP call. - * @throws IOException While reading the response. - */ - public InputStream makeHttpCall(CloseableHttpClient httpClient, - String url) - throws IOException { - - HttpGet httpGet = new HttpGet(url); - HttpResponse response = httpClient.execute(httpGet); - int errorCode = response.getStatusLine().getStatusCode(); - HttpEntity entity = response.getEntity(); - - if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) { - return entity.getContent(); - } - - if (entity != null) { - throw new IOException("Unexpected exception when trying to reach Ozone " + - "Manager, " + EntityUtils.toString(entity)); - } else { - throw new IOException("Unexpected null in http payload," + - " while processing request"); - } - } - - /** - * Load last known DB in Recon. - * @param reconDbDir - * @param fileNamePrefix - * @return - */ - public File getLastKnownDB(File reconDbDir, String fileNamePrefix) { - String lastKnownSnapshotFileName = null; - long lastKnonwnSnapshotTs = Long.MIN_VALUE; - if (reconDbDir != null) { - File[] snapshotFiles = reconDbDir.listFiles((dir, name) -> - name.startsWith(fileNamePrefix)); - if (snapshotFiles != null) { - for (File snapshotFile : snapshotFiles) { - String fileName = snapshotFile.getName(); - try { - String[] fileNameSplits = fileName.split("_"); - if (fileNameSplits.length <= 1) { - continue; - } - long snapshotTimestamp = Long.parseLong(fileNameSplits[1]); - if (lastKnonwnSnapshotTs < snapshotTimestamp) { - lastKnonwnSnapshotTs = snapshotTimestamp; - lastKnownSnapshotFileName = fileName; - } - } catch (NumberFormatException nfEx) { - LOG.warn("Unknown file found in Recon DB dir : {}", fileName); - } - } - } - } - return lastKnownSnapshotFileName == null ? null : - new File(reconDbDir.getPath(), lastKnownSnapshotFileName); - } - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java deleted file mode 100644 index 4a7abc36eb6..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerKeyService.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
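The createTarFile and untarCheckpointFile helpers above are the pair Recon uses to package an OM checkpoint directory as a tar.gz and to unpack it again on the Recon side. A usage sketch against the removed ReconUtils; the paths and class name are placeholders, and it compiles only with the deleted recon module on the classpath.

    import java.io.File;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import org.apache.hadoop.ozone.recon.ReconUtils;

    /** Sketch only; all paths are placeholders. */
    public final class CheckpointTarSketch {
      public static void main(String[] args) throws Exception {
        Path snapshotDir = Paths.get("/tmp/om.snapshot.db_1571900000000");   // hypothetical checkpoint dir
        File tarball = ReconUtils.createTarFile(snapshotDir);                // writes <dir>.tar.gz next to the source
        new ReconUtils().untarCheckpointFile(                                // instance method, unlike createTarFile
            tarball, Paths.get("/tmp/om-snapshot-restored"));                // destination is created if missing
      }
    }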

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api; - -import java.io.IOException; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.stream.Collectors; - -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - -import javax.inject.Inject; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.api.types.ContainersResponse; -import org.apache.hadoop.ozone.recon.api.types.KeyMetadata; -import org.apache.hadoop.ozone.recon.api.types.KeyMetadata.ContainerBlockMetadata; -import org.apache.hadoop.ozone.recon.api.types.KeysResponse; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.recon.ReconConstants.FETCH_ALL; -import static org.apache.hadoop.ozone.recon.ReconConstants.PREV_CONTAINER_ID_DEFAULT_VALUE; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; - - -/** - * Endpoint for querying keys that belong to a container. - */ -@Path("/containers") -@Produces(MediaType.APPLICATION_JSON) -public class ContainerKeyService { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerKeyService.class); - - @Inject - private ContainerDBServiceProvider containerDBServiceProvider; - - @Inject - private ReconOMMetadataManager omMetadataManager; - - /** - * Return @{@link org.apache.hadoop.ozone.recon.api.types.ContainerMetadata} - * for the containers starting from the given "prev-key" query param for the - * given "limit". The given "prev-key" is skipped from the results returned. - * - * @param limit max no. of containers to get. - * @param prevKey the containerID after which results are returned. 
- * @return {@link Response} - */ - @GET - public Response getContainers( - @DefaultValue(FETCH_ALL) @QueryParam(RECON_QUERY_LIMIT) int limit, - @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE) - @QueryParam(RECON_QUERY_PREVKEY) long prevKey) { - Map containersMap; - long containersCount; - try { - containersMap = containerDBServiceProvider.getContainers(limit, prevKey); - containersCount = containerDBServiceProvider.getCountForContainers(); - } catch (IOException ioEx) { - throw new WebApplicationException(ioEx, - Response.Status.INTERNAL_SERVER_ERROR); - } - ContainersResponse containersResponse = - new ContainersResponse(containersCount, containersMap.values()); - return Response.ok(containersResponse).build(); - } - - /** - * Return @{@link org.apache.hadoop.ozone.recon.api.types.KeyMetadata} for - * all keys that belong to the container identified by the id param - * starting from the given "prev-key" query param for the given "limit". - * The given prevKeyPrefix is skipped from the results returned. - * - * @param containerID the given containerID. - * @param limit max no. of keys to get. - * @param prevKeyPrefix the key prefix after which results are returned. - * @return {@link Response} - */ - @GET - @Path("/{id}/keys") - public Response getKeysForContainer( - @PathParam("id") Long containerID, - @DefaultValue(FETCH_ALL) @QueryParam(RECON_QUERY_LIMIT) int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKeyPrefix) { - Map keyMetadataMap = new LinkedHashMap<>(); - long totalCount; - try { - Map containerKeyPrefixMap = - containerDBServiceProvider.getKeyPrefixesForContainer(containerID, - prevKeyPrefix); - - // Get set of Container-Key mappings for given containerId. - for (ContainerKeyPrefix containerKeyPrefix : containerKeyPrefixMap - .keySet()) { - - // Directly calling get() on the Key table instead of iterating since - // only full keys are supported now. When we change to using a prefix - // of the key, this needs to change to prefix seek (TODO). - OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get( - containerKeyPrefix.getKeyPrefix()); - if (null == omKeyInfo) { - continue; - } - - // Filter keys by version. 
- List matchedKeys = omKeyInfo - .getKeyLocationVersions() - .stream() - .filter(k -> (k.getVersion() == containerKeyPrefix.getKeyVersion())) - .collect(Collectors.toList()); - - List blockIds = new ArrayList<>(); - for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : matchedKeys) { - List omKeyLocationInfos = omKeyLocationInfoGroup - .getLocationList() - .stream() - .filter(c -> c.getContainerID() == containerID) - .collect(Collectors.toList()); - for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) { - blockIds.add(new ContainerBlockMetadata(omKeyLocationInfo - .getContainerID(), omKeyLocationInfo.getLocalID())); - } - } - - String ozoneKey = omMetadataManager.getOzoneKey( - omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), - omKeyInfo.getKeyName()); - if (keyMetadataMap.containsKey(ozoneKey)) { - keyMetadataMap.get(ozoneKey).getVersions() - .add(containerKeyPrefix.getKeyVersion()); - - keyMetadataMap.get(ozoneKey).getBlockIds().putAll( - Collections.singletonMap(containerKeyPrefix.getKeyVersion(), - blockIds)); - } else { - // break the for loop if limit has been reached - if (keyMetadataMap.size() == limit) { - break; - } - KeyMetadata keyMetadata = new KeyMetadata(); - keyMetadata.setBucket(omKeyInfo.getBucketName()); - keyMetadata.setVolume(omKeyInfo.getVolumeName()); - keyMetadata.setKey(omKeyInfo.getKeyName()); - keyMetadata.setCreationTime( - Instant.ofEpochMilli(omKeyInfo.getCreationTime())); - keyMetadata.setModificationTime( - Instant.ofEpochMilli(omKeyInfo.getModificationTime())); - keyMetadata.setDataSize(omKeyInfo.getDataSize()); - keyMetadata.setVersions(new ArrayList() {{ - add(containerKeyPrefix.getKeyVersion()); - }}); - keyMetadataMap.put(ozoneKey, keyMetadata); - keyMetadata.setBlockIds(new TreeMap>() {{ - put(containerKeyPrefix.getKeyVersion(), blockIds); - }}); - } - } - - totalCount = - containerDBServiceProvider.getKeyCountForContainer(containerID); - } catch (IOException ioEx) { - throw new WebApplicationException(ioEx, - Response.Status.INTERNAL_SERVER_ERROR); - } - KeysResponse keysResponse = - new KeysResponse(totalCount, keyMetadataMap.values()); - return Response.ok(keysResponse).build(); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java deleted file mode 100644 index 0bc33f3aca5..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/UtilizationService.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
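ContainerKeyService above is served under the /api prefix registered in ReconServer, on the default HTTP port 9888 from ReconServerConfigKeys, with the limit and prevKey query parameters defined in ReconConstants. A sketch of paging through containers from plain Java; the host name and page size are placeholders.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    /** Sketch only; "recon-host" is a placeholder. Keys of one container: /api/containers/{id}/keys. */
    public final class ContainerEndpointSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://recon-host:9888/api/containers?limit=10&prevKey=0");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          // Response shape per ContainersResponse: {"data":{"totalCount":...,"containers":[...]}}
          reader.lines().forEach(System.out::println);
        } finally {
          conn.disconnect();
        }
      }
    }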

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api; - -import javax.inject.Inject; -import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; -import org.jooq.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.Produces; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import java.util.List; - -/** - * Endpoint for querying the counts of a certain file Size. - */ -@Path("/utilization") -@Produces(MediaType.APPLICATION_JSON) -public class UtilizationService { - private static final Logger LOG = - LoggerFactory.getLogger(UtilizationService.class); - - private FileCountBySizeDao fileCountBySizeDao; - - @Inject - private Configuration sqlConfiguration; - - - FileCountBySizeDao getDao() { - if (fileCountBySizeDao == null) { - fileCountBySizeDao = new FileCountBySizeDao(sqlConfiguration); - } - return fileCountBySizeDao; - } - /** - * Return the file counts from Recon DB. - * @return {@link Response} - */ - @GET - @Path("/fileCount") - public Response getFileCounts() { - fileCountBySizeDao = getDao(); - List resultSet = fileCountBySizeDao.findAll(); - return Response.ok(resultSet).build(); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java deleted file mode 100644 index 894e9d5d9f2..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * The classes in this package define api endpoints for Recon. - */ - -package org.apache.hadoop.ozone.recon.api; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java deleted file mode 100644 index be9ecbd1b37..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api.types; - -/** - * Class to encapsulate the Key information needed for the Recon container DB. - * Currently, it is the containerId and the whole key + key version. - */ -public class ContainerKeyPrefix { - - private long containerId; - private String keyPrefix; - private long keyVersion = -1; - - public ContainerKeyPrefix(long containerId, String keyPrefix) { - this.containerId = containerId; - this.keyPrefix = keyPrefix; - } - - public ContainerKeyPrefix(long containerId, String keyPrefix, - long keyVersion) { - this.containerId = containerId; - this.keyPrefix = keyPrefix; - this.keyVersion = keyVersion; - } - - public ContainerKeyPrefix(long containerId) { - this.containerId = containerId; - } - - public long getContainerId() { - return containerId; - } - - public void setContainerId(long containerId) { - this.containerId = containerId; - } - - public String getKeyPrefix() { - return keyPrefix; - } - - public void setKeyPrefix(String keyPrefix) { - this.keyPrefix = keyPrefix; - } - - public long getKeyVersion() { - return keyVersion; - } - - public void setKeyVersion(long keyVersion) { - this.keyVersion = keyVersion; - } - - @Override - public boolean equals(Object o) { - - if (!(o instanceof ContainerKeyPrefix)) { - return false; - } - ContainerKeyPrefix that = (ContainerKeyPrefix) o; - return (this.containerId == that.containerId) && - this.keyPrefix.equals(that.keyPrefix) && - this.keyVersion == that.keyVersion; - } - - @Override - public int hashCode() { - return Long.valueOf(containerId).hashCode() + 13 * keyPrefix.hashCode() + - 17 * Long.valueOf(keyVersion).hashCode(); - } - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java deleted file mode 100644 index 381f2ffc3ba..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerMetadata.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api.types; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; - -/** - * Metadata object that represents a Container. - */ -@XmlAccessorType(XmlAccessType.FIELD) -public class ContainerMetadata { - - @XmlElement(name = "ContainerID") - private long containerID; - - @XmlElement(name = "NumberOfKeys") - private long numberOfKeys; - - public ContainerMetadata(long containerID) { - this.containerID = containerID; - } - - public long getContainerID() { - return containerID; - } - - public void setContainerID(long containerID) { - this.containerID = containerID; - } - - public long getNumberOfKeys() { - return numberOfKeys; - } - - public void setNumberOfKeys(long numberOfKeys) { - this.numberOfKeys = numberOfKeys; - } - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java deleted file mode 100644 index 2bad498d4cb..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainersResponse.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api.types; - -import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; - -/** - * Class that represents the API Response structure of Containers. - */ -public class ContainersResponse { - /** - * Contains a map with total count of containers and list of containers. 
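
For reference, a minimal sketch (not part of this patch) of how the ContainerKeyPrefix and ContainerMetadata types removed above fit together: the (containerId, keyPrefix, keyVersion) triple is the value-equal key used in Recon's container DB, and ContainerMetadata carries the per-container key count. The identifiers and counts below are illustrative assumptions.

import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;

public final class ContainerTypesExample {

  private ContainerTypesExample() {
  }

  public static void main(String[] args) {
    // Two prefixes with the same (containerId, keyPrefix, keyVersion) are
    // equal and hash identically, which is what makes the type usable as a
    // container DB key.
    ContainerKeyPrefix prefix =
        new ContainerKeyPrefix(1L, "/volume1/bucket1/key1", 0L);
    ContainerKeyPrefix samePrefix =
        new ContainerKeyPrefix(1L, "/volume1/bucket1/key1", 0L);
    System.out.println(prefix.equals(samePrefix));                  // true
    System.out.println(prefix.hashCode() == samePrefix.hashCode()); // true

    // ContainerMetadata aggregates the key count reported for one container.
    ContainerMetadata metadata = new ContainerMetadata(1L);
    metadata.setNumberOfKeys(42L);
    System.out.println(metadata.getContainerID() + " -> "
        + metadata.getNumberOfKeys() + " keys");
  }
}
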
- */ - @JsonProperty("data") - private ContainersResponseData containersResponseData; - - public ContainersResponse() { - this(0, new ArrayList<>()); - } - - public ContainersResponse(long totalCount, - Collection containers) { - this.containersResponseData = - new ContainersResponseData(totalCount, containers); - } - - public String toJsonString() { - try { - return JsonUtils.toJsonString(this); - } catch (IOException ignored) { - return null; - } - } - - public ContainersResponseData getContainersResponseData() { - return containersResponseData; - } - - public void setContainersResponseData(ContainersResponseData - containersResponseData) { - this.containersResponseData = containersResponseData; - } - - /** - * Class that encapsulates the data presented in Containers API Response. - */ - public static class ContainersResponseData { - /** - * Total count of the containers. - */ - @JsonProperty("totalCount") - private long totalCount; - - /** - * An array of containers. - */ - @JsonProperty("containers") - private Collection containers; - - ContainersResponseData(long totalCount, - Collection containers) { - this.totalCount = totalCount; - this.containers = containers; - } - - public long getTotalCount() { - return totalCount; - } - - public Collection getContainers() { - return containers; - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java deleted file mode 100644 index 7bcdbe19f5c..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/IsoDateAdapter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api.types; - -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; - -import javax.xml.bind.annotation.adapters.XmlAdapter; - -/** - * A converter to convert Instant to standard date string. 
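
The removed IsoDateAdapter simply renders an Instant with the UTC formatter shown above. A self-contained sketch of that formatting behaviour, using only the JDK (the sample timestamp is an illustrative assumption):

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public final class IsoDateFormatExample {

  private IsoDateFormatExample() {
  }

  public static void main(String[] args) {
    // Same pattern and zone the adapter used: ISO-8601 style, millisecond
    // precision, rendered in UTC so the offset prints as 'Z'.
    DateTimeFormatter formatter =
        DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX")
            .withZone(ZoneOffset.UTC);
    Instant sample = Instant.parse("2019-10-24T14:43:44.123Z");
    System.out.println(formatter.format(sample)); // 2019-10-24T14:43:44.123Z
  }
}
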
- */ -public class IsoDateAdapter extends XmlAdapter { - - private DateTimeFormatter iso8861Formatter; - - public IsoDateAdapter() { - iso8861Formatter = - DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX") - .withZone(ZoneOffset.UTC); - } - - @Override - public Instant unmarshal(String v) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public String marshal(Instant v) throws Exception { - return iso8861Formatter.format(v); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java deleted file mode 100644 index 3168263c49f..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyMetadata.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api.types; - -import java.time.Instant; -import java.util.List; -import java.util.Map; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; - -/** - * Metadata object represents one key in the object store. - */ -@XmlRootElement (name = "KeyMetadata") -@XmlAccessorType(XmlAccessType.FIELD) -public class KeyMetadata { - - @XmlElement(name = "Volume") - private String volume; - - @XmlElement(name = "Bucket") - private String bucket; - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "DataSize") - private long dataSize; - - @XmlElement(name = "Versions") - private List versions; - - @XmlElement(name = "Blocks") - private Map> blockIds; - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "CreationTime") - private Instant creationTime; - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "ModificationTime") - private Instant modificationTime; - - public String getVolume() { - return volume; - } - - public void setVolume(String volume) { - this.volume = volume; - } - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public long getDataSize() { - return dataSize; - } - - public void setDataSize(long dataSize) { - this.dataSize = dataSize; - } - - public Instant getCreationTime() { - return creationTime; - } - - public void setCreationTime(Instant creationTime) { - this.creationTime = creationTime; - } - - public Instant getModificationTime() { - return modificationTime; - } - - public void setModificationTime(Instant modificationTime) { - this.modificationTime = modificationTime; - } - - public List getVersions() { - return versions; - } - - public void setVersions(List versions) { - this.versions = versions; - } - - public Map> getBlockIds() { - return blockIds; - } - - public void setBlockIds(Map> blockIds) { - this.blockIds = blockIds; - } - - /** - * Class to hold ContainerID and BlockID. - */ - public static class ContainerBlockMetadata { - private long containerID; - private long localID; - - public ContainerBlockMetadata(long containerID, long localID) { - this.containerID = containerID; - this.localID = localID; - } - - public long getContainerID() { - return containerID; - } - - public long getLocalID() { - return localID; - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java deleted file mode 100644 index f2704c5b8a2..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeysResponse.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.api.types; - -import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.hadoop.ozone.web.utils.JsonUtils; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; - -/** - * Class that represents the API Response structure of Keys within a container. - */ -public class KeysResponse { - /** - * Contains a map with total count of keys inside the given container and a - * list of keys with metadata. - */ - @JsonProperty("data") - private KeysResponseData keysResponseData; - - public KeysResponse() { - this(0, new ArrayList<>()); - } - - public KeysResponse(long totalCount, - Collection keys) { - this.keysResponseData = - new KeysResponseData(totalCount, keys); - } - - public String toJsonString() { - try { - return JsonUtils.toJsonString(this); - } catch (IOException ignored) { - return null; - } - } - - public KeysResponseData getKeysResponseData() { - return keysResponseData; - } - - public void setKeysResponseData(KeysResponseData keysResponseData) { - this.keysResponseData = keysResponseData; - } - - /** - * Class that encapsulates the data presented in Keys API Response. - */ - public static class KeysResponseData { - /** - * Total count of the keys. - */ - @JsonProperty("totalCount") - private long totalCount; - - /** - * An array of keys. - */ - @JsonProperty("keys") - private Collection keys; - - KeysResponseData(long totalCount, Collection keys) { - this.totalCount = totalCount; - this.keys = keys; - } - - public long getTotalCount() { - return totalCount; - } - - public Collection getKeys() { - return keys; - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java deleted file mode 100644 index cc2a7abfe57..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Common type definitions for Recon API. - */ -package org.apache.hadoop.ozone.recon.api.types; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java deleted file mode 100644 index db27ffcecc2..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains application entry point and related classes for Recon. - */ -package org.apache.hadoop.ozone.recon; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java deleted file mode 100644 index ec6995a6e10..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DataSourceConfiguration.java +++ /dev/null @@ -1,86 +0,0 @@ -package org.apache.hadoop.ozone.recon.persistence; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Common configuration needed to instantiate {@link javax.sql.DataSource}. - */ -public interface DataSourceConfiguration { - /** - * Get database driver class name available on the classpath. - */ - String getDriverClass(); - - /** - * Get Jdbc Url for the database server. - */ - String getJdbcUrl(); - - /** - * Get username for the db. - */ - String getUserName(); - - /** - * Get password for the db. - */ - String getPassword(); - - /** - * Should autocommit be turned on for the datasource. - */ - boolean setAutoCommit(); - - /** - * Sets the maximum time (in milliseconds) to wait before a call to - * getConnection is timed out. - */ - long getConnectionTimeout(); - - /** - * Get a string representation of {@link org.jooq.SQLDialect}. - */ - String getSqlDialect(); - - /** - * In a production database this should be set to something like 10. - * SQLite does not allow multiple connections, hence this defaults to 1. - */ - Integer getMaxActiveConnections(); - - /** - * Sets the maximum connection age (in seconds). - */ - Integer getMaxConnectionAge(); - - /** - * Sets the maximum idle connection age (in seconds). - */ - Integer getMaxIdleConnectionAge(); - - /** - * Statement specific to database, usually SELECT 1. - */ - String getConnectionTestStatement(); - - /** - * How often to test idle connections for being active (in seconds). - */ - Integer getIdleConnectionTestPeriod(); -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java deleted file mode 100644 index 7b28d004886..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java +++ /dev/null @@ -1,74 +0,0 @@ -package org.apache.hadoop.ozone.recon.persistence; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import javax.sql.DataSource; - -import org.apache.commons.lang3.StringUtils; -import org.sqlite.SQLiteDataSource; - -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.jolbox.bonecp.BoneCPDataSource; - -/** - * Provide a {@link javax.sql.DataSource} for the application. - */ -public class DefaultDataSourceProvider implements Provider { - - @Inject - private DataSourceConfiguration configuration; - - /** - * Create a pooled datasource for the application. - * - * Default sqlite database does not work with a connection pool, actually - * most embedded databases do not, hence returning native implementation for - * default db. - */ - @Override - public DataSource get() { - if (StringUtils.contains(configuration.getJdbcUrl(), "sqlite")) { - SQLiteDataSource ds = new SQLiteDataSource(); - ds.setUrl(configuration.getJdbcUrl()); - return ds; - } - - BoneCPDataSource cpDataSource = new BoneCPDataSource(); - - cpDataSource.setDriverClass(configuration.getDriverClass()); - cpDataSource.setJdbcUrl(configuration.getJdbcUrl()); - cpDataSource.setUsername(configuration.getUserName()); - cpDataSource.setPassword(configuration.getPassword()); - cpDataSource.setDefaultAutoCommit(configuration.setAutoCommit()); - cpDataSource.setConnectionTimeoutInMs(configuration.getConnectionTimeout()); - cpDataSource.setMaxConnectionsPerPartition( - configuration.getMaxActiveConnections()); - cpDataSource.setMaxConnectionAgeInSeconds( - configuration.getMaxConnectionAge()); - cpDataSource.setIdleMaxAgeInSeconds( - configuration.getMaxIdleConnectionAge()); - cpDataSource.setIdleConnectionTestPeriodInSeconds( - configuration.getIdleConnectionTestPeriod()); - cpDataSource.setConnectionTestStatement( - configuration.getConnectionTestStatement()); - - return cpDataSource; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java deleted file mode 100644 index 2ba4cf7f939..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/JooqPersistenceModule.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.persistence; - -import static com.google.inject.matcher.Matchers.annotatedWith; -import static com.google.inject.matcher.Matchers.any; - -import java.sql.Connection; -import javax.sql.DataSource; - -import org.jooq.Configuration; -import org.jooq.ConnectionProvider; -import org.jooq.SQLDialect; -import org.jooq.impl.DefaultConfiguration; -import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.datasource.DataSourceTransactionManager; -import org.springframework.jdbc.datasource.DataSourceUtils; -import org.springframework.jdbc.datasource.TransactionAwareDataSourceProxy; -import org.springframework.transaction.annotation.Transactional; - -import com.google.inject.AbstractModule; -import com.google.inject.Provider; -import com.google.inject.Provides; -import com.google.inject.Singleton; - -/** - * Persistence module that provides binding for {@link DataSource} and - * a MethodInterceptor for nested transactions support. - */ -public class JooqPersistenceModule extends AbstractModule { - - private Provider configurationProvider; - public static final SQLDialect DEFAULT_DIALECT = SQLDialect.SQLITE; - - public JooqPersistenceModule( - Provider configurationProvider) { - this.configurationProvider = configurationProvider; - } - - @Override - protected void configure() { - bind(DataSource.class).toProvider(DefaultDataSourceProvider.class) - .in(Singleton.class); - - TransactionalMethodInterceptor interceptor = - new TransactionalMethodInterceptor( - getProvider(DataSourceTransactionManager.class)); - - bindInterceptor(annotatedWith(Transactional.class), any(), interceptor); - bindInterceptor(any(), annotatedWith(Transactional.class), interceptor); - } - - @Provides - @Singleton - Configuration getConfiguration(DefaultDataSourceProvider provider) { - DataSource dataSource = provider.get(); - - return new DefaultConfiguration() - .set(dataSource) - .set(new SpringConnectionProvider(dataSource)) - .set(SQLDialect.valueOf(configurationProvider.get().getSqlDialect())); - } - - @Provides - @Singleton - DataSourceTransactionManager provideDataSourceTransactionManager( - DataSource dataSource) { - return new DataSourceTransactionManager( - new TransactionAwareDataSourceProxy(dataSource)); - } - - /** - * This connection provider uses Spring to extract the - * {@link TransactionAwareDataSourceProxy} from our BoneCP pooled connection - * {@link DataSource}. 
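
As a companion to the DataSourceConfiguration contract and DefaultDataSourceProvider removed above, a sketch of a minimal SQLite-oriented configuration; the file path, timeouts and pool settings are illustrative assumptions, and only the JDBC URL is consulted by the provider's SQLite branch.

import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration;

public class SqliteDataSourceConfigurationExample
    implements DataSourceConfiguration {

  @Override public String getDriverClass() { return "org.sqlite.JDBC"; }
  // A "sqlite" JDBC URL makes DefaultDataSourceProvider skip the BoneCP pool.
  @Override public String getJdbcUrl() {
    return "jdbc:sqlite:/tmp/recon/ozone_recon_sqlite.db";
  }
  @Override public String getUserName() { return ""; }
  @Override public String getPassword() { return ""; }
  @Override public boolean setAutoCommit() { return true; }
  @Override public long getConnectionTimeout() { return 30000L; }
  // Must name a org.jooq.SQLDialect constant; SQLITE is the module default.
  @Override public String getSqlDialect() { return "SQLITE"; }
  // SQLite does not allow multiple writers, so the pool size stays at 1.
  @Override public Integer getMaxActiveConnections() { return 1; }
  @Override public Integer getMaxConnectionAge() { return 120; }
  @Override public Integer getMaxIdleConnectionAge() { return 120; }
  @Override public String getConnectionTestStatement() { return "SELECT 1"; }
  @Override public Integer getIdleConnectionTestPeriod() { return 30; }
}
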
- */ - static class SpringConnectionProvider implements ConnectionProvider { - - private final DataSource dataSource; - - SpringConnectionProvider(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Override - public Connection acquire() throws DataAccessException { - return DataSourceUtils.getConnection(dataSource); - } - - @Override - public void release(Connection connection) throws DataAccessException { - DataSourceUtils.releaseConnection(connection, dataSource); - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java deleted file mode 100644 index 4479ddd979a..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/TransactionalMethodInterceptor.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.persistence; - -import org.aopalliance.intercept.MethodInterceptor; -import org.aopalliance.intercept.MethodInvocation; -import org.springframework.jdbc.datasource.DataSourceTransactionManager; -import org.springframework.transaction.TransactionStatus; -import org.springframework.transaction.UnexpectedRollbackException; -import org.springframework.transaction.support.DefaultTransactionDefinition; - -import com.google.inject.Provider; - -/** - * A {@link MethodInterceptor} that implements nested transactions. - *

- * Only the outermost transactional method will commit() or - * rollback() the contextual transaction. This can be verified - * through {@link TransactionStatus#isNewTransaction()}, which returns - * true only for the outermost transactional method call. - *

- */ -public class TransactionalMethodInterceptor implements MethodInterceptor { - - private Provider transactionManagerProvider; - - TransactionalMethodInterceptor( - Provider transactionManagerProvider) { - this.transactionManagerProvider = transactionManagerProvider; - } - - @Override - public Object invoke(MethodInvocation invocation) throws Throwable { - DataSourceTransactionManager transactionManager = - transactionManagerProvider.get(); - - DefaultTransactionDefinition transactionDefinition = - new DefaultTransactionDefinition(); - TransactionStatus transaction = transactionManager.getTransaction( - transactionDefinition); - - try { - Object result = invocation.proceed(); - - try { - if (transaction.isNewTransaction()) { - transactionManager.commit(transaction); - } - } catch (UnexpectedRollbackException ignore) { - } - - return result; - } catch (Exception e) { - if (transaction.isNewTransaction()) { - transactionManager.rollback(transaction); - } - - throw e; - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java deleted file mode 100644 index 0ba0fa47ca4..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package defines the persistence interfaces for Recon SQL DB. - */ -package org.apache.hadoop.ozone.recon.persistence; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java deleted file mode 100644 index fcfcaa5ec1a..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.recovery; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.ozone.om.OMMetadataManager; - -/** - * Interface for the OM Metadata Manager + DB store maintained by - * Recon. - */ -public interface ReconOMMetadataManager extends OMMetadataManager { - - /** - * Refresh the DB instance to point to a new location. Get rid of the old - * DB instance. - * @param dbLocation New location of the OM Snapshot DB. - */ - void updateOmDB(File dbLocation) throws IOException; -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java deleted file mode 100644 index 3d55c999c90..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.recovery; - -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; - -import java.io.File; -import java.io.IOException; - -import javax.inject.Inject; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Recon's implementation of the OM Metadata manager. By extending and - * relying on the OmMetadataManagerImpl, we can make sure all changes made to - * schema in OM will be automatically picked up by Recon. - */ -public class ReconOmMetadataManagerImpl extends OmMetadataManagerImpl - implements ReconOMMetadataManager { - - private static final Logger LOG = - LoggerFactory.getLogger(ReconOmMetadataManagerImpl.class); - - private OzoneConfiguration ozoneConfiguration; - private ReconUtils reconUtils; - - @Inject - public ReconOmMetadataManagerImpl(OzoneConfiguration configuration, - ReconUtils reconUtils) { - this.reconUtils = reconUtils; - this.ozoneConfiguration = configuration; - } - - @Override - public void start(OzoneConfiguration configuration) throws IOException { - LOG.info("Starting ReconOMMetadataManagerImpl"); - File reconDbDir = - reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR); - File lastKnownOMSnapshot = - reconUtils.getLastKnownDB(reconDbDir, RECON_OM_SNAPSHOT_DB); - if (lastKnownOMSnapshot != null) { - LOG.info("Last known snapshot for OM : {}", - lastKnownOMSnapshot.getAbsolutePath()); - initializeNewRdbStore(lastKnownOMSnapshot); - } - } - - /** - * Replace existing DB instance with new one. - * - * @param dbFile new DB file location. 
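
A small sketch of how a caller was expected to use the ReconOMMetadataManager contract removed above once a fresh OM snapshot has been obtained; the snapshot directory is an illustrative assumption, and the implementation handles deleting the previous DB location.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;

public final class OmSnapshotRefreshExample {

  private OmSnapshotRefreshExample() {
  }

  static void refresh(ReconOMMetadataManager omMetadataManager)
      throws IOException {
    // Point Recon's local OM metadata copy at the newly downloaded snapshot;
    // the old snapshot directory is cleaned up by the implementation.
    File newSnapshotDir = new File("/tmp/recon/om.snapshot.db_1571928224");
    omMetadataManager.updateOmDB(newSnapshotDir);
  }
}
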
- */ - private void initializeNewRdbStore(File dbFile) throws IOException { - try { - DBStoreBuilder dbStoreBuilder = - DBStoreBuilder.newBuilder(ozoneConfiguration) - .setName(dbFile.getName()) - .setPath(dbFile.toPath().getParent()); - addOMTablesAndCodecs(dbStoreBuilder); - DBStore newStore = dbStoreBuilder.build(); - setStore(newStore); - LOG.info("Created OM DB snapshot at {}.", - dbFile.getAbsolutePath()); - } catch (IOException ioEx) { - LOG.error("Unable to initialize Recon OM DB snapshot store.", - ioEx); - } - if (getStore() != null) { - initializeOmTables(); - } - } - - @Override - public void updateOmDB(File newDbLocation) throws IOException { - if (getStore() != null) { - File oldDBLocation = getStore().getDbLocation(); - if (oldDBLocation.exists()) { - LOG.info("Cleaning up old OM snapshot db at {}.", - oldDBLocation.getAbsolutePath()); - FileUtils.deleteDirectory(oldDBLocation); - } - } - initializeNewRdbStore(newDbLocation); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java deleted file mode 100644 index 5c00ee9ef4b..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * The classes in this package handle OM snapshot recovery and checkpoints. - */ -package org.apache.hadoop.ozone.recon.recovery; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java deleted file mode 100644 index 3da35cce9fb..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi; - -import java.io.IOException; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.hdds.utils.db.TableIterator; - -/** - * The Recon Container DB Service interface. - */ -@InterfaceStability.Unstable -public interface ContainerDBServiceProvider { - - /** - * Create new container DB and bulk Store the container to Key prefix - * mapping. - * @param containerKeyPrefixCounts Map of containerId, key-prefix tuple to - * key count. - */ - void initNewContainerDB(Map - containerKeyPrefixCounts) - throws IOException; - - /** - * Store the container to Key prefix mapping into the Recon Container DB. - * - * @param containerKeyPrefix the containerId, key-prefix tuple. - * @param count Count of Keys with that prefix. - */ - void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, - Integer count) throws IOException; - - /** - * Store the containerID -> no. of keys count into the container DB store. - * - * @param containerID the containerID. - * @param count count of the keys within the given containerID. - * @throws IOException - */ - void storeContainerKeyCount(Long containerID, Long count) throws IOException; - - /** - * Store the total count of containers into the container DB store. - * - * @param count count of the containers present in the system. - */ - void storeContainerCount(Long count); - - /** - * Get the stored key prefix count for the given containerID, key prefix. - * - * @param containerKeyPrefix the containerID, key-prefix tuple. - * @return count of keys with that prefix. - */ - Integer getCountForContainerKeyPrefix( - ContainerKeyPrefix containerKeyPrefix) throws IOException; - - /** - * Get the total count of keys within the given containerID. - * - * @param containerID the given containerId. - * @return count of keys within the given containerID. - * @throws IOException - */ - long getKeyCountForContainer(Long containerID) throws IOException; - - /** - * Get if a containerID exists or not. - * - * @param containerID the given containerID. - * @return if the given ContainerID exists or not. - * @throws IOException - */ - boolean doesContainerExists(Long containerID) throws IOException; - - /** - * Get the stored key prefixes for the given containerId. - * - * @param containerId the given containerId. - * @return Map of Key prefix -> count. - */ - Map getKeyPrefixesForContainer( - long containerId) throws IOException; - - /** - * Get the stored key prefixes for the given containerId starting - * after the given keyPrefix. - * - * @param containerId the given containerId. - * @param prevKeyPrefix the key prefix to seek to and start scanning. - * @return Map of Key prefix -> count. - */ - Map getKeyPrefixesForContainer( - long containerId, String prevKeyPrefix) throws IOException; - - /** - * Get a Map of containerID, containerMetadata of Containers only for the - * given limit. If the limit is -1 or any integer <0, then return all - * the containers without any limit. - * - * @param limit the no. 
of containers to fetch. - * @param prevContainer containerID after which the results are returned. - * @return Map of containerID -> containerMetadata. - * @throws IOException - */ - Map getContainers(int limit, long prevContainer) - throws IOException; - - /** - * Delete an entry in the container DB. - * - * @param containerKeyPrefix container key prefix to be deleted. - * @throws IOException exception. - */ - void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix) - throws IOException; - - /** - * Get iterator to the entire container DB. - * @return TableIterator - */ - TableIterator getContainerTableIterator() throws IOException; - - /** - * Get the total count of containers present in the system. - * - * @return total count of containers. - * @throws IOException - */ - long getCountForContainers() throws IOException; - - /** - * Increment the total count for containers in the system by the given count. - * - * @param count no. of new containers to add to containers total count. - */ - void incrementContainerCountBy(long count); -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java deleted file mode 100644 index ce7d4143820..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/HddsDatanodeServiceProvider.java +++ /dev/null @@ -1,25 +0,0 @@ -package org.apache.hadoop.ozone.recon.spi; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Interface to access datanode endpoints. - */ -public interface HddsDatanodeServiceProvider { -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java deleted file mode 100644 index 3f57af6f5d4..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java +++ /dev/null @@ -1,43 +0,0 @@ -package org.apache.hadoop.ozone.recon.spi; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.hadoop.ozone.om.OMMetadataManager; - -/** - * Interface to access OM endpoints. - */ -public interface OzoneManagerServiceProvider { - - /** - * Start a task to sync data from OM. - */ - void start(); - - /** - * Stop the OM sync data. - */ - void stop(); - - /** - * Return instance of OM Metadata manager. - * @return OM metadata manager instance. - */ - OMMetadataManager getOMMetadataManagerInstance(); -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java deleted file mode 100644 index db052a7be57..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java +++ /dev/null @@ -1,25 +0,0 @@ -package org.apache.hadoop.ozone.recon.spi; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Interface to access SCM endpoints. - */ -public interface StorageContainerServiceProvider { -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java deleted file mode 100644 index 85edb7e94d0..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java +++ /dev/null @@ -1,402 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_COUNT_KEY; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; -import static org.jooq.impl.DSL.currentTimestamp; -import static org.jooq.impl.DSL.select; -import static org.jooq.impl.DSL.using; - -import java.io.File; -import java.io.IOException; -import java.sql.Timestamp; -import java.util.LinkedHashMap; -import java.util.Map; - -import javax.inject.Inject; -import javax.inject.Singleton; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; -import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; -import org.jooq.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Implementation of the Recon Container DB Service. - */ -@Singleton -public class ContainerDBServiceProviderImpl - implements ContainerDBServiceProvider { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerDBServiceProviderImpl.class); - - private Table containerKeyTable; - private Table containerKeyCountTable; - private GlobalStatsDao globalStatsDao; - - @Inject - private OzoneConfiguration configuration; - - @Inject - private DBStore containerDbStore; - - @Inject - private Configuration sqlConfiguration; - - @Inject - private ReconUtils reconUtils; - - @Inject - public ContainerDBServiceProviderImpl(DBStore dbStore, - Configuration sqlConfiguration) { - globalStatsDao = new GlobalStatsDao(sqlConfiguration); - initializeTables(dbStore); - } - - /** - * Initialize a new container DB instance, getting rid of the old instance - * and then storing the passed in container prefix counts into the created - * DB instance. Also, truncate or reset the SQL tables as required. - * @param containerKeyPrefixCounts Map of container key-prefix to - * number of keys with the prefix. 
- * @throws IOException - */ - @Override - public void initNewContainerDB(Map - containerKeyPrefixCounts) - throws IOException { - - File oldDBLocation = containerDbStore.getDbLocation(); - containerDbStore = ReconContainerDBProvider - .getNewDBStore(configuration, reconUtils); - LOG.info("Creating new Recon Container DB at {}", - containerDbStore.getDbLocation().getAbsolutePath()); - initializeTables(containerDbStore); - - if (oldDBLocation.exists()) { - LOG.info("Cleaning up old Recon Container DB at {}.", - oldDBLocation.getAbsolutePath()); - FileUtils.deleteDirectory(oldDBLocation); - } - - if (containerKeyPrefixCounts != null) { - for (Map.Entry entry : - containerKeyPrefixCounts.entrySet()) { - containerKeyTable.put(entry.getKey(), entry.getValue()); - } - } - - // reset total count of containers to zero - storeContainerCount(0L); - } - - /** - * Initialize the container DB tables. - * @param dbStore - */ - private void initializeTables(DBStore dbStore) { - try { - this.containerKeyTable = dbStore.getTable(CONTAINER_KEY_TABLE, - ContainerKeyPrefix.class, Integer.class); - this.containerKeyCountTable = dbStore.getTable(CONTAINER_KEY_COUNT_TABLE, - Long.class, Long.class); - } catch (IOException e) { - LOG.error("Unable to create Container Key tables." + e); - } - } - /** - * Concatenate the containerID and Key Prefix using a delimiter and store the - * count into the container DB store. - * - * @param containerKeyPrefix the containerID, key-prefix tuple. - * @param count Count of the keys matching that prefix. - * @throws IOException - */ - @Override - public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, - Integer count) - throws IOException { - containerKeyTable.put(containerKeyPrefix, count); - } - - /** - * Store the containerID -> no. of keys count into the container DB store. - * - * @param containerID the containerID. - * @param count count of the keys within the given containerID. - * @throws IOException - */ - @Override - public void storeContainerKeyCount(Long containerID, Long count) - throws IOException { - containerKeyCountTable.put(containerID, count); - } - - /** - * Get the total count of keys within the given containerID. - * - * @param containerID the given containerID. - * @return count of keys within the given containerID. - * @throws IOException - */ - @Override - public long getKeyCountForContainer(Long containerID) throws IOException { - Long keyCount = containerKeyCountTable.get(containerID); - return keyCount == null ? 0L : keyCount; - } - - /** - * Get if a containerID exists or not. - * - * @param containerID the given containerID. - * @return if the given ContainerID exists or not. - * @throws IOException - */ - @Override - public boolean doesContainerExists(Long containerID) throws IOException { - return containerKeyCountTable.get(containerID) != null; - } - - /** - * Put together the key from the passed in object and get the count from - * the container DB store. - * - * @param containerKeyPrefix the containerID, key-prefix tuple. - * @return count of keys matching the containerID, key-prefix. - * @throws IOException - */ - @Override - public Integer getCountForContainerKeyPrefix( - ContainerKeyPrefix containerKeyPrefix) throws IOException { - Integer count = containerKeyTable.get(containerKeyPrefix); - return count == null ? Integer.valueOf(0) : count; - } - - /** - * Get key prefixes for the given container ID. - * - * @param containerId the given containerID. - * @return Map of (Key-Prefix,Count of Keys). 
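
To make the seek-based access pattern of the removed ContainerDBServiceProvider concrete, a sketch of a typical write-then-page sequence; the generic types follow the interface javadoc (the patch text shows raw types), and the identifiers, counts and page size are illustrative assumptions.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;

public final class ContainerDbUsageExample {

  private ContainerDbUsageExample() {
  }

  static void writeAndPage(ContainerDBServiceProvider provider)
      throws IOException {
    // Record one key for container 1 and bump the per-container and global
    // container counters.
    ContainerKeyPrefix prefix =
        new ContainerKeyPrefix(1L, "/volume1/bucket1/key1", 0L);
    provider.storeContainerKeyMapping(prefix, 1);
    provider.storeContainerKeyCount(1L, 1L);
    provider.incrementContainerCountBy(1);

    // Page through containers, 10 at a time; prevContainer = 0 starts from
    // the beginning, and the returned map is keyed by containerID.
    Map<Long, ContainerMetadata> page = provider.getContainers(10, 0L);
    for (Map.Entry<Long, ContainerMetadata> entry : page.entrySet()) {
      long containerId = entry.getKey();
      // All key prefixes of this container; a non-empty prevKeyPrefix would
      // resume the scan after that prefix instead.
      Map<ContainerKeyPrefix, Integer> prefixes =
          provider.getKeyPrefixesForContainer(containerId);
      System.out.println("Container " + containerId + ": "
          + prefixes.size() + " key prefix(es), "
          + provider.getKeyCountForContainer(containerId) + " key(s)");
    }
  }
}
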
- */ - @Override - public Map getKeyPrefixesForContainer( - long containerId) throws IOException { - // set the default startKeyPrefix to empty string - return getKeyPrefixesForContainer(containerId, StringUtils.EMPTY); - } - - /** - * Use the DB's prefix seek iterator to start the scan from the given - * container ID and prev key prefix. The prev key prefix is skipped from - * the result. - * - * @param containerId the given containerId. - * @param prevKeyPrefix the given key prefix to start the scan from. - * @return Map of (Key-Prefix,Count of Keys). - */ - @Override - public Map getKeyPrefixesForContainer( - long containerId, String prevKeyPrefix) throws IOException { - - Map prefixes = new LinkedHashMap<>(); - TableIterator> containerIterator = containerKeyTable.iterator(); - ContainerKeyPrefix seekKey; - boolean skipPrevKey = false; - if (StringUtils.isNotBlank(prevKeyPrefix)) { - skipPrevKey = true; - seekKey = new ContainerKeyPrefix(containerId, prevKeyPrefix); - } else { - seekKey = new ContainerKeyPrefix(containerId); - } - KeyValue seekKeyValue = - containerIterator.seek(seekKey); - - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys in the container are - // returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKeyPrefix) && - !seekKeyValue.getKey().getKeyPrefix().equals(prevKeyPrefix))) { - return prefixes; - } - - while (containerIterator.hasNext()) { - KeyValue keyValue = containerIterator.next(); - ContainerKeyPrefix containerKeyPrefix = keyValue.getKey(); - - // skip the prev key if prev key is present - if (skipPrevKey && - containerKeyPrefix.getKeyPrefix().equals(prevKeyPrefix)) { - continue; - } - - // The prefix seek only guarantees that the iterator's head will be - // positioned at the first prefix match. We still have to check the key - // prefix. - if (containerKeyPrefix.getContainerId() == containerId) { - if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) { - prefixes.put(new ContainerKeyPrefix(containerId, - containerKeyPrefix.getKeyPrefix(), - containerKeyPrefix.getKeyVersion()), - keyValue.getValue()); - } else { - LOG.warn("Null key prefix returned for containerId = " + containerId); - } - } else { - break; //Break when the first mismatch occurs. - } - } - return prefixes; - } - - /** - * Iterate the DB to construct a Map of containerID -> containerMetadata - * only for the given limit from the given start key. The start containerID - * is skipped from the result. - * - * Return all the containers if limit < 0. - * - * @param limit No of containers to get. - * @param prevContainer containerID after which the - * list of containers are scanned. - * @return Map of containerID -> containerMetadata. - * @throws IOException - */ - @Override - public Map getContainers(int limit, - long prevContainer) - throws IOException { - Map containers = new LinkedHashMap<>(); - TableIterator> containerIterator = containerKeyTable.iterator(); - ContainerKeyPrefix seekKey; - if (prevContainer > 0L) { - seekKey = new ContainerKeyPrefix(prevContainer); - KeyValue seekKeyValue = containerIterator.seek(seekKey); - // Check if RocksDB was able to correctly seek to the given - // prevContainer containerId. 
If not, then return empty result - if (seekKeyValue != null && - seekKeyValue.getKey().getContainerId() != prevContainer) { - return containers; - } else { - // seek to the prevContainer+1 containerID to start scan - seekKey = new ContainerKeyPrefix(prevContainer + 1); - containerIterator.seek(seekKey); - } - } - while (containerIterator.hasNext()) { - KeyValue keyValue = containerIterator.next(); - ContainerKeyPrefix containerKeyPrefix = keyValue.getKey(); - Long containerID = containerKeyPrefix.getContainerId(); - Integer numberOfKeys = keyValue.getValue(); - - // break the loop if limit has been reached - // and one more new entity needs to be added to the containers map - if (containers.size() == limit && !containers.containsKey(containerID)) { - break; - } - - // initialize containerMetadata with 0 as number of keys. - containers.computeIfAbsent(containerID, ContainerMetadata::new); - // increment number of keys for the containerID - ContainerMetadata containerMetadata = containers.get(containerID); - containerMetadata.setNumberOfKeys(containerMetadata.getNumberOfKeys() + - numberOfKeys); - containers.put(containerID, containerMetadata); - } - return containers; - } - - @Override - public void deleteContainerMapping(ContainerKeyPrefix containerKeyPrefix) - throws IOException { - containerKeyTable.delete(containerKeyPrefix); - } - - /** - * Get total count of containers. - * - * @return total count of containers. - */ - @Override - public long getCountForContainers() { - GlobalStats containerCountRecord = - globalStatsDao.fetchOneByKey(CONTAINER_COUNT_KEY); - - return (containerCountRecord == null) ? 0L : - containerCountRecord.getValue(); - } - - @Override - public TableIterator getContainerTableIterator() { - return containerKeyTable.iterator(); - } - - /** - * Store the total count of containers into the container DB store. - * - * @param count count of the containers present in the system. - */ - @Override - public void storeContainerCount(Long count) { - // Get the current timestamp - Timestamp now = - using(sqlConfiguration).fetchValue(select(currentTimestamp())); - GlobalStats containerCountRecord = - globalStatsDao.fetchOneByKey(CONTAINER_COUNT_KEY); - GlobalStats globalStatsRecord = - new GlobalStats(CONTAINER_COUNT_KEY, count, now); - - // Insert a new record for CONTAINER_COUNT_KEY if it does not exist - if (containerCountRecord == null) { - globalStatsDao.insert(globalStatsRecord); - } else { - globalStatsDao.update(globalStatsRecord); - } - } - - /** - * Increment the total count for containers in the system by the given count. - * - * @param count no. of new containers to add to containers total count. - */ - @Override - public void incrementContainerCountBy(long count) { - long containersCount = getCountForContainers(); - storeContainerCount(containersCount + count); - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java deleted file mode 100644 index e35f90056e8..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
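
To make the prefix-seek behaviour of getKeyPrefixesForContainer concrete, the sketch below shows the two ways the implementation above is driven: an empty prevKeyPrefix scans every key prefix in a container, while a non-empty prevKeyPrefix resumes the scan just after that prefix. The container id and resume prefix are made-up sample values; the method signatures and types come from the code above.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.commons.lang3.StringUtils;
    import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
    import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;

    public class ContainerKeyScanSketch {
      /** Scan a container's key prefixes, then resume after a known prefix. */
      static void scan(ContainerDBServiceProvider provider) throws IOException {
        long containerId = 1L;

        // Empty prevKeyPrefix: the iterator seeks to the first entry of the
        // container and every (key prefix -> key count) pair is returned.
        Map<ContainerKeyPrefix, Integer> all =
            provider.getKeyPrefixesForContainer(containerId, StringUtils.EMPTY);

        // Non-empty prevKeyPrefix: the scan starts at that prefix, skips it,
        // and stops at the first entry that belongs to another container.
        Map<ContainerKeyPrefix, Integer> remaining =
            provider.getKeyPrefixesForContainer(containerId, "/vol1/bucket1/key100");

        System.out.println(all.size() + " prefixes total, "
            + remaining.size() + " after the resume point");
      }
    }
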
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.commons.compress.utils.CharsetNames.UTF_8; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.apache.commons.lang3.ArrayUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.hdds.utils.db.Codec; - -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; - -/** - * Codec to encode ContainerKeyPrefix as byte array. - */ -public class ContainerKeyPrefixCodec implements Codec{ - - private final static String KEY_DELIMITER = "_"; - - @Override - public byte[] toPersistedFormat(ContainerKeyPrefix containerKeyPrefix) - throws IOException { - Preconditions.checkNotNull(containerKeyPrefix, - "Null object can't be converted to byte array."); - byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix - .getContainerId()); - - //Prefix seek can be done only with containerId. In that case, we can - // expect the key and version to be undefined. - if (StringUtils.isNotEmpty(containerKeyPrefix.getKeyPrefix())) { - byte[] keyPrefixBytes = (KEY_DELIMITER + - containerKeyPrefix.getKeyPrefix()).getBytes(UTF_8); - containerIdBytes = ArrayUtils.addAll(containerIdBytes, keyPrefixBytes); - } - - if (containerKeyPrefix.getKeyVersion() != -1) { - containerIdBytes = ArrayUtils.addAll(containerIdBytes, KEY_DELIMITER - .getBytes(UTF_8)); - containerIdBytes = ArrayUtils.addAll(containerIdBytes, Longs.toByteArray( - containerKeyPrefix.getKeyVersion())); - } - return containerIdBytes; - } - - @Override - public ContainerKeyPrefix fromPersistedFormat(byte[] rawData) - throws IOException { - - // First 8 bytes is the containerId. - long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray( - rawData, 0, Long.BYTES)).getLong(); - // When reading from byte[], we can always expect to have the containerId, - // key and version parts in the byte array. - byte[] keyBytes = ArrayUtils.subarray(rawData, - Long.BYTES + 1, - rawData.length - Long.BYTES - 1); - String keyPrefix = new String(keyBytes, UTF_8); - - // Last 8 bytes is the key version. - byte[] versionBytes = ArrayUtils.subarray(rawData, - rawData.length - Long.BYTES, - rawData.length); - long version = ByteBuffer.wrap(versionBytes).getLong(); - return new ContainerKeyPrefix(containerIdFromDB, keyPrefix, version); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java deleted file mode 100644 index 789b30168c8..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ /dev/null @@ -1,362 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
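
A round-trip through the codec above illustrates the persisted layout: an 8-byte containerId, a "_" delimiter, the UTF-8 key prefix, another "_", and an 8-byte key version (36 bytes for the sample below). The sample values are arbitrary; the constructor and accessors are the ones used elsewhere in this patch.

    import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
    import org.apache.hadoop.ozone.recon.spi.impl.ContainerKeyPrefixCodec;

    public class ContainerKeyPrefixCodecSketch {
      public static void main(String[] args) throws Exception {
        ContainerKeyPrefixCodec codec = new ContainerKeyPrefixCodec();
        ContainerKeyPrefix prefix =
            new ContainerKeyPrefix(42L, "/vol1/bucket1/key1", 0L);

        // 8 (containerId) + 1 ("_") + 18 (key prefix) + 1 ("_") + 8 (version) = 36 bytes.
        byte[] raw = codec.toPersistedFormat(prefix);

        ContainerKeyPrefix decoded = codec.fromPersistedFormat(raw);
        assert decoded.getContainerId() == 42L;
        assert decoded.getKeyPrefix().equals("/vol1/bucket1/key1");
        assert decoded.getKeyVersion() == 0L;
      }
    }
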
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_CONNECTION_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_SOCKET_TIMEOUT_DEFAULT; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import javax.inject.Inject; -import javax.inject.Singleton; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler; -import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch; -import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.ratis.protocol.ClientId; -import 
org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Implementation of the OzoneManager Service provider. - */ -@Singleton -public class OzoneManagerServiceProviderImpl - implements OzoneManagerServiceProvider { - - private static final Logger LOG = - LoggerFactory.getLogger(OzoneManagerServiceProviderImpl.class); - - private final CloseableHttpClient httpClient; - private File omSnapshotDBParentDir = null; - private String omDBSnapshotUrl; - - private OzoneManagerProtocol ozoneManagerClient; - private final ClientId clientId = ClientId.randomId(); - private final OzoneConfiguration configuration; - private final ScheduledExecutorService scheduler = - Executors.newScheduledThreadPool(1); - - private ReconOMMetadataManager omMetadataManager; - private ReconTaskController reconTaskController; - private ReconTaskStatusDao reconTaskStatusDao; - private ReconUtils reconUtils; - private enum OmSnapshotTaskName { - OM_DB_FULL_SNAPSHOT, - OM_DB_DELTA_UPDATES - } - - @Inject - public OzoneManagerServiceProviderImpl( - OzoneConfiguration configuration, - ReconOMMetadataManager omMetadataManager, - ReconTaskController reconTaskController, - ReconUtils reconUtils, - OzoneManagerProtocol ozoneManagerClient) throws IOException { - - String ozoneManagerHttpAddress = configuration.get(OMConfigKeys - .OZONE_OM_HTTP_ADDRESS_KEY); - - String ozoneManagerHttpsAddress = configuration.get(OMConfigKeys - .OZONE_OM_HTTPS_ADDRESS_KEY); - - omSnapshotDBParentDir = reconUtils.getReconDbDir(configuration, - OZONE_RECON_OM_SNAPSHOT_DB_DIR); - - HttpConfig.Policy policy = DFSUtil.getHttpPolicy(configuration); - - int socketTimeout = (int) configuration.getTimeDuration( - RECON_OM_SOCKET_TIMEOUT, RECON_OM_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - int connectionTimeout = (int) configuration.getTimeDuration( - RECON_OM_CONNECTION_TIMEOUT, - RECON_OM_CONNECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - int connectionRequestTimeout = (int)configuration.getTimeDuration( - RECON_OM_CONNECTION_REQUEST_TIMEOUT, - RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - - RequestConfig config = RequestConfig.custom() - .setConnectTimeout(socketTimeout) - .setConnectionRequestTimeout(connectionTimeout) - .setSocketTimeout(connectionRequestTimeout).build(); - - httpClient = HttpClientBuilder - .create() - .setDefaultRequestConfig(config) - .build(); - - omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; - - if (policy.isHttpsEnabled()) { - omDBSnapshotUrl = "https://" + ozoneManagerHttpsAddress + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; - } - - boolean flushParam = configuration.getBoolean( - RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM, false); - - if (flushParam) { - omDBSnapshotUrl += "?" 
+ OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true"; - } - - this.reconUtils = reconUtils; - this.omMetadataManager = omMetadataManager; - this.reconTaskController = reconTaskController; - this.reconTaskStatusDao = reconTaskController.getReconTaskStatusDao(); - this.ozoneManagerClient = ozoneManagerClient; - this.configuration = configuration; - } - - @Override - public OMMetadataManager getOMMetadataManagerInstance() { - return omMetadataManager; - } - - @Override - public void start() { - try { - omMetadataManager.start(configuration); - } catch (IOException ioEx) { - LOG.error("Error staring Recon OM Metadata Manager.", ioEx); - } - long initialDelay = configuration.getTimeDuration( - RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY, - RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT, - TimeUnit.MILLISECONDS); - long interval = configuration.getTimeDuration( - RECON_OM_SNAPSHOT_TASK_INTERVAL, - RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT, - TimeUnit.MILLISECONDS); - scheduler.scheduleWithFixedDelay(this::syncDataFromOM, - initialDelay, - interval, - TimeUnit.MILLISECONDS); - } - - @Override - public void stop() { - reconTaskController.stop(); - scheduler.shutdownNow(); - } - - /** - * Method to obtain current OM DB Snapshot. - * @return DBCheckpoint instance. - */ - @VisibleForTesting - DBCheckpoint getOzoneManagerDBSnapshot() { - String snapshotFileName = RECON_OM_SNAPSHOT_DB + "_" + System - .currentTimeMillis(); - File targetFile = new File(omSnapshotDBParentDir, snapshotFileName + - ".tar.gz"); - try { - try (InputStream inputStream = reconUtils.makeHttpCall(httpClient, - omDBSnapshotUrl)) { - FileUtils.copyInputStreamToFile(inputStream, targetFile); - } - - // Untar the checkpoint file. - Path untarredDbDir = Paths.get(omSnapshotDBParentDir.getAbsolutePath(), - snapshotFileName); - reconUtils.untarCheckpointFile(targetFile, untarredDbDir); - FileUtils.deleteQuietly(targetFile); - - // TODO Create Checkpoint based on OM DB type. - // Currently, OM DB type is not configurable. Hence, defaulting to - // RocksDB. - return new RocksDBCheckpoint(untarredDbDir); - } catch (IOException e) { - LOG.error("Unable to obtain Ozone Manager DB Snapshot. ", e); - } - return null; - } - - /** - * Update Local OM DB with new OM DB snapshot. - * @throws IOException - */ - @VisibleForTesting - boolean updateReconOmDBWithNewSnapshot() throws IOException { - // Obtain the current DB snapshot from OM and - // update the in house OM metadata managed DB instance. - DBCheckpoint dbSnapshot = getOzoneManagerDBSnapshot(); - if (dbSnapshot != null && dbSnapshot.getCheckpointLocation() != null) { - LOG.info("Got new checkpoint from OM : " + - dbSnapshot.getCheckpointLocation()); - try { - omMetadataManager.updateOmDB(dbSnapshot.getCheckpointLocation() - .toFile()); - return true; - } catch (IOException e) { - LOG.error("Unable to refresh Recon OM DB Snapshot. ", e); - } - } else { - LOG.error("Null snapshot location got from OM."); - } - return false; - } - - /** - * Get Delta updates from OM through RPC call and apply to local OM DB as - * well as accumulate in a buffer. - * @param fromSequenceNumber from sequence number to request from. - * @param omdbUpdatesHandler OM DB updates handler to buffer updates. - * @throws IOException when OM RPC request fails. - * @throws RocksDBException when writing to RocksDB fails. 
- */ - @VisibleForTesting - void getAndApplyDeltaUpdatesFromOM( - long fromSequenceNumber, OMDBUpdatesHandler omdbUpdatesHandler) - throws IOException, RocksDBException { - DBUpdatesRequest dbUpdatesRequest = DBUpdatesRequest.newBuilder() - .setSequenceNumber(fromSequenceNumber).build(); - DBUpdatesWrapper dbUpdates = ozoneManagerClient.getDBUpdates( - dbUpdatesRequest); - if (null != dbUpdates) { - RDBStore rocksDBStore = (RDBStore)omMetadataManager.getStore(); - RocksDB rocksDB = rocksDBStore.getDb(); - LOG.debug("Number of updates received from OM : " + - dbUpdates.getData().size()); - for (byte[] data : dbUpdates.getData()) { - WriteBatch writeBatch = new WriteBatch(data); - writeBatch.iterate(omdbUpdatesHandler); - RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(writeBatch); - rdbBatchOperation.commit(rocksDB, new WriteOptions()); - } - } - } - - /** - * Based on current state of Recon's OM DB, we either get delta updates or - * full snapshot from Ozone Manager. - */ - @VisibleForTesting - void syncDataFromOM() { - LOG.info("Syncing data from Ozone Manager."); - long currentSequenceNumber = getCurrentOMDBSequenceNumber(); - boolean fullSnapshot = false; - - if (currentSequenceNumber <= 0) { - fullSnapshot = true; - } else { - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(omMetadataManager); - try { - LOG.info("Obtaining delta updates from Ozone Manager"); - // Get updates from OM and apply to local Recon OM DB. - getAndApplyDeltaUpdatesFromOM(currentSequenceNumber, - omdbUpdatesHandler); - // Update timestamp of successful delta updates query. - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus( - OmSnapshotTaskName.OM_DB_DELTA_UPDATES.name(), - System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); - reconTaskStatusDao.update(reconTaskStatusRecord); - // Pass on DB update events to tasks that are listening. - reconTaskController.consumeOMEvents(new OMUpdateEventBatch( - omdbUpdatesHandler.getEvents()), omMetadataManager); - } catch (IOException | InterruptedException | RocksDBException e) { - LOG.warn("Unable to get and apply delta updates from OM.", e); - fullSnapshot = true; - } - } - - if (fullSnapshot) { - try { - LOG.info("Obtaining full snapshot from Ozone Manager"); - // Update local Recon OM DB to new snapshot. - boolean success = updateReconOmDBWithNewSnapshot(); - // Update timestamp of successful delta updates query. - if (success) { - ReconTaskStatus reconTaskStatusRecord = - new ReconTaskStatus( - OmSnapshotTaskName.OM_DB_FULL_SNAPSHOT.name(), - System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); - reconTaskStatusDao.update(reconTaskStatusRecord); - // Reinitialize tasks that are listening. - LOG.info("Calling reprocess on Recon tasks."); - reconTaskController.reInitializeTasks(omMetadataManager); - } - } catch (IOException | InterruptedException e) { - LOG.error("Unable to update Recon's OM DB with new snapshot ", e); - } - } - } - - /** - * Get OM RocksDB's latest sequence number. - * @return latest sequence number. 
- */ - private long getCurrentOMDBSequenceNumber() { - RDBStore rocksDBStore = (RDBStore)omMetadataManager.getStore(); - if (null == rocksDBStore) { - return 0; - } else { - return rocksDBStore.getDb().getLatestSequenceNumber(); - } - } -} - diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java deleted file mode 100644 index 9c3e987f672..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ReconContainerDBProvider.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_COUNT_TABLE; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_CONTAINER_DB; -import static org.apache.hadoop.ozone.recon.ReconConstants.CONTAINER_KEY_TABLE; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; - -import java.io.File; -import java.nio.file.Path; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.hdds.utils.db.IntegerCodec; -import org.apache.hadoop.hdds.utils.db.LongCodec; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.annotations.VisibleForTesting; -import com.google.inject.Inject; -import com.google.inject.Provider; -import com.google.inject.ProvisionException; - -/** - * Provider for the Recon container DB (Metadata store). - */ -public class ReconContainerDBProvider implements Provider { - - @VisibleForTesting - private static final Logger LOG = - LoggerFactory.getLogger(ReconContainerDBProvider.class); - - private OzoneConfiguration configuration; - private ReconUtils reconUtils; - - @Inject - public ReconContainerDBProvider(OzoneConfiguration configuration, - ReconUtils reconUtils) { - this.configuration = configuration; - this.reconUtils = reconUtils; - } - - @Override - public DBStore get() { - DBStore dbStore; - File reconDbDir = - reconUtils.getReconDbDir(configuration, OZONE_RECON_DB_DIR); - File lastKnownOMSnapshot = - reconUtils.getLastKnownDB(reconDbDir, RECON_CONTAINER_DB); - if (lastKnownOMSnapshot != null) { - dbStore = getDBStore(configuration, reconUtils, - lastKnownOMSnapshot.getName()); - } else { - dbStore = getNewDBStore(configuration, reconUtils); - } - if (dbStore == null) { - throw new ProvisionException("Unable to provide instance of DBStore " + - "store."); - } - return dbStore; - } - - private static DBStore getDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils, String dbName) { - DBStore dbStore = null; - try { - Path metaDir = reconUtils.getReconDbDir( - configuration, OZONE_RECON_DB_DIR).toPath(); - dbStore = DBStoreBuilder.newBuilder(configuration) - .setPath(metaDir) - .setName(dbName) - .addTable(CONTAINER_KEY_TABLE) - .addTable(CONTAINER_KEY_COUNT_TABLE) - .addCodec(ContainerKeyPrefix.class, new ContainerKeyPrefixCodec()) - .addCodec(Long.class, new LongCodec()) - .addCodec(Integer.class, new IntegerCodec()) - .build(); - } catch (Exception ex) { - LOG.error("Unable to initialize Recon container metadata store.", ex); - } - return dbStore; - } - - static DBStore getNewDBStore(OzoneConfiguration configuration, - ReconUtils reconUtils) { - String dbName = RECON_CONTAINER_DB + "_" + System.currentTimeMillis(); - return getDBStore(configuration, reconUtils, dbName); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java deleted file mode 100644 index 1ed44294d1e..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The classes in this package define the Service Provider implementations for - * Recon. This provides connectivity to underlying Ozone subsystems. - */ -package org.apache.hadoop.ozone.recon.spi.impl; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java deleted file mode 100644 index 24692facf0a..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * The classes in this package define the Service Provider interfaces for - * Recon. The implementations of Spi interfaces provide connectivity to - * underlying Ozone subsystems. - */ -package org.apache.hadoop.ozone.recon.spi; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java deleted file mode 100644 index eae17bd0699..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ /dev/null @@ -1,235 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; - -import java.io.IOException; -import java.time.Duration; -import java.time.Instant; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Set; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.Inject; - -/** - * Class to iterate over the OM DB and populate the Recon container DB with - * the container -> Key reverse mapping. - */ -public class ContainerKeyMapperTask implements ReconDBUpdateTask { - - private static final Logger LOG = - LoggerFactory.getLogger(ContainerKeyMapperTask.class); - - private ContainerDBServiceProvider containerDBServiceProvider; - - @Inject - public ContainerKeyMapperTask(ContainerDBServiceProvider - containerDBServiceProvider) { - this.containerDBServiceProvider = containerDBServiceProvider; - } - - /** - * Read Key -> ContainerId data from OM snapshot DB and write reverse map - * (container, key) -> count to Recon Container DB. - */ - @Override - public Pair reprocess(OMMetadataManager omMetadataManager) { - long omKeyCount = 0; - try { - LOG.info("Starting a 'reprocess' run of ContainerKeyMapperTask."); - Instant start = Instant.now(); - - // initialize new container DB - containerDBServiceProvider.initNewContainerDB(new HashMap<>()); - - Table omKeyInfoTable = omMetadataManager.getKeyTable(); - try (TableIterator> - keyIter = omKeyInfoTable.iterator()) { - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - OmKeyInfo omKeyInfo = kv.getValue(); - writeOMKeyToContainerDB(kv.getKey(), omKeyInfo); - omKeyCount++; - } - } - LOG.info("Completed 'reprocess' of ContainerKeyMapperTask."); - Instant end = Instant.now(); - long duration = Duration.between(start, end).toMillis(); - LOG.info("It took me " + (double) duration / 1000.0 + " seconds to " + - "process " + omKeyCount + " keys."); - } catch (IOException ioEx) { - LOG.error("Unable to populate Container Key Prefix data in Recon DB. 
", - ioEx); - return new ImmutablePair<>(getTaskName(), false); - } - return new ImmutablePair<>(getTaskName(), true); - } - - @Override - public String getTaskName() { - return "ContainerKeyMapperTask"; - } - - @Override - public Collection getTaskTables() { - return Collections.singletonList(KEY_TABLE); - } - - @Override - public Pair process(OMUpdateEventBatch events) { - Iterator eventIterator = events.getIterator(); - int eventCount = 0; - while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); - String updatedKey = omdbUpdateEvent.getKey(); - OmKeyInfo updatedKeyValue = omdbUpdateEvent.getValue(); - try { - switch (omdbUpdateEvent.getAction()) { - case PUT: - writeOMKeyToContainerDB(updatedKey, updatedKeyValue); - break; - - case DELETE: - deleteOMKeyFromContainerDB(updatedKey); - break; - - default: LOG.debug("Skipping DB update event : " + omdbUpdateEvent - .getAction()); - } - eventCount++; - } catch (IOException e) { - LOG.error("Unexpected exception while updating key data : {} ", - updatedKey, e); - return new ImmutablePair<>(getTaskName(), false); - } - } - LOG.info("{} successfully processed {} OM DB update event(s).", - getTaskName(), eventCount); - return new ImmutablePair<>(getTaskName(), true); - } - - /** - * Delete an OM Key from Container DB and update containerID -> no. of keys - * count. - * - * @param key key String. - * @throws IOException If Unable to write to container DB. - */ - private void deleteOMKeyFromContainerDB(String key) - throws IOException { - - TableIterator> containerIterator = - containerDBServiceProvider.getContainerTableIterator(); - - Set keysToBeDeleted = new HashSet<>(); - - while (containerIterator.hasNext()) { - Table.KeyValue keyValue = - containerIterator.next(); - String keyPrefix = keyValue.getKey().getKeyPrefix(); - if (keyPrefix.equals(key)) { - keysToBeDeleted.add(keyValue.getKey()); - } - } - - for (ContainerKeyPrefix containerKeyPrefix : keysToBeDeleted) { - containerDBServiceProvider.deleteContainerMapping(containerKeyPrefix); - - // decrement count and update containerKeyCount. - Long containerID = containerKeyPrefix.getContainerId(); - long keyCount = - containerDBServiceProvider.getKeyCountForContainer(containerID); - if (keyCount > 0) { - containerDBServiceProvider.storeContainerKeyCount(containerID, - --keyCount); - } - } - } - - /** - * Write an OM key to container DB and update containerID -> no. of keys - * count. - * - * @param key key String - * @param omKeyInfo omKeyInfo value - * @throws IOException if unable to write to recon DB. - */ - private void writeOMKeyToContainerDB(String key, OmKeyInfo omKeyInfo) - throws IOException { - long containerCountToIncrement = 0; - for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyInfo - .getKeyLocationVersions()) { - long keyVersion = omKeyLocationInfoGroup.getVersion(); - for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoGroup - .getLocationList()) { - long containerId = omKeyLocationInfo.getContainerID(); - ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( - containerId, key, keyVersion); - if (containerDBServiceProvider.getCountForContainerKeyPrefix( - containerKeyPrefix) == 0) { - // Save on writes. No need to save same container-key prefix - // mapping again. 
- containerDBServiceProvider.storeContainerKeyMapping( - containerKeyPrefix, 1); - - // check if container already exists and - // increment the count of containers if it does not exist - if (!containerDBServiceProvider.doesContainerExists(containerId)) { - containerCountToIncrement++; - } - - // update the count of keys for the given containerID - long keyCount = - containerDBServiceProvider.getKeyCountForContainer(containerId); - - // increment the count and update containerKeyCount. - // keyCount will be 0 if containerID is not found. So, there is no - // need to initialize keyCount for the first time. - containerDBServiceProvider.storeContainerKeyCount(containerId, - ++keyCount); - } - } - } - - if (containerCountToIncrement > 0) { - containerDBServiceProvider - .incrementContainerCountBy(containerCountToIncrement); - } - } - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java deleted file mode 100644 index 3874ddac619..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java +++ /dev/null @@ -1,251 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
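
The per-key bookkeeping above condenses to the sketch below: for every container that holds a block of the key, Recon writes a (container, key) -> 1 mapping, bumps that container's key count, and finally bumps the global container count for ids it has not seen before. The key name, version, and container ids are made-up; the loop over hard-coded ids stands in for the walk over OmKeyLocationInfoGroup, and only the ContainerDBServiceProvider calls come from the task itself.

    import java.io.IOException;

    import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
    import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;

    public class ReverseMappingSketch {
      static void recordKey(ContainerDBServiceProvider provider) throws IOException {
        String key = "/vol1/bucket1/key1";
        long version = 0L;
        long newContainers = 0;

        for (long containerId : new long[] {5L, 6L}) {
          ContainerKeyPrefix prefix =
              new ContainerKeyPrefix(containerId, key, version);
          if (provider.getCountForContainerKeyPrefix(prefix) == 0) {
            // (container, key) -> 1: the reverse mapping entry.
            provider.storeContainerKeyMapping(prefix, 1);

            // First sighting of this container id: remember to bump the
            // global container count once at the end.
            if (!provider.doesContainerExists(containerId)) {
              newContainers++;
            }

            // containerID -> number of keys.
            long keyCount = provider.getKeyCountForContainer(containerId);
            provider.storeContainerKeyCount(containerId, keyCount + 1);
          }
        }

        if (newContainers > 0) {
          provider.incrementContainerCountBy(newContainers);
        }
      }
    }
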

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import com.google.inject.Inject; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; -import org.jooq.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; - -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.recon.tasks. - OMDBUpdateEvent.OMDBUpdateAction.DELETE; -import static org.apache.hadoop.ozone.recon.tasks. - OMDBUpdateEvent.OMDBUpdateAction.PUT; - -/** - * Class to iterate over the OM DB and store the counts of existing/new - * files binned into ranges (1KB, 2Kb..,4MB,.., 1TB,..1PB) to the Recon - * fileSize DB. - */ -public class FileSizeCountTask implements ReconDBUpdateTask { - private static final Logger LOG = - LoggerFactory.getLogger(FileSizeCountTask.class); - - private int maxBinSize = -1; - private long maxFileSizeUpperBound = 1125899906842624L; // 1 PB - private long[] upperBoundCount; - private long oneKb = 1024L; - private FileCountBySizeDao fileCountBySizeDao; - - @Inject - public FileSizeCountTask(Configuration sqlConfiguration) { - fileCountBySizeDao = new FileCountBySizeDao(sqlConfiguration); - upperBoundCount = new long[getMaxBinSize()]; - } - - long getOneKB() { - return oneKb; - } - - long getMaxFileSizeUpperBound() { - return maxFileSizeUpperBound; - } - - int getMaxBinSize() { - if (maxBinSize == -1) { - // extra bin to add files > 1PB. - // 1 KB (2 ^ 10) is the smallest tracked file. - maxBinSize = nextClosestPowerIndexOfTwo(maxFileSizeUpperBound) - 10 + 1; - } - return maxBinSize; - } - - /** - * Read the Keys from OM snapshot DB and calculate the upper bound of - * File Size it belongs to. - * - * @param omMetadataManager OM Metadata instance. - * @return Pair - */ - @Override - public Pair reprocess(OMMetadataManager omMetadataManager) { - Table omKeyInfoTable = omMetadataManager.getKeyTable(); - try (TableIterator> - keyIter = omKeyInfoTable.iterator()) { - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - - // reprocess() is a PUT operation on the DB. - updateUpperBoundCount(kv.getValue(), PUT); - } - } catch (IOException ioEx) { - LOG.error("Unable to populate File Size Count in Recon DB. 
", ioEx); - return new ImmutablePair<>(getTaskName(), false); - } - populateFileCountBySizeDB(); - - LOG.info("Completed a 'reprocess' run of FileSizeCountTask."); - return new ImmutablePair<>(getTaskName(), true); - } - - @Override - public String getTaskName() { - return "FileSizeCountTask"; - } - - @Override - public Collection getTaskTables() { - return Collections.singletonList(KEY_TABLE); - } - - private void updateCountFromDB() { - // Read - Write operations to DB are in ascending order - // of file size upper bounds. - List resultSet = fileCountBySizeDao.findAll(); - int index = 0; - if (resultSet != null) { - for (FileCountBySize row : resultSet) { - upperBoundCount[index] = row.getCount(); - index++; - } - } - } - - /** - * Read the Keys from update events and update the count of files - * pertaining to a certain upper bound. - * - * @param events Update events - PUT/DELETE. - * @return Pair - */ - @Override - public Pair process(OMUpdateEventBatch events) { - Iterator eventIterator = events.getIterator(); - - //update array with file size count from DB - updateCountFromDB(); - - while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); - String updatedKey = omdbUpdateEvent.getKey(); - OmKeyInfo omKeyInfo = omdbUpdateEvent.getValue(); - - try{ - switch (omdbUpdateEvent.getAction()) { - case PUT: - updateUpperBoundCount(omKeyInfo, PUT); - break; - - case DELETE: - updateUpperBoundCount(omKeyInfo, DELETE); - break; - - default: LOG.trace("Skipping DB update event : " + omdbUpdateEvent - .getAction()); - } - } catch (IOException e) { - LOG.error("Unexpected exception while updating key data : {} {}", - updatedKey, e.getMessage()); - return new ImmutablePair<>(getTaskName(), false); - } - populateFileCountBySizeDB(); - } - LOG.info("Completed a 'process' run of FileSizeCountTask."); - return new ImmutablePair<>(getTaskName(), true); - } - - /** - * Calculate the bin index based on size of the Key. - * index is calculated as the number of right shifts - * needed until dataSize becomes zero. - * - * @param dataSize Size of the key. - * @return int bin index in upperBoundCount - */ - public int calculateBinIndex(long dataSize) { - if (dataSize >= getMaxFileSizeUpperBound()) { - return getMaxBinSize() - 1; - } - int index = nextClosestPowerIndexOfTwo(dataSize); - // The smallest file size being tracked for count - // is 1 KB i.e. 1024 = 2 ^ 10. - return index < 10 ? 0 : index - 10; - } - - int nextClosestPowerIndexOfTwo(long dataSize) { - int index = 0; - while(dataSize != 0) { - dataSize >>= 1; - index += 1; - } - return index; - } - - /** - * Populate DB with the counts of file sizes calculated - * using the dao. - * - */ - void populateFileCountBySizeDB() { - for (int i = 0; i < upperBoundCount.length; i++) { - long fileSizeUpperBound = (i == upperBoundCount.length - 1) ? - Long.MAX_VALUE : (long) Math.pow(2, (10 + i)); - FileCountBySize fileCountRecord = - fileCountBySizeDao.findById(fileSizeUpperBound); - FileCountBySize newRecord = new - FileCountBySize(fileSizeUpperBound, upperBoundCount[i]); - if (fileCountRecord == null) { - fileCountBySizeDao.insert(newRecord); - } else { - fileCountBySizeDao.update(newRecord); - } - } - } - - /** - * Calculate and update the count of files being tracked by - * upperBoundCount[]. - * Used by reprocess() and process(). 
- * - * @param omKeyInfo OmKey being updated for count - * @param operation (PUT, DELETE) - */ - void updateUpperBoundCount(OmKeyInfo omKeyInfo, - OMDBUpdateEvent.OMDBUpdateAction operation) throws IOException { - int binIndex = calculateBinIndex(omKeyInfo.getDataSize()); - if (operation == PUT) { - upperBoundCount[binIndex]++; - } else if (operation == DELETE) { - if (upperBoundCount[binIndex] != 0) { - //decrement only if it had files before, default DB value is 0 - upperBoundCount[binIndex]--; - } else { - LOG.warn("Unexpected error while updating bin count. Found 0 count " + - "for index : " + binIndex + " while processing DELETE event for " - + omKeyInfo.getKeyName()); - } - } - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java deleted file mode 100644 index 0fcabccb37a..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdateEvent.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
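
Worked through on a few sizes, the binning above reduces to "count the right-shifts needed to reach zero, subtract 10 (1 KB = 2^10), floor at 0"; sizes at or beyond the 1 PB upper bound are additionally clamped into the extra last bin by calculateBinIndex. The standalone re-statement below exists only to show the arithmetic and is not part of the task.

    public class BinIndexSketch {
      // Same shift-counting as FileSizeCountTask#nextClosestPowerIndexOfTwo.
      static int nextClosestPowerIndexOfTwo(long dataSize) {
        int index = 0;
        while (dataSize != 0) {
          dataSize >>= 1;
          index++;
        }
        return index;
      }

      // Same mapping as calculateBinIndex, minus the >= 1 PB clamp.
      static int binIndex(long dataSize) {
        int index = nextClosestPowerIndexOfTwo(dataSize);
        return index < 10 ? 0 : index - 10;
      }

      public static void main(String[] args) {
        System.out.println(binIndex(100L));      // 0  -> 1 KB upper bound
        System.out.println(binIndex(1023L));     // 0  -> 10 shifts, 1 KB upper bound
        System.out.println(binIndex(3000L));     // 2  -> 12 shifts, 4 KB upper bound
        System.out.println(binIndex(1048576L));  // 11 -> 21 shifts, 2 MB upper bound
      }
    }
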

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -/** - * A class used to encapsulate a single OM DB update event. - * Currently only PUT and DELETE are supported. - * @param Type of Key. - * @param Type of Value. - */ -public final class OMDBUpdateEvent { - - private final OMDBUpdateAction action; - private final String table; - private final KEY updatedKey; - private final VALUE updatedValue; - private final long sequenceNumber; - - private OMDBUpdateEvent(OMDBUpdateAction action, - String table, - KEY updatedKey, - VALUE updatedValue, - long sequenceNumber) { - this.action = action; - this.table = table; - this.updatedKey = updatedKey; - this.updatedValue = updatedValue; - this.sequenceNumber = sequenceNumber; - } - - public OMDBUpdateAction getAction() { - return action; - } - - public String getTable() { - return table; - } - - public KEY getKey() { - return updatedKey; - } - - public VALUE getValue() { - return updatedValue; - } - - public long getSequenceNumber() { - return sequenceNumber; - } - - /** - * Builder used to construct an OM DB Update event. - * @param Key type. - * @param Value type. - */ - public static class OMUpdateEventBuilder { - - private OMDBUpdateAction action; - private String table; - private KEY updatedKey; - private VALUE updatedValue; - private long lastSequenceNumber; - - OMUpdateEventBuilder setAction(OMDBUpdateAction omdbUpdateAction) { - this.action = omdbUpdateAction; - return this; - } - - OMUpdateEventBuilder setTable(String tableName) { - this.table = tableName; - return this; - } - - OMUpdateEventBuilder setKey(KEY key) { - this.updatedKey = key; - return this; - } - - OMUpdateEventBuilder setValue(VALUE value) { - this.updatedValue = value; - return this; - } - - OMUpdateEventBuilder setSequenceNumber(long sequenceNumber) { - this.lastSequenceNumber = sequenceNumber; - return this; - } - - /** - * Build an OM update event. - * @return OMDBUpdateEvent - */ - public OMDBUpdateEvent build() { - return new OMDBUpdateEvent( - action, - table, - updatedKey, - updatedValue, - lastSequenceNumber); - } - } - - /** - * Supported Actions - PUT, DELETE. - */ - public enum OMDBUpdateAction { - PUT, DELETE - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java deleted file mode 100644 index 47d5900334e..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
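
The builder above is how update events get constructed; a construction sketch follows, placed in the same org.apache.hadoop.ozone.recon.tasks package because the setters are package-private, with the generic key/value parameters restored per the class javadoc. The sequence number is a made-up value.

    package org.apache.hadoop.ozone.recon.tasks;

    import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;

    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

    public class OMDBUpdateEventSketch {
      static OMDBUpdateEvent<String, OmKeyInfo> putEvent(String key, OmKeyInfo value) {
        return new OMDBUpdateEvent.OMUpdateEventBuilder<String, OmKeyInfo>()
            .setTable(KEY_TABLE)        // OM table the mutation belongs to
            .setKey(key)                // e.g. "/vol1/bucket1/key1"
            .setValue(value)            // decoded OmKeyInfo for PUTs, null for DELETEs
            .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT)
            .setSequenceNumber(100L)    // made-up RocksDB sequence number
            .build();
      }
    }
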

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.hdds.utils.db.CodecRegistry; -import org.apache.ratis.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteBatch; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Class used to listen on OM RocksDB updates. - */ -public class OMDBUpdatesHandler extends WriteBatch.Handler { - - private static final Logger LOG = - LoggerFactory.getLogger(OMDBUpdatesHandler.class); - - private Map tablesNames; - private CodecRegistry codecRegistry; - private List omdbUpdateEvents = new ArrayList<>(); - - public OMDBUpdatesHandler(OMMetadataManager omMetadataManager) { - tablesNames = omMetadataManager.getStore().getTableNames(); - codecRegistry = omMetadataManager.getStore().getCodecRegistry(); - } - - @Override - public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) throws - RocksDBException { - try { - processEvent(cfIndex, keyBytes, valueBytes, - OMDBUpdateEvent.OMDBUpdateAction.PUT); - } catch (IOException ioEx) { - LOG.error("Exception when reading key : " + ioEx); - } - } - - @Override - public void delete(int cfIndex, byte[] keyBytes) throws RocksDBException { - try { - processEvent(cfIndex, keyBytes, null, - OMDBUpdateEvent.OMDBUpdateAction.DELETE); - } catch (IOException ioEx) { - LOG.error("Exception when reading key : " + ioEx); - } - } - - /** - * - * @param cfIndex - * @param keyBytes - * @param valueBytes - * @param action - * @throws IOException - */ - private void processEvent(int cfIndex, byte[] keyBytes, byte[] - valueBytes, OMDBUpdateEvent.OMDBUpdateAction action) - throws IOException { - String tableName = tablesNames.get(cfIndex); - Class keyType = getKeyType(tableName); - Class valueType = getValueType(tableName); - if (valueType != null) { - OMDBUpdateEvent.OMUpdateEventBuilder builder = - new OMDBUpdateEvent.OMUpdateEventBuilder<>(); - builder.setTable(tableName); - - Object key = codecRegistry.asObject(keyBytes, keyType); - builder.setKey(key); - - if (!action.equals(OMDBUpdateEvent.OMDBUpdateAction.DELETE)) { - Object value = codecRegistry.asObject(valueBytes, valueType); - builder.setValue(value); - } - - builder.setAction(action); - OMDBUpdateEvent event = builder.build(); - LOG.debug("Generated OM update Event for table : " + event.getTable() - + ", Key = " + event.getKey() + ", action = " + event.getAction()); - // Temporarily adding to an event buffer for testing. In subsequent JIRAs, - // a Recon side class will be implemented that requests delta updates - // from OM and calls on this handler. 
In that case, we will fill up - // this buffer and pass it on to the ReconTaskController which has - // tasks waiting on OM events. - omdbUpdateEvents.add(event); - } - } - - // There are no use cases yet for the remaining methods in Recon. These - // will be implemented as and when need arises. - - @Override - public void put(byte[] bytes, byte[] bytes1) { - - } - - @Override - public void merge(int i, byte[] bytes, byte[] bytes1) - throws RocksDBException { - } - - @Override - public void merge(byte[] bytes, byte[] bytes1) { - } - - @Override - public void delete(byte[] bytes) { - } - - @Override - public void singleDelete(int i, byte[] bytes) throws RocksDBException { - } - - @Override - public void singleDelete(byte[] bytes) { - } - - @Override - public void deleteRange(int i, byte[] bytes, byte[] bytes1) - throws RocksDBException { - } - - @Override - public void deleteRange(byte[] bytes, byte[] bytes1) { - - } - - @Override - public void logData(byte[] bytes) { - - } - - @Override - public void putBlobIndex(int i, byte[] bytes, byte[] bytes1) - throws RocksDBException { - } - - @Override - public void markBeginPrepare() throws RocksDBException { - - } - - @Override - public void markEndPrepare(byte[] bytes) throws RocksDBException { - - } - - @Override - public void markNoop(boolean b) throws RocksDBException { - - } - - @Override - public void markRollback(byte[] bytes) throws RocksDBException { - - } - - @Override - public void markCommit(byte[] bytes) throws RocksDBException { - - } - - /** - * Return Key type class for a given table name. - * @param name table name. - * @return String.class by default. - */ - private Class getKeyType(String name) { - return String.class; - } - - /** - * Return Value type class for a given table. - * @param name table name - * @return Value type based on table name. - */ - @VisibleForTesting - protected Class getValueType(String name) { - switch (name) { - case KEY_TABLE : return OmKeyInfo.class; - case VOLUME_TABLE : return OmVolumeArgs.class; - case BUCKET_TABLE : return OmBucketInfo.class; - default: return null; - } - } - - /** - * Get List of events. (Temporary API to unit test the class). - * @return List of events. - */ - public List getEvents() { - return omdbUpdateEvents; - } - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java deleted file mode 100644 index f1374189767..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.stream.Collectors; - -/** - * Wrapper class to hold multiple OM DB update events. - */ -public class OMUpdateEventBatch { - - private List events; - - public OMUpdateEventBatch(Collection e) { - events = new ArrayList<>(e); - } - - /** - * Get Sequence Number and timestamp of last event in this batch. - * @return Event Info instance. - */ - long getLastSequenceNumber() { - if (events.isEmpty()) { - return -1; - } else { - return events.get(events.size() - 1).getSequenceNumber(); - } - } - - /** - * Return iterator to Event batch. - * @return iterator - */ - public Iterator getIterator() { - return events.iterator(); - } - - /** - * Filter events based on Tables. - * @param tables set of tables to filter on. - * @return trimmed event batch. - */ - public OMUpdateEventBatch filter(Collection tables) { - return new OMUpdateEventBatch(events - .stream() - .filter(e -> tables.contains(e.getTable())) - .collect(Collectors.toList())); - } - - /** - * Return if empty. - * @return true if empty, else false. - */ - public boolean isEmpty() { - return !getIterator().hasNext(); - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java deleted file mode 100644 index 426e0ae0a3e..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconDBUpdateTask.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
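
The filter method above is what lets the controller hand each task only the tables it subscribed to. A dispatch sketch under that assumption follows (it uses the ReconDBUpdateTask interface from the next deleted file; error handling and the retry bookkeeping of the real controller are omitted, and the generic parameters on Pair are restored from the task implementations earlier in this patch).

    package org.apache.hadoop.ozone.recon.tasks;

    import org.apache.commons.lang3.tuple.Pair;

    public class DispatchSketch {
      static void dispatch(ReconDBUpdateTask task, OMUpdateEventBatch batch) {
        // Trim the batch down to the tables this task listens on.
        OMUpdateEventBatch filtered = batch.filter(task.getTaskTables());
        if (!filtered.isEmpty()) {
          Pair<String, Boolean> result = task.process(filtered);
          // result: task name -> whether the task handled the events successfully.
        }
      }
    }
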

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import java.util.Collection; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.ozone.om.OMMetadataManager; - -/** - * Interface used to denote a Recon task that needs to act on OM DB events. - */ -public interface ReconDBUpdateTask { - - /** - * Return task name. - * @return task name - */ - String getTaskName(); - - /** - * Return the list of tables that the task is listening on. - * Empty list means the task is NOT listening on any tables. - * @return Collection of Tables. - */ - Collection getTaskTables(); - - /** - * Process a set of OM events on tables that the task is listening on. - * @param events Set of events to be processed by the task. - * @return Pair of task name -> task success. - */ - Pair process(OMUpdateEventBatch events); - - /** - * Process a on tables that the task is listening on. - * @param omMetadataManager OM Metadata manager instance. - * @return Pair of task name -> task success. - */ - Pair reprocess(OMMetadataManager omMetadataManager); - -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java deleted file mode 100644 index 728a199f5fb..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import java.util.Map; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; - -/** - * Controller used by Recon to manage Tasks that are waiting on Recon events. - */ -public interface ReconTaskController { - - /** - * Register API used by tasks to register themselves. - * @param task task instance - */ - void registerTask(ReconDBUpdateTask task); - - /** - * Pass on a set of OM DB update events to the registered tasks. - * @param events set of events - * @throws InterruptedException InterruptedException - */ - void consumeOMEvents(OMUpdateEventBatch events, - OMMetadataManager omMetadataManager) - throws InterruptedException; - - /** - * Pass on the handle to a new OM DB instance to the registered tasks. - * @param omMetadataManager OM Metadata Manager instance - */ - void reInitializeTasks(OMMetadataManager omMetadataManager) - throws InterruptedException; - - /** - * Get set of registered tasks. - * @return Map of Task name -> Task. - */ - Map getRegisteredTasks(); - - /** - * Get instance of ReconTaskStatusDao. - * @return instance of ReconTaskStatusDao - */ - ReconTaskStatusDao getReconTaskStatusDao(); - - /** - * Stop the tasks. Start API is not needed since it is implicit. - */ - void stop(); -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java deleted file mode 100644 index 9135705ccaa..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java +++ /dev/null @@ -1,245 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_DEFAULT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_TASK_THREAD_COUNT_KEY; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.jooq.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.inject.Inject; - -/** - * Implementation of ReconTaskController. - */ -public class ReconTaskControllerImpl implements ReconTaskController { - - private static final Logger LOG = - LoggerFactory.getLogger(ReconTaskControllerImpl.class); - - private Map reconDBUpdateTasks; - private ExecutorService executorService; - private int threadCount = 1; - private final Semaphore taskSemaphore = new Semaphore(1); - private Map taskFailureCounter = new HashMap<>(); - private static final int TASK_FAILURE_THRESHOLD = 2; - private ReconTaskStatusDao reconTaskStatusDao; - - @Inject - public ReconTaskControllerImpl(OzoneConfiguration configuration, - Configuration sqlConfiguration, - Set tasks) { - reconDBUpdateTasks = new HashMap<>(); - threadCount = configuration.getInt(OZONE_RECON_TASK_THREAD_COUNT_KEY, - OZONE_RECON_TASK_THREAD_COUNT_DEFAULT); - executorService = Executors.newFixedThreadPool(threadCount); - reconTaskStatusDao = new ReconTaskStatusDao(sqlConfiguration); - for (ReconDBUpdateTask task : tasks) { - registerTask(task); - } - } - - @Override - public void registerTask(ReconDBUpdateTask task) { - String taskName = task.getTaskName(); - LOG.info("Registered task " + taskName + " with controller."); - - // Store task in Task Map. - reconDBUpdateTasks.put(taskName, task); - // Store Task in Task failure tracker. - taskFailureCounter.put(taskName, new AtomicInteger(0)); - // Create DB record for the task. - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, - 0L, 0L); - if (!reconTaskStatusDao.existsById(taskName)) { - reconTaskStatusDao.insert(reconTaskStatusRecord); - } - } - - /** - * For every registered task, we try process step twice and then reprocess - * once (if process failed twice) to absorb the events. If a task has failed - * reprocess call more than 2 times across events, it is unregistered - * (blacklisted). 
- * @param events set of events - * @throws InterruptedException - */ - @Override - public void consumeOMEvents(OMUpdateEventBatch events, - OMMetadataManager omMetadataManager) - throws InterruptedException { - taskSemaphore.acquire(); - - try { - if (!events.isEmpty()) { - Collection> tasks = new ArrayList<>(); - for (Map.Entry taskEntry : - reconDBUpdateTasks.entrySet()) { - ReconDBUpdateTask task = taskEntry.getValue(); - Collection tables = task.getTaskTables(); - tasks.add(() -> task.process(events.filter(tables))); - } - - List> results = executorService.invokeAll(tasks); - List failedTasks = processTaskResults(results, events); - - // Retry - List retryFailedTasks = new ArrayList<>(); - if (!failedTasks.isEmpty()) { - tasks.clear(); - for (String taskName : failedTasks) { - ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName); - Collection tables = task.getTaskTables(); - tasks.add(() -> task.process(events.filter(tables))); - } - results = executorService.invokeAll(tasks); - retryFailedTasks = processTaskResults(results, events); - } - - // Reprocess the failed tasks. - // TODO Move to a separate task queue since reprocess may be a heavy - // operation for large OM DB instances - if (!retryFailedTasks.isEmpty()) { - tasks.clear(); - for (String taskName : failedTasks) { - ReconDBUpdateTask task = reconDBUpdateTasks.get(taskName); - tasks.add(() -> task.reprocess(omMetadataManager)); - } - results = executorService.invokeAll(tasks); - List reprocessFailedTasks = - processTaskResults(results, events); - for (String taskName : reprocessFailedTasks) { - LOG.info("Reprocess step failed for task : " + taskName); - if (taskFailureCounter.get(taskName).incrementAndGet() > - TASK_FAILURE_THRESHOLD) { - LOG.info("Blacklisting Task since it failed retry and " + - "reprocess more than " + TASK_FAILURE_THRESHOLD + " times."); - reconDBUpdateTasks.remove(taskName); - } - } - } - } - } catch (ExecutionException e) { - LOG.error("Unexpected error : ", e); - } finally { - taskSemaphore.release(); - } - } - - @Override - public void reInitializeTasks(OMMetadataManager omMetadataManager) - throws InterruptedException { - taskSemaphore.acquire(); - - try { - Collection> tasks = new ArrayList<>(); - for (Map.Entry taskEntry : - reconDBUpdateTasks.entrySet()) { - ReconDBUpdateTask task = taskEntry.getValue(); - tasks.add(() -> task.reprocess(omMetadataManager)); - } - - List> results = executorService.invokeAll(tasks); - for (Future f : results) { - String taskName = f.get().getLeft().toString(); - if (!(Boolean)f.get().getRight()) { - LOG.info("Init failed for task : " + taskName); - } - } - } catch (ExecutionException e) { - LOG.error("Unexpected error : ", e); - } finally { - taskSemaphore.release(); - } - } - - /** - * Store the last completed event sequence number and timestamp to the DB - * for that task. - * @param taskName taskname to be updated. - * @param lastSequenceNumber contains the new sequence number. - */ - private void storeLastCompletedTransaction( - String taskName, long lastSequenceNumber) { - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, - System.currentTimeMillis(), lastSequenceNumber); - reconTaskStatusDao.update(reconTaskStatusRecord); - } - - @Override - public Map getRegisteredTasks() { - return reconDBUpdateTasks; - } - - @Override - public ReconTaskStatusDao getReconTaskStatusDao() { - return reconTaskStatusDao; - } - - @Override - public void stop() { - this.executorService.shutdownNow(); - } - - /** - * Wait on results of all tasks. 
- * @param results Set of Futures. - * @param events Events. - * @return List of failed task names - * @throws ExecutionException execution Exception - * @throws InterruptedException Interrupted Exception - */ - private List processTaskResults(List> results, - OMUpdateEventBatch events) - throws ExecutionException, InterruptedException { - List failedTasks = new ArrayList<>(); - for (Future f : results) { - String taskName = f.get().getLeft().toString(); - if (!(Boolean)f.get().getRight()) { - LOG.info("Failed task : " + taskName); - failedTasks.add(f.get().getLeft().toString()); - } else { - taskFailureCounter.get(taskName).set(0); - storeLastCompletedTransaction(taskName, events.getLastSequenceNumber()); - } - } - return failedTasks; - } -} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java deleted file mode 100644 index fe47f4d0ff9..00000000000 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The classes in this package contains the various scheduled tasks used by - * Recon. - */ -package org.apache.hadoop.ozone.recon.tasks; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml b/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml deleted file mode 100644 index 972f3bbcbae..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/WEB-INF/web.xml +++ /dev/null @@ -1,28 +0,0 @@ - - - - org.apache.hadoop.ozone.recon.ReconGuiceServletContextListener - - - guiceFilter - com.google.inject.servlet.GuiceFilter - - - guiceFilter - /* - - \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore deleted file mode 100644 index 4d29575de80..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. - -# dependencies -/node_modules -/.pnp -.pnp.js - -# testing -/coverage - -# production -/build - -# misc -.DS_Store -.env.local -.env.development.local -.env.test.local -.env.production.local - -npm-debug.log* -yarn-debug.log* -yarn-error.log* diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE deleted file mode 100644 index e6a896f6edc..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/LICENSE +++ /dev/null @@ -1,17279 +0,0 @@ - --------------------------------------------------------------------------------- - -THE FOLLOWING SETS FORTH ATTRIBUTION NOTICES FOR THIRD PARTY SOFTWARE THAT MAY BE CONTAINED IN PORTIONS OF THE OZONE RECON PRODUCT. - ------ - -The following software may be included in this product: @ant-design/create-react-context, create-react-context. A copy of the source code may be downloaded from https://github.com/ant-design/create-react-context (@ant-design/create-react-context), https://github.com/thejameskyle/create-react-context (create-react-context). This software contains the following license and notice below: - -Copyright (c) 2017-present James Kyle - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: @babel/code-frame, @babel/helper-annotate-as-pure, @babel/helper-get-function-arity, @babel/helper-member-expression-to-functions, @babel/helper-module-imports, @babel/helper-optimise-call-expression, @babel/helper-plugin-utils, @babel/highlight, @babel/preset-react. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-code-frame (@babel/code-frame), https://github.com/babel/babel/tree/master/packages/babel-helper-annotate-as-pure (@babel/helper-annotate-as-pure), https://github.com/babel/babel/tree/master/packages/babel-helper-get-function-arity (@babel/helper-get-function-arity), https://github.com/babel/babel/tree/master/packages/babel-helper-member-expression-to-functions (@babel/helper-member-expression-to-functions), https://github.com/babel/babel/tree/master/packages/babel-helper-module-imports (@babel/helper-module-imports), https://github.com/babel/babel/tree/master/packages/babel-helper-optimise-call-expression (@babel/helper-optimise-call-expression), https://github.com/babel/babel/tree/master/packages/babel-helper-plugin-utils (@babel/helper-plugin-utils), https://github.com/babel/babel/tree/master/packages/babel-highlight (@babel/highlight), https://github.com/babel/babel/tree/master/packages/babel-preset-react (@babel/preset-react). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014-2018 Sebastian McKenzie - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- ------ - -The following software may be included in this product: @babel/core, @babel/generator, @babel/helper-builder-react-jsx, @babel/helper-call-delegate, @babel/helper-create-class-features-plugin, @babel/helper-define-map, @babel/helper-hoist-variables, @babel/helper-module-transforms, @babel/helper-regex, @babel/helper-replace-supers, @babel/helper-split-export-declaration, @babel/helpers, @babel/plugin-proposal-class-properties, @babel/plugin-proposal-decorators, @babel/plugin-proposal-object-rest-spread, @babel/plugin-proposal-unicode-property-regex, @babel/plugin-syntax-typescript, @babel/plugin-transform-async-to-generator, @babel/plugin-transform-block-scoping, @babel/plugin-transform-classes, @babel/plugin-transform-destructuring, @babel/plugin-transform-dotall-regex, @babel/plugin-transform-flow-strip-types, @babel/plugin-transform-for-of, @babel/plugin-transform-function-name, @babel/plugin-transform-modules-commonjs, @babel/plugin-transform-modules-systemjs, @babel/plugin-transform-named-capturing-groups-regex, @babel/plugin-transform-new-target, @babel/plugin-transform-parameters, @babel/plugin-transform-react-jsx, @babel/plugin-transform-regenerator, @babel/plugin-transform-runtime, @babel/plugin-transform-typescript, @babel/plugin-transform-unicode-regex, @babel/preset-env, @babel/preset-typescript, @babel/runtime, @babel/template, @babel/traverse, @babel/types. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-core (@babel/core), https://github.com/babel/babel/tree/master/packages/babel-generator (@babel/generator), https://github.com/babel/babel/tree/master/packages/babel-helper-builder-react-jsx (@babel/helper-builder-react-jsx), https://github.com/babel/babel/tree/master/packages/babel-helper-call-delegate (@babel/helper-call-delegate), https://github.com/babel/babel/tree/master/packages/babel-helper-create-class-features-plugin (@babel/helper-create-class-features-plugin), https://github.com/babel/babel/tree/master/packages/babel-helper-define-map (@babel/helper-define-map), https://github.com/babel/babel/tree/master/packages/babel-helper-hoist-variables (@babel/helper-hoist-variables), https://github.com/babel/babel/tree/master/packages/babel-helper-module-transforms (@babel/helper-module-transforms), https://github.com/babel/babel/tree/master/packages/babel-helper-regex (@babel/helper-regex), https://github.com/babel/babel/tree/master/packages/babel-helper-replace-supers (@babel/helper-replace-supers), https://github.com/babel/babel/tree/master/packages/babel-helper-split-export-declaration (@babel/helper-split-export-declaration), https://github.com/babel/babel/tree/master/packages/babel-helpers (@babel/helpers), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-class-properties (@babel/plugin-proposal-class-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-decorators (@babel/plugin-proposal-decorators), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-object-rest-spread (@babel/plugin-proposal-object-rest-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-unicode-property-regex (@babel/plugin-proposal-unicode-property-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-typescript (@babel/plugin-syntax-typescript), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-async-to-generator (@babel/plugin-transform-async-to-generator), 
https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-block-scoping (@babel/plugin-transform-block-scoping), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-classes (@babel/plugin-transform-classes), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-destructuring (@babel/plugin-transform-destructuring), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-dotall-regex (@babel/plugin-transform-dotall-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-flow-strip-types (@babel/plugin-transform-flow-strip-types), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-for-of (@babel/plugin-transform-for-of), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-function-name (@babel/plugin-transform-function-name), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-commonjs (@babel/plugin-transform-modules-commonjs), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-systemjs (@babel/plugin-transform-modules-systemjs), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-named-capturing-groups-regex (@babel/plugin-transform-named-capturing-groups-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-new-target (@babel/plugin-transform-new-target), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-parameters (@babel/plugin-transform-parameters), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx (@babel/plugin-transform-react-jsx), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-regenerator (@babel/plugin-transform-regenerator), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-runtime (@babel/plugin-transform-runtime), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-typescript (@babel/plugin-transform-typescript), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-unicode-regex (@babel/plugin-transform-unicode-regex), https://github.com/babel/babel/tree/master/packages/babel-preset-env (@babel/preset-env), https://github.com/babel/babel/tree/master/packages/babel-preset-typescript (@babel/preset-typescript), https://github.com/babel/babel/tree/master/packages/babel-runtime (@babel/runtime), https://github.com/babel/babel/tree/master/packages/babel-template (@babel/template), https://github.com/babel/babel/tree/master/packages/babel-traverse (@babel/traverse), https://github.com/babel/babel/tree/master/packages/babel-types (@babel/types). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014-present Sebastian McKenzie and other contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: @babel/helper-builder-binary-assignment-operator-visitor, @babel/helper-explode-assignable-expression, @babel/helper-function-name, @babel/helper-remap-async-to-generator, @babel/helper-simple-access, @babel/helper-wrap-function, @babel/plugin-proposal-async-generator-functions, @babel/plugin-proposal-json-strings, @babel/plugin-proposal-optional-catch-binding, @babel/plugin-syntax-async-generators, @babel/plugin-syntax-decorators, @babel/plugin-syntax-dynamic-import, @babel/plugin-syntax-flow, @babel/plugin-syntax-json-strings, @babel/plugin-syntax-jsx, @babel/plugin-syntax-object-rest-spread, @babel/plugin-syntax-optional-catch-binding, @babel/plugin-transform-arrow-functions, @babel/plugin-transform-block-scoped-functions, @babel/plugin-transform-computed-properties, @babel/plugin-transform-duplicate-keys, @babel/plugin-transform-exponentiation-operator, @babel/plugin-transform-literals, @babel/plugin-transform-member-expression-literals, @babel/plugin-transform-modules-amd, @babel/plugin-transform-modules-umd, @babel/plugin-transform-object-super, @babel/plugin-transform-property-literals, @babel/plugin-transform-react-constant-elements, @babel/plugin-transform-react-display-name, @babel/plugin-transform-react-jsx-self, @babel/plugin-transform-react-jsx-source, @babel/plugin-transform-reserved-words, @babel/plugin-transform-shorthand-properties, @babel/plugin-transform-spread, @babel/plugin-transform-sticky-regex, @babel/plugin-transform-template-literals, @babel/plugin-transform-typeof-symbol. 
A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-helper-builder-binary-assignment-operator-visitor (@babel/helper-builder-binary-assignment-operator-visitor), https://github.com/babel/babel/tree/master/packages/babel-helper-explode-assignable-expression (@babel/helper-explode-assignable-expression), https://github.com/babel/babel/tree/master/packages/babel-helper-function-name (@babel/helper-function-name), https://github.com/babel/babel/tree/master/packages/babel-helper-remap-async-to-generator (@babel/helper-remap-async-to-generator), https://github.com/babel/babel/tree/master/packages/babel-helper-simple-access (@babel/helper-simple-access), https://github.com/babel/babel/tree/master/packages/babel-helper-wrap-function (@babel/helper-wrap-function), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-async-generator-functions (@babel/plugin-proposal-async-generator-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-json-strings (@babel/plugin-proposal-json-strings), https://github.com/babel/babel/tree/master/packages/babel-plugin-proposal-optional-catch-binding (@babel/plugin-proposal-optional-catch-binding), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-async-generators (@babel/plugin-syntax-async-generators), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-decorators (@babel/plugin-syntax-decorators), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-dynamic-import (@babel/plugin-syntax-dynamic-import), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-flow (@babel/plugin-syntax-flow), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-json-strings (@babel/plugin-syntax-json-strings), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-jsx (@babel/plugin-syntax-jsx), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-object-rest-spread (@babel/plugin-syntax-object-rest-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-syntax-optional-catch-binding (@babel/plugin-syntax-optional-catch-binding), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-arrow-functions (@babel/plugin-transform-arrow-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-block-scoped-functions (@babel/plugin-transform-block-scoped-functions), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-computed-properties (@babel/plugin-transform-computed-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-duplicate-keys (@babel/plugin-transform-duplicate-keys), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-exponentiation-operator (@babel/plugin-transform-exponentiation-operator), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-literals (@babel/plugin-transform-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-member-expression-literals (@babel/plugin-transform-member-expression-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-amd (@babel/plugin-transform-modules-amd), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-modules-umd (@babel/plugin-transform-modules-umd), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-object-super 
(@babel/plugin-transform-object-super), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-property-literals (@babel/plugin-transform-property-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-constant-elements (@babel/plugin-transform-react-constant-elements), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-display-name (@babel/plugin-transform-react-display-name), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx-self (@babel/plugin-transform-react-jsx-self), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-react-jsx-source (@babel/plugin-transform-react-jsx-source), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-reserved-words (@babel/plugin-transform-reserved-words), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-shorthand-properties (@babel/plugin-transform-shorthand-properties), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-spread (@babel/plugin-transform-spread), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-sticky-regex (@babel/plugin-transform-sticky-regex), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-template-literals (@babel/plugin-transform-template-literals), https://github.com/babel/babel/tree/master/packages/babel-plugin-transform-typeof-symbol (@babel/plugin-transform-typeof-symbol). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014-2018 Sebastian McKenzie and other contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: @babel/parser, babylon. A copy of the source code may be downloaded from https://github.com/babel/babel/tree/master/packages/babel-parser (@babel/parser), https://github.com/babel/babylon (babylon). 
This software contains the following license and notice below: - -Copyright (C) 2012-2014 by various contributors (see AUTHORS) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: @cnakazawa/watch, aws-sign2, forever-agent, oauth-sign, request, tunnel-agent. A copy of the source code may be downloaded from git://github.com/mikeal/watch.git (@cnakazawa/watch), https://github.com/mikeal/aws-sign (aws-sign2), https://github.com/mikeal/forever-agent (forever-agent), https://github.com/mikeal/oauth-sign (oauth-sign), https://github.com/request/request.git (request), https://github.com/mikeal/tunnel-agent (tunnel-agent). This software contains the following license and notice below: - -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
- -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and - -You must cause any modified files to carry prominent notices stating that You changed the files; and - -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: @csstools/convert-colors, css-blank-pseudo, css-has-pseudo, css-prefers-color-scheme, postcss-browser-comments, postcss-color-functional-notation, postcss-color-mod-function, postcss-dir-pseudo-class, postcss-double-position-gradients, postcss-env-function, postcss-focus-visible, postcss-focus-within, postcss-gap-properties, postcss-image-set-function, postcss-lab-function, postcss-logical, postcss-nesting, postcss-normalize, postcss-overflow-shorthand, postcss-place, postcss-preset-env, postcss-pseudo-class-any-link. 
A copy of the source code may be downloaded from https://github.com/jonathantneal/convert-colors.git (@csstools/convert-colors), https://github.com/csstools/css-blank-pseudo.git (css-blank-pseudo), https://github.com/csstools/css-has-pseudo.git (css-has-pseudo), https://github.com/csstools/css-prefers-color-scheme.git (css-prefers-color-scheme), https://github.com/csstools/postcss-browser-comments.git (postcss-browser-comments), https://github.com/jonathantneal/postcss-color-functional-notation.git (postcss-color-functional-notation), https://github.com/jonathantneal/postcss-color-mod-function.git (postcss-color-mod-function), https://github.com/jonathantneal/postcss-dir-pseudo-class.git (postcss-dir-pseudo-class), https://github.com/jonathantneal/postcss-double-position-gradients.git (postcss-double-position-gradients), https://github.com/jonathantneal/postcss-env-function.git (postcss-env-function), https://github.com/jonathantneal/postcss-focus-visible.git (postcss-focus-visible), https://github.com/jonathantneal/postcss-focus-within.git (postcss-focus-within), https://github.com/jonathantneal/postcss-gap-properties.git (postcss-gap-properties), https://github.com/jonathantneal/postcss-image-set-function.git (postcss-image-set-function), https://github.com/jonathantneal/postcss-lab-function.git (postcss-lab-function), https://github.com/jonathantneal/postcss-logical.git (postcss-logical), https://github.com/jonathantneal/postcss-nesting.git (postcss-nesting), https://github.com/csstools/postcss-normalize.git (postcss-normalize), https://github.com/jonathantneal/postcss-overflow-shorthand.git (postcss-overflow-shorthand), https://github.com/jonathantneal/postcss-place.git (postcss-place), https://github.com/csstools/postcss-preset-env.git (postcss-preset-env), https://github.com/jonathantneal/postcss-pseudo-class-any-link.git (postcss-pseudo-class-any-link). This software contains the following license and notice below: - -# CC0 1.0 Universal - -## Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator and -subsequent owner(s) (each and all, an “owner”) of an original work of -authorship and/or a database (each, a “Work”). - -Certain owners wish to permanently relinquish those rights to a Work for the -purpose of contributing to a commons of creative, cultural and scientific works -(“Commons”) that the public can reliably and without fear of later claims of -infringement build upon, modify, incorporate in other works, reuse and -redistribute as freely as possible in any form whatsoever and for any purposes, -including without limitation commercial purposes. These owners may contribute -to the Commons to promote the ideal of a free culture and the further -production of creative, cultural and scientific works, or to gain reputation or -greater distribution for their Work in part through the use and efforts of -others. - -For these and/or other purposes and motivations, and without any expectation of -additional consideration or compensation, the person associating CC0 with a -Work (the “Affirmer”), to the extent that he or she is an owner of Copyright -and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and -publicly distribute the Work under its terms, with knowledge of his or her -Copyright and Related Rights in the Work and the meaning and intended legal -effect of CC0 on those rights. - -1. Copyright and Related Rights. 
A Work made available under CC0 may be - protected by copyright and related or neighboring rights (“Copyright and - Related Rights”). Copyright and Related Rights include, but are not limited - to, the following: - 1. the right to reproduce, adapt, distribute, perform, display, communicate, - and translate a Work; - 2. moral rights retained by the original author(s) and/or performer(s); - 3. publicity and privacy rights pertaining to a person’s image or likeness - depicted in a Work; - 4. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(i), below; - 5. rights protecting the extraction, dissemination, use and reuse of data in - a Work; - 6. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation thereof, - including any amended or successor version of such directive); and - 7. other similar, equivalent or corresponding rights throughout the world - based on applicable law or treaty, and any national implementations - thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention of, - applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and - unconditionally waives, abandons, and surrenders all of Affirmer’s Copyright - and Related Rights and associated claims and causes of action, whether now - known or unknown (including existing as well as future claims and causes of - action), in the Work (i) in all territories worldwide, (ii) for the maximum - duration provided by applicable law or treaty (including future time - extensions), (iii) in any current or future medium and for any number of - copies, and (iv) for any purpose whatsoever, including without limitation - commercial, advertising or promotional purposes (the “Waiver”). Affirmer - makes the Waiver for the benefit of each member of the public at large and - to the detriment of Affirmer’s heirs and successors, fully intending that - such Waiver shall not be subject to revocation, rescission, cancellation, - termination, or any other legal or equitable action to disrupt the quiet - enjoyment of the Work by the public as contemplated by Affirmer’s express - Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason be - judged legally invalid or ineffective under applicable law, then the Waiver - shall be preserved to the maximum extent permitted taking into account - Affirmer’s express Statement of Purpose. In addition, to the extent the - Waiver is so judged Affirmer hereby grants to each affected person a - royalty-free, non transferable, non sublicensable, non exclusive, - irrevocable and unconditional license to exercise Affirmer’s Copyright and - Related Rights in the Work (i) in all territories worldwide, (ii) for the - maximum duration provided by applicable law or treaty (including future time - extensions), (iii) in any current or future medium and for any number of - copies, and (iv) for any purpose whatsoever, including without limitation - commercial, advertising or promotional purposes (the “License”). The License - shall be deemed effective as of the date CC0 was applied by Affirmer to the - Work. 
Should any part of the License for any reason be judged legally - invalid or ineffective under applicable law, such partial invalidity or - ineffectiveness shall not invalidate the remainder of the License, and in - such case Affirmer hereby affirms that he or she will not (i) exercise any - of his or her remaining Copyright and Related Rights in the Work or (ii) - assert any associated claims and causes of action with respect to the Work, - in either case contrary to Affirmer’s express Statement of Purpose. - -4. Limitations and Disclaimers. - 1. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - 2. Affirmer offers the Work as-is and makes no representations or warranties - of any kind concerning the Work, express, implied, statutory or - otherwise, including without limitation warranties of title, - merchantability, fitness for a particular purpose, non infringement, or - the absence of latent or other defects, accuracy, or the present or - absence of errors, whether or not discoverable, all to the greatest - extent permissible under applicable law. - 3. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person’s Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the Work. - 4. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to this - CC0 or use of the Work. - -For more information, please see -http://creativecommons.org/publicdomain/zero/1.0/. - ------ - -The following software may be included in this product: @csstools/normalize.css. A copy of the source code may be downloaded from https://github.com/csstools/normalize.css.git. This software contains the following license and notice below: - -# The MIT License (MIT) - -Copyright © Jonathan Neal and Nicolas Gallagher - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- ------ - -The following software may be included in this product: @jest/console, @jest/core, @jest/environment, @jest/fake-timers, @jest/reporters, @jest/source-map, @jest/test-result, @jest/test-sequencer, @jest/transform, @jest/types, babel-jest, babel-plugin-jest-hoist, babel-preset-jest, diff-sequences, expect, jest, jest-changed-files, jest-cli, jest-config, jest-diff, jest-docblock, jest-each, jest-environment-jsdom, jest-environment-node, jest-get-type, jest-haste-map, jest-jasmine2, jest-leak-detector, jest-matcher-utils, jest-message-util, jest-mock, jest-regex-util, jest-resolve, jest-resolve-dependencies, jest-runner, jest-runtime, jest-serializer, jest-snapshot, jest-util, jest-validate, jest-watcher, jest-worker, pretty-format. A copy of the source code may be downloaded from https://github.com/facebook/jest.git (@jest/console), https://github.com/facebook/jest (@jest/core), https://github.com/facebook/jest.git (@jest/environment), https://github.com/facebook/jest.git (@jest/fake-timers), https://github.com/facebook/jest (@jest/reporters), https://github.com/facebook/jest.git (@jest/source-map), https://github.com/facebook/jest.git (@jest/test-result), https://github.com/facebook/jest.git (@jest/test-sequencer), https://github.com/facebook/jest.git (@jest/transform), https://github.com/facebook/jest.git (@jest/types), https://github.com/facebook/jest.git (babel-jest), https://github.com/facebook/jest.git (babel-plugin-jest-hoist), https://github.com/facebook/jest.git (babel-preset-jest), https://github.com/facebook/jest.git (diff-sequences), https://github.com/facebook/jest.git (expect), https://github.com/facebook/jest (jest), https://github.com/facebook/jest.git (jest-changed-files), https://github.com/facebook/jest (jest-cli), https://github.com/facebook/jest.git (jest-config), https://github.com/facebook/jest.git (jest-diff), https://github.com/facebook/jest.git (jest-docblock), https://github.com/facebook/jest.git (jest-each), https://github.com/facebook/jest.git (jest-environment-jsdom), https://github.com/facebook/jest.git (jest-environment-node), https://github.com/facebook/jest.git (jest-get-type), https://github.com/facebook/jest.git (jest-haste-map), https://github.com/facebook/jest.git (jest-jasmine2), https://github.com/facebook/jest.git (jest-leak-detector), https://github.com/facebook/jest.git (jest-matcher-utils), https://github.com/facebook/jest.git (jest-message-util), https://github.com/facebook/jest.git (jest-mock), https://github.com/facebook/jest.git (jest-regex-util), https://github.com/facebook/jest.git (jest-resolve), https://github.com/facebook/jest.git (jest-resolve-dependencies), https://github.com/facebook/jest.git (jest-runner), https://github.com/facebook/jest.git (jest-runtime), https://github.com/facebook/jest.git (jest-serializer), https://github.com/facebook/jest.git (jest-snapshot), https://github.com/facebook/jest.git (jest-util), https://github.com/facebook/jest.git (jest-validate), https://github.com/facebook/jest (jest-watcher), https://github.com/facebook/jest.git (jest-worker), https://github.com/facebook/jest.git (pretty-format). This software contains the following license and notice below: - -MIT License - -For Jest software - -Copyright (c) 2014-present, Facebook, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: @mrmlnc/readdir-enhanced. A copy of the source code may be downloaded from https://github.com/bigstickcarpet/readdir-enhanced.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 James Messinger - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -. - ------ - -The following software may be included in this product: @svgr/babel-plugin-add-jsx-attribute, @svgr/babel-plugin-remove-jsx-attribute, @svgr/babel-plugin-remove-jsx-empty-expression, @svgr/babel-plugin-replace-jsx-attribute-value, @svgr/babel-plugin-svg-dynamic-title, @svgr/babel-plugin-svg-em-dimensions, @svgr/babel-plugin-transform-react-native-svg, @svgr/babel-plugin-transform-svg-component, @svgr/babel-preset, @svgr/core, @svgr/hast-util-to-babel-ast, @svgr/plugin-jsx, @svgr/plugin-svgo, @svgr/webpack. 
A copy of the source code may be downloaded from https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-add-jsx-attribute (@svgr/babel-plugin-add-jsx-attribute), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-remove-jsx-attribute (@svgr/babel-plugin-remove-jsx-attribute), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-remove-jsx-empty-expression (@svgr/babel-plugin-remove-jsx-empty-expression), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-replace-jsx-attribute-value (@svgr/babel-plugin-replace-jsx-attribute-value), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-svg-dynamic-title (@svgr/babel-plugin-svg-dynamic-title), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-svg-em-dimensions (@svgr/babel-plugin-svg-em-dimensions), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-transform-react-native-svg (@svgr/babel-plugin-transform-react-native-svg), https://github.com/smooth-code/svgr/tree/master/packages/babel-plugin-transform-svg-component (@svgr/babel-plugin-transform-svg-component), https://github.com/smooth-code/svgr/tree/master/packages/babel-preset (@svgr/babel-preset), https://github.com/smooth-code/svgr/tree/master/packages/core (@svgr/core), https://github.com/smooth-code/svgr/tree/master/packages/hast-util-to-babel-ast (@svgr/hast-util-to-babel-ast), https://github.com/smooth-code/svgr/tree/master/packages/plugin-jsx (@svgr/plugin-jsx), https://github.com/smooth-code/svgr/tree/master/packages/plugin-svgo (@svgr/plugin-svgo), git@github.com:smooth-code/svgr.git (@svgr/webpack). This software contains the following license and notice below: - -Copyright 2017 Smooth Code - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: @types/babel__core, @types/babel__generator, @types/babel__template, @types/babel__traverse, @types/history, @types/hoist-non-react-statics, @types/istanbul-lib-coverage, @types/jest, @types/jest-diff, @types/node, @types/prop-types, @types/q, @types/react, @types/react-dom, @types/react-router, @types/react-router-dom, @types/react-slick, @types/stack-utils, @types/unist, @types/vfile, @types/vfile-message, @types/yargs. 
A copy of the source code may be downloaded from https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__core), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__generator), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__template), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/babel__traverse), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/history), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/hoist-non-react-statics), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/istanbul-lib-coverage), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/jest), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/jest-diff), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/node), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/prop-types), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/q), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-dom), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-router), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-router-dom), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/react-slick), https://www.github.com/DefinitelyTyped/DefinitelyTyped.git (@types/stack-utils), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/unist), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/vfile), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/vfile-message), https://github.com/DefinitelyTyped/DefinitelyTyped.git (@types/yargs). This software contains the following license and notice below: - -MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE - ------ - -The following software may be included in this product: @typescript-eslint/eslint-plugin. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2019 TypeScript ESLint and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: @typescript-eslint/parser. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. This software contains the following license and notice below: - -TypeScript ESLint Parser -Copyright JS Foundation and other contributors, https://js.foundation - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: @typescript-eslint/typescript-estree. A copy of the source code may be downloaded from https://github.com/typescript-eslint/typescript-eslint.git. This software contains the following license and notice below: - -TypeScript ESTree - -Originally extracted from: - -TypeScript ESLint Parser -Copyright JS Foundation and other contributors, https://js.foundation - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: @webassemblyjs/ast, @webassemblyjs/helper-api-error, @webassemblyjs/helper-buffer, @webassemblyjs/helper-code-frame, @webassemblyjs/helper-fsm, @webassemblyjs/helper-module-context, @webassemblyjs/helper-wasm-bytecode, @webassemblyjs/helper-wasm-section, @webassemblyjs/ieee754, @webassemblyjs/utf8, @webassemblyjs/wasm-edit, @webassemblyjs/wasm-gen, @webassemblyjs/wasm-opt, @webassemblyjs/wasm-parser, @webassemblyjs/wast-parser, @webassemblyjs/wast-printer. A copy of the source code may be downloaded from https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/ast), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-buffer), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-code-frame), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-module-context), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-wasm-bytecode), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/helper-wasm-section), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/utf8), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-edit), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-gen), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-opt), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wasm-parser), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wast-parser), https://github.com/xtuc/webassemblyjs.git (@webassemblyjs/wast-printer). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Sven Sauleau - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: @webassemblyjs/floating-point-hex-parser. A copy of the source code may be downloaded from https://github.com/xtuc/webassemblyjs.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Mauro Bringolf - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: @webassemblyjs/leb128. This software contains the following license and notice below: - -Copyright 2012 The Obvious Corporation. -http://obvious.com/ - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -------------------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: @xtuc/ieee754. A copy of the source code may be downloaded from git://github.com/feross/ieee754.git. This software contains the following license and notice below: - -Copyright (c) 2008, Fair Oaks Labs, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Fair Oaks Labs, Inc. nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: @xtuc/long, spdx-correct, validate-npm-package-license. A copy of the source code may be downloaded from https://github.com/dcodeIO/long.js.git (@xtuc/long), https://github.com/jslicense/spdx-correct.js.git (spdx-correct), https://github.com/kemitchell/validate-npm-package-license.js.git (validate-npm-package-license). This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: abab. A copy of the source code may be downloaded from git+https://github.com/jsdom/abab.git. This software contains the following license and notice below: - -Both the original source code and new contributions in this repository are released under the [W3C 3-clause BSD license](https://github.com/w3c/web-platform-tests/blob/master/LICENSE.md#w3c-3-clause-bsd-license). - -# W3C 3-clause BSD License - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of works must retain the original copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the original copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -* Neither the name of the W3C nor the names of its contributors may be used to endorse or promote products derived from this work without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: abbrev. A copy of the source code may be downloaded from http://github.com/isaacs/abbrev-js. This software contains the following license and notice below: - -This software is dual-licensed under the ISC and MIT licenses. -You may use this software under EITHER of the following licenses. - ----------- - -The ISC License - -Copyright (c) Isaac Z. 
Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ----------- - -Copyright Isaac Z. Schlueter and Contributors -All rights reserved. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: accepts, mime-types. A copy of the source code may be downloaded from https://github.com/jshttp/accepts.git (accepts), https://github.com/jshttp/mime-types.git (mime-types). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Jonathan Ong -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: acorn, acorn-walk. A copy of the source code may be downloaded from https://github.com/acornjs/acorn.git (acorn), https://github.com/acornjs/acorn.git (acorn-walk). 
This software contains the following license and notice below: - -Copyright (C) 2012-2018 by various contributors (see AUTHORS) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: acorn-dynamic-import. A copy of the source code may be downloaded from https://github.com/kesne/acorn-dynamic-import. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016 Jordan Gensler - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: acorn-globals, is-promise. A copy of the source code may be downloaded from https://github.com/ForbesLindesay/acorn-globals.git (acorn-globals), https://github.com/then/is-promise.git (is-promise). This software contains the following license and notice below: - -Copyright (c) 2014 Forbes Lindesay - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: acorn-jsx. A copy of the source code may be downloaded from https://github.com/RReverser/acorn-jsx. This software contains the following license and notice below: - -Copyright (C) 2012-2017 by Ingvar Stepanyan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: add-dom-event-listener, async-validator, css-animation, dom-align, rc-align, rc-animate, rc-collapse, rc-dialog, rc-dropdown, rc-form, rc-input-number, rc-menu, rc-notification, rc-pagination, rc-progress, rc-rate, rc-steps, rc-switch, rc-tabs, rc-time-picker. A copy of the source code may be downloaded from git@github.com:yiminghe/add-dom-event-listener (add-dom-event-listener), git@github.com:yiminghe/async-validator.git (async-validator), git@github.com:yiminghe/css-animation.git (css-animation), git@github.com:yiminghe/dom-align.git (dom-align), git@github.com:react-component/align.git (rc-align), git@github.com:react-component/animate.git (rc-animate), git@github.com:react-component/collapse.git (rc-collapse), git@github.com:react-component/dialog.git (rc-dialog), git@github.com:react-component/dropdown.git (rc-dropdown), https://github.com/react-component/form.git (rc-form), git@github.com:react-component/input-number.git (rc-input-number), git@github.com:react-component/menu.git (rc-menu), git@github.com:react-component/notification.git (rc-notification), git@github.com:react-component/pagination.git (rc-pagination), git@github.com:react-component/progress.git (rc-progress), https://github.com/react-component/rate.git (rc-rate), git+ssh://git@github.com/react-component/steps.git (rc-steps), git@github.com:react-component/switch.git (rc-switch), git@github.com:react-component/tabs.git (rc-tabs), git@github.com:react-component/time-picker.git (rc-time-picker). 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-present yiminghe - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: address. A copy of the source code may be downloaded from git://github.com/node-modules/address.git. This software contains the following license and notice below: - -This software is licensed under the MIT License. - -Copyright (C) 2013 - 2014 fengmk2 -Copyright (C) 2015 - 2016 node-modules - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: ajv. A copy of the source code may be downloaded from https://github.com/epoberezkin/ajv.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2017 Evgeny Poberezkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: ajv-errors, fast-deep-equal, json-schema-traverse. A copy of the source code may be downloaded from git+https://github.com/epoberezkin/ajv-errors.git (ajv-errors), git+https://github.com/epoberezkin/fast-deep-equal.git (fast-deep-equal), git+https://github.com/epoberezkin/json-schema-traverse.git (json-schema-traverse). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Evgeny Poberezkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: ajv-keywords. A copy of the source code may be downloaded from git+https://github.com/epoberezkin/ajv-keywords.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Evgeny Poberezkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: alphanum-sort, postcss-minify-font-values, postcss-value-parser. 
A copy of the source code may be downloaded from https://github.com/TrySound/alphanum-sort.git (alphanum-sort), https://github.com/cssnano/cssnano.git (postcss-minify-font-values), https://github.com/TrySound/postcss-value-parser.git (postcss-value-parser). This software contains the following license and notice below: - -Copyright (c) Bogdan Chadkin - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: ansi-colors. A copy of the source code may be downloaded from https://github.com/doowb/ansi-colors.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-present, Brian Woodward. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: ansi-escapes, ansi-regex, ansi-styles, binary-extensions, callsites, camelcase, chalk, decamelize, del, execa, find-up, get-stream, globals, globby, gzip-size, has-flag, import-fresh, import-local, internal-ip, invert-kv, is-generator-fn, is-root, is-svg, lcid, locate-path, make-dir, mem, mimic-fn, normalize-url, opn, os-locale, p-is-promise, p-limit, p-locate, p-map, p-try, parent-module, parse-json, path-type, pify, pkg-dir, pretty-bytes, read-pkg, read-pkg-up, resolve-from, slash, string-length, string-width, strip-ansi, supports-color. 
A copy of the source code may be downloaded from https://github.com/sindresorhus/ansi-escapes.git (ansi-escapes), https://github.com/chalk/ansi-regex.git (ansi-regex), https://github.com/chalk/ansi-styles.git (ansi-styles), https://github.com/sindresorhus/binary-extensions.git (binary-extensions), https://github.com/sindresorhus/callsites.git (callsites), https://github.com/sindresorhus/camelcase.git (camelcase), https://github.com/chalk/chalk.git (chalk), https://github.com/sindresorhus/decamelize.git (decamelize), https://github.com/sindresorhus/del.git (del), https://github.com/sindresorhus/execa.git (execa), https://github.com/sindresorhus/find-up.git (find-up), https://github.com/sindresorhus/get-stream.git (get-stream), https://github.com/sindresorhus/globals.git (globals), https://github.com/sindresorhus/globby.git (globby), https://github.com/sindresorhus/gzip-size.git (gzip-size), https://github.com/sindresorhus/has-flag.git (has-flag), https://github.com/sindresorhus/import-fresh.git (import-fresh), https://github.com/sindresorhus/import-local.git (import-local), https://github.com/sindresorhus/internal-ip.git (internal-ip), https://github.com/sindresorhus/invert-kv.git (invert-kv), https://github.com/sindresorhus/is-generator-fn.git (is-generator-fn), https://github.com/sindresorhus/is-root.git (is-root), https://github.com/sindresorhus/is-svg.git (is-svg), https://github.com/sindresorhus/lcid.git (lcid), https://github.com/sindresorhus/locate-path.git (locate-path), https://github.com/sindresorhus/make-dir.git (make-dir), https://github.com/sindresorhus/mem.git (mem), https://github.com/sindresorhus/mimic-fn.git (mimic-fn), https://github.com/sindresorhus/normalize-url.git (normalize-url), https://github.com/sindresorhus/opn.git (opn), https://github.com/sindresorhus/os-locale.git (os-locale), https://github.com/sindresorhus/p-is-promise.git (p-is-promise), https://github.com/sindresorhus/p-limit.git (p-limit), https://github.com/sindresorhus/p-locate.git (p-locate), https://github.com/sindresorhus/p-map.git (p-map), https://github.com/sindresorhus/p-try.git (p-try), https://github.com/sindresorhus/parent-module.git (parent-module), https://github.com/sindresorhus/parse-json.git (parse-json), https://github.com/sindresorhus/path-type.git (path-type), https://github.com/sindresorhus/pify.git (pify), https://github.com/sindresorhus/pkg-dir.git (pkg-dir), https://github.com/sindresorhus/pretty-bytes.git (pretty-bytes), https://github.com/sindresorhus/read-pkg.git (read-pkg), https://github.com/sindresorhus/read-pkg-up.git (read-pkg-up), https://github.com/sindresorhus/resolve-from.git (resolve-from), https://github.com/sindresorhus/slash.git (slash), https://github.com/sindresorhus/string-length.git (string-length), https://github.com/sindresorhus/string-width.git (string-width), https://github.com/chalk/strip-ansi.git (strip-ansi), https://github.com/chalk/supports-color.git (supports-color). 
This software contains the following license and notice below: - -MIT License - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: ansi-html. A copy of the source code may be downloaded from git://github.com/Tjatse/ansi-html.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: ansi-regex, ansi-styles, array-union, array-uniq, arrify, caller-callsite, caller-path, callsites, camelcase, chalk, cli-cursor, code-point-at, decamelize, detect-newline, dot-prop, escape-string-regexp, figures, find-up, globby, has-ansi, import-cwd, import-fresh, import-from, ip-regex, is-absolute-url, is-binary-path, is-fullwidth-code-point, is-obj, is-path-in-cwd, is-path-inside, is-plain-obj, is-stream, is-wsl, leven, load-json-file, locate-path, npm-run-path, number-is-nan, object-assign, onetime, os-homedir, os-tmpdir, p-defer, p-each-series, p-finally, p-locate, p-reduce, p-try, parse-json, path-exists, path-is-absolute, path-key, path-type, pify, pkg-dir, pkg-up, read-pkg, read-pkg-up, resolve-cwd, resolve-from, restore-cursor, shebang-regex, string-width, strip-ansi, strip-bom, strip-eof, strip-json-comments, supports-color, trim-right, wrap-ansi. 
A copy of the source code may be downloaded from https://github.com/chalk/ansi-regex.git (ansi-regex), https://github.com/chalk/ansi-styles.git (ansi-styles), https://github.com/sindresorhus/array-union.git (array-union), https://github.com/sindresorhus/array-uniq.git (array-uniq), https://github.com/sindresorhus/arrify.git (arrify), https://github.com/sindresorhus/caller-callsite.git (caller-callsite), https://github.com/sindresorhus/caller-path.git (caller-path), https://github.com/sindresorhus/callsites.git (callsites), https://github.com/sindresorhus/camelcase.git (camelcase), https://github.com/chalk/chalk.git (chalk), https://github.com/sindresorhus/cli-cursor.git (cli-cursor), https://github.com/sindresorhus/code-point-at.git (code-point-at), https://github.com/sindresorhus/decamelize.git (decamelize), https://github.com/sindresorhus/detect-newline.git (detect-newline), https://github.com/sindresorhus/dot-prop.git (dot-prop), https://github.com/sindresorhus/escape-string-regexp.git (escape-string-regexp), https://github.com/sindresorhus/figures.git (figures), https://github.com/sindresorhus/find-up.git (find-up), https://github.com/sindresorhus/globby.git (globby), https://github.com/sindresorhus/has-ansi.git (has-ansi), https://github.com/sindresorhus/import-cwd.git (import-cwd), https://github.com/sindresorhus/import-fresh.git (import-fresh), https://github.com/sindresorhus/import-from.git (import-from), https://github.com/sindresorhus/ip-regex.git (ip-regex), https://github.com/sindresorhus/is-absolute-url.git (is-absolute-url), https://github.com/sindresorhus/is-binary-path.git (is-binary-path), https://github.com/sindresorhus/is-fullwidth-code-point.git (is-fullwidth-code-point), https://github.com/sindresorhus/is-obj.git (is-obj), https://github.com/sindresorhus/is-path-in-cwd.git (is-path-in-cwd), https://github.com/sindresorhus/is-path-inside.git (is-path-inside), https://github.com/sindresorhus/is-plain-obj.git (is-plain-obj), https://github.com/sindresorhus/is-stream.git (is-stream), https://github.com/sindresorhus/is-wsl.git (is-wsl), https://github.com/sindresorhus/leven.git (leven), https://github.com/sindresorhus/load-json-file.git (load-json-file), https://github.com/sindresorhus/locate-path.git (locate-path), https://github.com/sindresorhus/npm-run-path.git (npm-run-path), https://github.com/sindresorhus/number-is-nan.git (number-is-nan), https://github.com/sindresorhus/object-assign.git (object-assign), https://github.com/sindresorhus/onetime.git (onetime), https://github.com/sindresorhus/os-homedir.git (os-homedir), https://github.com/sindresorhus/os-tmpdir.git (os-tmpdir), https://github.com/sindresorhus/p-defer.git (p-defer), https://github.com/sindresorhus/p-each-series.git (p-each-series), https://github.com/sindresorhus/p-finally.git (p-finally), https://github.com/sindresorhus/p-locate.git (p-locate), https://github.com/sindresorhus/p-reduce.git (p-reduce), https://github.com/sindresorhus/p-try.git (p-try), https://github.com/sindresorhus/parse-json.git (parse-json), https://github.com/sindresorhus/path-exists.git (path-exists), https://github.com/sindresorhus/path-is-absolute.git (path-is-absolute), https://github.com/sindresorhus/path-key.git (path-key), https://github.com/sindresorhus/path-type.git (path-type), https://github.com/sindresorhus/pify.git (pify), https://github.com/sindresorhus/pkg-dir.git (pkg-dir), https://github.com/sindresorhus/pkg-up.git (pkg-up), https://github.com/sindresorhus/read-pkg.git (read-pkg), 
https://github.com/sindresorhus/read-pkg-up.git (read-pkg-up), https://github.com/sindresorhus/resolve-cwd.git (resolve-cwd), https://github.com/sindresorhus/resolve-from.git (resolve-from), https://github.com/sindresorhus/restore-cursor.git (restore-cursor), https://github.com/sindresorhus/shebang-regex.git (shebang-regex), https://github.com/sindresorhus/string-width.git (string-width), https://github.com/chalk/strip-ansi.git (strip-ansi), https://github.com/sindresorhus/strip-bom.git (strip-bom), https://github.com/sindresorhus/strip-eof.git (strip-eof), https://github.com/sindresorhus/strip-json-comments.git (strip-json-comments), https://github.com/chalk/supports-color.git (supports-color), https://github.com/sindresorhus/trim-right.git (trim-right), https://github.com/chalk/wrap-ansi.git (wrap-ansi). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Sindre Sorhus (sindresorhus.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: antd. A copy of the source code may be downloaded from https://github.com/ant-design/ant-design. This software contains the following license and notice below: - -MIT LICENSE - -Copyright (c) 2015-present Ant UED, https://xtech.antfin.com/ - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: anymatch. A copy of the source code may be downloaded from https://github.com/micromatch/anymatch. 
This software contains the following license and notice below: - -The ISC License - -Copyright (c) 2014 Elan Shanker - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: append-transform, find-cache-dir, node-modules-regexp, normalize-range. A copy of the source code may be downloaded from https://github.com/istanbuljs/append-transform.git (append-transform), https://github.com/jamestalmage/find-cache-dir.git (find-cache-dir), https://github.com/jamestalmage/node-modules-regexp.git (node-modules-regexp), https://github.com/jamestalmage/normalize-range.git (normalize-range). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) James Talmage (github.com/jamestalmage) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: aproba, wide-align. A copy of the source code may be downloaded from https://github.com/iarna/aproba (aproba), https://github.com/iarna/wide-align (wide-align). This software contains the following license and notice below: - -Copyright (c) 2015, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- ------ - -The following software may be included in this product: are-we-there-yet. A copy of the source code may be downloaded from https://github.com/iarna/are-we-there-yet.git. This software contains the following license and notice below: - -Copyright (c) 2015, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: argparse. A copy of the source code may be downloaded from https://github.com/nodeca/argparse.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (C) 2012 by Vitaly Puzrin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: aria-query, axobject-query. A copy of the source code may be downloaded from git+https://github.com/A11yance/aria-query.git (aria-query), git+https://github.com/A11yance/axobject-query.git (axobject-query). This software contains the following license and notice below: - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, -and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by -the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all -other entities that control, are controlled by, or are under common -control with that entity. For the purposes of this definition, -"control" means (i) the power, direct or indirect, to cause the -direction or management of such entity, whether by contract or -otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. 
- -"You" (or "Your") shall mean an individual or Legal Entity -exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, -including but not limited to software source code, documentation -source, and configuration files. - -"Object" form shall mean any form resulting from mechanical -transformation or translation of a Source form, including but -not limited to compiled object code, generated documentation, -and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or -Object form, made available under the License, as indicated by a -copyright notice that is included in or attached to the work -(an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object -form, that is based on (or derived from) the Work and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. For the purposes -of this License, Derivative Works shall not include works that remain -separable from, or merely link (or bind by name) to the interfaces of, -the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including -the original version of the Work and any modifications or additions -to that Work or Derivative Works thereof, that is intentionally -submitted to Licensor for inclusion in the Work by the copyright owner -or by an individual or Legal Entity authorized to submit on behalf of -the copyright owner. For the purposes of this definition, "submitted" -means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, -and issue tracking systems that are managed by, or on behalf of, the -Licensor for the purpose of discussing and improving the Work, but -excluding communication that is conspicuously marked or otherwise -designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity -on behalf of whom a Contribution has been received by Licensor and -subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the -Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -(except as stated in this section) patent license to make, have made, -use, offer to sell, sell, import, and otherwise transfer the Work, -where such license applies only to those patent claims licensable -by such Contributor that are necessarily infringed by their -Contribution(s) alone or by combination of their Contribution(s) -with the Work to which such Contribution(s) was submitted. 
If You -institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work -or a Contribution incorporated within the Work constitutes direct -or contributory patent infringement, then any patent licenses -granted to You under this License for that Work shall terminate -as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the -Work or Derivative Works thereof in any medium, with or without -modifications, and in Source or Object form, provided that You -meet the following conditions: - -(a) You must give any other recipients of the Work or -Derivative Works a copy of this License; and - -(b) You must cause any modified files to carry prominent notices -stating that You changed the files; and - -(c) You must retain, in the Source form of any Derivative Works -that You distribute, all copyright, patent, trademark, and -attribution notices from the Source form of the Work, -excluding those notices that do not pertain to any part of -the Derivative Works; and - -(d) If the Work includes a "NOTICE" text file as part of its -distribution, then any Derivative Works that You distribute must -include a readable copy of the attribution notices contained -within such NOTICE file, excluding those notices that do not -pertain to any part of the Derivative Works, in at least one -of the following places: within a NOTICE text file distributed -as part of the Derivative Works; within the Source form or -documentation, if provided along with the Derivative Works; or, -within a display generated by the Derivative Works, if and -wherever such third-party notices normally appear. The contents -of the NOTICE file are for informational purposes only and -do not modify the License. You may add Your own attribution -notices within Derivative Works that You distribute, alongside -or as an addendum to the NOTICE text from the Work, provided -that such additional attribution notices cannot be construed -as modifying the License. - -You may add Your own copyright statement to Your modifications and -may provide additional or different license terms and conditions -for use, reproduction, or distribution of Your modifications, or -for any such Derivative Works as a whole, provided Your use, -reproduction, and distribution of the Work otherwise complies with -the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, -any Contribution intentionally submitted for inclusion in the Work -by You to the Licensor shall be under the terms and conditions of -this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify -the terms of any separate license agreement you may have executed -with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade -names, trademarks, service marks, or product names of the Licensor, -except as required for reasonable and customary use in describing the -origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or -agreed to in writing, Licensor provides the Work (and each -Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied, including, without limitation, any warranties or conditions -of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -PARTICULAR PURPOSE. You are solely responsible for determining the -appropriateness of using or redistributing the Work and assume any -risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, -whether in tort (including negligence), contract, or otherwise, -unless required by applicable law (such as deliberate and grossly -negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, -incidental, or consequential damages of any character arising as a -result of this License or out of the use or inability to use the -Work (including but not limited to damages for loss of goodwill, -work stoppage, computer failure or malfunction, or any and all -other commercial damages or losses), even if such Contributor -has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing -the Work or Derivative Works thereof, You may choose to offer, -and charge a fee for, acceptance of support, warranty, indemnity, -or other liability obligations and/or rights consistent with this -License. However, in accepting such obligations, You may act only -on Your own behalf and on Your sole responsibility, not on behalf -of any other Contributor, and only if You agree to indemnify, -defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason -of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - -To apply the Apache License to your work, attach the following -boilerplate notice, with the fields enclosed by brackets "{}" -replaced with your own identifying information. (Don't include -the brackets!) The text should be enclosed in the appropriate -comment syntax for the file format. We also recommend that a -file or class name and description of purpose be included on the -same "printed page" as the copyright notice for easier -identification within third-party archives. - -Copyright {yyyy} {name of copyright owner} - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ------ - -The following software may be included in this product: arr-diff, fill-range, for-in, has-value, has-values, kind-of, normalize-path, set-value. 
A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-diff.git (arr-diff), https://github.com/jonschlinkert/fill-range.git (fill-range), https://github.com/jonschlinkert/for-in.git (for-in), https://github.com/jonschlinkert/has-value.git (has-value), https://github.com/jonschlinkert/has-values.git (has-values), https://github.com/jonschlinkert/kind-of.git (kind-of), https://github.com/jonschlinkert/normalize-path.git (normalize-path), https://github.com/jonschlinkert/set-value.git (set-value). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: arr-flatten, clone-deep, is-glob, is-plain-object, kind-of. A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-flatten.git (arr-flatten), https://github.com/jonschlinkert/clone-deep.git (clone-deep), https://github.com/micromatch/is-glob.git (is-glob), https://github.com/jonschlinkert/is-plain-object.git (is-plain-object), https://github.com/jonschlinkert/kind-of.git (kind-of). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: arr-union, get-value, has-value, has-values, is-directory, is-glob, isobject, object.pick, repeat-string. 
A copy of the source code may be downloaded from https://github.com/jonschlinkert/arr-union.git (arr-union), https://github.com/jonschlinkert/get-value.git (get-value), https://github.com/jonschlinkert/has-value.git (has-value), https://github.com/jonschlinkert/has-values.git (has-values), https://github.com/jonschlinkert/is-directory.git (is-directory), https://github.com/jonschlinkert/is-glob.git (is-glob), https://github.com/jonschlinkert/isobject.git (isobject), https://github.com/jonschlinkert/object.pick.git (object.pick), https://github.com/jonschlinkert/repeat-string.git (repeat-string). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2016, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: array-equal, destroy, ee-first, mime-db. A copy of the source code may be downloaded from https://github.com/component/array-equal.git (array-equal), https://github.com/stream-utils/destroy.git (destroy), https://github.com/jonathanong/ee-first.git (ee-first), https://github.com/jshttp/mime-db.git (mime-db). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jonathan Ong me@jongleberry.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: array-flatten, camel-case, lower-case, no-case, param-case, path-to-regexp, upper-case. 
A copy of the source code may be downloaded from git://github.com/blakeembrey/array-flatten.git (array-flatten), git://github.com/blakeembrey/camel-case.git (camel-case), git://github.com/blakeembrey/lower-case.git (lower-case), git://github.com/blakeembrey/no-case.git (no-case), git://github.com/blakeembrey/param-case.git (param-case), https://github.com/component/path-to-regexp.git (path-to-regexp), git://github.com/blakeembrey/upper-case.git (upper-case). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Blake Embrey (hello@blakeembrey.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: array-includes, define-properties, es-abstract. A copy of the source code may be downloaded from git://github.com/ljharb/array-includes.git (array-includes), git://github.com/ljharb/define-properties.git (define-properties), git://github.com/ljharb/es-abstract.git (es-abstract). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (C) 2015 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: array-map, array-reduce, concat-map, deep-equal, fast-json-stable-stringify, is-typedarray, json-stable-stringify, json-stable-stringify-without-jsonify, minimist, path-browserify, resolve, safe-regex, text-table, tty-browserify, vm-browserify, wordwrap. 
A copy of the source code may be downloaded from git://github.com/substack/array-map.git (array-map), git://github.com/substack/array-reduce.git (array-reduce), git://github.com/substack/node-concat-map.git (concat-map), http://github.com/substack/node-deep-equal.git (deep-equal), git://github.com/epoberezkin/fast-json-stable-stringify.git (fast-json-stable-stringify), git://github.com/hughsk/is-typedarray.git (is-typedarray), git://github.com/substack/json-stable-stringify.git (json-stable-stringify), git://github.com/samn/json-stable-stringify.git (json-stable-stringify-without-jsonify), git://github.com/substack/minimist.git (minimist), git://github.com/substack/path-browserify.git (path-browserify), git://github.com/substack/node-resolve.git (resolve), git://github.com/substack/safe-regex.git (safe-regex), git://github.com/substack/text-table.git (text-table), git://github.com/substack/tty-browserify.git (tty-browserify), http://github.com/substack/vm-browserify.git (vm-browserify), git://github.com/substack/node-wordwrap.git (wordwrap). This software contains the following license and notice below: - -This software is released under the MIT license: - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: array-unique, is-extglob, is-number. A copy of the source code may be downloaded from https://github.com/jonschlinkert/array-unique.git (array-unique), https://github.com/jonschlinkert/is-extglob.git (is-extglob), https://github.com/jonschlinkert/is-number.git (is-number). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2016, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: asap. A copy of the source code may be downloaded from https://github.com/kriskowal/asap.git. This software contains the following license and notice below: - -Copyright 2009–2014 Contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: asn1. A copy of the source code may be downloaded from git://github.com/joyent/node-asn1.git. This software contains the following license and notice below: - -Copyright (c) 2011 Mark Cavage, All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE - ------ - -The following software may be included in this product: assert, util. A copy of the source code may be downloaded from git://github.com/defunctzombie/commonjs-assert.git (assert), git://github.com/defunctzombie/node-util (util). This software contains the following license and notice below: - -Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: assign-symbols, contains-path, define-property, is-accessor-descriptor, is-data-descriptor, is-extendable, lazy-cache, pascalcase, shallow-clone. A copy of the source code may be downloaded from https://github.com/jonschlinkert/assign-symbols.git (assign-symbols), https://github.com/jonschlinkert/contains-path.git (contains-path), https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/is-accessor-descriptor.git (is-accessor-descriptor), https://github.com/jonschlinkert/is-data-descriptor.git (is-data-descriptor), https://github.com/jonschlinkert/is-extendable.git (is-extendable), https://github.com/jonschlinkert/lazy-cache.git (lazy-cache), https://github.com/jonschlinkert/pascalcase.git (pascalcase), https://github.com/jonschlinkert/shallow-clone.git (shallow-clone). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: astral-regex, dir-glob. A copy of the source code may be downloaded from https://github.com/kevva/astral-regex.git (astral-regex), https://github.com/kevva/dir-glob.git (dir-glob). 
This software contains the following license and notice below: - -MIT License - -Copyright (c) Kevin Mårtensson (github.com/kevva) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: async. A copy of the source code may be downloaded from https://github.com/caolan/async.git. This software contains the following license and notice below: - -Copyright (c) 2010-2014 Caolan McMahon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: async. A copy of the source code may be downloaded from https://github.com/caolan/async.git. This software contains the following license and notice below: - -Copyright (c) 2010-2018 Caolan McMahon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: async-limiter. A copy of the source code may be downloaded from https://github.com/strml/async-limiter.git. This software contains the following license and notice below: - -The MIT License (MIT) -Copyright (c) 2017 Samuel Reed - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: asynckit. A copy of the source code may be downloaded from git+https://github.com/alexindigo/asynckit.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Alex Indigo - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: atob. A copy of the source code may be downloaded from git://git.coolaj86.com/coolaj86/atob.js.git. 
This software contains the following license and notice below: - -At your option you may choose either of the following licenses: - - * The MIT License (MIT) - * The Apache License 2.0 (Apache-2.0) - - -The MIT License (MIT) - -Copyright (c) 2015 AJ ONeal - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 AJ ONeal - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: autoprefixer, postcss, postcss-safe-parser. A copy of the source code may be downloaded from https://github.com/postcss/autoprefixer.git (autoprefixer), https://github.com/postcss/postcss.git (postcss), https://github.com/postcss/postcss-safe-parser.git (postcss-safe-parser). 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2013 Andrey Sitnik - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: aws4. A copy of the source code may be downloaded from https://github.com/mhart/aws4.git. This software contains the following license and notice below: - -Copyright 2013 Michael Hart (michael.hart.au@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: babel-eslint. A copy of the source code may be downloaded from https://github.com/babel/babel-eslint.git. This software contains the following license and notice below: - -Copyright (c) 2014-2016 Sebastian McKenzie - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: babel-extract-comments. A copy of the source code may be downloaded from https://github.com/jonschlinkert/babel-extract-comments.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015, 2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: babel-loader. A copy of the source code may be downloaded from https://github.com/babel/babel-loader.git. This software contains the following license and notice below: - -Copyright (c) 2014-2016 Luís Couto - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: babel-plugin-dynamic-import-node. A copy of the source code may be downloaded from git+https://github.com/airbnb/babel-plugin-dynamic-import-node.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016 Airbnb - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: babel-plugin-istanbul. A copy of the source code may be downloaded from git+https://github.com/istanbuljs/babel-plugin-istanbul.git. This software contains the following license and notice below: - -Copyright (c) 2016, Istanbul Code Coverage -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of babel-plugin-istanbul nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: babel-plugin-macros. A copy of the source code may be downloaded from https://github.com/kentcdodds/babel-plugin-macros.git. This software contains the following license and notice below: - -The MIT License (MIT) -Copyright (c) 2017 Kent C. 
Dodds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: babel-plugin-named-asset-import, babel-preset-react-app, confusing-browser-globals, create-react-class, eslint-config-react-app, invariant, prop-types, react-app-polyfill, react-dev-utils, react-error-overlay, react-scripts, warning. A copy of the source code may be downloaded from https://github.com/facebook/create-react-app.git (babel-plugin-named-asset-import), https://github.com/facebook/create-react-app.git (babel-preset-react-app), https://github.com/facebook/create-react-app.git (confusing-browser-globals), https://github.com/facebook/react.git (create-react-class), https://github.com/facebook/create-react-app.git (eslint-config-react-app), https://github.com/zertosh/invariant (invariant), https://github.com/facebook/prop-types.git (prop-types), https://github.com/facebook/create-react-app.git (react-app-polyfill), https://github.com/facebook/create-react-app.git (react-dev-utils), https://github.com/facebook/create-react-app.git (react-error-overlay), https://github.com/facebook/create-react-app.git (react-scripts), https://github.com/BerkeleyTrue/warning.git (warning). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2013-present, Facebook, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: babel-plugin-transform-react-remove-prop-types. 
A copy of the source code may be downloaded from https://github.com/oliviertassinari/babel-plugin-transform-react-remove-prop-types.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Nikita Gusakov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: bail, ccount. A copy of the source code may be downloaded from https://github.com/wooorm/bail.git (bail), https://github.com/wooorm/ccount.git (ccount). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2015 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: balanced-match. A copy of the source code may be downloaded from git://github.com/juliangruber/balanced-match.git. 
This software contains the following license and notice below: - -(MIT) - -Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: base, extglob, is-accessor-descriptor, is-data-descriptor, split-string. A copy of the source code may be downloaded from https://github.com/node-base/base.git (base), https://github.com/micromatch/extglob.git (extglob), https://github.com/jonschlinkert/is-accessor-descriptor.git (is-accessor-descriptor), https://github.com/jonschlinkert/is-data-descriptor.git (is-data-descriptor), https://github.com/jonschlinkert/split-string.git (split-string). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2017, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: base64-js. A copy of the source code may be downloaded from git://github.com/beatgammit/base64-js.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: batch. A copy of the source code may be downloaded from https://github.com/visionmedia/batch.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013 TJ Holowaychuk - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: bcrypt-pbkdf. A copy of the source code may be downloaded from git://github.com/joyent/node-bcrypt-pbkdf.git. This software contains the following license and notice below: - -The Blowfish portions are under the following license: - -Blowfish block cipher for OpenBSD -Copyright 1997 Niels Provos -All rights reserved. - -Implementation advice by David Mazieres . - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -3. The name of the author may not be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. -IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -The bcrypt_pbkdf portions are under the following license: - -Copyright (c) 2013 Ted Unangst - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - - -Performance improvements (Javascript-specific): - -Copyright 2016, Joyent Inc -Author: Alex Wilson - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: bluebird. A copy of the source code may be downloaded from git://github.com/petkaantonov/bluebird.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013-2018 Petka Antonov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: body-parser, compression, type-is. A copy of the source code may be downloaded from https://github.com/expressjs/body-parser.git (body-parser), https://github.com/expressjs/compression.git (compression), https://github.com/jshttp/type-is.git (type-is). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Jonathan Ong -Copyright (c) 2014-2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: bonjour. A copy of the source code may be downloaded from https://github.com/watson/bonjour.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2016 Thomas Watson Steen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: brace-expansion. A copy of the source code may be downloaded from git://github.com/juliangruber/brace-expansion.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2013 Julian Gruber - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: braces, micromatch, normalize-path. A copy of the source code may be downloaded from https://github.com/micromatch/braces.git (braces), https://github.com/micromatch/micromatch.git (micromatch), https://github.com/jonschlinkert/normalize-path.git (normalize-path). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: browser-process-hrtime. A copy of the source code may be downloaded from git://github.com/kumavis/browser-process-hrtime.git. This software contains the following license and notice below: - -Copyright 2014 kumavis - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: browser-resolve. A copy of the source code may be downloaded from git://github.com/shtylman/node-browser-resolve.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013-2015 Roman Shtylman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: browserify-aes. A copy of the source code may be downloaded from git://github.com/crypto-browserify/browserify-aes.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017 browserify-aes contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- ------ - -The following software may be included in this product: browserify-cipher. A copy of the source code may be downloaded from git@github.com:crypto-browserify/browserify-cipher.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017 Calvin Metcalf & contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: browserify-des. A copy of the source code may be downloaded from git+https://github.com/crypto-browserify/browserify-des.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017 Calvin Metcalf, Fedor Indutny & contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: browserify-rsa. A copy of the source code may be downloaded from git@github.com:crypto-browserify/browserify-rsa.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2015 Calvin Metcalf & contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: browserify-sign. A copy of the source code may be downloaded from https://github.com/crypto-browserify/browserify-sign.git. This software contains the following license and notice below: - -Copyright (c) 2014-2015 Calvin Metcalf and browserify-sign contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: browserify-zlib. A copy of the source code may be downloaded from git+https://github.com/devongovett/browserify-zlib.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2015 Devon Govett - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -This project contains parts of Node.js. -Node.js is licensed for use as follows: - -""" -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -This license applies to parts of Node.js originating from the -https://github.com/joyent/node repository: - -""" -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - ------ - -The following software may be included in this product: browserslist. A copy of the source code may be downloaded from https://github.com/browserslist/browserslist.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2014 Andrey Sitnik - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: buffer. A copy of the source code may be downloaded from git://github.com/feross/buffer.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Feross Aboukhadijeh, and other contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: buffer-from. A copy of the source code may be downloaded from https://github.com/LinusU/buffer-from.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016, 2018 Linus Unnebäck - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: buffer-indexof. A copy of the source code may be downloaded from git://github.com/soldair/node-buffer-indexof.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013 Ryan Day - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: buffer-xor. A copy of the source code may be downloaded from https://github.com/crypto-browserify/buffer-xor.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Daniel Cousens - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: builtin-status-codes. A copy of the source code may be downloaded from https://github.com/bendrucker/builtin-status-codes.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Ben Drucker (bendrucker.me) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: bytes. A copy of the source code may be downloaded from https://github.com/visionmedia/bytes.js.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2014 TJ Holowaychuk -Copyright (c) 2015 Jed Watson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: cacache, figgy-pudding, ssri. A copy of the source code may be downloaded from https://github.com/zkat/cacache (cacache), https://github.com/zkat/figgy-pudding (figgy-pudding), https://github.com/zkat/ssri (ssri). This software contains the following license and notice below: - -ISC License - -Copyright (c) npm, Inc. - -Permission to use, copy, modify, and/or distribute this software for -any purpose with or without fee is hereby granted, provided that the -above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE COPYRIGHT HOLDER DISCLAIMS -ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE -COPYRIGHT HOLDER BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR -CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE -USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: cache-base, isobject, write. A copy of the source code may be downloaded from https://github.com/jonschlinkert/cache-base.git (cache-base), https://github.com/jonschlinkert/isobject.git (isobject), https://github.com/jonschlinkert/write.git (write). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: call-me-maybe. A copy of the source code may be downloaded from git+https://github.com/limulus/call-me-maybe.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Eric McCarthy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: caniuse-api. A copy of the source code may be downloaded from https://github.com/nyalab/caniuse-api.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Sébastien Balayn - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: caniuse-lite. A copy of the source code may be downloaded from https://github.com/ben-eb/caniuse-lite.git. This software contains the following license and notice below: - -Attribution 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. 
If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution 4.0 International Public License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution 4.0 International Public License ("Public License"). To the -extent this Public License may be interpreted as a contract, You are -granted the Licensed Rights in consideration of Your acceptance of -these terms and conditions, and the Licensor grants You such rights in -consideration of benefits the Licensor receives from making the -Licensed Material available under these terms and conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - d. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - e. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - f. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - g. 
Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - h. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - i. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - j. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - k. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. 
Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - 4. If You Share Adapted Material You produce, the Adapter's - License You apply must not prevent recipients of the Adapted - Material from complying with this Public License. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material; and - - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. 
UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. 
No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public -licenses. Notwithstanding, Creative Commons may elect to apply one of -its public licenses to material it publishes and in those instances -will be considered the “Licensor.” The text of the Creative Commons -public licenses is dedicated to the public domain under the CC0 Public -Domain Dedication. Except for the limited purpose of indicating that -material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the -public licenses. - -Creative Commons may be contacted at creativecommons.org. - ------ - -The following software may be included in this product: case-sensitive-paths-webpack-plugin. A copy of the source code may be downloaded from git+https://github.com/Urthen/case-sensitive-paths-webpack-plugin.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2018 Michael Pratt - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: caseless. A copy of the source code may be downloaded from https://github.com/mikeal/caseless. This software contains the following license and notice below: - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -1. Definitions. -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
-"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. -2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. -3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. -4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: -You must give any other recipients of the Work or Derivative Works a copy of this License; and -You must cause any modified files to carry prominent notices stating that You changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. -5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. -6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. -8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: chardet. A copy of the source code may be downloaded from git@github.com:runk/node-chardet.git. This software contains the following license and notice below: - -Copyright (C) 2018 Dmitry Shirokov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- ------ - -The following software may be included in this product: chownr, fs-minipass, fs-write-stream-atomic, glob, ignore-walk, ini, isexe, json-stringify-safe, lru-cache, minimatch, mute-stream, nopt, npm-packlist, npmlog, once, osenv, rimraf, semver, tar, which, wrappy, yallist. A copy of the source code may be downloaded from git://github.com/isaacs/chownr.git (chownr), git+https://github.com/npm/fs-minipass.git (fs-minipass), https://github.com/npm/fs-write-stream-atomic (fs-write-stream-atomic), git://github.com/isaacs/node-glob.git (glob), git+https://github.com/isaacs/ignore-walk.git (ignore-walk), git://github.com/isaacs/ini.git (ini), git+https://github.com/isaacs/isexe.git (isexe), git://github.com/isaacs/json-stringify-safe (json-stringify-safe), git://github.com/isaacs/node-lru-cache.git (lru-cache), git://github.com/isaacs/minimatch.git (minimatch), git://github.com/isaacs/mute-stream (mute-stream), https://github.com/npm/nopt.git (nopt), git+https://github.com/npm/npm-packlist.git (npm-packlist), https://github.com/npm/npmlog.git (npmlog), git://github.com/isaacs/once (once), https://github.com/npm/osenv (osenv), git://github.com/isaacs/rimraf.git (rimraf), https://github.com/npm/node-semver (semver), https://github.com/npm/node-tar.git (tar), git://github.com/isaacs/node-which.git (which), https://github.com/npm/wrappy (wrappy), git+https://github.com/isaacs/yallist.git (yallist). This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: chrome-trace-event. A copy of the source code may be downloaded from github.com:samccone/chrome-trace-event. This software contains the following license and notice below: - -# This is the MIT license - -Copyright (c) 2015 Joyent Inc. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: ci-info, is-ci. A copy of the source code may be downloaded from https://github.com/watson/ci-info.git (ci-info), https://github.com/watson/is-ci.git (is-ci). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016-2018 Thomas Watson Steen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: cipher-base, create-hash, create-hmac, evp_bytestokey. A copy of the source code may be downloaded from git+https://github.com/crypto-browserify/cipher-base.git (cipher-base), git@github.com:crypto-browserify/createHash.git (create-hash), https://github.com/crypto-browserify/createHmac.git (create-hmac), https://github.com/crypto-browserify/EVP_BytesToKey.git (evp_bytestokey). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 crypto-browserify contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: class-utils. A copy of the source code may be downloaded from https://github.com/jonschlinkert/class-utils.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015, 2017-2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: classnames. A copy of the source code may be downloaded from https://github.com/JedWatson/classnames.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 Jed Watson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: clean-css. A copy of the source code may be downloaded from https://github.com/jakubpawlowicz/clean-css.git. This software contains the following license and notice below: - -Copyright (C) 2017 JakubPawlowicz.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: cli-width. A copy of the source code may be downloaded from git@github.com:knownasilya/cli-width.git. This software contains the following license and notice below: - -Copyright (c) 2015, Ilya Radchenko - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: cliui. A copy of the source code may be downloaded from http://github.com/yargs/cliui.git. This software contains the following license and notice below: - -Copyright (c) 2015, Contributors - -Permission to use, copy, modify, and/or distribute this software -for any purpose with or without fee is hereby granted, provided -that the above copyright notice and this permission notice -appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE -LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES -OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, -ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: clone. A copy of the source code may be downloaded from git://github.com/pvorb/node-clone.git. This software contains the following license and notice below: - -Copyright © 2011-2015 Paul Vorbach - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: clone-deep, extend-shallow, mixin-object. A copy of the source code may be downloaded from https://github.com/jonschlinkert/clone-deep.git (clone-deep), https://github.com/jonschlinkert/extend-shallow.git (extend-shallow), https://github.com/jonschlinkert/mixin-object.git (mixin-object). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2015, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: co. A copy of the source code may be downloaded from https://github.com/tj/co.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 TJ Holowaychuk <tj@vision-media.ca> - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: coa. A copy of the source code may be downloaded from git://github.com/veged/coa.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-present Sergey Berezhnoy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: collection-visit, define-property, object-visit, unset-value. A copy of the source code may be downloaded from https://github.com/jonschlinkert/collection-visit.git (collection-visit), https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/object-visit.git (object-visit), https://github.com/jonschlinkert/unset-value.git (unset-value). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015, 2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: color. A copy of the source code may be downloaded from https://github.com/Qix-/color.git. 
This software contains the following license and notice below: - -Copyright (c) 2012 Heather Arthur - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: color-convert. A copy of the source code may be downloaded from https://github.com/Qix-/color-convert.git. This software contains the following license and notice below: - -Copyright (c) 2011-2016 Heather Arthur - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: color-name. A copy of the source code may be downloaded from git@github.com:colorjs/color-name.git. This software contains the following license and notice below: - -The MIT License (MIT) -Copyright (c) 2015 Dmitry Ivanov - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: color-string. A copy of the source code may be downloaded from https://github.com/Qix-/color-string.git. This software contains the following license and notice below: - -Copyright (c) 2011 Heather Arthur - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: combined-stream, delayed-stream. A copy of the source code may be downloaded from git://github.com/felixge/node-combined-stream.git (combined-stream), git://github.com/felixge/node-delayed-stream.git (delayed-stream). This software contains the following license and notice below: - -Copyright (c) 2011 Debuggable Limited - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: comma-separated-tokens, hast-util-from-parse5, hast-util-parse-selector, hastscript, space-separated-tokens, unist-util-stringify-position, vendors, web-namespaces. 
A copy of the source code may be downloaded from https://github.com/wooorm/comma-separated-tokens.git (comma-separated-tokens), https://github.com/syntax-tree/hast-util-from-parse5.git (hast-util-from-parse5), https://github.com/syntax-tree/hast-util-parse-selector.git (hast-util-parse-selector), https://github.com/syntax-tree/hastscript.git (hastscript), https://github.com/wooorm/space-separated-tokens.git (space-separated-tokens), https://github.com/syntax-tree/unist-util-stringify-position.git (unist-util-stringify-position), https://github.com/wooorm/vendors.git (vendors), https://github.com/wooorm/web-namespaces.git (web-namespaces). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2016 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: commander. A copy of the source code may be downloaded from https://github.com/tj/commander.js.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2011 TJ Holowaychuk - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: common-tags. A copy of the source code may be downloaded from https://github.com/declandewet/common-tags. 
This software contains the following license and notice below: - -License (MIT) -------------- - -Copyright © Declan de Wet - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: commondir, shell-quote. A copy of the source code may be downloaded from http://github.com/substack/node-commondir.git (commondir), http://github.com/substack/node-shell-quote.git (shell-quote). This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2013 James Halliday (mail@substack.net) - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: compare-versions. A copy of the source code may be downloaded from git+https://github.com/omichelsen/compare-versions.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2017 Ole Michelsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: component-emitter. A copy of the source code may be downloaded from https://github.com/component/emitter.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Component contributors - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: compressible. A copy of the source code may be downloaded from https://github.com/jshttp/compressible.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013 Jonathan Ong -Copyright (c) 2014 Jeremiah Senkpiel -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: concat-stream. A copy of the source code may be downloaded from http://github.com/maxogden/concat-stream.git. 
This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2013 Max Ogden - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: connect-history-api-fallback. A copy of the source code may be downloaded from http://github.com/bripkens/connect-history-api-fallback.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2012 Ben Ripkens http://bripkens.de - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: console-control-strings, gauge. A copy of the source code may be downloaded from https://github.com/iarna/console-control-strings (console-control-strings), https://github.com/iarna/gauge (gauge). This software contains the following license and notice below: - -Copyright (c) 2014, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: content-disposition, media-typer, on-headers. A copy of the source code may be downloaded from https://github.com/jshttp/content-disposition.git (content-disposition), https://github.com/jshttp/media-typer.git (media-typer), https://github.com/jshttp/on-headers.git (on-headers). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: content-type. A copy of the source code may be downloaded from https://github.com/jshttp/content-type.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: convert-source-map. A copy of the source code may be downloaded from git://github.com/thlorenz/convert-source-map.git. This software contains the following license and notice below: - -Copyright 2013 Thorsten Lorenz. -All rights reserved. 
- -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: cookie. A copy of the source code may be downloaded from https://github.com/jshttp/cookie.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2014 Roman Shtylman -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: copy-concurrently, move-concurrently, promise-inflight. A copy of the source code may be downloaded from git+https://github.com/npm/copy-concurrently.git (copy-concurrently), git+https://github.com/npm/move-concurrently.git (move-concurrently), git+https://github.com/iarna/promise-inflight.git (promise-inflight). This software contains the following license and notice below: - -Copyright (c) 2017, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: copy-descriptor, expand-brackets. A copy of the source code may be downloaded from https://github.com/jonschlinkert/copy-descriptor.git (copy-descriptor), https://github.com/jonschlinkert/expand-brackets.git (expand-brackets). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2016, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: copy-to-clipboard. A copy of the source code may be downloaded from git+https://github.com/sudodoki/copy-to-clipboard. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 sudodoki - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: core-js, core-js-pure. A copy of the source code may be downloaded from https://github.com/zloirock/core-js.git (core-js), https://github.com/zloirock/core-js.git (core-js-pure). 
This software contains the following license and notice below: - -Copyright (c) 2014-2019 Denis Pushkarev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: core-js. A copy of the source code may be downloaded from https://github.com/zloirock/core-js.git. This software contains the following license and notice below: - -Copyright (c) 2015 Denis Pushkarev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: core-util-is. A copy of the source code may be downloaded from git://github.com/isaacs/core-util-is. This software contains the following license and notice below: - -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: cosmiconfig. A copy of the source code may be downloaded from git+https://github.com/davidtheclark/cosmiconfig.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 David Clark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: create-ecdh. A copy of the source code may be downloaded from https://github.com/crypto-browserify/createECDH.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2017 createECDH contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: create-react-context. A copy of the source code may be downloaded from https://github.com/thejameskyle/create-react-context. This software contains the following license and notice below: - -Copyright (c) Jamie Kyle - -This license is granted to everyone except for the following entities and -any of their subsidiaries: - -- "Microsoft Corporation" (for working with ICE) -- "Palantir Technologies" (for working with ICE) -- "Amazon.com, Inc." 
(for abusive treatment of workers and for working with ICE) -- "Northeastern University" (for working with ICE) -- "Ernst & Young" (for working with ICE) -- "Thomson Reuters" (for working with ICE) -- "Motorola Solutions" (for working with ICE) -- "Deloitte Consulting LLP" (for working with ICE) -- "John Hopkins University" (for working with ICE) -- "Dell Inc" (for working with ICE) -- "Xerox Corporation" (for working with ICE) -- "Canon Inc" (for working with ICE) -- "Vermont State Colleges" (for working with ICE) -- "Charter Communications"/"Spectrum"/"Time Warner Cable" (for working with ICE) -- "LinkedIn Corporation" (for working with ICE) -- "United Parcel Service Co" (for working with ICE) -- "Walmart Inc" (for abusive treatment of workers) -- "Sears Holding Corporation" (for abusive treatment of workers) -- "Apple Inc" (for abusive treatment of workers) -- "Tyson Foods Inc" (for abusive treatment of workers) -- "Target Corporation" (for union busting and anti-union propaganda) -- "The H&M group" (for abusive treatment of workers) -- "Tesla, Inc" (for abusive treatment of workers) - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: cross-spawn. A copy of the source code may be downloaded from git@github.com:moxystudio/node-cross-spawn.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2018 Made With MOXY Lda - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- ------ - -The following software may be included in this product: crypto-browserify. A copy of the source code may be downloaded from git://github.com/crypto-browserify/crypto-browserify.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2013 Dominic Tarr - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: css-declaration-sorter. A copy of the source code may be downloaded from https://github.com/Siilwyn/css-declaration-sorter.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2016 Selwyn - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: css-loader, enhanced-resolve, file-loader, html-webpack-plugin, less-loader, loader-utils, mini-css-extract-plugin, schema-utils, style-loader, terser-webpack-plugin, url-loader, watchpack, webpack, webpack-dev-middleware, webpack-dev-server. 
A copy of the source code may be downloaded from https://github.com/webpack-contrib/css-loader.git (css-loader), git://github.com/webpack/enhanced-resolve.git (enhanced-resolve), https://github.com/webpack-contrib/file-loader.git (file-loader), https://github.com/jantimon/html-webpack-plugin.git (html-webpack-plugin), https://github.com/webpack-contrib/less-loader.git (less-loader), https://github.com/webpack/loader-utils.git (loader-utils), https://github.com/webpack-contrib/mini-css-extract-plugin.git (mini-css-extract-plugin), https://github.com/webpack-contrib/schema-utils (schema-utils), https://github.com/webpack-contrib/style-loader.git (style-loader), https://github.com/webpack-contrib/terser-webpack-plugin.git (terser-webpack-plugin), https://github.com/webpack-contrib/url-loader.git (url-loader), https://github.com/webpack/watchpack.git (watchpack), https://github.com/webpack/webpack.git (webpack), https://github.com/webpack/webpack-dev-middleware.git (webpack-dev-middleware), https://github.com/webpack/webpack-dev-server.git (webpack-dev-server). This software contains the following license and notice below: - -Copyright JS Foundation and other contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: css-select, css-what, domelementtype, domhandler, domutils, entities, nth-check. A copy of the source code may be downloaded from git://github.com/fb55/css-select.git (css-select), https://github.com/fb55/css-what (css-what), git://github.com/fb55/domelementtype.git (domelementtype), git://github.com/fb55/DomHandler.git (domhandler), git://github.com/FB55/domutils.git (domutils), git://github.com/fb55/entities.git (entities), https://github.com/fb55/nth-check (nth-check). This software contains the following license and notice below: - -Copyright (c) Felix Böhm -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- -THIS IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS, -EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: css-select-base-adapter. A copy of the source code may be downloaded from https://github.com/nrkn/css-select-base-adapter.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Nik Coughlin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: css-tree. A copy of the source code may be downloaded from https://github.com/csstree/csstree.git. This software contains the following license and notice below: - -Copyright (C) 2016 by Roman Dvornov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: css-unit-converter. 
A copy of the source code may be downloaded from https://github.com/andyjansson/css-unit-converter.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2015 Andy Jansson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: css-url-regex. A copy of the source code may be downloaded from https://github.com/johnotander/css-url-regex.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) John Otander (johnotander.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: cssdb. A copy of the source code may be downloaded from https://github.com/csstools/cssdb.git. This software contains the following license and notice below: - -# CC0 1.0 Universal - -## Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator and -subsequent owner(s) (each and all, an “owner”) of an original work of -authorship and/or a database (each, a “Work”). 
- -Certain owners wish to permanently relinquish those rights to a Work for the -purpose of contributing to a commons of creative, cultural and scientific works -(“Commons”) that the public can reliably and without fear of later claims of -infringement build upon, modify, incorporate in other works, reuse and -redistribute as freely as possible in any form whatsoever and for any purposes, -including without limitation commercial purposes. These owners may contribute -to the Commons to promote the ideal of a free culture and the further -production of creative, cultural and scientific works, or to gain reputation or -greater distribution for their Work in part through the use and efforts of -others. - -For these and/or other purposes and motivations, and without any expectation of -additional consideration or compensation, the person associating CC0 with a -Work (the “Affirmer”), to the extent that he or she is an owner of Copyright -and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and -publicly distribute the Work under its terms, with knowledge of his or her -Copyright and Related Rights in the Work and the meaning and intended legal -effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be - protected by copyright and related or neighboring rights (“Copyright and - Related Rights”). Copyright and Related Rights include, but are not limited - to, the following: - 1. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - 2. moral rights retained by the original author(s) and/or performer(s); - 3. publicity and privacy rights pertaining to a person’s image or likeness - depicted in a Work; - 4. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(i), below; - 5. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - 6. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation thereof, - including any amended or successor version of such directive); and - 7. other similar, equivalent or corresponding rights throughout the world - based on applicable law or treaty, and any national implementations - thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention of, -applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and -unconditionally waives, abandons, and surrenders all of Affirmer’s Copyright -and Related Rights and associated claims and causes of action, whether now -known or unknown (including existing as well as future claims and causes of -action), in the Work (i) in all territories worldwide, (ii) for the maximum -duration provided by applicable law or treaty (including future time -extensions), (iii) in any current or future medium and for any number of -copies, and (iv) for any purpose whatsoever, including without limitation -commercial, advertising or promotional purposes (the “Waiver”). Affirmer makes -the Waiver for the benefit of each member of the public at large and to the -detriment of Affirmer’s heirs and successors, fully intending that such Waiver -shall not be subject to revocation, rescission, cancellation, termination, or -any other legal or equitable action to disrupt the quiet enjoyment of the Work -by the public as contemplated by Affirmer’s express Statement of Purpose. 
- -3. Public License Fallback. Should any part of the Waiver for any reason be -judged legally invalid or ineffective under applicable law, then the Waiver -shall be preserved to the maximum extent permitted taking into account -Affirmer’s express Statement of Purpose. In addition, to the extent the Waiver -is so judged Affirmer hereby grants to each affected person a royalty-free, non -transferable, non sublicensable, non exclusive, irrevocable and unconditional -license to exercise Affirmer’s Copyright and Related Rights in the Work (i) in -all territories worldwide, (ii) for the maximum duration provided by applicable -law or treaty (including future time extensions), (iii) in any current or -future medium and for any number of copies, and (iv) for any purpose -whatsoever, including without limitation commercial, advertising or promotional -purposes (the “License”). The License shall be deemed effective as of the date -CC0 was applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder of the -License, and in such case Affirmer hereby affirms that he or she will not (i) -exercise any of his or her remaining Copyright and Related Rights in the Work -or (ii) assert any associated claims and causes of action with respect to the -Work, in either case contrary to Affirmer’s express Statement of Purpose. - -4. Limitations and Disclaimers. - 1. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - 2. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, statutory - or otherwise, including without limitation warranties of title, - merchantability, fitness for a particular purpose, non infringement, or - the absence of latent or other defects, accuracy, or the present or - absence of errors, whether or not discoverable, all to the greatest - extent permissible under applicable law. - 3. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person’s Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the Work. - 4. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. - -For more information, please see -https://creativecommons.org/publicdomain/zero/1.0/. - ------ - -The following software may be included in this product: csso. A copy of the source code may be downloaded from https://github.com/css/csso.git. 
This software contains the following license and notice below: - -Copyright (C) 2011-2017 by Sergey Kryzhanovsky - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: cssom. A copy of the source code may be downloaded from https://github.com/NV/CSSOM.git. This software contains the following license and notice below: - -Copyright (c) Nikita Vasilyev - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: csstype. A copy of the source code may be downloaded from https://github.com/frenic/csstype. This software contains the following license and notice below: - -Copyright (c) 2017-2018 Fredrik Nicol - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: dashdash. A copy of the source code may be downloaded from git://github.com/trentm/node-dashdash.git. This software contains the following license and notice below: - -# This is the MIT license - -Copyright (c) 2013 Trent Mick. All rights reserved. -Copyright (c) 2013 Joyent Inc. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: data-urls, whatwg-mimetype. A copy of the source code may be downloaded from https://github.com/jsdom/data-urls.git (data-urls), https://github.com/jsdom/whatwg-mimetype.git (whatwg-mimetype). This software contains the following license and notice below: - -Copyright © 2017–2018 Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: debug. A copy of the source code may be downloaded from git://github.com/visionmedia/debug.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 TJ Holowaychuk - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software -and associated documentation files (the 'Software'), to deal in the Software without restriction, -including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial -portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT -LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: decode-uri-component. A copy of the source code may be downloaded from https://github.com/SamVerschueren/decode-uri-component.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Sam Verschueren (github.com/SamVerschueren) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: deep-extend. A copy of the source code may be downloaded from git://github.com/unclechu/node-deep-extend.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013-2018, Viacheslav Lotsmanov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: deep-is. A copy of the source code may be downloaded from http://github.com/thlorenz/deep-is.git. This software contains the following license and notice below: - -Copyright (c) 2012, 2013 Thorsten Lorenz -Copyright (c) 2012 James Halliday -Copyright (c) 2009 Thomas Robinson <280north.com> - -This software is released under the MIT license: - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: default-gateway. A copy of the source code may be downloaded from https://github.com/silverwind/default-gateway.git. This software contains the following license and notice below: - -Copyright (c) silverwind -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ------ - -The following software may be included in this product: default-require-extensions. A copy of the source code may be downloaded from https://github.com/avajs/default-require-extensions.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Node.js contributors, James Talmage (github.com/jamestalmage) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: define-property, is-windows, strip-comments. A copy of the source code may be downloaded from https://github.com/jonschlinkert/define-property.git (define-property), https://github.com/jonschlinkert/is-windows.git (is-windows), https://github.com/jonschlinkert/strip-comments.git (strip-comments). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: delegates. A copy of the source code may be downloaded from https://github.com/visionmedia/node-delegates.git. 
This software contains the following license and notice below: - -Copyright (c) 2015 TJ Holowaychuk - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: depd, forwarded, vary. A copy of the source code may be downloaded from https://github.com/dougwilson/nodejs-depd.git (depd), https://github.com/jshttp/forwarded.git (forwarded), https://github.com/jshttp/vary.git (vary). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014-2017 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: detect-libc. A copy of the source code may be downloaded from git://github.com/lovell/detect-libc. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: detect-node. 
A copy of the source code may be downloaded from https://github.com/iliakan/detect-node. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Ilya Kantor - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: detect-port-alt. A copy of the source code may be downloaded from git://github.com/node-modules/detect-port.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 xdf - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: diffie-hellman, public-encrypt. A copy of the source code may be downloaded from https://github.com/crypto-browserify/diffie-hellman.git (diffie-hellman), https://github.com/crypto-browserify/publicEncrypt.git (public-encrypt). This software contains the following license and notice below: - -Copyright (c) 2017 Calvin Metcalf - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: dns-equal. A copy of the source code may be downloaded from git+https://github.com/watson/dns-equal.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Thomas Watson Steen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: dns-packet, stream-shift. A copy of the source code may be downloaded from https://github.com/mafintosh/dns-packet (dns-packet), https://github.com/mafintosh/stream-shift.git (stream-shift). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Mathias Buus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: dns-txt. A copy of the source code may be downloaded from https://github.com/watson/dns-txt.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Thomas Watson Steen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: doctrine. A copy of the source code may be downloaded from https://github.com/eslint/doctrine.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: doctrine. A copy of the source code may be downloaded from https://github.com/eslint/doctrine.git. This software contains the following license and notice below: - -Doctrine -Copyright jQuery Foundation and other contributors, https://jquery.org/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: dom-closest, dom-matches. A copy of the source code may be downloaded from https://github.com/necolas/dom-closest.git (dom-closest), https://github.com/necolas/dom-matches.git (dom-matches). 
This software contains the following license and notice below: - -Copyright (c) Nicolas Gallagher - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: dom-converter, pretty-error. A copy of the source code may be downloaded from https://github.com/AriaMinaei/dom-converter (dom-converter), https://github.com/AriaMinaei/pretty-error.git (pretty-error). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013 Aria Minaei - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: dom-serializer. A copy of the source code may be downloaded from git://github.com/cheeriojs/dom-renderer.git. This software contains the following license and notice below: - -License - -(The MIT License) - -Copyright (c) 2014 The cheeriojs contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: domain-browser. A copy of the source code may be downloaded from https://github.com/bevry/domain-browser.git. This software contains the following license and notice below: - - - -

License - -Unless stated otherwise all works are: - - - -and licensed under: - - - -MIT License - -
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
- - - ------ - -The following software may be included in this product: domexception. A copy of the source code may be downloaded from https://github.com/jsdom/domexception.git. This software contains the following license and notice below: - -MIT License - -Copyright © 2017 Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: dotenv. A copy of the source code may be downloaded from git://github.com/motdotla/dotenv.git. This software contains the following license and notice below: - -Copyright (c) 2015, Scott Motte -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: dotenv-expand. This software contains the following license and notice below: - -Copyright (c) 2016, Scott Motte -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: draft-js. A copy of the source code may be downloaded from https://github.com/facebook/draft-js.git. This software contains the following license and notice below: - -BSD License - -For Draft.js software - -Copyright (c) 2013-present, Facebook, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: duplexify, end-of-stream, pump, pumpify. A copy of the source code may be downloaded from git://github.com/mafintosh/duplexify (duplexify), git://github.com/mafintosh/end-of-stream.git (end-of-stream), git://github.com/mafintosh/pump.git (pump), git://github.com/mafintosh/pumpify (pumpify). 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Mathias Buus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: ecc-jsbn. A copy of the source code may be downloaded from https://github.com/quartzjer/ecc-jsbn.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jeremie Miller - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: electron-to-chromium. A copy of the source code may be downloaded from https://github.com/kilian/electron-to-chromium/. This software contains the following license and notice below: - -Copyright 2018 Kilian Valkhof - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- ------ - -The following software may be included in this product: emojis-list. A copy of the source code may be downloaded from git+https://github.com/kikobeats/emojis-list.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright © 2015 Kiko Beats - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: encodeurl. A copy of the source code may be downloaded from https://github.com/pillarjs/encodeurl.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: encoding. A copy of the source code may be downloaded from https://github.com/andris9/encoding.git. 
This software contains the following license and notice below: - -Copyright (c) 2012-2014 Andris Reinman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: enquire.js. A copy of the source code may be downloaded from git://github.com/WickyNilliams/enquire.js.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2012 Nick Williams - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: error-ex, is-arrayish. A copy of the source code may be downloaded from https://github.com/qix-/node-error-ex.git (error-ex), https://github.com/qix-/node-is-arrayish.git (is-arrayish). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 JD Ballard - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: es-to-primitive, is-callable, is-date-object, is-symbol, object.getownpropertydescriptors, object.values. A copy of the source code may be downloaded from git://github.com/ljharb/es-to-primitive.git (es-to-primitive), git://github.com/ljharb/is-callable.git (is-callable), git://github.com/ljharb/is-date-object.git (is-date-object), git://github.com/ljharb/is-symbol.git (is-symbol), git://github.com/ljharb/object.getownpropertydescriptors.git (object.getownpropertydescriptors), git://github.com/es-shims/Object.values.git (object.values). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: escape-html. A copy of the source code may be downloaded from https://github.com/component/escape-html.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2013 TJ Holowaychuk -Copyright (c) 2015 Andreas Lubbe -Copyright (c) 2015 Tiancheng "Timothy" Gu - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- ------ - -The following software may be included in this product: escodegen, estraverse, esutils. A copy of the source code may be downloaded from http://github.com/estools/escodegen.git (escodegen), http://github.com/estools/estraverse.git (estraverse), http://github.com/estools/esutils.git (esutils). This software contains the following license and notice below: - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: eslint. A copy of the source code may be downloaded from https://github.com/eslint/eslint.git. This software contains the following license and notice below: - -Copyright JS Foundation and other contributors, https://js.foundation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: eslint-loader. A copy of the source code may be downloaded from https://github.com/webpack-contrib/eslint-loader.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Maxime Thirouin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: eslint-plugin-flowtype. A copy of the source code may be downloaded from https://github.com/gajus/eslint-plugin-flowtype. This software contains the following license and notice below: - -Copyright (c) 2015, Gajus Kuizinas (http://gajus.com/) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Gajus Kuizinas (http://gajus.com/) nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: eslint-plugin-import. A copy of the source code may be downloaded from https://github.com/benmosher/eslint-plugin-import. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Ben Mosher - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: eslint-plugin-jsx-a11y, jsx-ast-utils. A copy of the source code may be downloaded from https://github.com/evcohen/eslint-plugin-jsx-a11y (eslint-plugin-jsx-a11y), https://github.com/evcohen/jsx-ast-utils (jsx-ast-utils). This software contains the following license and notice below: - -The MIT License (MIT) -Copyright (c) 2016 Ethan Cohen - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: eslint-plugin-react. A copy of the source code may be downloaded from https://github.com/yannickcr/eslint-plugin-react. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Yannick Croissant - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: eslint-plugin-react-hooks, react, react-dom, react-is, scheduler. A copy of the source code may be downloaded from https://github.com/facebook/react.git (eslint-plugin-react-hooks), https://github.com/facebook/react.git (react), https://github.com/facebook/react.git (react-dom), https://github.com/facebook/react.git (react-is), https://github.com/facebook/react.git (scheduler). This software contains the following license and notice below: - -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: eslint-scope. A copy of the source code may be downloaded from https://github.com/eslint/eslint-scope.git. This software contains the following license and notice below: - -Copyright JS Foundation and other contributors, https://js.foundation -Copyright (C) 2012-2013 Yusuke Suzuki (twitter: @Constellation) and other contributors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: eslint-scope. A copy of the source code may be downloaded from https://github.com/eslint/eslint-scope.git. This software contains the following license and notice below: - -eslint-scope -Copyright JS Foundation and other contributors, https://js.foundation -Copyright (C) 2012-2013 Yusuke Suzuki (twitter: @Constellation) and other contributors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: eslint-utils, regexpp. A copy of the source code may be downloaded from git+https://github.com/mysticatea/eslint-utils.git (eslint-utils), git+https://github.com/mysticatea/regexpp.git (regexpp). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Toru Nagashima - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- ------ - -The following software may be included in this product: eslint-visitor-keys. A copy of the source code may be downloaded from https://github.com/eslint/eslint-visitor-keys.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: espree. A copy of the source code may be downloaded from https://github.com/eslint/espree.git. This software contains the following license and notice below: - -Espree -Copyright JS Foundation and other contributors, https://js.foundation - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: esprima. A copy of the source code may be downloaded from https://github.com/jquery/esprima.git. This software contains the following license and notice below: - -Copyright JS Foundation and other contributors, https://js.foundation/ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: esquery. 
A copy of the source code may be downloaded from https://github.com/jrfeenst/esquery.git. This software contains the following license and notice below: - -Copyright (c) 2013, Joel Feenstra -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the ESQuery nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL JOEL FEENSTRA BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: etag, proxy-addr. A copy of the source code may be downloaded from https://github.com/jshttp/etag.git (etag), https://github.com/jshttp/proxy-addr.git (proxy-addr). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014-2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: eventemitter3. A copy of the source code may be downloaded from git://github.com/primus/eventemitter3.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Arnout Kazemier - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: eventlistener. A copy of the source code may be downloaded from git@github.com:finn-no/eventlistener.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013 FINN.no AS - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: events. A copy of the source code may be downloaded from git://github.com/Gozala/events.git. This software contains the following license and notice below: - -MIT - -Copyright Joyent, Inc. and other Node contributors. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: eventsource. A copy of the source code may be downloaded from git://github.com/EventSource/eventsource.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) EventSource GitHub organisation - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: exec-sh. A copy of the source code may be downloaded from git@github.com:tsertkov/exec-sh.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Aleksandr Tsertkov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: express. A copy of the source code may be downloaded from https://github.com/expressjs/express.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2009-2014 TJ Holowaychuk -Copyright (c) 2013-2014 Roman Shtylman -Copyright (c) 2014-2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: extend. A copy of the source code may be downloaded from https://github.com/justmoon/node-extend.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Stefan Thomas - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: extend-shallow, mixin-deep. A copy of the source code may be downloaded from https://github.com/jonschlinkert/extend-shallow.git (extend-shallow), https://github.com/jonschlinkert/mixin-deep.git (mixin-deep). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2015, 2017, Jon Schlinkert. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: external-editor. A copy of the source code may be downloaded from git+https://github.com/mrkmg/node-external-editor.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Kevin Gravier - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: extsprintf, jsprim. A copy of the source code may be downloaded from git://github.com/davepacheco/node-extsprintf.git (extsprintf), git://github.com/joyent/node-jsprim.git (jsprim). This software contains the following license and notice below: - -Copyright (c) 2012, Joyent, Inc. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE - ------ - -The following software may be included in this product: fast-glob. A copy of the source code may be downloaded from https://github.com/mrmlnc/fast-glob.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Denis Malinochkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: fast-levenshtein. A copy of the source code may be downloaded from https://github.com/hiddentao/fast-levenshtein.git. This software contains the following license and notice below: - -(MIT License) - -Copyright (c) 2013 [Ramesh Nair](http://www.hiddentao.com/) - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: fbjs. A copy of the source code may be downloaded from https://github.com/facebook/fbjs.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2013-present, Facebook, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: file-entry-cache, flat-cache. A copy of the source code may be downloaded from https://github.com/royriojas/file-entry-cache.git (file-entry-cache), https://github.com/royriojas/flat-cache.git (flat-cache). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Roy Riojas - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: filesize. A copy of the source code may be downloaded from git://github.com/avoidwork/filesize.js.git. This software contains the following license and notice below: - -Copyright (c) 2018, Jason Mulligan -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of filesize nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: finalhandler. A copy of the source code may be downloaded from https://github.com/pillarjs/finalhandler.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014-2017 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: find-cache-dir. A copy of the source code may be downloaded from https://github.com/avajs/find-cache-dir.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) James Talmage (github.com/jamestalmage) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: flatted. 
A copy of the source code may be downloaded from git+https://github.com/WebReflection/flatted.git. This software contains the following license and notice below: - -ISC License - -Copyright (c) 2018, Andrea Giammarchi, @WebReflection - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: flatten. A copy of the source code may be downloaded from git://github.com/jesusabdullah/node-flatten.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Joshua Holbrook - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: flush-write-stream, multicast-dns, multicast-dns-service-types, stream-each. A copy of the source code may be downloaded from https://github.com/mafintosh/flush-write-stream.git (flush-write-stream), https://github.com/mafintosh/multicast-dns.git (multicast-dns), https://github.com/mafintosh/multicast-dns-service-types.git (multicast-dns-service-types), https://github.com/mafintosh/stream-each.git (stream-each). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Mathias Buus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: follow-redirects. A copy of the source code may be downloaded from git@github.com:follow-redirects/follow-redirects.git. This software contains the following license and notice below: - -Copyright 2014–present Olivier Lalonde , James Talmage , Ruben Verborgh - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: for-own. A copy of the source code may be downloaded from https://github.com/jonschlinkert/for-own.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2015, 2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: fork-ts-checker-webpack-plugin. A copy of the source code may be downloaded from https://github.com/Realytics/fork-ts-checker-webpack-plugin.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Realytics - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: form-data. A copy of the source code may be downloaded from git://github.com/form-data/form-data.git. This software contains the following license and notice below: - -Copyright (c) 2012 Felix Geisendörfer (felix@debuggable.com) and contributors - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. - ------ - -The following software may be included in this product: fragment-cache, posix-character-classes. A copy of the source code may be downloaded from https://github.com/jonschlinkert/fragment-cache.git (fragment-cache), https://github.com/jonschlinkert/posix-character-classes.git (posix-character-classes). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016-2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: fresh. A copy of the source code may be downloaded from https://github.com/jshttp/fresh.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012 TJ Holowaychuk -Copyright (c) 2016-2017 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: from2. A copy of the source code may be downloaded from git://github.com/hughsk/from2. This software contains the following license and notice below: - -## The MIT License (MIT) ## - -Copyright (c) 2014 Hugh Kennedy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: fs-extra. A copy of the source code may be downloaded from https://github.com/jprichardson/node-fs-extra. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2011-2017 JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: fs.realpath. A copy of the source code may be downloaded from git+https://github.com/isaacs/fs.realpath.git. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ----- - -This library bundles a version of the `fs.realpath` and `fs.realpathSync` -methods from Node.js v0.10 under the terms of the Node.js MIT license. - -Node's license follows, also included at the header of `old.js` which contains -the licensed code: - - Copyright Joyent, Inc. and other Node contributors. - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: fsevents. A copy of the source code may be downloaded from https://github.com/fsevents/fsevents.git. This software contains the following license and notice below: - -MIT License ------------ - -Copyright (C) 2010-2019 by Philipp Dunkel, Ben Noordhuis, Elan Shankar - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: fsevents. A copy of the source code may be downloaded from https://github.com/strongloop/fsevents.git. This software contains the following license and notice below: - -MIT License ------------ - -Copyright (C) 2010-2014 Philipp Dunkel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: function-bind. A copy of the source code may be downloaded from git://github.com/Raynos/function-bind.git. This software contains the following license and notice below: - -Copyright (c) 2013 Raynos. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: functional-red-black-tree, uniq. A copy of the source code may be downloaded from git://github.com/mikolalysenko/functional-red-black-tree.git (functional-red-black-tree), git://github.com/mikolalysenko/uniq.git (uniq). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013 Mikola Lysenko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: get-caller-file. A copy of the source code may be downloaded from git+https://github.com/stefanpenner/get-caller-file.git. This software contains the following license and notice below: - -ISC License (ISC) -Copyright 2018 Stefan Penner - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- ------ - -The following software may be included in this product: getpass, http-signature, sshpk. A copy of the source code may be downloaded from https://github.com/arekinath/node-getpass.git (getpass), git://github.com/joyent/node-http-signature.git (http-signature), git+https://github.com/joyent/node-sshpk.git (sshpk). This software contains the following license and notice below: - -Copyright Joyent, Inc. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: glob-parent. A copy of the source code may be downloaded from https://github.com/es128/glob-parent. This software contains the following license and notice below: - -The ISC License - -Copyright (c) 2015 Elan Shanker - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: global-modules, global-prefix, repeat-element, use. A copy of the source code may be downloaded from https://github.com/jonschlinkert/global-modules.git (global-modules), https://github.com/jonschlinkert/global-prefix.git (global-prefix), https://github.com/jonschlinkert/repeat-element.git (repeat-element), https://github.com/jonschlinkert/use.git (use). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-present, Jon Schlinkert. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: graceful-fs. A copy of the source code may be downloaded from https://github.com/isaacs/node-graceful-fs. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter, Ben Noordhuis, and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: hammerjs. A copy of the source code may be downloaded from git://github.com/hammerjs/hammer.js.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (C) 2011-2014 by Jorik Tangelder (Eight Media) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: handlebars. 
A copy of the source code may be downloaded from https://github.com/wycats/handlebars.js.git. This software contains the following license and notice below: - -Copyright (C) 2011-2017 by Yehuda Katz - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: har-schema. A copy of the source code may be downloaded from https://github.com/ahmadnassri/har-schema.git. This software contains the following license and notice below: - -Copyright (c) 2015, Ahmad Nassri - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: har-validator. A copy of the source code may be downloaded from https://github.com/ahmadnassri/node-har-validator.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Ahmad Nassri - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: has-symbols. A copy of the source code may be downloaded from git://github.com/ljharb/has-symbols.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: has-unicode. A copy of the source code may be downloaded from https://github.com/iarna/has-unicode. This software contains the following license and notice below: - -Copyright (c) 2014, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: hash-base, md5.js. A copy of the source code may be downloaded from https://github.com/crypto-browserify/hash-base.git (hash-base), https://github.com/crypto-browserify/md5.js.git (md5.js). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Kirill Fomichev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: hex-color-regex. A copy of the source code may be downloaded from https://github.com/regexps/hex-color-regex.git. This software contains the following license and notice below: - -# The MIT License - -Copyright (c) 2015 [Charlike Make Reagent](http://j.mp/1stW47C) - -> Permission is hereby granted, free of charge, to any person obtaining a copy -> of this software and associated documentation files (the "Software"), to deal -> in the Software without restriction, including without limitation the rights -> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -> copies of the Software, and to permit persons to whom the Software is -> furnished to do so, subject to the following conditions: -> -> The above copyright notice and this permission notice shall be included in -> all copies or substantial portions of the Software. -> -> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -> SOFTWARE. - ------ - -The following software may be included in this product: history, react-router, react-router-dom. A copy of the source code may be downloaded from https://github.com/ReactTraining/history.git (history), https://github.com/ReactTraining/react-router.git (react-router), https://github.com/ReactTraining/react-router.git (react-router-dom). This software contains the following license and notice below: - -MIT License - -Copyright (c) React Training 2016-2018 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: hoek. A copy of the source code may be downloaded from git://github.com/hapijs/hoek. 
This software contains the following license and notice below: - -Copyright (c) 2011-2018, Project contributors -Copyright (c) 2011-2014, Walmart -Copyright (c) 2011, Yahoo Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of any contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: hoist-non-react-statics. A copy of the source code may be downloaded from git://github.com/mridgway/hoist-non-react-statics.git. This software contains the following license and notice below: - -Software License Agreement (BSD License) -======================================== - -Copyright (c) 2015, Yahoo! Inc. All rights reserved. ----------------------------------------------------- - -Redistribution and use of this software in source and binary forms, with or -without modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Yahoo! Inc. nor the names of YUI's contributors may be - used to endorse or promote products derived from this software without - specific prior written permission of Yahoo! Inc. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ------ - -The following software may be included in this product: hosted-git-info. A copy of the source code may be downloaded from git+https://github.com/npm/hosted-git-info.git. This software contains the following license and notice below: - -Copyright (c) 2015, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: hsl-regex, hsla-regex, rgb-regex, rgba-regex. A copy of the source code may be downloaded from https://github.com/regexps/hsl-regex.git (hsl-regex), https://github.com/regexps/hsla-regex.git (hsla-regex), https://github.com/regexps/rgb-regex.git (rgb-regex), https://github.com/johnotander/rgba-regex.git (rgba-regex). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 John Otander - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: html-comment-regex. A copy of the source code may be downloaded from https://github.com/stevemao/html-comment-regex.git. This software contains the following license and notice below: - -Copyright 2018 Steve Mao - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: html-encoding-sniffer. A copy of the source code may be downloaded from https://github.com/jsdom/html-encoding-sniffer.git. This software contains the following license and notice below: - -Copyright © 2016 Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: html-entities. A copy of the source code may be downloaded from https://github.com/mdevils/node-html-entities.git. This software contains the following license and notice below: - -Copyright (c) 2013 Dulin Marat - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: html-minifier. A copy of the source code may be downloaded from git+https://github.com/kangax/html-minifier.git. 
This software contains the following license and notice below: - -Copyright (c) 2010-2018 Juriy "kangax" Zaytsev - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: htmlparser2. A copy of the source code may be downloaded from git://github.com/fb55/htmlparser2.git. This software contains the following license and notice below: - -Copyright 2010, 2011, Chris Winberry . All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: http-errors. A copy of the source code may be downloaded from https://github.com/jshttp/http-errors.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jonathan Ong me@jongleberry.com -Copyright (c) 2016 Douglas Christopher Wilson doug@somethingdoug.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: http-parser-js. A copy of the source code may be downloaded from git://github.com/creationix/http-parser-js.git. This software contains the following license and notice below: - -Copyright (c) 2015 Tim Caswell (https://github.com/creationix) and other -contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -Some files from the tests folder are from joyent/node and mscedex/io.js, a fork -of nodejs/io.js: - -- tests/iojs/test-http-parser-durability.js - - This file is from https://github.com/mscdex/io.js/blob/js-http-parser/test/pummel/test-http-parser-durability.js - with modifications by Jan Schär (jscissr). - - """ - Copyright io.js contributors. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to - deal in the Software without restriction, including without limitation the - rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. - """ - -- tests/fixtures/* - tests/parallel/* - tests/testpy/* - tests/common.js - tests/test.py - tests/utils.py - - These files are from https://github.com/nodejs/node with changes by - Jan Schär (jscissr). 
- - Node.js is licensed for use as follows: - - """ - Copyright Node.js contributors. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to - deal in the Software without restriction, including without limitation the - rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. - """ - - This license applies to parts of Node.js originating from the - https://github.com/joyent/node repository: - - """ - Copyright Joyent, Inc. and other Node contributors. All rights reserved. - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to - deal in the Software without restriction, including without limitation the - rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. - """ - ------ - -The following software may be included in this product: http-proxy. A copy of the source code may be downloaded from https://github.com/nodejitsu/node-http-proxy.git. This software contains the following license and notice below: - -node-http-proxy - - Copyright (c) 2010-2016 Charlie Robbins, Jarrett Cruger & the Contributors. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: http-proxy-middleware. A copy of the source code may be downloaded from https://github.com/chimurai/http-proxy-middleware.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Steven Chim - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: https-browserify, stream-browserify. A copy of the source code may be downloaded from git://github.com/substack/https-browserify.git (https-browserify), git://github.com/browserify/stream-browserify.git (stream-browserify). This software contains the following license and notice below: - -This software is released under the MIT license: - -Copyright (c) James Halliday - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: iconv-lite. A copy of the source code may be downloaded from git://github.com/ashtuchkin/iconv-lite.git. 
This software contains the following license and notice below: - -Copyright (c) 2011 Alexander Shtuchkin - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: icss-utils. A copy of the source code may be downloaded from git+https://github.com/css-modules/icss-utils.git. This software contains the following license and notice below: - -ISC License (ISC) -Copyright 2018 Glen Maddern - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: identity-obj-proxy. A copy of the source code may be downloaded from git+https://github.com/keyanzhang/identity-obj-proxy.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Keyan Zhang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- ------ - -The following software may be included in this product: ieee754. A copy of the source code may be downloaded from git://github.com/feross/ieee754.git. This software contains the following license and notice below: - -Copyright 2008 Fair Oaks Labs, Inc. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: iferr. A copy of the source code may be downloaded from https://github.com/shesek/iferr. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Nadav Ivgi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: image-size. A copy of the source code may be downloaded from https://github.com/image-size/image-size.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright © 2017 Aditya Yadav, http://netroy.in - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: immer. A copy of the source code may be downloaded from https://github.com/mweststrate/immer.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Michel Weststrate - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: immutable. A copy of the source code may be downloaded from git://github.com/facebook/immutable-js.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014-present, Facebook, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: immutable. A copy of the source code may be downloaded from git://github.com/facebook/immutable-js.git. This software contains the following license and notice below: - -BSD License - -For Immutable JS software - -Copyright (c) 2014-2015, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: indexes-of. A copy of the source code may be downloaded from git://github.com/dominictarr/indexes-of.git. This software contains the following license and notice below: - -Copyright (c) 2013 Dominic Tarr - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: inflight. A copy of the source code may be downloaded from https://github.com/npm/inflight.git. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: inherits. A copy of the source code may be downloaded from git://github.com/isaacs/inherits. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: inquirer. A copy of the source code may be downloaded from https://github.com/SBoudrias/Inquirer.js.git. This software contains the following license and notice below: - -Copyright (c) 2012 Simon Boudrias - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: ipaddr.js. A copy of the source code may be downloaded from git://github.com/whitequark/ipaddr.js. This software contains the following license and notice below: - -Copyright (C) 2011-2017 whitequark - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: is-buffer, safe-buffer. A copy of the source code may be downloaded from git://github.com/feross/is-buffer.git (is-buffer), git://github.com/feross/safe-buffer.git (safe-buffer). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Feross Aboukhadijeh - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: is-color-stop. A copy of the source code may be downloaded from git+https://github.com/pigcan/is-color-stop.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 pigcan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: is-descriptor, is-extendable, shallow-clone. A copy of the source code may be downloaded from https://github.com/jonschlinkert/is-descriptor.git (is-descriptor), https://github.com/jonschlinkert/is-extendable.git (is-extendable), https://github.com/jonschlinkert/shallow-clone.git (shallow-clone). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2017, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: is-regex. A copy of the source code may be downloaded from git://github.com/ljharb/is-regex.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: is-resolvable. A copy of the source code may be downloaded from https://github.com/shinnn/is-resolvable.git. This software contains the following license and notice below: - -ISC License (ISC) -Copyright 2018 Shinnosuke Watanabe - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: isemail. A copy of the source code may be downloaded from git://github.com/hapijs/isemail. This software contains the following license and notice below: - -Copyright (c) 2014-2015, Eli Skeggs and Project contributors -Copyright (c) 2013-2014, GlobeSherpa -Copyright (c) 2008-2011, Dominic Sayers -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of any contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - * * * - -The complete list of contributors can be found at: https://github.com/hapijs/isemail/graphs/contributors -Previously published under the 2-Clause-BSD license published here: https://github.com/hapijs/isemail/blob/v1.2.0/LICENSE - ------ - -The following software may be included in this product: isomorphic-fetch. A copy of the source code may be downloaded from https://github.com/matthew-andrews/isomorphic-fetch.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Matt Andrews - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: isstream. A copy of the source code may be downloaded from https://github.com/rvagg/isstream.git. This software contains the following license and notice below: - -The MIT License (MIT) -===================== - -Copyright (c) 2015 Rod Vagg ---------------------------- - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: istanbul-api, istanbul-lib-source-maps. A copy of the source code may be downloaded from git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-api), git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-lib-source-maps). This software contains the following license and notice below: - -Copyright 2015 Yahoo! Inc. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Yahoo! Inc. nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: istanbul-lib-coverage, istanbul-lib-hook, istanbul-lib-instrument, istanbul-lib-report, istanbul-reports. A copy of the source code may be downloaded from git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-coverage), git+ssh://git@github.com/istanbuljs/istanbuljs.git (istanbul-lib-hook), git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-instrument), git@github.com:istanbuljs/istanbuljs.git (istanbul-lib-report), git@github.com:istanbuljs/istanbuljs (istanbul-reports). This software contains the following license and notice below: - -Copyright 2012-2015 Yahoo! Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Yahoo! Inc. nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ------ - -The following software may be included in this product: jest-environment-jsdom-fourteen. A copy of the source code may be downloaded from https://github.com/ianschmitz/jest-environment-jsdom-fourteen. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2019 Ian Schmitz - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: jest-watch-typeahead. A copy of the source code may be downloaded from https://github.com/jest-community/jest-watch-typeahead.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Rogelio Guzman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: joi, topo. A copy of the source code may be downloaded from git://github.com/hapijs/joi (joi), git://github.com/hapijs/topo (topo). This software contains the following license and notice below: - -Copyright (c) 2012-2018, Project contributors -Copyright (c) 2012-2014, Walmart -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of any contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: js-levenshtein. A copy of the source code may be downloaded from https://github.com/gustf/js-levenshtein.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Gustaf Andersson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: js-tokens. A copy of the source code may be downloaded from https://github.com/lydell/js-tokens.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014, 2015, 2016, 2017, 2018 Simon Lydell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: js-tokens, source-map-resolve. A copy of the source code may be downloaded from https://github.com/lydell/js-tokens.git (js-tokens), https://github.com/lydell/source-map-resolve.git (source-map-resolve). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014, 2015, 2016, 2017 Simon Lydell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: js-yaml. A copy of the source code may be downloaded from https://github.com/nodeca/js-yaml.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (C) 2011-2015 by Vitaly Puzrin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: jsbn. A copy of the source code may be downloaded from https://github.com/andyperlitch/jsbn.git. 
This software contains the following license and notice below: - -Licensing ---------- - -This software is covered under the following copyright: - -/* - * Copyright (c) 2003-2005 Tom Wu - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL, - * INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER - * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF - * THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT - * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - * In addition, the following condition applies: - * - * All redistributions must retain an intact copy of this copyright notice - * and disclaimer. - */ - -Address all questions regarding this license to: - - Tom Wu - tjw@cs.Stanford.EDU - ------ - -The following software may be included in this product: jsdom. A copy of the source code may be downloaded from https://github.com/jsdom/jsdom.git. This software contains the following license and notice below: - -Copyright (c) 2010 Elijah Insua - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: json-parse-better-errors. A copy of the source code may be downloaded from https://github.com/zkat/json-parse-better-errors. 
This software contains the following license and notice below: - -Copyright 2017 Kat Marchán - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: json2mq, react-slick. A copy of the source code may be downloaded from https://github.com/akiran/json2mq (json2mq), https://github.com/akiran/react-slick (react-slick). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Kiran Abburi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: json3. A copy of the source code may be downloaded from git://github.com/bestiejs/json3.git. This software contains the following license and notice below: - -Copyright (c) 2012-2014 Kit Cambridge. -http://kitcambridge.be/ - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: json5. A copy of the source code may be downloaded from git+https://github.com/json5/json5.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2012-2018 Aseem Kishore, and [others]. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -[others]: https://github.com/json5/json5/contributors - ------ - -The following software may be included in this product: jsonfile. A copy of the source code may be downloaded from git@github.com:jprichardson/node-jsonfile.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2015, JP Richardson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files -(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, - merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS -OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: killable. A copy of the source code may be downloaded from https://github.com/marten-de-vries/killable.git. 
This software contains the following license and notice below: - -Copyright 2014 Marten de Vries - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: kind-of. A copy of the source code may be downloaded from https://github.com/jonschlinkert/kind-of.git. This software contains the following license and notice below: - -Copyright (c) 2014-2015, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: kleur. A copy of the source code may be downloaded from https://github.com/lukeed/kleur.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Luke Edwards (lukeed.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- ------ - -The following software may be included in this product: last-call-webpack-plugin, optimize-css-assets-webpack-plugin. A copy of the source code may be downloaded from http://github.com/NMFR/last-call-webpack-plugin.git (last-call-webpack-plugin), http://github.com/NMFR/optimize-css-assets-webpack-plugin.git (optimize-css-assets-webpack-plugin). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Nuno Rodrigues - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: lazy-cache, map-cache, snapdragon, to-object-path. A copy of the source code may be downloaded from https://github.com/jonschlinkert/lazy-cache.git (lazy-cache), https://github.com/jonschlinkert/map-cache.git (map-cache), https://github.com/jonschlinkert/snapdragon.git (snapdragon), https://github.com/jonschlinkert/to-object-path.git (to-object-path). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2016, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: less. A copy of the source code may be downloaded from https://github.com/less/less.js.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: levn, optionator, prelude-ls, type-check. A copy of the source code may be downloaded from git://github.com/gkz/levn.git (levn), git://github.com/gkz/optionator.git (optionator), git://github.com/gkz/prelude-ls.git (prelude-ls), git://github.com/gkz/type-check.git (type-check). This software contains the following license and notice below: - -Copyright (c) George Zahariev - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: loader-fs-cache. This software contains the following license and notice below: - -Copyright (c) 2014-2016 Ade Viankakrisna Fadlil - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: loader-runner, tapable. A copy of the source code may be downloaded from git+https://github.com/webpack/loader-runner.git (loader-runner), http://github.com/webpack/tapable.git (tapable). This software contains the following license and notice below: - -The MIT License - -Copyright (c) Tobias Koppers @sokra - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: lodash. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git. This software contains the following license and notice below: - -Copyright JS Foundation and other contributors - -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors - -This software consists of voluntary contributions made by many -individuals. 
For exact contribution history, see the revision history -available at https://github.com/lodash/lodash - -The following license applies to all parts of this software except as -documented below: - -==== - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -==== - -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. - -CC0: http://creativecommons.org/publicdomain/zero/1.0/ - -==== - -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. - ------ - -The following software may be included in this product: lodash._getnative, lodash.isarray, lodash.keys. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git (lodash._getnative), https://github.com/lodash/lodash.git (lodash.isarray), https://github.com/lodash/lodash.git (lodash.keys). This software contains the following license and notice below: - -Copyright 2012-2015 The Dojo Foundation -Based on Underscore.js, copyright 2009-2015 Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: lodash._reinterpolate. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git. 
This software contains the following license and notice below: - -Copyright 2012-2015 The Dojo Foundation -Based on Underscore.js 1.7.0, copyright 2009-2015 Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: lodash.debounce, lodash.flow, lodash.isarguments, lodash.memoize, lodash.sortby, lodash.tail, lodash.template, lodash.templatesettings, lodash.throttle, lodash.unescape, lodash.uniq. A copy of the source code may be downloaded from https://github.com/lodash/lodash.git (lodash.debounce), https://github.com/lodash/lodash.git (lodash.flow), https://github.com/lodash/lodash.git (lodash.isarguments), https://github.com/lodash/lodash.git (lodash.memoize), https://github.com/lodash/lodash.git (lodash.sortby), https://github.com/lodash/lodash.git (lodash.tail), https://github.com/lodash/lodash.git (lodash.template), https://github.com/lodash/lodash.git (lodash.templatesettings), https://github.com/lodash/lodash.git (lodash.throttle), https://github.com/lodash/lodash.git (lodash.unescape), https://github.com/lodash/lodash.git (lodash.uniq). This software contains the following license and notice below: - -Copyright jQuery Foundation and other contributors - -Based on Underscore.js, copyright Jeremy Ashkenas, -DocumentCloud and Investigative Reporters & Editors - -This software consists of voluntary contributions made by many -individuals. For exact contribution history, see the revision history -available at https://github.com/lodash/lodash - -The following license applies to all parts of this software except as -documented below: - -==== - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -==== - -Copyright and related rights for sample code are waived via CC0. Sample -code is defined as all source code displayed within the prose of the -documentation. - -CC0: http://creativecommons.org/publicdomain/zero/1.0/ - -==== - -Files located in the node_modules and vendor directories are externally -maintained libraries used by this software which have their own -licenses; we recommend you read them, as their terms may differ from the -terms above. - ------ - -The following software may be included in this product: loose-envify. A copy of the source code may be downloaded from git://github.com/zertosh/loose-envify.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Andres Suarez - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: makeerror, tmpl. A copy of the source code may be downloaded from https://github.com/daaku/nodejs-makeerror (makeerror), https://github.com/daaku/nodejs-tmpl (tmpl). This software contains the following license and notice below: - -BSD License - -Copyright (c) 2014, Naitik Shah. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Naitik Shah nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: map-age-cleaner. A copy of the source code may be downloaded from https://github.com/SamVerschueren/map-age-cleaner.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) Sam Verschueren (github.com/SamVerschueren) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: map-visit, to-regex-range, union-value. A copy of the source code may be downloaded from https://github.com/jonschlinkert/map-visit.git (map-visit), https://github.com/micromatch/to-regex-range.git (to-regex-range), https://github.com/jonschlinkert/union-value.git (union-value). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015-2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: mdn-data. 
A copy of the source code may be downloaded from https://github.com/mdn/data.git. This software contains the following license and notice below: - -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. 
Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. - ------ - -The following software may be included in this product: merge-deep. A copy of the source code may be downloaded from https://github.com/jonschlinkert/merge-deep.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-present, Jon Schlinkert. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: merge-descriptors. A copy of the source code may be downloaded from https://github.com/component/merge-descriptors.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013 Jonathan Ong -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: merge-stream. A copy of the source code may be downloaded from https://github.com/grncdr/merge-stream.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Stephen Sugden (stephensugden.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: merge2. A copy of the source code may be downloaded from git@github.com:teambition/merge2.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-2018 Teambition - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: methods. A copy of the source code may be downloaded from https://github.com/jshttp/methods.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013-2014 TJ Holowaychuk -Copyright (c) 2015-2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: mime. A copy of the source code may be downloaded from https://github.com/broofa/node-mime. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2010 Benjamin Thomas, Robert Kieffer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: minimalistic-assert. A copy of the source code may be downloaded from https://github.com/calvinmetcalf/minimalistic-assert.git. This software contains the following license and notice below: - -Copyright 2015 Calvin Metcalf - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: minipass, npm-bundled. A copy of the source code may be downloaded from git+https://github.com/isaacs/minipass.git (minipass), git+https://github.com/npm/npm-bundled.git (npm-bundled). This software contains the following license and notice below: - -The ISC License - -Copyright (c) npm, Inc. and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: minizlib. A copy of the source code may be downloaded from git+https://github.com/isaacs/minizlib.git. This software contains the following license and notice below: - -Minizlib was created by Isaac Z. 
Schlueter. -It is a derivative work of the Node.js project. - -""" -Copyright Isaac Z. Schlueter and Contributors -Copyright Node.js contributors. All rights reserved. -Copyright Joyent, Inc. and other Node contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" - ------ - -The following software may be included in this product: mississippi. A copy of the source code may be downloaded from git+https://github.com/maxogden/mississippi.git. This software contains the following license and notice below: - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: mkdirp, optimist. A copy of the source code may be downloaded from https://github.com/substack/node-mkdirp.git (mkdirp), http://github.com/substack/node-optimist.git (optimist). 
This software contains the following license and notice below: - -Copyright 2010 James Halliday (mail@substack.net) - -This project is free software released under the MIT/X11 license: - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: moment. A copy of the source code may be downloaded from https://github.com/moment/moment.git. This software contains the following license and notice below: - -Copyright (c) JS Foundation and other contributors - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: ms. A copy of the source code may be downloaded from https://github.com/zeit/ms.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Zeit, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: mutationobserver-shim. A copy of the source code may be downloaded from github.com/megawac/MutationObserver.js. This software contains the following license and notice below: - -Copyright © 2014 Graeme Yeates - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: nan. A copy of the source code may be downloaded from git://github.com/nodejs/nan.git. This software contains the following license and notice below: - -The MIT License (MIT) -===================== - -Copyright (c) 2018 NAN contributors ------------------------------------ - -*NAN contributors listed at * - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: nanomatch, to-regex. A copy of the source code may be downloaded from https://github.com/micromatch/nanomatch.git (nanomatch), https://github.com/jonschlinkert/to-regex.git (to-regex). 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016-2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: needle. A copy of the source code may be downloaded from https://github.com/tomas/needle.git. This software contains the following license and notice below: - -Copyright (c) Fork, Ltd. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: negotiator. A copy of the source code may be downloaded from https://github.com/jshttp/negotiator.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2014 Federico Romero -Copyright (c) 2012-2014 Isaac Z. Schlueter -Copyright (c) 2014-2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: neo-async. A copy of the source code may be downloaded from git@github.com:suguru03/neo-async.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014-2018 Suguru Motegi -Based on Async.js, Copyright Caolan McMahon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: nice-try. A copy of the source code may be downloaded from https://github.com/electerious/nice-try.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2018 Tobias Reich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: node-fetch. A copy of the source code may be downloaded from https://github.com/bitinn/node-fetch.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 David Frank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: node-forge. A copy of the source code may be downloaded from https://github.com/digitalbazaar/forge. This software contains the following license and notice below: - -You may use the Forge project under the terms of either the BSD License or the -GNU General Public License (GPL) Version 2. - -The BSD License is recommended for most projects. It is simple and easy to -understand and it places almost no restrictions on what you can do with the -Forge project. - -If the GPL suits your project better you are also free to use Forge under -that license. - -You don't have to do anything special to choose one license or the other and -you don't have to notify anyone which license you are using. You are free to -use this project in commercial projects as long as the copyright header is -left intact. - -If you are a commercial entity and use this set of libraries in your -commercial software then reasonable payment to Digital Bazaar, if you can -afford it, is not required but is expected and would be appreciated. If this -library saves you time, then it's saving you money. The cost of developing -the Forge software was on the order of several hundred hours and tens of -thousands of dollars. We are attempting to strike a balance between helping -the development community while not being taken advantage of by lucrative -commercial entities for our efforts. - -------------------------------------------------------------------------------- -New BSD License (3-clause) -Copyright (c) 2010, Digital Bazaar, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Digital Bazaar, Inc. nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL DIGITAL BAZAAR BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. 
To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. 
But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. 
These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. 
If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - ------ - -The following software may be included in this product: node-int64. A copy of the source code may be downloaded from https://github.com/broofa/node-int64. This software contains the following license and notice below: - -Copyright (c) 2014 Robert Kieffer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: node-libs-browser. 
A copy of the source code may be downloaded from git+https://github.com/webpack/node-libs-browser.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012 Tobias Koppers - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: node-notifier. A copy of the source code may be downloaded from git+ssh://git@github.com/mikaelbr/node-notifier.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Mikael Brevik - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: node-pre-gyp. A copy of the source code may be downloaded from git://github.com/mapbox/node-pre-gyp.git. This software contains the following license and notice below: - -Copyright (c), Mapbox - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- * Neither the name of node-pre-gyp nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: node-releases. A copy of the source code may be downloaded from git+https://github.com/chicoxyzzy/node-releases.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2017 Sergey Rubanov (https://github.com/chicoxyzzy) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: normalize-package-data. A copy of the source code may be downloaded from git://github.com/npm/normalize-package-data.git. This software contains the following license and notice below: - -This package contains code originally written by Isaac Z. Schlueter. -Used with permission. - -Copyright (c) Meryn Stol ("Author") -All rights reserved. - -The BSD License - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN -IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: num2fraction, postcss-media-minmax. A copy of the source code may be downloaded from git@github.com:yisibl/num2fraction.git (num2fraction), https://github.com/postcss/postcss-media-minmax.git (postcss-media-minmax). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 PostCSS - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: nwsapi. A copy of the source code may be downloaded from git://github.com/dperini/nwsapi.git. This software contains the following license and notice below: - -Copyright (c) 2007-2019 Diego Perini (http://www.iport.it/) - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: object-copy, static-extend. 
A copy of the source code may be downloaded from https://github.com/jonschlinkert/object-copy.git (object-copy), https://github.com/jonschlinkert/static-extend.git (static-extend). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: object-hash. A copy of the source code may be downloaded from https://github.com/puleos/object-hash. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 object-hash contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: object-keys. A copy of the source code may be downloaded from git://github.com/ljharb/object-keys.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (C) 2013 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: object.assign. A copy of the source code may be downloaded from git://github.com/ljharb/object.assign.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: object.fromentries. A copy of the source code may be downloaded from git://github.com/es-shims/Object.fromEntries.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: obuf. A copy of the source code may be downloaded from git@github.com:indutny/offset-buffer. This software contains the following license and notice below: - -Copyright Fedor Indutny, 2015. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the -following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN -NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE -USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: omit.js. A copy of the source code may be downloaded from git+https://github.com/benjycui/omit.js.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016 Benjy Cui - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: on-finished. A copy of the source code may be downloaded from https://github.com/jshttp/on-finished.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013 Jonathan Ong -Copyright (c) 2014 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: original, querystringify, requires-port, url-parse. A copy of the source code may be downloaded from https://github.com/unshiftio/original (original), https://github.com/unshiftio/querystringify (querystringify), https://github.com/unshiftio/requires-port (requires-port), https://github.com/unshiftio/url-parse.git (url-parse). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Unshift.io, Arnout Kazemier, the Contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: os-browserify. A copy of the source code may be downloaded from http://github.com/CoderPuppy/os-browserify.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 CoderPuppy - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: pako. A copy of the source code may be downloaded from https://github.com/nodeca/pako.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (C) 2014-2017 by Vitaly Puzrin and Andrei Tuputcyn - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: parallel-transform. A copy of the source code may be downloaded from git://github.com/mafintosh/parallel-transform. This software contains the following license and notice below: - -Copyright 2013 Mathias Buus - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: parse-asn1. A copy of the source code may be downloaded from git://github.com/crypto-browserify/parse-asn1.git. This software contains the following license and notice below: - -Copyright (c) 2017, crypto-browserify contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: parse5. A copy of the source code may be downloaded from git://github.com/inikulin/parse5.git. This software contains the following license and notice below: - -Copyright (c) 2013-2016 Ivan Nikulin (ifaaan@gmail.com, https://github.com/inikulin) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: parseurl. A copy of the source code may be downloaded from https://github.com/pillarjs/parseurl.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Jonathan Ong -Copyright (c) 2014-2017 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: path-dirname. A copy of the source code may be downloaded from https://github.com/es128/path-dirname.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Elan Shanker and Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: path-is-inside. A copy of the source code may be downloaded from https://github.com/domenic/path-is-inside.git. This software contains the following license and notice below: - -Dual licensed under WTFPL and MIT: - ---- - -Copyright © 2013–2016 Domenic Denicola - -This work is free. You can redistribute it and/or modify it under the -terms of the Do What The Fuck You Want To Public License, Version 2, -as published by Sam Hocevar. See below for more details. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
You just DO WHAT THE FUCK YOU WANT TO. - ---- - -The MIT License (MIT) - -Copyright © 2013–2016 Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: path-parse. A copy of the source code may be downloaded from https://github.com/jbgutierrez/path-parse.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Javier Blanco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: pbkdf2. A copy of the source code may be downloaded from https://github.com/crypto-browserify/pbkdf2.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Daniel Cousens - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: performance-now. A copy of the source code may be downloaded from git://github.com/braveg1rl/performance-now.git. This software contains the following license and notice below: - -Copyright (c) 2013 Braveg1rl - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: pinkie, pinkie-promise, require-from-string. A copy of the source code may be downloaded from https://github.com/floatdrop/pinkie.git (pinkie), https://github.com/floatdrop/pinkie-promise.git (pinkie-promise), https://github.com/floatdrop/require-from-string.git (require-from-string). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Vsevolod Strukchinsky (github.com/floatdrop) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: pirates. A copy of the source code may be downloaded from https://github.com/ariporad/pirates.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016-2018 Ari Porad - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: portfinder. A copy of the source code may be downloaded from git@github.com:indexzero/node-portfinder.git. This software contains the following license and notice below: - -node-portfinder - -Copyright (c) 2012 Charlie Robbins - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-attribute-case-insensitive. A copy of the source code may be downloaded from git+https://github.com/Semigradsky/postcss-attribute-case-insensitive.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2016 Dmitry Semigradsky - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-calc, postcss-color-rebeccapurple. A copy of the source code may be downloaded from https://github.com/postcss/postcss-calc.git (postcss-calc), https://github.com/postcss/postcss-color-rebeccapurple.git (postcss-color-rebeccapurple). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Maxime Thirouin - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-color-gray. A copy of the source code may be downloaded from https://github.com/postcss/postcss-color-gray.git. This software contains the following license and notice below: - -# ISC License (ISC) - -## Copyright 2018 Shinnosuke Watanabe - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR -OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -PERFORMANCE OF THIS SOFTWARE. - -For more information, please see -https://opensource.org/licenses/ISC. - ------ - -The following software may be included in this product: postcss-color-hex-alpha, postcss-custom-media, postcss-custom-properties, postcss-custom-selectors. 
A copy of the source code may be downloaded from https://github.com/postcss/postcss-color-hex-alpha.git (postcss-color-hex-alpha), https://github.com/postcss/postcss-custom-media.git (postcss-custom-media), https://github.com/postcss/postcss-custom-properties.git (postcss-custom-properties), https://github.com/postcss/postcss-custom-selectors.git (postcss-custom-selectors). This software contains the following license and notice below: - -# The MIT License (MIT) - -Copyright © PostCSS - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: postcss-discard-overridden. A copy of the source code may be downloaded from https://github.com/cssnano/cssnano.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2016 Justineo - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-flexbugs-fixes. A copy of the source code may be downloaded from https://github.com/luisrudge/postcss-flexbugs-fixes.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2015 Luis Rudge - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-font-variant. A copy of the source code may be downloaded from https://github.com/postcss/postcss-font-variant.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Maxime Thirouin & Ian Storm Taylor - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-initial. A copy of the source code may be downloaded from https://github.com/maximkoretskiy/postcss-initial.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2015 Maksim Koretskiy - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-load-config. A copy of the source code may be downloaded from https://github.com/michael-ciniawsky/postcss-load-config.git. This software contains the following license and notice below: - -License (MIT) - -Copyright (c) Michael Ciniawsky - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: postcss-loader. A copy of the source code may be downloaded from https://github.com/postcss/postcss-loader.git. This software contains the following license and notice below: - -License (MIT) - -Copyright 2017 Andrey Sitnik - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-minify-params, postcss-normalize-charset. A copy of the source code may be downloaded from https://github.com/cssnano/cssnano.git (postcss-minify-params), https://github.com/cssnano/cssnano.git (postcss-normalize-charset). 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2015 Bogdan Chadkin - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-modules-extract-imports. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-extract-imports.git. This software contains the following license and notice below: - -Copyright 2015 Glen Maddern - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: postcss-modules-local-by-default. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-local-by-default.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2015 Mark Dalgleish - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-modules-scope. A copy of the source code may be downloaded from https://github.com/css-modules/postcss-modules-scope.git. This software contains the following license and notice below: - -ISC License (ISC) - -Copyright (c) 2015, Glen Maddern - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: postcss-modules-values. A copy of the source code may be downloaded from git+https://github.com/css-modules/postcss-modules-values.git. This software contains the following license and notice below: - -ISC License (ISC) - -Copyright (c) 2015, Glen Maddern - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: postcss-page-break. A copy of the source code may be downloaded from https://github.com/shrpne/postcss-page-break.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2017 AUTHOR_NAME - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-replace-overflow-wrap. A copy of the source code may be downloaded from https://github.com/MattDiMu/postcss-replace-overflow-wrap.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2016 Matthias Müller - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-selector-matches, postcss-selector-not. A copy of the source code may be downloaded from https://github.com/postcss/postcss-selector-matches.git (postcss-selector-matches), https://github.com/postcss/postcss-selector-not.git (postcss-selector-not). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 Maxime Thirouin - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: postcss-values-parser. A copy of the source code may be downloaded from https://github.com/lesshint/postcss-values-parser.git. 
This software contains the following license and notice below: - -Copyright (c) Andrew Powell - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: private. A copy of the source code may be downloaded from git://github.com/benjamn/private.git. This software contains the following license and notice below: - -Copyright (c) 2014 Ben Newman - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: process. A copy of the source code may be downloaded from git://github.com/shtylman/node-process.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2013 Roman Shtylman - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: process-nextick-args. A copy of the source code may be downloaded from https://github.com/calvinmetcalf/process-nextick-args.git. This software contains the following license and notice below: - -# Copyright (c) 2015 Calvin Metcalf - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.** - ------ - -The following software may be included in this product: progress. A copy of the source code may be downloaded from git://github.com/visionmedia/node-progress. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2017 TJ Holowaychuk - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: promise. A copy of the source code may be downloaded from https://github.com/then/promise.git. 
This software contains the following license and notice below: - -Copyright (c) 2014 Forbes Lindesay - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: prompts, sisteransi. A copy of the source code may be downloaded from https://github.com/terkelg/prompts.git (prompts), https://github.com/terkelg/sisteransi (sisteransi). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Terkel Gjervig Nielsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: property-information. A copy of the source code may be downloaded from https://github.com/wooorm/property-information.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2015 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: prr. A copy of the source code may be downloaded from https://github.com/rvagg/prr.git. This software contains the following license and notice below: - -The MIT License (MIT) -===================== - -Copyright (c) 2014 Rod Vagg ---------------------------- - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: psl. A copy of the source code may be downloaded from git@github.com:wrangr/psl.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 Lupo Montero lupomontero@gmail.com - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: q. A copy of the source code may be downloaded from git://github.com/kriskowal/q.git. This software contains the following license and notice below: - -Copyright 2009–2017 Kristopher Michael Kowal. All rights reserved. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: qs. A copy of the source code may be downloaded from https://github.com/ljharb/qs.git. This software contains the following license and notice below: - -Copyright (c) 2014 Nathan LaFreniere and other contributors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * The names of any contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - * * * - -The complete list of contributors can be found at: https://github.com/hapijs/qs/graphs/contributors - ------ - -The following software may be included in this product: querystring, querystring-es3. A copy of the source code may be downloaded from git://github.com/Gozala/querystring.git (querystring), git://github.com/mike-spainhower/querystring.git (querystring-es3). This software contains the following license and notice below: - -Copyright 2012 Irakli Gozalishvili. All rights reserved. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - ------ - -The following software may be included in this product: raf. A copy of the source code may be downloaded from git://github.com/chrisdickinson/raf.git. This software contains the following license and notice below: - -Copyright 2013 Chris Dickinson - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: randombytes, randomfill. A copy of the source code may be downloaded from git@github.com:crypto-browserify/randombytes.git (randombytes), https://github.com/crypto-browserify/randomfill.git (randomfill). This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 crypto-browserify - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: range-parser. A copy of the source code may be downloaded from https://github.com/jshttp/range-parser.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012-2014 TJ Holowaychuk -Copyright (c) 2015-2016 Douglas Christopher Wilson -Copyright (c) 2014-2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: rc, through. A copy of the source code may be downloaded from https://github.com/dominictarr/rc.git (rc), https://github.com/dominictarr/through.git (through). This software contains the following license and notice below: - -Apache License, Version 2.0 - -Copyright (c) 2011 Dominic Tarr - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ------ - -The following software may be included in this product: rc-calendar. A copy of the source code may be downloaded from git@github.com:react-component/calendar.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-present yiminghe - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: rc-cascader, rc-editor-mention, rc-select. A copy of the source code may be downloaded from https://github.com/react-component/cascader.git (rc-cascader), https://github.com/react-component/mention.git (rc-editor-mention), git@github.com:react-component/select.git (rc-select). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014-present alipay.com - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: rc-checkbox. A copy of the source code may be downloaded from git@github.com:react-component/checkbox.git. This software contains the following license and notice below: - -The MIT License (MIT) Copyright (c) 2016 React Components - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: rc-drawer, rc-table, rc-tree, rc-tree-select. 
A copy of the source code may be downloaded from https://github.com/ant-motion/drawer.git (rc-drawer), git@github.com:react-component/table.git (rc-table), git@github.com:react-component/tree.git (rc-tree), https://github.com/react-component/tree-select.git (rc-tree-select). This software contains the following license and notice below: - -MIT LICENSE - -Copyright (c) 2015-present Alipay.com, https://www.alipay.com/ - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: rc-editor-core. A copy of the source code may be downloaded from https://github.com/react-component/editor-core.git. This software contains the following license and notice below: - -BSD License - -For Draft.js software - -Copyright (c) 2013-present, Facebook, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: rc-hammerjs. A copy of the source code may be downloaded from https://github.com/react-component/react-hammerjs. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Jed Watson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: rc-slider, rc-tooltip, rc-trigger, rc-util. A copy of the source code may be downloaded from git@github.com:react-component/slider.git (rc-slider), git@github.com:react-component/tooltip.git (rc-tooltip), https://github.com/react-component/trigger.git (rc-trigger), git@github.com:react-component/util.git (rc-util). This software contains the following license and notice below: - -The MIT License (MIT) -Copyright (c) 2015-present Alipay.com, https://www.alipay.com/ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: react-app-rewired. A copy of the source code may be downloaded from git+https://github.com/timarney/react-app-rewired. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2016 Tim Arney - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: react-lazy-load. A copy of the source code may be downloaded from https://github.com/loktar00/react-lazy-load.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Jason - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: react-lifecycles-compat. A copy of the source code may be downloaded from https://github.com/reactjs/react-lifecycles-compat.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2013-present, Facebook, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: readable-stream. A copy of the source code may be downloaded from git://github.com/nodejs/readable-stream. This software contains the following license and notice below: - -Node.js is licensed for use as follows: - -""" -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -This license applies to parts of Node.js originating from the -https://github.com/joyent/node repository: - -""" -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - ------ - -The following software may be included in this product: readdirp. A copy of the source code may be downloaded from git://github.com/paulmillr/readdirp.git. 
This software contains the following license and notice below: - -This software is released under the MIT license: - -Copyright (c) 2012-2015 Thorsten Lorenz - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: realpath-native. A copy of the source code may be downloaded from https://github.com/SimenB/realpath-native.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Simen Bekkhus - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: recursive-readdir. A copy of the source code may be downloaded from git://github.com/jergason/recursive-readdir.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: regex-not. A copy of the source code may be downloaded from https://github.com/jonschlinkert/regex-not.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016, 2018, Jon Schlinkert. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: regexp-tree. A copy of the source code may be downloaded from https://github.com/DmitrySoshnikov/regexp-tree.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Dmitry Soshnikov - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: regjsgen. A copy of the source code may be downloaded from https://github.com/bnjmnt4n/regjsgen.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright 2014-2018 Benjamin Tan - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: regjsparser. A copy of the source code may be downloaded from git@github.com:jviereck/regjsparser.git. This software contains the following license and notice below: - -Copyright (c) Julian Viereck and Contributors, All Rights Reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: relateurl. A copy of the source code may be downloaded from git://github.com/stevenvachon/relateurl.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Steven Vachon (svachon.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: remove-trailing-separator. A copy of the source code may be downloaded from git+https://github.com/darsain/remove-trailing-separator.git. This software contains the following license and notice below: - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: renderkid. A copy of the source code may be downloaded from https://github.com/AriaMinaei/RenderKid.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Aria Minaei - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- ------ - -The following software may be included in this product: replace-ext. A copy of the source code may be downloaded from https://github.com/gulpjs/replace-ext.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Blaine Bublitz , Eric Schoffstall and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: request-promise-core. A copy of the source code may be downloaded from git+https://github.com/request/promise-core.git. This software contains the following license and notice below: - -ISC License - -Copyright (c) 2016, Nicolai Kamenzky and contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: request-promise-native, stealthy-require. A copy of the source code may be downloaded from git+https://github.com/request/request-promise-native.git (request-promise-native), git+https://github.com/analog-nico/stealthy-require.git (stealthy-require). This software contains the following license and notice below: - -ISC License - -Copyright (c) 2017, Nicolai Kamenzky and contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: require-directory. A copy of the source code may be downloaded from git://github.com/troygoode/node-require-directory.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2011 Troy Goode - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: require-main-filename, set-blocking, test-exclude, yargs-parser. A copy of the source code may be downloaded from git+ssh://git@github.com/yargs/require-main-filename.git (require-main-filename), git+https://github.com/yargs/set-blocking.git (set-blocking), git+https://github.com/istanbuljs/istanbuljs.git (test-exclude), git@github.com:yargs/yargs-parser.git (yargs-parser). This software contains the following license and notice below: - -Copyright (c) 2016, Contributors - -Permission to use, copy, modify, and/or distribute this software -for any purpose with or without fee is hereby granted, provided -that the above copyright notice and this permission notice -appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE -LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES -OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, -ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: resize-observer-polyfill. A copy of the source code may be downloaded from https://github.com/que-etc/resize-observer-polyfill.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Denis Rul - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: resolve-url, urix. A copy of the source code may be downloaded from https://github.com/lydell/resolve-url.git (resolve-url), https://github.com/lydell/urix.git (urix). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013 Simon Lydell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: ret. A copy of the source code may be downloaded from git://github.com/fent/ret.js.git. This software contains the following license and notice below: - -Copyright (C) 2011 by Roly Fentanes - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: ripemd160. A copy of the source code may be downloaded from https://github.com/crypto-browserify/ripemd160. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 crypto-browserify - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: rsvp. A copy of the source code may be downloaded from https://github.com/tildeio/rsvp.js.git. This software contains the following license and notice below: - -Copyright (c) 2014 Yehuda Katz, Tom Dale, Stefan Penner and contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: run-async. A copy of the source code may be downloaded from https://github.com/SBoudrias/run-async.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Simon Boudrias - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: rxjs. A copy of the source code may be downloaded from https://github.com/reactivex/rxjs.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2015-2018 Google, Inc., Netflix, Inc., Microsoft Corp. and contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------ - -The following software may be included in this product: safer-buffer. A copy of the source code may be downloaded from git+https://github.com/ChALkeR/safer-buffer.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2018 Nikita Skovoroda - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: sass-loader. A copy of the source code may be downloaded from https://github.com/webpack-contrib/sass-loader.git. This software contains the following license and notice below: - -Copyright JS Foundation and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: sax. A copy of the source code may be downloaded from git://github.com/isaacs/sax-js.git. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Isaac Z. Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -==== - -`String.fromCodePoint` by Mathias Bynens used according to terms of MIT -License, as follows: - - Copyright Mathias Bynens - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: saxes. A copy of the source code may be downloaded from https://github.com/lddubeau/saxes.git. This software contains the following license and notice below: - -The ISC License - -Copyright (c) Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -==== - -The following license is the one that governed sax, from which saxes -was forked. Isaac Schlueter is not *directly* involved with saxes so -don't go bugging him for saxes issues. - -The ISC License - -Copyright (c) Isaac Z. Schlueter and Contributors - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -==== - -`String.fromCodePoint` by Mathias Bynens is no longer used, but it can -still be found in old commits. It was once used according to terms of -MIT License, as follows: - - Copyright Mathias Bynens - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: selfsigned. A copy of the source code may be downloaded from git://github.com/jfromaniello/selfsigned.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2013 José F. Romaniello - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: send. A copy of the source code may be downloaded from https://github.com/pillarjs/send.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2012 TJ Holowaychuk -Copyright (c) 2014-2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: serialize-javascript. A copy of the source code may be downloaded from git+https://github.com/yahoo/serialize-javascript.git. This software contains the following license and notice below: - -Copyright 2014 Yahoo! Inc. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of the Yahoo! Inc. nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL YAHOO! INC. BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: serve-index. A copy of the source code may be downloaded from https://github.com/expressjs/serve-index.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2010 Sencha Inc. -Copyright (c) 2011 LearnBoost -Copyright (c) 2011 TJ Holowaychuk -Copyright (c) 2014-2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: serve-static. A copy of the source code may be downloaded from https://github.com/expressjs/serve-static.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2010 Sencha Inc. 
-Copyright (c) 2011 LearnBoost -Copyright (c) 2011 TJ Holowaychuk -Copyright (c) 2014-2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: setimmediate. A copy of the source code may be downloaded from https://github.com/YuzuJS/setImmediate.git. This software contains the following license and notice below: - -Copyright (c) 2012 Barnesandnoble.com, llc, Donavon West, and Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: setprototypeof. A copy of the source code may be downloaded from https://github.com/wesleytodd/setprototypeof.git. This software contains the following license and notice below: - -Copyright (c) 2015, Wes Todd - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION -OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN -CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- ------ - -The following software may be included in this product: sha.js. A copy of the source code may be downloaded from git://github.com/crypto-browserify/sha.js.git. This software contains the following license and notice below: - -Copyright (c) 2013-2018 sha.js contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 1998 - 2009, Paul Johnston & Contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, this -list of conditions and the following disclaimer in the documentation and/or -other materials provided with the distribution. - -Neither the name of the author nor the names of its contributors may be used to -endorse or promote products derived from this software without specific prior -written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: shallow-equal. A copy of the source code may be downloaded from https://github.com/moroshko/shallow-equal.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright © 2016 Misha Moroshko - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: shallowequal. A copy of the source code may be downloaded from https://github.com/dashed/shallowequal.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Alberto Leal (github.com/dashed) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: shallowequal. A copy of the source code may be downloaded from https://github.com/dashed/shallowequal.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Alberto Leal (github.com/dashed) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: shebang-command. A copy of the source code may be downloaded from https://github.com/kevva/shebang-command.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Kevin Martensson (github.com/kevva) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: shellwords. A copy of the source code may be downloaded from git://github.com/jimmycuadra/shellwords.git. This software contains the following license and notice below: - -Copyright (C) 2011 by Jimmy Cuadra - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: signal-exit. A copy of the source code may be downloaded from https://github.com/tapjs/signal-exit.git. 
This software contains the following license and notice below: - -The ISC License - -Copyright (c) 2015, Contributors - -Permission to use, copy, modify, and/or distribute this software -for any purpose with or without fee is hereby granted, provided -that the above copyright notice and this permission notice -appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES -OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE -LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES -OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, -ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: simple-swizzle. A copy of the source code may be downloaded from https://github.com/qix-/node-simple-swizzle.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Josh Junon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: slice-ansi. A copy of the source code may be downloaded from https://github.com/chalk/slice-ansi.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) DC - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- ------ - -The following software may be included in this product: snapdragon-node, snapdragon-util. A copy of the source code may be downloaded from https://github.com/jonschlinkert/snapdragon-node.git (snapdragon-node), https://github.com/jonschlinkert/snapdragon-util.git (snapdragon-util). This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017, Jon Schlinkert - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: sockjs. A copy of the source code may be downloaded from https://github.com/sockjs/sockjs-node.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (C) 2011 VMware, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: sockjs-client. A copy of the source code may be downloaded from https://github.com/sockjs/sockjs-client.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2011-2012 VMware, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: source-list-map. A copy of the source code may be downloaded from https://github.com/webpack/source-list-map.git. This software contains the following license and notice below: - -Copyright 2017 JS Foundation - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: source-map. A copy of the source code may be downloaded from http://github.com/mozilla/source-map.git. This software contains the following license and notice below: - -Copyright (c) 2009-2011, Mozilla Foundation and contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the names of the Mozilla Foundation nor the names of project - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: source-map-support. A copy of the source code may be downloaded from https://github.com/evanw/node-source-map-support. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Evan Wallace - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: source-map-url. A copy of the source code may be downloaded from https://github.com/lydell/source-map-url.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Simon Lydell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: spdx-expression-parse. 
A copy of the source code may be downloaded from https://github.com/jslicense/spdx-expression-parse.js.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2015 Kyle E. Mitchell & other authors listed in AUTHORS - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: sprintf-js. A copy of the source code may be downloaded from https://github.com/alexei/sprintf.js.git. This software contains the following license and notice below: - -Copyright (c) 2007-2014, Alexandru Marasteanu -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. -* Neither the name of this software nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: stack-utils. A copy of the source code may be downloaded from https://github.com/tapjs/stack-utils.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Isaac Z. 
Schlueter , James Talmage (github.com/jamestalmage), and Contributors - -Extracted from code in node-tap http://www.node-tap.org/ - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: statuses. A copy of the source code may be downloaded from https://github.com/jshttp/statuses.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Jonathan Ong -Copyright (c) 2016 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: stream-http. A copy of the source code may be downloaded from git://github.com/jhiesey/stream-http.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2015 John Hiesey - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: string_decoder. A copy of the source code may be downloaded from git://github.com/nodejs/string_decoder.git. This software contains the following license and notice below: - -Node.js is licensed for use as follows: - -""" -Copyright Node.js contributors. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - -This license applies to parts of Node.js originating from the -https://github.com/joyent/node repository: - -""" -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -""" - ------ - -The following software may be included in this product: string-convert. A copy of the source code may be downloaded from https://github.com/akiran/string-convert. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Kiran Abburi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: stringify-object. A copy of the source code may be downloaded from https://github.com/yeoman/stringify-object.git. This software contains the following license and notice below: - -Copyright (c) 2015, Yeoman team -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: svgo. A copy of the source code may be downloaded from git://github.com/svg/svgo.git. 
This software contains the following license and notice below: - -The MIT License - -Copyright © 2012–2016 Kir Belevich - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Лицензия MIT - -Copyright © 2012–2016 Кир Белевич - -Данная лицензия разрешает лицам, получившим копию данного -программного обеспечения и сопутствующей документации -(в дальнейшем именуемыми «Программное Обеспечение»), безвозмездно -использовать Программное Обеспечение без ограничений, включая -неограниченное право на использование, копирование, изменение, -добавление, публикацию, распространение, сублицензирование -и/или продажу копий Программного Обеспечения, также как и лицам, -которым предоставляется данное Программное Обеспечение, -при соблюдении следующих условий: - -Указанное выше уведомление об авторском праве и данные условия -должны быть включены во все копии или значимые части данного -Программного Обеспечения. - -ДАННОЕ ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ «КАК ЕСТЬ», -БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНО ВЫРАЖЕННЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, -ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ ТОВАРНОЙ ПРИГОДНОСТИ, -СООТВЕТСТВИЯ ПО ЕГО КОНКРЕТНОМУ НАЗНАЧЕНИЮ И ОТСУТСТВИЯ НАРУШЕНИЙ -ПРАВ. НИ В КАКОМ СЛУЧАЕ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ -ОТВЕТСТВЕННОСТИ ПО ИСКАМ О ВОЗМЕЩЕНИИ УЩЕРБА, УБЫТКОВ ИЛИ ДРУГИХ -ТРЕБОВАНИЙ ПО ДЕЙСТВУЮЩИМ КОНТРАКТАМ, ДЕЛИКТАМ ИЛИ ИНОМУ, -ВОЗНИКШИМ ИЗ, ИМЕЮЩИМ ПРИЧИНОЙ ИЛИ СВЯЗАННЫМ С ПРОГРАММНЫМ -ОБЕСПЕЧЕНИЕМ ИЛИ ИСПОЛЬЗОВАНИЕМ ПРОГРАММНОГО ОБЕСПЕЧЕНИЯ -ИЛИ ИНЫМИ ДЕЙСТВИЯМИ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ. - ------ - -The following software may be included in this product: symbol-tree. A copy of the source code may be downloaded from https://github.com/jsdom/js-symbol-tree.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015 Joris van der Wel - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: table. A copy of the source code may be downloaded from https://github.com/gajus/table. This software contains the following license and notice below: - -Copyright (c) 2018, Gajus Kuizinas (http://gajus.com/) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Gajus Kuizinas (http://gajus.com/) nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL ANUARY BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: terser, uglify-js. A copy of the source code may be downloaded from https://github.com/fabiosantoscode/terser.git (terser), https://github.com/mishoo/UglifyJS2.git (uglify-js). This software contains the following license and notice below: - -UglifyJS is released under the BSD license: - -Copyright 2012-2018 (c) Mihai Bazon - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - * Redistributions of source code must retain the above - copyright notice, this list of conditions and the following - disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER “AS IS” AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, -OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF -THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - ------ - -The following software may be included in this product: throat. A copy of the source code may be downloaded from https://github.com/ForbesLindesay/throat.git. This software contains the following license and notice below: - -Copyright (c) 2013 Forbes Lindesay - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: through2. A copy of the source code may be downloaded from https://github.com/rvagg/through2.git. This software contains the following license and notice below: - -# The MIT License (MIT) - -**Copyright (c) Rod Vagg (the "Original Author") and additional contributors** - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: thunky. A copy of the source code may be downloaded from git://github.com/mafintosh/thunky.git. 
This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2018 Mathias Buus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: timers-browserify. A copy of the source code may be downloaded from git://github.com/jryans/timers-browserify.git. This software contains the following license and notice below: - -# timers-browserify - -This project uses the [MIT](http://jryans.mit-license.org/) license: - - Copyright © 2012 J. Ryan Stinnett - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the “Software”), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - -# lib/node - -The `lib/node` directory borrows files from joyent/node which uses the following license: - - Copyright Joyent, Inc. and other Node contributors. All rights reserved. - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to - deal in the Software without restriction, including without limitation the - rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - sell copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. - ------ - -The following software may be included in this product: timsort. A copy of the source code may be downloaded from https://github.com/mziccard/node-timsort.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2015 Marco Ziccardi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: tiny-invariant. A copy of the source code may be downloaded from git+https://github.com/alexreardon/tiny-invariant.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2019 Alexander Reardon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: tinycolor2. A copy of the source code may be downloaded from https://bgrins.github.com/TinyColor. 
This software contains the following license and notice below: - -Copyright (c), Brian Grinstead, http://briangrinstead.com - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: tmp. A copy of the source code may be downloaded from https://github.com/raszi/node-tmp.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 KARASZI István - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: to-arraybuffer. A copy of the source code may be downloaded from git://github.com/jhiesey/to-arraybuffer.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2016 John Hiesey - -Permission is hereby granted, free of charge, -to any person obtaining a copy of this software and -associated documentation files (the "Software"), to -deal in the Software without restriction, including -without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom -the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR -ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: to-fast-properties. A copy of the source code may be downloaded from https://github.com/sindresorhus/to-fast-properties.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2014 Petka Antonov - 2015 Sindre Sorhus - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: tough-cookie. A copy of the source code may be downloaded from git://github.com/salesforce/tough-cookie.git. This software contains the following license and notice below: - -Copyright (c) 2015, Salesforce.com, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: tr46. A copy of the source code may be downloaded from https://github.com/Sebmaster/tr46.js.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2016 Sebastian Mayr - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: trough. A copy of the source code may be downloaded from https://github.com/wooorm/trough.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2016 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: tslib, typescript. A copy of the source code may be downloaded from https://github.com/Microsoft/tslib.git (tslib), https://github.com/Microsoft/TypeScript.git (typescript). 
This software contains the following license and notice below: - -Apache License - -Version 2.0, January 2004 - -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and - -You must cause any modified files to carry prominent notices stating that You changed the files; and - -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and - -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: tsutils. A copy of the source code may be downloaded from https://github.com/ajafff/tsutils. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 Klaus Meinhardt - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: tweetnacl. A copy of the source code may be downloaded from https://github.com/dchest/tweetnacl-js.git. This software contains the following license and notice below: - -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to - ------ - -The following software may be included in this product: typedarray. A copy of the source code may be downloaded from git://github.com/substack/typedarray.git. This software contains the following license and notice below: - -/* - Copyright (c) 2010, Linden Research, Inc. - Copyright (c) 2012, Joshua Bell - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. 
- $/LicenseInfo$ - */ - -// Original can be found at: -// https://bitbucket.org/lindenlab/llsd -// Modifications by Joshua Bell inexorabletash@gmail.com -// https://github.com/inexorabletash/polyfill - -// ES3/ES5 implementation of the Krhonos Typed Array Specification -// Ref: http://www.khronos.org/registry/typedarray/specs/latest/ -// Date: 2011-02-01 -// -// Variations: -// * Allows typed_array.get/set() as alias for subscripts (typed_array[]) - ------ - -The following software may be included in this product: ua-parser-js. A copy of the source code may be downloaded from https://github.com/faisalman/ua-parser-js.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2012-2018 Faisal Salman <> - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: unified, vfile. A copy of the source code may be downloaded from https://github.com/unifiedjs/unified.git (unified), https://github.com/vfile/vfile.git (vfile). This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2015 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: unique-filename. A copy of the source code may be downloaded from https://github.com/iarna/unique-filename.git. 
This software contains the following license and notice below: - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: unique-slug. A copy of the source code may be downloaded from git://github.com/iarna/unique-slug.git. This software contains the following license and notice below: - -The ISC License - -Copyright npm, Inc - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR -IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: universalify. A copy of the source code may be downloaded from git+https://github.com/RyanZim/universalify.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2017, Ryan Zimmerman - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the 'Software'), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: unpipe. A copy of the source code may be downloaded from https://github.com/stream-utils/unpipe.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2015 Douglas Christopher Wilson - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: unquote. A copy of the source code may be downloaded from https://github.com/lakenen/node-unquote.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2017 Cameron Lakenen - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sub-license, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: upath. A copy of the source code may be downloaded from git://github.com/anodynos/upath. This software contains the following license and notice below: - -Copyright(c) 2014-2017 Angelos Pikoulas (agelos.pikoulas@gmail.com) - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: url. A copy of the source code may be downloaded from https://github.com/defunctzombie/node-url.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright Joyent, Inc. and other Node contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: util-deprecate. A copy of the source code may be downloaded from git://github.com/TooTallNate/util-deprecate.git. This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2014 Nathan Rajlich - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: util.promisify. A copy of the source code may be downloaded from git+https://github.com/ljharb/util.promisify.git. 
This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 Jordan Harband - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: utila. A copy of the source code may be downloaded from https://github.com/AriaMinaei/utila.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2014 Aria Minaei - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: utils-merge. A copy of the source code may be downloaded from git://github.com/jaredhanson/utils-merge.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2013-2017 Jared Hanson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: uuid. A copy of the source code may be downloaded from https://github.com/kelektiv/node-uuid.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2010-2016 Robert Kieffer and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: verror. A copy of the source code may be downloaded from git://github.com/davepacheco/node-verror.git. This software contains the following license and notice below: - -Copyright (c) 2016, Joyent, Inc. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE - ------ - -The following software may be included in this product: vfile-message. A copy of the source code may be downloaded from https://github.com/vfile/vfile-message.git. 
This software contains the following license and notice below: - -(The MIT License) - -Copyright (c) 2017 Titus Wormer - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -'Software'), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: walker. A copy of the source code may be downloaded from https://github.com/daaku/nodejs-walker. This software contains the following license and notice below: - -Copyright 2013 Naitik Shah - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ------ - -The following software may be included in this product: warning. A copy of the source code may be downloaded from https://github.com/BerkeleyTrue/warning.git. This software contains the following license and notice below: - -BSD License - -For React software - -Copyright (c) 2013-2015, Facebook, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: webidl-conversions. A copy of the source code may be downloaded from https://github.com/jsdom/webidl-conversions.git. This software contains the following license and notice below: - -# The BSD 2-Clause License - -Copyright (c) 2014, Domenic Denicola -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------ - -The following software may be included in this product: webpack-log. A copy of the source code may be downloaded from https://github.com/webpack-contrib/webpack-log.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 webpack-contrib - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: webpack-manifest-plugin. 
A copy of the source code may be downloaded from https://github.com/danethurber/webpack-manifest-plugin.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) Dane Thurber - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: webpack-sources. A copy of the source code may be downloaded from git+https://github.com/webpack/webpack-sources.git. This software contains the following license and notice below: - -MIT License - -Copyright (c) 2017 JS Foundation and other contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: websocket-driver. A copy of the source code may be downloaded from git://github.com/faye/websocket-driver-node.git. This software contains the following license and notice below: - -# The MIT License - -Copyright (c) 2010-2017 James Coglan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the 'Software'), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: websocket-extensions. A copy of the source code may be downloaded from git://github.com/faye/websocket-extensions-node.git. This software contains the following license and notice below: - -# The MIT License - -Copyright (c) 2014-2017 James Coglan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the 'Software'), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: whatwg-encoding. A copy of the source code may be downloaded from https://github.com/jsdom/whatwg-encoding.git. This software contains the following license and notice below: - -Copyright © 2016–2018 Domenic Denicola - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: whatwg-fetch. A copy of the source code may be downloaded from https://github.com/github/fetch.git. This software contains the following license and notice below: - -Copyright (c) 2014-2016 GitHub, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: whatwg-url. A copy of the source code may be downloaded from https://github.com/jsdom/whatwg-url.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2015–2016 Sebastian Mayr - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: which-module. A copy of the source code may be downloaded from git+https://github.com/nexdrew/which-module.git. This software contains the following license and notice below: - -Copyright (c) 2016, Contributors - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. 
- ------ - -The following software may be included in this product: workbox-background-sync, workbox-broadcast-update, workbox-build, workbox-cacheable-response, workbox-core, workbox-expiration, workbox-google-analytics, workbox-navigation-preload, workbox-precaching, workbox-range-requests, workbox-routing, workbox-strategies, workbox-streams, workbox-sw, workbox-webpack-plugin, workbox-window. A copy of the source code may be downloaded from https://github.com/googlechrome/workbox.git (workbox-background-sync), https://github.com/googlechrome/workbox.git (workbox-broadcast-update), https://github.com/googlechrome/workbox.git (workbox-build), https://github.com/googlechrome/workbox.git (workbox-cacheable-response), https://github.com/googlechrome/workbox.git (workbox-core), https://github.com/googlechrome/workbox.git (workbox-expiration), https://github.com/googlechrome/workbox.git (workbox-google-analytics), https://github.com/googlechrome/workbox.git (workbox-navigation-preload), https://github.com/googlechrome/workbox.git (workbox-precaching), https://github.com/googlechrome/workbox.git (workbox-range-requests), https://github.com/googlechrome/workbox.git (workbox-routing), https://github.com/googlechrome/workbox.git (workbox-strategies), https://github.com/googlechrome/workbox.git (workbox-streams), https://github.com/googlechrome/workbox.git (workbox-sw), https://github.com/googlechrome/workbox.git (workbox-webpack-plugin), https://github.com/googlechrome/workbox.git (workbox-window). This software contains the following license and notice below: - -Copyright 2018 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: worker-farm. A copy of the source code may be downloaded from https://github.com/rvagg/node-worker-farm.git. 
This software contains the following license and notice below: - -The MIT License (MIT) -===================== - -Copyright (c) 2014 LevelUP contributors ---------------------------------------- - -*LevelUP contributors listed at * - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: write-file-atomic. A copy of the source code may be downloaded from git@github.com:iarna/write-file-atomic.git. This software contains the following license and notice below: - -Copyright (c) 2015, Rebecca Turner - -Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ------ - -The following software may be included in this product: ws. A copy of the source code may be downloaded from https://github.com/websockets/ws.git. This software contains the following license and notice below: - -The MIT License (MIT) - -Copyright (c) 2011 Einar Otto Stangvik - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ------ - -The following software may be included in this product: xml-name-validator. A copy of the source code may be downloaded from https://github.com/jsdom/xml-name-validator.git. This software contains the following license and notice below: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - ------ - -The following software may be included in this product: xmlchars. A copy of the source code may be downloaded from https://github.com/lddubeau/xmlchars.git. This software contains the following license and notice below: - -Copyright Louis-Dominique Dubeau and contributors to xmlchars - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ------ - -The following software may be included in this product: xregexp. A copy of the source code may be downloaded from https://github.com/slevithan/xregexp.git. This software contains the following license and notice below: - -The MIT License - -Copyright (c) 2007-2017 Steven Levithan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ------ - -The following software may be included in this product: y18n. A copy of the source code may be downloaded from git@github.com:yargs/y18n.git. This software contains the following license and notice below: - -Copyright (c) 2015, Contributors - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. - ------ - -The following software may be included in this product: yargs. A copy of the source code may be downloaded from https://github.com/yargs/yargs.git. This software contains the following license and notice below: - -Copyright 2010 James Halliday (mail@substack.net) -Modified work Copyright 2014 Contributors (ben@npmjs.com) - -This project is free software released under the MIT/X11 license: - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE deleted file mode 100644 index b1e960815f6..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Hadoop Ozone Recon -Copyright 2019 and onwards The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md deleted file mode 100644 index d555ccd4bdf..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/README.md +++ /dev/null @@ -1,45 +0,0 @@ - - -This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). - -## Available Scripts - -In the project directory, you can run: - -### `yarn start` - -Runs the app in the development mode.
-Open [http://localhost:3000](http://localhost:3000) to view it in the browser. - -The page will reload if you make edits.
-You will also see any lint errors in the console. - -### `yarn test` - -Launches the test runner in the interactive watch mode.
-See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. - -### `yarn run build` - -Builds the app for production to the `build` folder.
-It correctly bundles React in production mode and optimizes the build for the best performance. - -The build is minified and the filenames include the hashes.
-Your app is ready to be deployed! - -See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js deleted file mode 100644 index d29b5302568..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/config-overrides.js +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -const { override, fixBabelImports, addLessLoader} = require('customize-cra'); - -module.exports = override( - fixBabelImports('import', { - libraryName: 'antd', - libraryDirectory: 'es', - style: true - }), - addLessLoader({ - javascriptEnabled: true, - modifyVars: { - '@primary-color': '#1DA57A' - } - }) -); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json deleted file mode 100644 index cd55957a195..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "ozone-recon", - "version": "0.1.0", - "private": true, - "dependencies": { - "@babel/core": "^7.0.0", - "@types/jest": "24.0.12", - "@types/node": "11.13.9", - "@types/react": "16.8.15", - "@types/react-dom": "16.8.4", - "@types/react-router-dom": "^4.3.3", - "antd": "^3.16.6", - "babel-jest": "24.7.1", - "babel-plugin-import": "^1.11.0", - "classnames": "^2.2.6", - "customize-cra": "^0.2.12", - "less": "^3.9.0", - "less-loader": "^5.0.0", - "react": "^16.8.6", - "react-app-rewired": "^2.1.3", - "react-dom": "^16.8.6", - "react-router-dom": "^5.0.0", - "react-scripts": "3.0.0", - "typescript": "3.4.5" - }, - "scripts": { - "start": "react-app-rewired start", - "build": "react-app-rewired build", - "test": "react-app-rewired test", - "eject": "react-scripts eject" - }, - "eslintConfig": { - "extends": "react-app" - }, - "browserslist": { - "production": [ - ">0.2%", - "not dead", - "not op_mini all" - ], - "development": [ - "last 1 chrome version", - "last 1 firefox version", - "last 1 safari version" - ] - } -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/favicon.ico deleted file mode 100644 index df12210781df8596dffad422ee5513a9df8807e2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17470 zcmeHN2YgmVw*S!8RZ&-cmPHWjvX)guMU+lDq>uyxXy}9{MF9l_q)L@0(qu)t^pZ|O z3Pn;Nfj|(P(w&w}#vW+E3zb^k(-DVT0ssN}~P*CuQ^{iTUcJ`fYSF7sX-&&v0 
[remaining binary patch data for the deleted public/favicon.ico omitted]
[deleted HTML page (presumably public/index.html) — markup lost in extraction; surviving text: page title "React App"]
- - - diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json deleted file mode 100644 index 1f2f141fafd..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/public/manifest.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "short_name": "React App", - "name": "Create React App Sample", - "icons": [ - { - "src": "favicon.ico", - "sizes": "64x64 32x32 24x24 16x16", - "type": "image/x-icon" - } - ], - "start_url": ".", - "display": "standalone", - "theme_color": "#000000", - "background_color": "#ffffff" -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less deleted file mode 100644 index 1d6ee7c3f06..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -@import "./components/NavBar/NavBar.less"; - -.ant-layout-header { - padding: 0 20px; - height: 50px; - line-height: 50px; - background: #FFF; -} - -.content-layout { - margin-left: 200px; - &.sidebar-collapsed { - margin-left: @sidebar-collapsed-width; - } -} - -.page-header { - padding: 10px 0; - font-size: 20px; - font-weight: 500; -} - -.content-div { - padding: 24px; - background-color: #FFF; - min-height: 80vh; -} - -body { - font-family: 'Roboto', sans-serif; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx deleted file mode 100644 index 0205e7473ad..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.test.tsx +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import React from 'react'; -import ReactDOM from 'react-dom'; -import App from './App'; - -it('renders without crashing', () => { - const div = document.createElement('div'); - ReactDOM.render(, div); - ReactDOM.unmountComponentAtNode(div); -}); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx deleted file mode 100644 index 8c1e7c01d7e..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.tsx +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -import { Layout } from 'antd'; -import './App.less'; -import NavBar from './components/NavBar/NavBar'; -import Breadcrumbs from './components/Breadcrumbs/Breadcrumbs'; -import { BrowserRouter as Router, Switch, Route, Redirect } from 'react-router-dom'; -import { routes } from './routes'; -import { MakeRouteWithSubRoutes } from './makeRouteWithSubRoutes'; - -const classNames = require('classnames'); -const { - Header, Content, Footer -} = Layout; - -interface Props { -} - -interface State { - collapsed: boolean; -} - -class App extends React.Component { - - constructor(props: Props) { - super(props); - - this.state = {collapsed: false}; - } - - onCollapse = (collapsed: boolean) => { - this.setState({ collapsed }); - }; - - render() { - const { collapsed } = this.state; - const layoutClass = classNames('content-layout', {'sidebar-collapsed': collapsed}); - - return ( - - - - -
-        [JSX body of App.render() lost in extraction; only a fragment survives: { routes.map((route, index) => ...) }]
- ); - } -} - -export default App; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx deleted file mode 100644 index 3e8b13d241b..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/Breadcrumbs/Breadcrumbs.tsx +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; -import { Breadcrumb } from 'antd'; -import { withRouter, Link } from 'react-router-dom'; -import { RouteComponentProps } from 'react-router'; -import { breadcrumbNameMap } from '../../constants/breadcrumbs.constants'; - -interface Props extends RouteComponentProps { - collapsed: boolean; - onCollapse: (arg: boolean) => void; -} - -class Breadcrumbs extends React.Component { - - render() { - const { location } = this.props; - const pathSnippets = location.pathname.split('/').filter(i => i); - const extraBreadcrumbItems = pathSnippets.map((_, index) => { - const url = `/${pathSnippets.slice(0, index + 1).join('/')}`; - return ( - - - {breadcrumbNameMap[url]} - - - ); - }); - const breadcrumbItems = [( - - Home - - )].concat(extraBreadcrumbItems); - return ( - - {breadcrumbItems} - - ); - } -} - -export default withRouter(Breadcrumbs); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less deleted file mode 100644 index cd3ab1fc6a5..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.less +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -@sidebar-collapsed-width: 50px; - -.logo { - color: #FFF; - font-size: 20px; - font-weight: 500; - padding: 10px; - background-color: #002040; - .logo-text { - margin-left: 10px; - } -} - -.ant-layout-sider-collapsed .logo-text { - display: none; -} - -.ant-menu-inline-collapsed { - width: @sidebar-collapsed-width; - .ant-menu-item { - padding-left: 17px !important; - } -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx deleted file mode 100644 index 69af9691853..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/NavBar/NavBar.tsx +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; -import logo from '../../logo.png'; -import { Layout, Menu, Icon } from 'antd'; -import './NavBar.less'; -import { withRouter, Link } from 'react-router-dom'; -import { RouteComponentProps } from 'react-router'; -const { Sider } = Layout; - -interface NavBarProps extends RouteComponentProps { - collapsed: boolean; - onCollapse: (arg: boolean) => void; -} - -class NavBar extends React.Component { - render() { - const {location} = this.props; - return ( - -
- Ozone Recon Logo - Ozone Recon -
- - - - Dashboard - - - - - Container Browser - - - -
- ); - } -} - -export default withRouter(NavBar); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx deleted file mode 100644 index 5af64580ce1..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/constants/breadcrumbs.constants.tsx +++ /dev/null @@ -1,26 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -interface IBreadcrumbNameMap { - [path: string]: string; -} - -export const breadcrumbNameMap: IBreadcrumbNameMap = { - '/Dashboard': 'Dashboard', - '/ContainerBrowser': 'Container Browser' -}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less deleted file mode 100644 index 1b94f4e6a4b..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.less +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -body { - margin: 0; - padding: 0; - font-family: -apple-system, BlinkMacSystemFont, 'Roboto', 'Segoe UI', - 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', - 'Helvetica Neue', sans-serif; - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} - -code { - font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', - monospace; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx deleted file mode 100644 index a3e450c34f3..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/index.tsx +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; -import ReactDOM from 'react-dom'; -import './index.less'; -import App from './App'; -import * as serviceWorker from './serviceWorker'; - -ReactDOM.render(, document.getElementById('root')); - -// If you want your app to work offline and load faster, you can change -// unregister() to register() below. Note this comes with some pitfalls. -// Learn more about service workers: https://bit.ly/CRA-PWA -serviceWorker.unregister(); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/logo.png deleted file mode 100644 index 0438317fa5a0bbce14aa879ad6845b94d3813e45..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22480 zcmbq)1yCH@*6uL42X_JlcXyZIZi59K+})iJoDkgI-CYC03GR~M?rx8BuATee`&Yes zZ>nZ`diVOiC3|h_-W{f-Ac>5Cj{pDwkfo)>Ro;#sZ(n~nn73ahvw`Hd1B8=`q$r?r zoN)i`1m0ds%LxEL#Q5V20Z2{91pr_TEYv{GAbB}nV>=s0LlZkAFr&MT{hKrZ!0*oc z_SXjNY)Im6V{Pli>n=d{rv&fY|38YE$VmPaakdg51Ia6qh}k)UNjMnU8JWoh5lBc# z_#I75c~!(e{vG~yCO~HH>}=1=#N_7Y#^}bzXy<6g#KObF!^F(W#LCL>Cc)t3Ve4$@ z&S2~G;cq1W;1LHq89Q3oJ6qVx~hoXOe{${7Wxt+6}lewM!Kh@bf z{oS`WFPZ*%nTdswnd!d~I9r(h%hZ3v{&D4hg}p`VziV_iwEtg0CdU87*tCPCSXCfzry}i{1?<8wZNbOsBuGP7_qFmp4ovZ=AK@v`&qva>QUbMZ1W^E3U2?El~qa|FLJ{5KYEUbcT@ z`HSTL;COS>!qnrxar}oR|C0P49DlOgIjY&&SqqB%(QZkU{*m@1VqzqJct}DmZ)j{` z`$t=#`I}>Z1^tE3#F*FA&e6uuSbB z`Ts`qAF8A*oSf|(J^t)Z{@Kj`>{wL64*y>IM_&HXGgw>v*+JMFIy!;>$YTMreYBi|Bs?{>y=X>iD|?H*>J<|4h8Urp({i{*?OP zNi1xgoDFS_!GDjq#s780|Hl3o`2ViM(D=_bW&CHNoA5LJ4f^-e{|Vz~`di5Vy(<1z z`tLRLk6QoF8s-06R!RTA`t~32e;N6A>i^F~{R{l>)PHmEFJsc*2E0Fm@iyWKBD{I~ zkI_>Qq5oAg1pp8RNQ;Z8xkDUhz@}^TU*GOp=vW{IB8M~>i>Mvt`U`|{z(fr|dFAFR z=dmaoN7eKbM^W6^TGtr&tyLRnzRQ<7we~r9e9rLNU2H>btER@k^vif&$!KvZdfiFq zaVq2=e@0Cc&3U&OcIfdd1Sf96Q{~s@dYMRlcYB{~7t5DB8%ywGJ<)-pW)tN-wlR zFMi@vQGfuLAEAmJ`+Nq>nsOhUkukDeSTwzFq3`#6E%$rooMA+jJs53o*saB-(CGJi z-6kxoBV4&MR#eqzCY8fCTQ$lmD~V%e3SfXHew`H~b?A!7ilImj5x;2Z=<4*^Tr)&6 zznwh=qnz2Qs!n^U7AZV`X`iJT4F;>|+(%>k-mh2tG9RUW#%!Q4Ux~D9oy?Qev}ykJ zc^NQT>E8_;ywP3VB+Vy?HT>-!q@F^@4W`dhpc}f?u5(lMd3SAT5z70M5J9~~B|dt^A1v75WM{wnY!nJP-ow7aYbjMc z8HLA2`8q&gx0UI0vv@c7^+SaJ&GGU)b#-oTF9rH1Qx$p`L;u+i2sa-wE2v~@9>$xG zvVCvwceD7EJCVdhU%DYtM^W+F2W>jOM|2ac-e6BD8sZs zn(EFHv^}6ck;F`uTY!MWZ}Cv2LKDQk&&VUEH9v^&j~ka63&ych6%Dsv-Cyo^?Vg4d zeW&P!hQD52Uj+EV-P|J7dh$mZ7&9WbFh+S&Mm@@Md>>6=TXmWcNP<|n`=%&pk>9eN zs1shAE)4c&*YQVy6DTNT=$c}F>8Llpb6W)J`O8X8^93g7)zMh~+WMwAeq1*pXnR=E zbR&8w-rczsh{$+HB^E`!+KddQL5LKbcunkZm-T_A$oUPFS4D%D0uHC z4`bdAeuyy{7koV0D%Ywrg40F`7zAL^T^n0sb{F$$>)s>A-*nQIzcRMX#6Rro#dI^$ z(XpMJKO9!2`E*0TZlMTV{p?lXJ0?F+S1~?3I2ad3Y1U-={+3ATHSQk~&ID3V3Ad_i zKX&-L;v&0}ULTSU`)mZjvcBb1JvA8&m^-S)z4)2pc)d+!^iydyf4-^*pzu4E+cS26dx7Cf+@X-^<@m!$opxW_w1w$HD$>V9S< zsx4&8qDmza+MNNzC)C5j%FI-8fb 
z`{L^z%gaW)2k}PRS-uY?kE^G?NqjrW#^gC=(>rM76DGwxiWX=^+;aEzU5bx<5GgWP z(6O ztWx&Kh)&xyttRq3o%PA{>N$;Z5N*z@^h5>H17y~hFTcIk;s#)G$a=>3UfMcPaVJRc z<$a8D2VNU- z0qgTX%=@h85&akRuFG-0%^+A*&KwC~0$}>XbSBZWDZ~fEXD=PY-1Wqr8)1n)iRWC~ zkn>ZU#S5`wXSi5};Z(opL;Vyn6drM#-?+7m;8J&$sTZ_+}{=?NL-_b ziiR9~Mj<+Q>s+caMYV{3Iw8qZP2IoW?MT0JtWR3Fy){Ziq$umgcn z+sdA*I%_^4=%IV^AIiA2@RwMlyT*t|vTKh!keSFw@_G zf@90XOK|j@l~1mw6&1gD7O<-2B_e15`ylg&J(`;Gd^;~53I|lR(cQg^lrwWl3K>z5 zQ|Hz0%_aEI6q{!s%6Dk_)SFLMTie1fWTrsrDKs%;Pt6@Hns>yY9iZ6AxA_?P2&MjO2L&!v7(9Lsj9rv*zNSDlpNjQ`> z*PnP})BK7qHXgAk!&Up&#RyZ(Y5V2)+#Ya1NM{VULk{^tUp?@%UP9j-0`~d ztH}&zV9~~~UPwpIvnh+Vf73$Gh4VW`5cDkO0+Umd_QAk<8UkU!r>7j)_X?*S6Q95# z?P)w_OSvKfUT!Svsi-adHcRr?uRA*WQr#frs9QcjCk#C6YpBpu&#HYNvO~QvOtruA zhr_R7t#4V__+*0l=~6j>&VXXHy6{NJZ6_27FHX|a#j2t~qt@+oO<}|JCuSjsT=eXi zo;f1Pw3(dKK2l(I4}N|ut!#s5QkC+q5uUjBhw&+cgZw!db}?h)pk?p63f2y{C-<-DjnAGL7qmo~slQ59?5 zKe~fb8UY3q@&5UCT_^^SG!y7&UI@99MvebT%<6`5y3XPCX;;YXykZTOx)(0?A6fb4~=|FF2^fl!IBL^Hng4T1;?}&eK_`r8y_K)c|0r* zWa$OITKmF=ln=*mhymwGOu(;qKZGckjV4V@_9bji=U`87I4`A)FX5?W$E;(9VhtQZMJ1Hj z=Ftlsc5sJK$^YrZ=vr&RrOtu)_hY_j+jd94I=Tr@XK710gFi(QWAjcS5~}BuZVG|c zYhjy7#^vg%EBCm%x?0@j4EY5r=CM*L z_NdiK>tQ7lpePZ`pbQRvP@~Us1t&T7y+AD$x-ycaA#xJFe_*3XJJL7@;!0aOdR^8?9@DP&`!%ywoi2A zoxgP|d3SCM`b=9>p6w}>zeLAUYTpT}+w=AOuJ8SbK4m@!aHUGCxeEb2yc(R9&mGJZ zDC7CHaB3d=(gDKXm>?eqd&gQpNXSo(jF`#mVB|;Lt)GvJ<94l!UZp{V(OeIdHAP*r zu0T9QYo~||M$_$=-KG#;b9`Q&c4`22blf+toPjlY^%1eeg1PbCrf^ zQ@LY{&T5&Sn_Pl$P+YPW*D>fO6P1`B6t8!5gs@C1Q|J(Ng;sxq(XFHr?NEzkIG6|n zZEyvTNLsdcy1Itl4OBg>J8GtGe|>puiQ-xVZ4cFa*t85Tr&}Op6_F0!2v4Y|ZJ(m$ zFw~lMVr6qpNy>VShkBCyA&~6jzT`5bJ~HQ~MX(VF!p8~GaU$CsE*rKG;dFP8kAR0A z9A2J~*uuz2cE&s-t7eXpb6VrzGDTV_$7BPPU6^0mOSDhTf^nK3p6J~kW4Ts#tn4f{ ztFWDVUB`tRXcnAcI=a{P`m^5cYQHUF*t}5@bm{$Ig*6~Cl*6tA*Y&HK{}`i<<2>g< z#>i<)?$S#|Wr2gi(6p_2zDwul-!T-M>&AKMULV(A1wV>>Q!`?JY*qm8_EY!n^tJP~ zPx?3;*1HVgE#i4`QQ{M$i}`L`KGsf*xAHBvSwkk#u)~6D(V<_F+P!ViHhJfYVu-P)b^I2UqHOk`gnE>`vfQ|6)Mlzau4f@!_ZdSG?%7PP&#Y0JtlGy_M0y3>B> zekJi`?C|diwm(w948WKcno_+EQ`9zbZKth>WvwT8fya%95>yYN089#n;kHqlfIVwG z)h6#@@N|k%Mj&;d@IA=hYDlH~7-NvYt~%zakBurE8WfN=*~yNMc1FBFA^{EF3wLwG zNR@1p=RN*CzO--Dbb!#IhcR8@?6jmPjcm~U9GH>EH~s0GsJ~52ielLe%CPnULFcqP zC(5+djmhLMvZ$JQPyW!xXd3udxZMZ}UQDg_2)EsDijG z2HpP4_GJ~kaxF67sA0}x7h;d# zri|vRVx52vxP5)~{d;VJ#AjZv^BAo2aKyrbBJ7jLr^a4bM17en+A`=3$|a%f?}^U* zTMR#k3Ruu4fArHrvB#{h}hA@*f3g9Dwk{1Z$@xtG(w0c z*u!6bh2zE{X#u}YOjDtu;I;c)0|3PzGU(9_;)r>Em6la5oyMywc<37# zT3gnGM3&A;qCc`C$A-|PRE;0UmkavHTy8z-Wv%WF%y!|NX6iP;*~f~vsUtg?BfHa1 zFsq?r0W0LLx=QnJ+x=~@tMSdm3F{e7gCW|F z3(J@OX)cXqz!7OmQa|4S@UH(Y6K<+8Z5U`?w=?G-8S38xJm_N@%r-skxE*J+9q8Ma z{~AcsXB1x%=S%e< z
  • `(Uc%`ND}C69eqTD0t1;DIYS zXPe;vrZ_V9>A1#x+a|!WqG2NZ5WZ3BY?!k}{~!r5M+gGQR*go7 zJtx6-V@z+_7E8|&zF0g{E%FJrQL?n%TLC@=SfJyV!+oQLoN!#w=MSxiMoqN&*<2?tyL2legCOYh z8iQ4_&HXZVhqJ|+RbK$*DGH%9AP18`R%qKtRAw1!)MSC2r5YtCO;FH4NHXObP+=g5 z_K#(!vJ{{1MTGtn8R~ZFK5Kg9LJ)ZHtD)eCOi#ZJ8%Evc>G9WMyqoh}RJJBRUl&JZ zQ-yNgNo4=8kvLTv8^#jIWsz9OMIUigrl1ce-IGUp0_!3xoIv*h46C1Ne=Lcxc>zo*ZSr`i_ zkK%9C9UB%yTxZuWr7hc*GSQz`fJuxqEop9^KLFztAsdto)I4|{>s=sMZ>;NhsO;)d>-fy`Vbr=jQd-4RaJjnoY;zvo)o{&H7ydpW&E4`Z~bB!$)=nB(qV&O{sC37xKkKDq6ib8xd*V zFU4#1GKN3Yts40ud=B)Y3Y&gsDXFLb=IRJfqt4&yR=53l37Rj;&1$p0)j=|#;mHM9 zS<*86hNnt50G!s2Fe*rXR>GxlZbluO3m9bQ$fvW^&>-ThpiTkJ5lu=Faonh$gs^d2qiORdiKdR#EH~bXik}T;bQG{I zb&H`2%8tN37Iw?^MvSyMPRhazhpR`Pft@vTe80IO#jIs%?ODVD1#ms4DijYC32fU> z-W8>Ko#<%*{6L6N%q2-8aC=?dU~IqM#Qm*%-r*k0%CEky(c4AqWcc>;-RC6MLevQ> zx_(VP375t4$40CelvI$3gJ>0Ws<}e~f6j4YQXAD8;Wgr&KB z#}J&42o9CP2vw8| zU{57B1FnfZHG2xE7vDM!#8k%f3SwJ(v_^I|k+yp{*=h)NOr}P1gV!jXq;|RAG&%*- zmIW-3SK3otguTL@EZ(?^5LX|I>JiIa_9MnW9E0kDF4W}s`{H2W!;WFX9={{G^d#Xbb`Lw|zrY4o`63UW8w&K~(&}HU4ums~~1Zw>xI|yaiK~D^5$zGyO zGCauWa|HRyUil@Qry?0#qSQOrfO%w|)rdN;oaVmtU5&+fAQHxTmnM;WbJHw3W_ojT zNZhHsg-1RoT=PqP8vRAx;iol$Pn!Bke80Ot;Nzzr*j3P1Et##PPdFBJ)M)0?cCtE1 zBt3;iihW3;=J_;nGG=D8vW8P%`)dVcb3fOJy!P9=y`6Y#n-CVj;{fW zH3lQ(;Aa-L%?4v)@bJd_kj?Biucz}Lb%d^pwMbsmpQFMK)dQbtoQYT(tv@!>1~O!tLPt>g)Yu>M_$;zm~6d=gC{FeSw-; z&W(AU%dBX}QshMB9pg$U7`{nq-zZ!0utQdicfe3Xm)4$Us(IE>lS7JXIau!}72(-@ z50XLWf(Y#FIhk_D!+c79^y$)BfmnW8>Xc{zSyI6l?jfc(GFVNA$mux7_+yV?AAU^O z2WT}h9Lbznc?6DPdC&~hS#gMFo;$yD#VZqy6P4n#*sP&Ng;yi&rc3B(sCA|Z*FBS#PWl3@BlC{d<>WS8`LkZ zS44l*6lE?BuM>j4ZytquvqQv;eYNC{4GpP^GpxHIm8N>%A9Xl81>neu65UPHqsch7 zldAWGY`nXcoi3m0_i}k3sDA0AX{9!uWhNHUt-Jw-{5gJ3+3))!i}X%=krJwPyB!YVwLF@;yc8PM z5|P33F52jtl)mj82#uZ(mg3?5U2DnSDFQ7H@qEUYR4e1wvuFlJldJ%1mv_(+@1UxS zRnirI%|w%7+UewE<7&2OO@X^KiM2Rl4iP>j{7(dZnuwkB&S6hExqc+x6PHm*e0t&uL_l$;X zW81cCSe)5-OD5tQOc;Fku?~hfECibOoIY5 z{!fs%sj0lA=ZluAG>=N*hCSQtd2Rutx(hW{(=t$WK7lcp?(r`doD2+vHpgH1pgr?SwR*RBuWPC@%K-#{=(OPPxjSMPQXr@QoFgj39C2C9Yy2W8n;O-1f(K3 z^1uKEiqhTS9@-u(7Celg@Ey~nLNz`79w*Qf`*a!?;Y1tBn2M}YYRxS{qT;CHX)WK1Q*Xv|PowZXU1 z)OT|sB!U*jk12y>ncn-+R2e)g*i3PY4n#0O434FUb=k~_ddUo59#;F+uAI~GG>EHe zz?L$Q%#^(s(|5W~H)t3FiGbaGo?o+vxCY!fDhKV7Pm4fA#$qZ}1}Ntnp|&Aov@^t-o- z5IFSb7R=eCyX-}tPa@+zP1LqweAbk5X7wH&)Y-~st;Mb`j$V-S?)UI&4e80qsD7dA z3u+w{Vf7$_W>Pn5R(Zl+xWw|3#ZTBh!1;V7qX7G=}i0IrpHb#$_nl{o?E$h~9 zC{PDT%99YK#X#kpsZLT>ub_9%NNK+-85t8XKipR!b_~<0(0mW6>9i6{Yg0#*eVDpU zl-%nhvN%kwiNTpD?#7&!tf{-dFZvogI#dtqVW96#^b zM6tYVM6jNOj6*#gOUW{STXbPgq|eSp(z4kn9UZG6?n_WxSNKF{MXofx8`phV1QX0z zzuhx7LMBSYrLgmD`%c^QwtLkS@e}X9aBmjRH z+t0KQgg!Z|@%75tbYKi=&^@Y2-I4asRkHBqsgB-vHXveD29rUOqsq09)tnvHnMlQY z_e*-7W_aN8p3V=HqwRp}vO|lsy)EW7Hxx%NfIhH0YLN%VzHu;@CP8MQ&|<%wwti1% z#tO?nu%4WUiEp|>ttOh!c96%m;G_&wGfvdsDKceAo+rVL5N81UD`Y$$if_-vif-l2`z>gZ!ui#N3~V{R_Iwnsr9JJ@*;xp$5nXUlFX1W zX?IZE`3$`dG!eIbHmnC{5tIWh5GPMIgIR$uCP4pq8(EwvT`f7?e0GA!oSin_dd@&|47t3Jgbq z9!>Ya?dQ0sNK3#y5d=(umAA{cB4G^KkP-@t3hg1`Apz0#f&tFhA+<98C0P?R{x61P z$Jz)*=U*hg1m+9TGN*MHTAi5VA5(S)qgDXM3+*5_*3FSRX8@Tv0D1(|aROl-V9gJ{ zDgK^T`)+RNWw?+=O&dH^~gRNJ)3D|B9BOF^FAh6RAO&hbQfgh zhQRq@Vea9El+%WX>ea^O{w$*V@(KkD!*)Ua-Bg6AN~iWRS<$DZG4xpvx@ag4xG5P_ zSghOQpJX8cay%7YfaC)Wa$|KP>5uEstmygBkS42ywtP5=*lyDMjQcut{!WX_fV+*9 zh|Y1{o14j@ffet!B|Ol)$&|zdS{?4724o|Os=nlh5jxYBN~&rO*m$x1j*Zh|n?RRb zRjj1=o!SzjG_lsWtH7Zr_5ra&^N?uMuuOD`LD=ql0baD%wRCS1d7RjHX@X^fdnu#& z533~!J%aV;6G5Xpsy~Oe%Y6#~37wD5YNTVxU$5wip+VhT*G4Xw#sz{$7u9)yqPqQM zh&xR-9Qh&Ln>m(}ffatujJ=+PftH-uK!NARg-*;)Qb_KgSDE-{L0uGW@(SDL z%h)l7i#KvGIid)8KcZOO9kR_iHSFnoXI#!Xl 
zXKt=ySIH3-f=BY*;xV7L5se>lKMVQ-xK^FNEw!hX(|R0OBpHFW^)0=VkcQtmKTa{r zdNU0fv_KxR#Q;ZfS75qHp_?G8t26)=uppWdA|MG)6jt?dcN3ZNapA1rRww9;8zGvu zvkoGbc`3Cl7CkXgr~?ZWGWcV=2t2~SB2GI24F4uxim_BtSs6(HT&Rbl2ira!;TZlH ze>N_|S>?2{t?FGkhB$afteYVD_5l0v3)4K)@^SkLeBlOJzBzbnzVMu+>E1z0fF{x< z5ggT2f2P*RePiOZ&zHRsL^<}#>NLA&tfHkBFBZBgDc}8!QWcosILII_r9yr6FGF{bKIac4V4# zk7W?W^$0`DeUG((2e$4t=`KY>(j_##R21ILky|XS0;z8lxtY%wr?j#mPhsam9UicNtkPvSVqbZF z4F>kNOH_tsd!i#6j~BbTNKAR@%9xdeatSlzvh!l%{(sm3-|&2tw}{MAt{pieydF4@Jm$Sb>rUY>Y3UxTdhPdOwoTW!jX-nS zL}Q`mqE8J)ocdoRtmI#tH=~`jv1U-vrI%#Bjd??d1Wa*A)U8cob0&)1O$^IG>8zRt zR+N@z60y?8Bjoc5ZNZp~tc|IM*e;g#w)N+KyB0)2;KA%RS~(ykq#-lb=saO9J%2T3f0Z#T+1* zom#u6Y7+P*blC(*FJlZptwfmDNzOA^2W42EPA{?*0jPY=1ppM8q zf>^S}xJhvO`e3vdb*gSapW^po-7uYh1I*cN*XydD=YGM*4uoCBO6thDY_3HU3^0?- zoZLtJhFle?7aKfCJpUbl5@fls44OPlu1yVyA2r+hns)BvF$&t$-cZcvt1YE_T{0SE zx}Qey^gVe{8_EEF^P{7fsia6^jX9dmR61F;v|CIWxk zT0d`|WH(gW!`ixHZku|j^ZK-cNnZD+BOV2KXsMxx1)EZY$Z_ z7t8w$zJv?I9C4l5Kr~A&1|tfiCfB{lDwdyXKw}lt!?$2Yu$9>AjVk2Mq7yU{zmp5- zN&ER-n#t1HWqP-z74K5FP$W*kg4DbKR5HAiy273q{mOpE!sQ*YiDXcPpdNaK`L+R1 z(lGo;tIR_?i-{vV&rmrH*5qApCnNIoTHE?2MeS>9K@A7)79zdb;w18QcJaBu@S{Xm zooVW{-Pw2`FemSY(!blE=sP&ozfv)bLq@&MegAIe9hzm~8o(;qU_YoR7no zIb^s-Mg4e(Q*nd$%gIULv7r57Me|qXCPsNISmtPqK>7f6p(OH~(Td)W9on$(A~aGb z8xxk@kT^HWw(zs4?hk@p+Y+f$DlJU|U+UWWgt0?DvLOT(Luf-Xl~z?$xReWOy#w7~ z8!pU2_yBDd@-1#d3^ds&JoEs5+%C=!fo?DfE;3h!OiqC6>^Slya!{teEt3MUuU-PWC!h-QPRHvVKX z=mYca56w_L7V;AcwWJ;60H~mV9)9{icMR@dh}J<65~{dPuq>U;RqIfZ&R7H-Akn3b(f@ z6{8#r0YS?4rV54%I4f18q_^+ct}Cu2PEWPxJ~VXL%%VX*$~1NpV(VgcPj?R(ZVaQ! z%=EBHucWpOpjrP0Hp)^Z6_>JuL;6W0d!cQgQHxbInHQypcp`NB>hBpl0EQaBvhOxb z^?(nrbJ^ba(OKTQtkV^m9_*&jRN1eAv$LzJsx94R(YNWxbX zd~U4fOk%)r^%s>IiaOfB)5kCBa0{?50nAAve1D#tQ&l6u%S!2}KUZ9D02YkFJk4Za zpUxXJ?fNqpyJU0brryyH&SF}wk=njCQFg?)Y;-V;c#=0UpEXNx0{n4dste2+ zFIns{MWaY_;B;niBuPwSqLi)Lof;xESvz|(A(?%&+wrzxPjeM~CdlMCCPV{Rd^$&R zzbo0k=={{aFpSdF{NQ+dRemJJWke!6XZj(0(=HWpapp5WPcava*TC-sp;1|;jx9?o zEBW^o(b=r-%-0<=)I=^&dMlNeD!~~~W%|#S+PI8+&;?(~PDNGNAZiem9ocT3!+e}v z`I7Swb#w}&MEjdv2k+$9JAK@cH=zUyqM$_m*%mtX$nBIS8`hG!X`LpMiBvjN+H{a0 zR+gYPmi(-C0cw!goXN%XZcilNWL=McH%l$`(=|gU!5|ogX+syn&gr&8Ksi8vaz$%n zGNX4MW5a1B?2T{SrR|Ia34%cQ2<7KZ(a+20YI7N!d-6@s+CnuUaN|0QPnvFFN>q9= z$?vYGI&nToD}J2|r>nfMhOl&jlJ+Q;UpCh~MB;7zj+WnU$~Iaf^x=-Mmlc>2oU?a~ zKilmIEW^ie@cUyK7G?pgB-XH$LCkfNpV|kv29J?-f4(8hj3-m+U`T{>IBElO(3zcsg(xO)uL<5$m*0 ze_q+2JDy$N>2g8-jyj-kb1*?BP!y|W3UQ`UKiMH(wSDu~`w;+ct z{X^ROh4vt^1S`F2qsTdwLwNSqv84N})*YK8TWa$UO)Eg=lI5&!p8&NkKa!se0{S8&E)SAP&;Od4iH;E^7&n7E%l}FFfyRo`O?t^wXERtYY z?5S15VQ3ErN>LOMdT0u>FqC58M)s^2m)q2_JCe_!L$Fy1`LYn3r*WE(+8=V6!829? 
zA6|RK>t~V;o-)>+nh9zLq;0>u;xY(8!5Kw!kR3_c$-+XEC>OqZiI06&*MDKSSIfy}S7%|91N ziz68PBIlTrje72jMzQ(O4k6bbWx#h3)$!5yTxn-Y(KoS_VV%gl;oUo>5?GBv^5J1U zJnQE4G5BpbIv{6d3F^RbcC`7d4Rndm?Q10KKAcA-<>0s>0=x0o5r!>d%-;jRIm--k zxj{)p8+cIMnUn3O6ZzY+s8iRj6}8KC1e}c5sY&ZD4p-s>B8arLP@hkA$?J2#7vfgN zKpux$zt@+6QXk4m*DK%J?W*yz^W-2z=wKstdjzvCA*4PAj6oPA;7>a7O0&x*?dnyZ zo?(7%W-ReIj@_&)FC}<&7}t6xbT_}w#VC&8`EiNLTPZuyrK)phucK)%Rvh0}yHZqu zRi9oaT|&80^{^U`GYUcF!KY1H@tRROdBlRige^OA?eMCqP7oInMJRJZW(!;+()L`a z(8~}P;!YpBqeGro6Rv6;J>EVs*2|)3>!KkX4&CX-2gBxJRi(`q7 z;XroP3r+*;ha#IChcUh;IM=4xJtOdxEWK&nclo$wH&aI6`jG)UxRcU5bijDqIKCW6 zi*7Bh9Y0TwN$9#@*izN`Fbv}ZeMPBVU7R(5gO$lbH4wlP)h~UbFDQtVr_Uj4hLCcM z3tanNH4XWGHL>IAQonG{5J{mfn@onTs@zpwueIeM^`ue-6R}!_1tBaO%{v;#Y9vBe zq8|9&$h;7fN+&cB!XV9z(Z1y)ibs$~b!)zTk604&Yan?LracBH-lki|R90Jp)tn2J zwHx2%y1Y$`BA{#>ke;&!5BMV1p()yt=13yXg`7AY9i`-eZ0P%g>xYq_Xz)9^ir#i= z;~A)HYf>_ygl$-j8Jz|NX7n=}0<;rd=x z+#k@CuCJH^cU)I15Z+d!CaJE*A9=#>O1uVEk~^%A?BWhxmx4; zugOrHL+&uNx^3&a!_frJ(r;V0bqYINv{2W1#WKJhBGyu_3Z`q`$W*H0ZQxGg&T){; zKNR}?DL^Nlb*1;+?QPDz>!!{^Kv4Si3G)6jK5Oak-oiB7(sOs>dFe=mE+*n0N&1>R z5}lmAoEWr1mM5OuKyot(kR=k-I;;0gTQGdd;U}N#*@Zq-A$m)NtK;R{`UmK1A!*8y zteHm!md!6)8fAooxH`W|7cLm)%K767@g-i)L2PvTS#E>c$9gA&T+-usu))Mq2lBrvpbi#8wYMDUsZP)4*BdC_Z{fADjAv~vWuiIpb9a4f7& z$Ik}S0Th6DdAjG-q#FVhe~ZDR<)WeXH7x({|OVUS^qI*>k+Ct+ZXz zWmm0M9y^bSFB8Og3S&vm0&3E%1e-EvBkl zOpg~xv6toy6=n~7I1U3^6awaEzk6%qG-Se;bpYMMb%3=h*cNhFq<1%#RSX?MDy^xS zy0i91cTZeZA0uyaf?oMkK=$ZO5xj8FOay8=iA=Q^ed7jU*ycRq90=dsv#i^U4riasaLeqHMcg2nk^6hOV% zV|}OxIGjI9=G=BkfXXl?%rJ$`m2VgvIb%u=trxEj1Ws+5Jn_(b=`jk8IrIwr+I6+$ z)~7}-&mB=bMaRs~nC!i6J*+5eD?zAE1xb{P9_eUvY?s`iScV(pMaAeyQP6RfZw&Xl z6?JV1CO^KPmku4doN{VX(8aURniOVKey2rRkK5z1>Cxi0;;)Y4>Sf~|Sw?#p7*~{{ z6+{HC{T5#OzHc!^=^{&-wTjJTa@{u@S)AO0S6CnL5?Y+7Uf7(7x}A7#L$%!2(dC!K za8mNvv9L)o9K~k*l@(08{=GCQEH&at8xJjo9f2m@1FG6;>jz^GKd*&qIh~V$vg$z* zG-8b><5T@($=i-9d}BP7)X1&iFpLwD7~~jUmj%uRoipo}@`>)eIkgpJ<@bVAQL{yC zYh$a|`!(f23t2`T(y`p*(l+e0!aD|3rl*yVf!q5NhNeSavsOq>fmwj95gfoE zPYFs&K8lis@&u~8NX4(jUKJ48Ozf<2p|V-_0Wll4n)Z(^^5wp5D#xUY18L?2A6FkL z-KJQOlbmPkv%VNXbUl;Cg0&&v-7%c|(Z%+~YAD^kjsu$rJU#8Dkab&0%=0Zny zmc?p+OMFME;dZlV_QRY+bGhiosJSN5H!yasoVwZ!72`-Q6| z=kO>-e>4%6ZQmNEM{?ehJbP_fi|1gXGICeE!z{x^Bj8fM2U`|FafxMBAy{>~Y`OTg z@aSsL{@6riD!=K#J`{a&2g)WkL#4@Z8xaU?>0_OMz%o>W-LZa zsnG$`h4GT=lP=N+vu5e+G1)P7cf3XArU5%Vhzh>4ci$bK3iBAFECSh(jDnWT;~_Jm z1#TADFD($4=Y;gcq#(neix#j|*OM()a>%q;EO%D$P7>Y5#}rF8eZR${B*Wg?izvXT zoe9z7z(2ZWNwR9}<0Gi*-j0{G84_gdd(M}GcVklV(~?Zt*yU$xRYR-CZ}rjo)!GcrPPXv%cL8!xFn2Wa-HxHfJrn^{uy! 
zV{w#6)M=KL@W}4U$sl}m^u;GS|GLE&R-1I0NyKRlVy^xiqL#ObnKrImqzZ9o2Pex@ z$nQ|E$HE(G0i0#DgcV@5Y1yXofy!R0Dsr0FOpJ~T{_rWnQmqLeyNODNY~)Ol-ibvl zM`{`y9eXluBE(0xkFqYk66bfS$#PJB;7GAOt4+sY+Eag}v)%03uRe_Gc6x&6R+eLT zW#Rd`IbUIJRS#{^uYSP$&c~B!#r;x@?_O#*?(q|=?_W1EN9owAWGX<76w6pGUnI=p z>6yNQvd@(E)x_%(DAyO=Xpj@qaJ*KJ)w~jyT{?nAc@%d?{OkA61;BXpn$N!o9`LlC+e+uYrT^HeGlrTon^6XWjb7|D4%lt9tHvJ2fnIUPQi8@cux;HetPXXd!DyT#JRou93_CocPg&CU z%LVYhgx_yQSu!P3bc0MKegs=P9)LNDEH<O&@5dA!A%TK& zozkw6o#~upiHU5Y4$(XczhMb-i>5fjmohh1YrD%y$Ws=7iy0ugXJLD}u8`&19fAol ztA;4R$HCGq0SsWd2@=heMjq~bPbHLrZ$G~WKM}`>-t76?+kNbm_eK_gGI zRb?RvhPg3@cIjZ6DDcwZmEw~f`t=>Zw3w~$edYb`(JHsw!4b_p!H@Km$hPr#Sw3>e zv9DRICDfGCL7`y>hP%^I4Nk^(Z7!ZCd(o>GS(S>nE)qUG8sP|1do&l%nENj8j_#41 zf$;E})c&cs#NivCw>3jCyks%l=$-^2HnPZ?Ocuhw&8#`CvR8pcdNG#=nv^Zl#Gj>q zFtV7fYX*dnsydw;=lhYOwmpd4`w`TM$0jgOzcJnR?klC#JPD>Gd| zYq*~#61hCuU8pLWjXKDop{L){*gKMhV*0_w-TbH?C1sB*PW7E#5&BBqR`iwUQ2tbR z%e3gn0PSzR$Z!S$fVw9{GlT@}XCs%c zBNVLi>5=iaG=#dmjjg#^>8KI5{(*+<$uQsI@MV#{!M}FLYE}Whbca8MIh= z##D`AF;TR3hN$=YM{(u$cGT_tR6)f>__DDYs?Ja`DPlC`xQ;RZS~`(d#A@qmY`)y%OF1jw2a5R(0;mWE1#%$P06p^!RuY)w(7GpA6%aCw@_7Cm0yFqc6;_OHL9ZHTIKM5s0dYf1>4x`pGRLTXDqNB(3Y{hmYeK`0h8sPq zkLo28wcB%x18TY?fSI}E1u#*YxM}Y=?CJU~Ev2V4Q zcLBsuk}lG1K*0e`%gtfdRHd6;c+w5*d4l`(#;3rZyR+Ju$#VsA66&37v~bY?01y31 zL_t(V2R#;W?<4ONiiR>I;tcFrQ=L{CEKS!?-24>j`dGs5qOzjt$}!J(=bwMRE&PiM zkzOls6|Ox{{WpCVz@+VT+Y*9jft&sd%&FtZ1IIG1UqhI0Eo>=2Z#oRJB9OU>@@(?! z#y+Jn%sE#_8o4rhCHe*Y`%Z&j!Pp|0YBU#;Ks*N4V7Wcm$h?jH6wd z$2K7gl+Jy1Iq9a1kNZ%sUl4a~r46_OA;+?7vTxtMPSUfckA8?!*}d^@*-^K?dq3F{ zw8Y-ksTqqiYpk(GO5-pcP;!r^k$&3dlm1qAbh@(Cgvi`>Jz&pA8}w7A8>7+glru)< z`1Mp9pJ-46+E>zp=xt@<-Lf?)Mtb^^;ow%htXv4j~x8 z5XTb$dB%59J1D$B<7WXk zfm$&Rh3RaVeHZn5zE9)l?(^Xr5}F_F19a8%^-|61gaY^R}-|_ zH%O8K8PMozhcTYqD~d!TmZRq<#JZuNBEV6Qy<1sIjVH=r$kZ>;xRq91$=x&h7I*Py zhq#Tab`7o@<{A>unggH(cEVL~-^P@}6K=;|r@7%9pTziEV*;3jIc;zU&;n1J_0yQB z@sMph4&8Z{-`voar8okav3KUlCiRM<#a6xXpy|ET-QK(Wm7nUW^t7GF1I(EIDXQW< z*sDHX;l2xiRn|5eK_5C|D3}3FgB`%8g1c2vX>2sL5YQ6FWCW}S*W1z|Lsj-GDIb3H zq1$kS4c*IQpLf4|eMh%x^-f%rhFZ}7QA!C`V$Zs3I#!lV;t+QHe6^02#__rhy}*f=`{7xlBsS+fK|rOR2is}@R=*yykF%NRjbqbvZ`L* zwB`1GZP`jkONWwIG#Azqq~_LehR0@RZx6>%s2W0q3~YYWiAxeEiAB?uyH=kaEI) z<4~PQc_=U6vBEN%zZzcOy)C!zcygDTsGoOx+oxM<>$}p_j6w5Y=DY>8Svl0gNx0if zbnssjN55T3u$Bdx(!7_~dq7^_GBpaS(}^jh?I&3(+kyt1irO*-drE6cHEPr--3n_B zD@qc;r=5P90jwXjzUbxC-Q=d1g5PLjZCH(D3Jw+*bXS+Q?0%Ae%Kd!Ri``$gy~Y66 z20qSSx&9mWx2;B!el>oI)rxz_mY&={de?VRtXQZASyi|EA^`h$+0pObzb!xiP3baB zn2;rakx_D-Dv1eThKhS=hhfB@tnl9lz$)uwg zs}nt+Z9qE6TX_@`u6GT{`1iTZr{=D&ZrInW&~EZ)6U9j?xLLHo^^dHhqzM*T;ckyv=LO8 za}PiAko(1czpxYfY&>W+Z*2=)`9(oeS=utCVV3mnzsdG)$pPXZNa@-PpGRYs=d8DW zO=c!@?&H|``hdbxp5?0OXsa%ze#;$XWk6+$i`erTAeqzlUl2BwA6~me{`|Y=%b!IyG({QmrpbGt%uzn@W zXZgL^-wDVnJ(W`0Rxz$mSedgt7c(9oq)!v)O^6M9Y7N)jZ30Nzy2H{S>{-%tCROjo z=H7GXtaF?D#YP7!Dfj#f&$~m0AHrtyxo+#Nw`xVb`raO>QU54NiqoEnhKsrtUhn?# zycl&L(I|*!>f{9`TYSefT zWS4%6hAVaZc%}Y&*Ej!NX=*+#jg_=%nkgs<^V~eUTXYSg0ZI8rGatTy4e^ijy_xvM zAxjbdo2GS(Eyt4hN@7>@d_ZHZaq~OZWeaa?pW`p z`~e&NI*)W6dwS$kiQRVH-Abd)=>k|njix@{L;;Pu{r8U8l&e)R|E_Ufz)IIFoi%Dl z;~*$93OZrP@@fKBY;A{J@CnPKSMyCvn7XV*pi9fvI`LLHeZMc?3CL8GItG=5&5gvp z^Rr{0&g}g1ZmFMCZ3_j@bDjAylrLj1yPjpe-Nl>TpWVFoRz|3_$?wmwISL6@W2hrtXb*uHB{OZBAkXfGLZzK;YT%p_~80R=xC$(@JqBkz>J@RV1F+)W*gjyrw%QQnssk%qfR@zQMu>b zCRP0kmuz-xcGFci7r^!N=FL8`d-tAq;jU)$T9ReB^ktV_));0~4ci`ox^xKpzy>jG z*%<#7!*6D3lY%Hw4~rzabw}Hi3VNqJM&V-TQm4Wt#Fq6thcT!#Cf3V^uO?JQ(##Nk z0z%f?>>_;$73^!rAAkHfzT1K_Y5ri@a-qSkXpJgG_|5CHN z#T8b&+Z7IY;wSmfW__ajv{}9{%$z=Zx~(z{9z3`c%K9!ut3U@`jxEZSB(276tMe%z zJ+|3hHsO|wiKHuP{hy`S<8iyX<1@840-zFR^+LZd(8=T3f~g@4+c{0E*I{+lMtU3AZfywN-}y z+kP~V0oT 
z$BbzK@U!u7eqnwM?kNKFf>+PZ+&Si!BK0C%>0%lEnrn>Zw{9HA)Pb^!HrO-XKbjv@ z3xH>F48#4h03J2rxuy@NkCF5$;cCY0d9(Kk3ti1_L&?oCVa74{GgLjT4>wMyOYD)k%sSMPYw5KcE%>M-#NLe=krm0lFw7Fo%al z{ZY930b#aGMFCI`@Gb^#_zxA}b8PWhONmLi;?X$#qj0rMt@{HA44z(Pxd!g}A-+vW z&nJe(i_rw1HphW=* z_lp7;>OH}ioNcz9QBT^U0)C~`Ta4?&|7iaJTfl>i{VyIJ%<^Nege%Q^rL_{c8m}ce zd(|Y)AI%TG1pw5u>4Tx(^L&9{V^ z;|g~I7A;`){7AiF { - return ( - ( - - )} - /> - ); -}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts deleted file mode 100644 index 15f01c4b100..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/react-app-env.d.ts +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/// diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx deleted file mode 100644 index 4ea0a39f687..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.tsx +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import { Dashboard } from './views/Dashboard/Dashboard'; -import { ContainerBrowser } from './views/ContainerBrowser/ContainerBrowser'; -import { NotFound } from './views/NotFound/NotFound'; -import { IRoute } from "./routes.types"; - -export const routes:IRoute[] = [ - { - path: "/Dashboard", - component: Dashboard - }, - { - path: "/ContainerBrowser", - component: ContainerBrowser - }, - { - path: "/:NotFound", - component: NotFound, - } -]; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx deleted file mode 100644 index 7e12d80f4d9..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/routes.types.tsx +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -export interface IRoute { - path: string; - component: any; - routes?: IRoute[]; -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts deleted file mode 100644 index 47bb33ba7cd..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/serviceWorker.ts +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// This optional code is used to register a service worker. -// register() is not called by default. - -// This lets the app load faster on subsequent visits in production, and gives -// it offline capabilities. However, it also means that developers (and users) -// will only see deployed updates on subsequent visits to a page, after all the -// existing tabs open on the page have been closed, since previously cached -// resources are updated in the background. 
- -// To learn more about the benefits of this model and instructions on how to -// opt-in, read https://bit.ly/CRA-PWA - -const isLocalhost = Boolean( - window.location.hostname === 'localhost' || - // [::1] is the IPv6 localhost address. - window.location.hostname === '[::1]' || - // 127.0.0.1/8 is considered localhost for IPv4. - window.location.hostname.match( - /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/ - ) -); - -type Config = { - onSuccess?: (registration: ServiceWorkerRegistration) => void; - onUpdate?: (registration: ServiceWorkerRegistration) => void; -}; - -export function register(config?: Config) { - if (process.env.NODE_ENV === 'production' && 'serviceWorker' in navigator) { - // The URL constructor is available in all browsers that support SW. - const publicUrl = new URL( - (process as { env: { [key: string]: string } }).env.PUBLIC_URL, - window.location.href - ); - if (publicUrl.origin !== window.location.origin) { - // Our service worker won't work if PUBLIC_URL is on a different origin - // from what our page is served on. This might happen if a CDN is used to - // serve assets; see https://github.com/facebook/create-react-app/issues/2374 - return; - } - - window.addEventListener('load', () => { - const swUrl = `${process.env.PUBLIC_URL}/service-worker.js`; - - if (isLocalhost) { - // This is running on localhost. Let's check if a service worker still exists or not. - checkValidServiceWorker(swUrl, config); - - // Add some additional logging to localhost, pointing developers to the - // service worker/PWA documentation. - navigator.serviceWorker.ready.then(() => { - console.log( - 'This web app is being served cache-first by a service ' + - 'worker. To learn more, visit https://bit.ly/CRA-PWA' - ); - }); - } else { - // Is not localhost. Just register service worker - registerValidSW(swUrl, config); - } - }); - } -} - -function registerValidSW(swUrl: string, config?: Config) { - navigator.serviceWorker - .register(swUrl) - .then(registration => { - registration.onupdatefound = () => { - const installingWorker = registration.installing; - if (installingWorker == null) { - return; - } - installingWorker.onstatechange = () => { - if (installingWorker.state === 'installed') { - if (navigator.serviceWorker.controller) { - // At this point, the updated precached content has been fetched, - // but the previous service worker will still serve the older - // content until all client tabs are closed. - console.log( - 'New content is available and will be used when all ' + - 'tabs for this page are closed. See https://bit.ly/CRA-PWA.' - ); - - // Execute callback - if (config && config.onUpdate) { - config.onUpdate(registration); - } - } else { - // At this point, everything has been precached. - // It's the perfect time to display a - // "Content is cached for offline use." message. - console.log('Content is cached for offline use.'); - - // Execute callback - if (config && config.onSuccess) { - config.onSuccess(registration); - } - } - } - }; - }; - }) - .catch(error => { - console.error('Error during service worker registration:', error); - }); -} - -function checkValidServiceWorker(swUrl: string, config?: Config) { - // Check if the service worker can be found. If it can't reload the page. - fetch(swUrl) - .then(response => { - // Ensure service worker exists, and that we really are getting a JS file. 
- const contentType = response.headers.get('content-type'); - if ( - response.status === 404 || - (contentType != null && contentType.indexOf('javascript') === -1) - ) { - // No service worker found. Probably a different app. Reload the page. - navigator.serviceWorker.ready.then(registration => { - registration.unregister().then(() => { - window.location.reload(); - }); - }); - } else { - // Service worker found. Proceed as normal. - registerValidSW(swUrl, config); - } - }) - .catch(() => { - console.log( - 'No internet connection found. App is running in offline mode.' - ); - }); -} - -export function unregister() { - if ('serviceWorker' in navigator) { - navigator.serviceWorker.ready.then(registration => { - registration.unregister(); - }); - } -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx deleted file mode 100644 index 981f767994a..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/ContainerBrowser/ContainerBrowser.tsx +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -export const ContainerBrowser:React.FC= () => { - return ( - -
    -
    - Container Browser -
    -
    - Container Browser content -
    -
    - ); -}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx deleted file mode 100644 index 682d5997b73..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Dashboard/Dashboard.tsx +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -export const Dashboard:React.FC= () => { - return ( -
    -
    - Dashboard -
    -
    - Dashboard content -
    -
    - ); -}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx deleted file mode 100644 index 5bc27cbc2d7..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/NotFound/NotFound.tsx +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import React from 'react'; - -export const NotFound:React.FC= () => { - return ( -
    -
    - 404 Page Not Found :( -
    -
    - ); -}; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json deleted file mode 100644 index 96c8b91945b..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/tsconfig.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "compilerOptions": { - "target": "es5", - "lib": [ - "dom", - "dom.iterable", - "esnext" - ], - "allowJs": true, - "skipLibCheck": true, - "esModuleInterop": true, - "allowSyntheticDefaultImports": true, - "strict": true, - "forceConsistentCasingInFileNames": true, - "module": "esnext", - "moduleResolution": "node", - "resolveJsonModule": true, - "isolatedModules": true, - "noEmit": true, - "jsx": "preserve", - "rootDir": "src", - "baseUrl": "src" - }, - "include": [ - "src" - ] -} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock deleted file mode 100644 index 1f3de1dee8d..00000000000 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/yarn.lock +++ /dev/null @@ -1,11114 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. -# yarn lockfile v1 - - -"@ant-design/colors@^3.1.0": - version "3.2.1" - resolved "https://registry.yarnpkg.com/@ant-design/colors/-/colors-3.2.1.tgz#ad86cbf2d1a0039d01586aa73a7ea8a7ce0455a2" - integrity sha512-ibJybOcR1+h2IEr0Yxx4y/Wcz8obEtKvl2EYvxh8ugMkYniGSItpLKGzKNyyqzOaum5jb6fVCyH1aR9VkdpFRA== - dependencies: - tinycolor2 "^1.4.1" - -"@ant-design/create-react-context@^0.2.4": - version "0.2.4" - resolved "https://registry.yarnpkg.com/@ant-design/create-react-context/-/create-react-context-0.2.4.tgz#0fe9adad030350c0c9bb296dd6dcf5a8a36bd425" - integrity sha512-8sw+/w6r+aEbd+OJ62ojoSE4zDt/3yfQydmbWFznoftjr8v/opOswGjM+/MU0rSaREbluqzOmZ6xdecHpSaS2w== - dependencies: - gud "^1.0.0" - warning "^4.0.3" - -"@ant-design/icons-react@~2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@ant-design/icons-react/-/icons-react-2.0.1.tgz#17a2513571ab317aca2927e58cea25dd31e536fb" - integrity sha512-r1QfoltMuruJZqdiKcbPim3d8LNsVPB733U0gZEUSxBLuqilwsW28K2rCTWSMTjmFX7Mfpf+v/wdiFe/XCqThw== - dependencies: - "@ant-design/colors" "^3.1.0" - babel-runtime "^6.26.0" - -"@ant-design/icons@~2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@ant-design/icons/-/icons-2.1.1.tgz#7b9c08dffd4f5d41db667d9dbe5e0107d0bd9a4a" - integrity sha512-jCH+k2Vjlno4YWl6g535nHR09PwCEmTBKAG6VqF+rhkrSPRLfgpU2maagwbZPLjaHuU5Jd1DFQ2KJpQuI6uG8w== - -"@babel/code-frame@7.5.5", "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.5.5.tgz#bc0782f6d69f7b7d49531219699b988f669a8f9d" - integrity sha512-27d4lZoomVyo51VegxI20xZPuSHusqbQag/ztrBC7wegWoQ1nLREPVSKSW8byhTlzTKyNE4ifaTA6lCp7JjpFw== - dependencies: - "@babel/highlight" "^7.0.0" - -"@babel/core@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.4.3.tgz#198d6d3af4567be3989550d97e068de94503074f" - integrity sha512-oDpASqKFlbspQfzAE7yaeTmdljSH2ADIvBlb0RwbStltTuWa0+7CCI1fYVINNv9saHPa1W7oaKeuNuKj+RQCvA== - dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/generator" "^7.4.0" - "@babel/helpers" "^7.4.3" - "@babel/parser" "^7.4.3" - "@babel/template" "^7.4.0" - "@babel/traverse" "^7.4.3" - "@babel/types" "^7.4.0" - convert-source-map "^1.1.0" - debug "^4.1.0" - json5 "^2.1.0" - 
lodash "^4.17.11" - resolve "^1.3.2" - semver "^5.4.1" - source-map "^0.5.0" - -"@babel/core@^7.0.0", "@babel/core@^7.1.0", "@babel/core@^7.1.6", "@babel/core@^7.4.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.5.5.tgz#17b2686ef0d6bc58f963dddd68ab669755582c30" - integrity sha512-i4qoSr2KTtce0DmkuuQBV4AuQgGPUcPXMr9L5MyYAtk06z068lQ10a4O009fe5OB/DfNV+h+qqT7ddNV8UnRjg== - dependencies: - "@babel/code-frame" "^7.5.5" - "@babel/generator" "^7.5.5" - "@babel/helpers" "^7.5.5" - "@babel/parser" "^7.5.5" - "@babel/template" "^7.4.4" - "@babel/traverse" "^7.5.5" - "@babel/types" "^7.5.5" - convert-source-map "^1.1.0" - debug "^4.1.0" - json5 "^2.1.0" - lodash "^4.17.13" - resolve "^1.3.2" - semver "^5.4.1" - source-map "^0.5.0" - -"@babel/generator@^7.4.0", "@babel/generator@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.5.5.tgz#873a7f936a3c89491b43536d12245b626664e3cf" - integrity sha512-ETI/4vyTSxTzGnU2c49XHv2zhExkv9JHLTwDAFz85kmcwuShvYG2H08FwgIguQf4JC75CBnXAUM5PqeF4fj0nQ== - dependencies: - "@babel/types" "^7.5.5" - jsesc "^2.5.1" - lodash "^4.17.13" - source-map "^0.5.0" - trim-right "^1.0.1" - -"@babel/helper-annotate-as-pure@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.0.0.tgz#323d39dd0b50e10c7c06ca7d7638e6864d8c5c32" - integrity sha512-3UYcJUj9kvSLbLbUIfQTqzcy5VX7GRZ/CCDrnOaZorFFM01aXp1+GJwuFGV4NDDoAS+mOUyHcO6UD/RfqOks3Q== - dependencies: - "@babel/types" "^7.0.0" - -"@babel/helper-builder-binary-assignment-operator-visitor@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.1.0.tgz#6b69628dfe4087798e0c4ed98e3d4a6b2fbd2f5f" - integrity sha512-qNSR4jrmJ8M1VMM9tibvyRAHXQs2PmaksQF7c1CGJNipfe3D8p+wgNwgso/P2A2r2mdgBWAXljNWR0QRZAMW8w== - dependencies: - "@babel/helper-explode-assignable-expression" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-builder-react-jsx@^7.3.0": - version "7.3.0" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-react-jsx/-/helper-builder-react-jsx-7.3.0.tgz#a1ac95a5d2b3e88ae5e54846bf462eeb81b318a4" - integrity sha512-MjA9KgwCuPEkQd9ncSXvSyJ5y+j2sICHyrI0M3L+6fnS4wMSNDc1ARXsbTfbb2cXHn17VisSnU/sHFTCxVxSMw== - dependencies: - "@babel/types" "^7.3.0" - esutils "^2.0.0" - -"@babel/helper-call-delegate@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/helper-call-delegate/-/helper-call-delegate-7.4.4.tgz#87c1f8ca19ad552a736a7a27b1c1fcf8b1ff1f43" - integrity sha512-l79boDFJ8S1c5hvQvG+rc+wHw6IuH7YldmRKsYtpbawsxURu/paVy57FZMomGK22/JckepaikOkY0MoAmdyOlQ== - dependencies: - "@babel/helper-hoist-variables" "^7.4.4" - "@babel/traverse" "^7.4.4" - "@babel/types" "^7.4.4" - -"@babel/helper-create-class-features-plugin@^7.4.0", "@babel/helper-create-class-features-plugin@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.5.5.tgz#401f302c8ddbc0edd36f7c6b2887d8fa1122e5a4" - integrity sha512-ZsxkyYiRA7Bg+ZTRpPvB6AbOFKTFFK4LrvTet8lInm0V468MWCaSYJE+I7v2z2r8KNLtYiV+K5kTCnR7dvyZjg== - dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-member-expression-to-functions" "^7.5.5" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.5.5" - 
"@babel/helper-split-export-declaration" "^7.4.4" - -"@babel/helper-define-map@^7.4.0", "@babel/helper-define-map@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.5.5.tgz#3dec32c2046f37e09b28c93eb0b103fd2a25d369" - integrity sha512-fTfxx7i0B5NJqvUOBBGREnrqbTxRh7zinBANpZXAVDlsZxYdclDp467G1sQ8VZYMnAURY3RpBUAgOYT9GfzHBg== - dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/types" "^7.5.5" - lodash "^4.17.13" - -"@babel/helper-explode-assignable-expression@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.1.0.tgz#537fa13f6f1674df745b0c00ec8fe4e99681c8f6" - integrity sha512-NRQpfHrJ1msCHtKjbzs9YcMmJZOg6mQMmGRB+hbamEdG5PNpaSm95275VD92DvJKuyl0s2sFiDmMZ+EnnvufqA== - dependencies: - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-function-name@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz#a0ceb01685f73355d4360c1247f582bfafc8ff53" - integrity sha512-A95XEoCpb3TO+KZzJ4S/5uW5fNe26DjBGqf1o9ucyLyCmi1dXq/B3c8iaWTfBk3VvetUxl16e8tIrd5teOCfGw== - dependencies: - "@babel/helper-get-function-arity" "^7.0.0" - "@babel/template" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-get-function-arity@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz#83572d4320e2a4657263734113c42868b64e49c3" - integrity sha512-r2DbJeg4svYvt3HOS74U4eWKsUAMRH01Z1ds1zx8KNTPtpTL5JAsdFv8BNyOpVqdFhHkkRDIg5B4AsxmkjAlmQ== - dependencies: - "@babel/types" "^7.0.0" - -"@babel/helper-hoist-variables@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.4.4.tgz#0298b5f25c8c09c53102d52ac4a98f773eb2850a" - integrity sha512-VYk2/H/BnYbZDDg39hr3t2kKyifAm1W6zHRfhx8jGjIHpQEBv9dry7oQ2f3+J703TLu69nYdxsovl0XYfcnK4w== - dependencies: - "@babel/types" "^7.4.4" - -"@babel/helper-member-expression-to-functions@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.5.5.tgz#1fb5b8ec4453a93c439ee9fe3aeea4a84b76b590" - integrity sha512-5qZ3D1uMclSNqYcXqiHoA0meVdv+xUEex9em2fqMnrk/scphGlGgg66zjMrPJESPwrFJ6sbfFQYUSa0Mz7FabA== - dependencies: - "@babel/types" "^7.5.5" - -"@babel/helper-module-imports@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.0.0.tgz#96081b7111e486da4d2cd971ad1a4fe216cc2e3d" - integrity sha512-aP/hlLq01DWNEiDg4Jn23i+CXxW/owM4WpDLFUbpjxe4NS3BhLVZQ5i7E0ZrxuQ/vwekIeciyamgB1UIYxxM6A== - dependencies: - "@babel/types" "^7.0.0" - -"@babel/helper-module-transforms@^7.1.0", "@babel/helper-module-transforms@^7.4.4": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.5.5.tgz#f84ff8a09038dcbca1fd4355661a500937165b4a" - integrity sha512-jBeCvETKuJqeiaCdyaheF40aXnnU1+wkSiUs/IQg3tB85up1LyL8x77ClY8qJpuRJUcXQo+ZtdNESmZl4j56Pw== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-simple-access" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.4.4" - "@babel/template" "^7.4.4" - "@babel/types" "^7.5.5" - lodash "^4.17.13" - -"@babel/helper-optimise-call-expression@^7.0.0": - version "7.0.0" - resolved 
"https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.0.0.tgz#a2920c5702b073c15de51106200aa8cad20497d5" - integrity sha512-u8nd9NQePYNQV8iPWu/pLLYBqZBa4ZaY1YWRFMuxrid94wKI1QNt67NEZ7GAe5Kc/0LLScbim05xZFWkAdrj9g== - dependencies: - "@babel/types" "^7.0.0" - -"@babel/helper-plugin-utils@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.0.0.tgz#bbb3fbee98661c569034237cc03967ba99b4f250" - integrity sha512-CYAOUCARwExnEixLdB6sDm2dIJ/YgEAKDM1MOeMeZu9Ld/bDgVo8aiWrXwcY7OBh+1Ea2uUcVRcxKk0GJvW7QA== - -"@babel/helper-regex@^7.0.0", "@babel/helper-regex@^7.4.4": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-regex/-/helper-regex-7.5.5.tgz#0aa6824f7100a2e0e89c1527c23936c152cab351" - integrity sha512-CkCYQLkfkiugbRDO8eZn6lRuR8kzZoGXCg3149iTk5se7g6qykSpy3+hELSwquhu+TgHn8nkLiBwHvNX8Hofcw== - dependencies: - lodash "^4.17.13" - -"@babel/helper-remap-async-to-generator@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.1.0.tgz#361d80821b6f38da75bd3f0785ece20a88c5fe7f" - integrity sha512-3fOK0L+Fdlg8S5al8u/hWE6vhufGSn0bN09xm2LXMy//REAF8kDCrYoOBKYmA8m5Nom+sV9LyLCwrFynA8/slg== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-wrap-function" "^7.1.0" - "@babel/template" "^7.1.0" - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-replace-supers@^7.4.0", "@babel/helper-replace-supers@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.5.5.tgz#f84ce43df031222d2bad068d2626cb5799c34bc2" - integrity sha512-XvRFWrNnlsow2u7jXDuH4jDDctkxbS7gXssrP4q2nUD606ukXHRvydj346wmNg+zAgpFx4MWf4+usfC93bElJg== - dependencies: - "@babel/helper-member-expression-to-functions" "^7.5.5" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/traverse" "^7.5.5" - "@babel/types" "^7.5.5" - -"@babel/helper-simple-access@^7.1.0": - version "7.1.0" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.1.0.tgz#65eeb954c8c245beaa4e859da6188f39d71e585c" - integrity sha512-Vk+78hNjRbsiu49zAPALxTb+JUQCz1aolpd8osOF16BGnLtseD21nbHgLPGUwrXEurZgiCOUmvs3ExTu4F5x6w== - dependencies: - "@babel/template" "^7.1.0" - "@babel/types" "^7.0.0" - -"@babel/helper-split-export-declaration@^7.4.0", "@babel/helper-split-export-declaration@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.4.4.tgz#ff94894a340be78f53f06af038b205c49d993677" - integrity sha512-Ro/XkzLf3JFITkW6b+hNxzZ1n5OQ80NvIUdmHspih1XAhtN3vPTuUFT4eQnela+2MaZ5ulH+iyP513KJrxbN7Q== - dependencies: - "@babel/types" "^7.4.4" - -"@babel/helper-wrap-function@^7.1.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.2.0.tgz#c4e0012445769e2815b55296ead43a958549f6fa" - integrity sha512-o9fP1BZLLSrYlxYEYyl2aS+Flun5gtjTIG8iln+XuEzQTs0PLagAGSXUcqruJwD5fM48jzIEggCKpIfWTcR7pQ== - dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/template" "^7.1.0" - "@babel/traverse" "^7.1.0" - "@babel/types" "^7.2.0" - -"@babel/helpers@^7.4.3", "@babel/helpers@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.5.5.tgz#63908d2a73942229d1e6685bc2a0e730dde3b75e" - integrity 
sha512-nRq2BUhxZFnfEn/ciJuhklHvFOqjJUD5wpx+1bxUF2axL9C+v4DE/dmp5sT2dKnpOs4orZWzpAZqlCy8QqE/7g== - dependencies: - "@babel/template" "^7.4.4" - "@babel/traverse" "^7.5.5" - "@babel/types" "^7.5.5" - -"@babel/highlight@^7.0.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.5.0.tgz#56d11312bd9248fa619591d02472be6e8cb32540" - integrity sha512-7dV4eu9gBxoM0dAnj/BCFDW9LFU0zvTrkq0ugM7pnHEgguOEeOz1so2ZghEdzviYzQEED0r4EAgpsBChKy1TRQ== - dependencies: - chalk "^2.0.0" - esutils "^2.0.2" - js-tokens "^4.0.0" - -"@babel/parser@^7.0.0", "@babel/parser@^7.1.0", "@babel/parser@^7.4.3", "@babel/parser@^7.4.4", "@babel/parser@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.5.5.tgz#02f077ac8817d3df4a832ef59de67565e71cca4b" - integrity sha512-E5BN68cqR7dhKan1SfqgPGhQ178bkVKpXTPEXnFJBrEt8/DKRZlybmy+IgYLTeN7tp1R5Ccmbm2rBk17sHYU3g== - -"@babel/plugin-proposal-async-generator-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.2.0.tgz#b289b306669dce4ad20b0252889a15768c9d417e" - integrity sha512-+Dfo/SCQqrwx48ptLVGLdE39YtWRuKc/Y9I5Fy0P1DDBB9lsAHpjcEJQt+4IifuSOSTLBKJObJqMvaO1pIE8LQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-remap-async-to-generator" "^7.1.0" - "@babel/plugin-syntax-async-generators" "^7.2.0" - -"@babel/plugin-proposal-class-properties@7.4.0": - version "7.4.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.4.0.tgz#d70db61a2f1fd79de927eea91f6411c964e084b8" - integrity sha512-t2ECPNOXsIeK1JxJNKmgbzQtoG27KIlVE61vTqX0DKR9E9sZlVVxWUtEW9D5FlZ8b8j7SBNCHY47GgPKCKlpPg== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.4.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-proposal-decorators@7.4.0": - version "7.4.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.4.0.tgz#8e1bfd83efa54a5f662033afcc2b8e701f4bb3a9" - integrity sha512-d08TLmXeK/XbgCo7ZeZ+JaeZDtDai/2ctapTRsWWkkmy7G/cqz8DQN/HlWG7RR4YmfXxmExsbU3SuCjlM7AtUg== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.4.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-decorators" "^7.2.0" - -"@babel/plugin-proposal-dynamic-import@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.5.0.tgz#e532202db4838723691b10a67b8ce509e397c506" - integrity sha512-x/iMjggsKTFHYC6g11PL7Qy58IK8H5zqfm9e6hu4z1iH2IRyAp9u9dL80zA6R76yFovETFLKz2VJIC2iIPBuFw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-dynamic-import" "^7.2.0" - -"@babel/plugin-proposal-json-strings@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.2.0.tgz#568ecc446c6148ae6b267f02551130891e29f317" - integrity sha512-MAFV1CA/YVmYwZG0fBQyXhmj0BHCB5egZHCKWIFVv/XCxAeVGIHfos3SwDck4LvCllENIAg7xMKOG5kH0dzyUg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" - -"@babel/plugin-proposal-object-rest-spread@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.4.3.tgz#be27cd416eceeba84141305b93c282f5de23bbb4" - integrity 
sha512-xC//6DNSSHVjq8O2ge0dyYlhshsH4T7XdCVoxbi5HzLYWfsC5ooFlJjrXk8RcAT+hjHAK9UjBXdylzSoDK3t4g== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - -"@babel/plugin-proposal-object-rest-spread@^7.4.3", "@babel/plugin-proposal-object-rest-spread@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.5.5.tgz#61939744f71ba76a3ae46b5eea18a54c16d22e58" - integrity sha512-F2DxJJSQ7f64FyTVl5cw/9MWn6naXGdk3Q3UhDbFEEHv+EilCPoeRD3Zh/Utx1CJz4uyKlQ4uH+bJPbEhMV7Zw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - -"@babel/plugin-proposal-optional-catch-binding@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.2.0.tgz#135d81edb68a081e55e56ec48541ece8065c38f5" - integrity sha512-mgYj3jCcxug6KUcX4OBoOJz3CMrwRfQELPQ5560F70YQUBZB7uac9fqaWamKR1iWUzGiK2t0ygzjTScZnVz75g== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" - -"@babel/plugin-proposal-unicode-property-regex@^7.4.0", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.4.4.tgz#501ffd9826c0b91da22690720722ac7cb1ca9c78" - integrity sha512-j1NwnOqMG9mFUOH58JTFsA/+ZYzQLUZ/drqWUqxCYLGeu2JFZL8YrNC9hBxKmWtAuOCHPcRpgv7fhap09Fb4kA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.4.4" - regexpu-core "^4.5.4" - -"@babel/plugin-syntax-async-generators@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.2.0.tgz#69e1f0db34c6f5a0cf7e2b3323bf159a76c8cb7f" - integrity sha512-1ZrIRBv2t0GSlcwVoQ6VgSLpLgiN/FVQUzt9znxo7v2Ov4jJrs8RY8tv0wvDmFN3qIdMKWrmMMW6yZ0G19MfGg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-decorators@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.2.0.tgz#c50b1b957dcc69e4b1127b65e1c33eef61570c1b" - integrity sha512-38QdqVoXdHUQfTpZo3rQwqQdWtCn5tMv4uV6r2RMfTqNBuv4ZBhz79SfaQWKTVmxHjeFv/DnXVC/+agHCklYWA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-dynamic-import@7.2.0", "@babel/plugin-syntax-dynamic-import@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.2.0.tgz#69c159ffaf4998122161ad8ebc5e6d1f55df8612" - integrity sha512-mVxuJ0YroI/h/tbFTPGZR8cv6ai+STMKNBq0f8hFxsxWjl94qqhsb+wXbpNMDPU3cfR1TIsVFzU3nXyZMqyK4w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-flow@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.2.0.tgz#a765f061f803bc48f240c26f8747faf97c26bf7c" - integrity sha512-r6YMuZDWLtLlu0kqIim5o/3TNRAlWb073HwT3e2nKf9I8IIvOggPrnILYPsrrKilmn/mYEMCf/Z07w3yQJF6dg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-json-strings@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.2.0.tgz#72bd13f6ffe1d25938129d2a186b11fd62951470" - integrity 
sha512-5UGYnMSLRE1dqqZwug+1LISpA403HzlSfsg6P9VXU6TBjcSHeNlw4DxDx7LgpF+iKZoOG/+uzqoRHTdcUpiZNg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-jsx@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.2.0.tgz#0b85a3b4bc7cdf4cc4b8bf236335b907ca22e7c7" - integrity sha512-VyN4QANJkRW6lDBmENzRszvZf3/4AXaj9YR7GwrWeeN9tEBPuXbmDYVU9bYBN0D70zCWVwUy0HWq2553VCb6Hw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-object-rest-spread@^7.0.0", "@babel/plugin-syntax-object-rest-spread@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.2.0.tgz#3b7a3e733510c57e820b9142a6579ac8b0dfad2e" - integrity sha512-t0JKGgqk2We+9may3t0xDdmneaXmyxq0xieYcKHxIsrJO64n1OiMWNUtc5gQK1PA0NpdCRrtZp4z+IUaKugrSA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-optional-catch-binding@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.2.0.tgz#a94013d6eda8908dfe6a477e7f9eda85656ecf5c" - integrity sha512-bDe4xKNhb0LI7IvZHiA13kff0KEfaGX/Hv4lMA9+7TEc63hMNvfKo6ZFpXhKuEp+II/q35Gc4NoMeDZyaUbj9w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-syntax-typescript@^7.2.0": - version "7.3.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.3.3.tgz#a7cc3f66119a9f7ebe2de5383cce193473d65991" - integrity sha512-dGwbSMA1YhVS8+31CnPR7LB4pcbrzcV99wQzby4uAfrkZPYZlQ7ImwdpzLqi6Z6IL02b8IAL379CaMwo0x5Lag== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-arrow-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.2.0.tgz#9aeafbe4d6ffc6563bf8f8372091628f00779550" - integrity sha512-ER77Cax1+8/8jCB9fo4Ud161OZzWN5qawi4GusDuRLcDbDG+bIGYY20zb2dfAFdTRGzrfq2xZPvF0R64EHnimg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-async-to-generator@^7.4.0", "@babel/plugin-transform-async-to-generator@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.5.0.tgz#89a3848a0166623b5bc481164b5936ab947e887e" - integrity sha512-mqvkzwIGkq0bEF1zLRRiTdjfomZJDV33AH3oQzHVGkI2VzEmXLpKKOBvEVaFZBJdN0XTyH38s9j/Kiqr68dggg== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-remap-async-to-generator" "^7.1.0" - -"@babel/plugin-transform-block-scoped-functions@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.2.0.tgz#5d3cc11e8d5ddd752aa64c9148d0db6cb79fd190" - integrity sha512-ntQPR6q1/NKuphly49+QiQiTN0O63uOwjdD6dhIjSWBI5xlrbUFh720TIpzBhpnrLfv2tNH/BXvLIab1+BAI0w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-block-scoping@^7.4.0", "@babel/plugin-transform-block-scoping@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.5.5.tgz#a35f395e5402822f10d2119f6f8e045e3639a2ce" - integrity sha512-82A3CLRRdYubkG85lKwhZB0WZoHxLGsJdux/cOVaJCJpvYFl1LVzAIFyRsa7CvXqW8rBM4Zf3Bfn8PHt5DP0Sg== - dependencies: - 
"@babel/helper-plugin-utils" "^7.0.0" - lodash "^4.17.13" - -"@babel/plugin-transform-classes@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.4.3.tgz#adc7a1137ab4287a555d429cc56ecde8f40c062c" - integrity sha512-PUaIKyFUDtG6jF5DUJOfkBdwAS/kFFV3XFk7Nn0a6vR7ZT8jYw5cGtIlat77wcnd0C6ViGqo/wyNf4ZHytF/nQ== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-define-map" "^7.4.0" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.4.0" - "@babel/helper-split-export-declaration" "^7.4.0" - globals "^11.1.0" - -"@babel/plugin-transform-classes@^7.4.3", "@babel/plugin-transform-classes@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.5.5.tgz#d094299d9bd680a14a2a0edae38305ad60fb4de9" - integrity sha512-U2htCNK/6e9K7jGyJ++1p5XRU+LJjrwtoiVn9SzRlDT2KubcZ11OOwy3s24TjHxPgxNwonCYP7U2K51uVYCMDg== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-define-map" "^7.5.5" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-optimise-call-expression" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.5.5" - "@babel/helper-split-export-declaration" "^7.4.4" - globals "^11.1.0" - -"@babel/plugin-transform-computed-properties@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.2.0.tgz#83a7df6a658865b1c8f641d510c6f3af220216da" - integrity sha512-kP/drqTxY6Xt3NNpKiMomfgkNn4o7+vKxK2DDKcBG9sHj51vHqMBGy8wbDS/J4lMxnqs153/T3+DmCEAkC5cpA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-destructuring@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.4.3.tgz#1a95f5ca2bf2f91ef0648d5de38a8d472da4350f" - integrity sha512-rVTLLZpydDFDyN4qnXdzwoVpk1oaXHIvPEOkOLyr88o7oHxVc/LyrnDx+amuBWGOwUb7D1s/uLsKBNTx08htZg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-destructuring@^7.4.3", "@babel/plugin-transform-destructuring@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.5.0.tgz#f6c09fdfe3f94516ff074fe877db7bc9ef05855a" - integrity sha512-YbYgbd3TryYYLGyC7ZR+Tq8H/+bCmwoaxHfJHupom5ECstzbRLTch6gOQbhEY9Z4hiCNHEURgq06ykFv9JZ/QQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-dotall-regex@^7.4.3", "@babel/plugin-transform-dotall-regex@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.4.4.tgz#361a148bc951444312c69446d76ed1ea8e4450c3" - integrity sha512-P05YEhRc2h53lZDjRPk/OektxCVevFzZs2Gfjd545Wde3k+yFDbXORgl2e0xpbq8mLcKJ7Idss4fAg0zORN/zg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.4.4" - regexpu-core "^4.5.4" - -"@babel/plugin-transform-duplicate-keys@^7.2.0", "@babel/plugin-transform-duplicate-keys@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.5.0.tgz#c5dbf5106bf84cdf691222c0974c12b1df931853" - integrity 
sha512-igcziksHizyQPlX9gfSjHkE2wmoCH3evvD2qR5w29/Dk0SMKE/eOI7f1HhBdNhR/zxJDqrgpoDTq5YSLH/XMsQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-exponentiation-operator@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.2.0.tgz#a63868289e5b4007f7054d46491af51435766008" - integrity sha512-umh4hR6N7mu4Elq9GG8TOu9M0bakvlsREEC+ialrQN6ABS4oDQ69qJv1VtR3uxlKMCQMCvzk7vr17RHKcjx68A== - dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-flow-strip-types@7.4.0": - version "7.4.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.4.0.tgz#f3c59eecff68c99b9c96eaafe4fe9d1fa8947138" - integrity sha512-C4ZVNejHnfB22vI2TYN4RUp2oCmq6cSEAg4RygSvYZUECRqUu9O4PMEMNJ4wsemaRGg27BbgYctG4BZh+AgIHw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-flow" "^7.2.0" - -"@babel/plugin-transform-for-of@^7.4.3", "@babel/plugin-transform-for-of@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.4.4.tgz#0267fc735e24c808ba173866c6c4d1440fc3c556" - integrity sha512-9T/5Dlr14Z9TIEXLXkt8T1DU7F24cbhwhMNUziN3hB1AXoZcdzPcTiKGRn/6iOymDqtTKWnr/BtRKN9JwbKtdQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-function-name@^7.4.3", "@babel/plugin-transform-function-name@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.4.4.tgz#e1436116abb0610c2259094848754ac5230922ad" - integrity sha512-iU9pv7U+2jC9ANQkKeNF6DrPy4GBa4NWQtl6dHB4Pb3izX2JOEvDTFarlNsBj/63ZEzNNIAMs3Qw4fNCcSOXJA== - dependencies: - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-literals@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.2.0.tgz#690353e81f9267dad4fd8cfd77eafa86aba53ea1" - integrity sha512-2ThDhm4lI4oV7fVQ6pNNK+sx+c/GM5/SaML0w/r4ZB7sAneD/piDJtwdKlNckXeyGK7wlwg2E2w33C/Hh+VFCg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-member-expression-literals@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.2.0.tgz#fa10aa5c58a2cb6afcf2c9ffa8cb4d8b3d489a2d" - integrity sha512-HiU3zKkSU6scTidmnFJ0bMX8hz5ixC93b4MHMiYebmk2lUVNGOboPsqQvx5LzooihijUoLR/v7Nc1rbBtnc7FA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-modules-amd@^7.2.0", "@babel/plugin-transform-modules-amd@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.5.0.tgz#ef00435d46da0a5961aa728a1d2ecff063e4fb91" - integrity sha512-n20UsQMKnWrltocZZm24cRURxQnWIvsABPJlw/fvoy9c6AgHZzoelAIzajDHAQrDpuKFFPPcFGd7ChsYuIUMpg== - dependencies: - "@babel/helper-module-transforms" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" - babel-plugin-dynamic-import-node "^2.3.0" - -"@babel/plugin-transform-modules-commonjs@^7.4.3", "@babel/plugin-transform-modules-commonjs@^7.5.0": - version "7.5.0" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.5.0.tgz#425127e6045231360858eeaa47a71d75eded7a74" - integrity sha512-xmHq0B+ytyrWJvQTc5OWAC4ii6Dhr0s22STOoydokG51JjWhyYo5mRPXoi+ZmtHQhZZwuXNN+GG5jy5UZZJxIQ== - dependencies: - "@babel/helper-module-transforms" "^7.4.4" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-simple-access" "^7.1.0" - babel-plugin-dynamic-import-node "^2.3.0" - -"@babel/plugin-transform-modules-systemjs@^7.4.0", "@babel/plugin-transform-modules-systemjs@^7.5.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.5.0.tgz#e75266a13ef94202db2a0620977756f51d52d249" - integrity sha512-Q2m56tyoQWmuNGxEtUyeEkm6qJYFqs4c+XyXH5RAuYxObRNz9Zgj/1g2GMnjYp2EUyEy7YTrxliGCXzecl/vJg== - dependencies: - "@babel/helper-hoist-variables" "^7.4.4" - "@babel/helper-plugin-utils" "^7.0.0" - babel-plugin-dynamic-import-node "^2.3.0" - -"@babel/plugin-transform-modules-umd@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.2.0.tgz#7678ce75169f0877b8eb2235538c074268dd01ae" - integrity sha512-BV3bw6MyUH1iIsGhXlOK6sXhmSarZjtJ/vMiD9dNmpY8QXFFQTj+6v92pcfy1iqa8DeAfJFwoxcrS/TUZda6sw== - dependencies: - "@babel/helper-module-transforms" "^7.1.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-named-capturing-groups-regex@^7.4.2", "@babel/plugin-transform-named-capturing-groups-regex@^7.4.5": - version "7.4.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.4.5.tgz#9d269fd28a370258199b4294736813a60bbdd106" - integrity sha512-z7+2IsWafTBbjNsOxU/Iv5CvTJlr5w4+HGu1HovKYTtgJ362f7kBcQglkfmlspKKZ3bgrbSGvLfNx++ZJgCWsg== - dependencies: - regexp-tree "^0.1.6" - -"@babel/plugin-transform-new-target@^7.4.0", "@babel/plugin-transform-new-target@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.4.4.tgz#18d120438b0cc9ee95a47f2c72bc9768fbed60a5" - integrity sha512-r1z3T2DNGQwwe2vPGZMBNjioT2scgWzK9BCnDEh+46z8EEwXBq24uRzd65I7pjtugzPSj921aM15RpESgzsSuA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-object-super@^7.2.0", "@babel/plugin-transform-object-super@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.5.5.tgz#c70021df834073c65eb613b8679cc4a381d1a9f9" - integrity sha512-un1zJQAhSosGFBduPgN/YFNvWVpRuHKU7IHBglLoLZsGmruJPOo6pbInneflUdmq7YvSVqhpPs5zdBvLnteltQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-replace-supers" "^7.5.5" - -"@babel/plugin-transform-parameters@^7.4.3", "@babel/plugin-transform-parameters@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.4.4.tgz#7556cf03f318bd2719fe4c922d2d808be5571e16" - integrity sha512-oMh5DUO1V63nZcu/ZVLQFqiihBGo4OpxJxR1otF50GMeCLiRx5nUdtokd+u9SuVJrvvuIh9OosRFPP4pIPnwmw== - dependencies: - "@babel/helper-call-delegate" "^7.4.4" - "@babel/helper-get-function-arity" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-property-literals@^7.2.0": - version "7.2.0" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.2.0.tgz#03e33f653f5b25c4eb572c98b9485055b389e905" - integrity sha512-9q7Dbk4RhgcLp8ebduOpCbtjh7C0itoLYHXd9ueASKAG/is5PQtMR5VJGka9NKqGhYEGn5ITahd4h9QeBMylWQ== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-react-constant-elements@7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.2.0.tgz#ed602dc2d8bff2f0cb1a5ce29263dbdec40779f7" - integrity sha512-YYQFg6giRFMsZPKUM9v+VcHOdfSQdz9jHCx3akAi3UYgyjndmdYGSXylQ/V+HswQt4fL8IklchD9HTsaOCrWQQ== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-react-constant-elements@^7.0.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.5.0.tgz#4d6ae4033bc38f8a65dfca2b6235c44522a422fc" - integrity sha512-c5Ba8cpybZFp1Izkf2sWGuNjOxoQ32tFgBvvYvwGhi4+9f6vGiSK9Gex4uVuO/Va6YJFu41aAh1MzMjUWkp0IQ== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-react-display-name@7.2.0", "@babel/plugin-transform-react-display-name@^7.0.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.2.0.tgz#ebfaed87834ce8dc4279609a4f0c324c156e3eb0" - integrity sha512-Htf/tPa5haZvRMiNSQSFifK12gtr/8vwfr+A9y69uF0QcU77AVu4K7MiHEkTxF7lQoHOL0F9ErqgfNEAKgXj7A== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-react-jsx-self@^7.0.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.2.0.tgz#461e21ad9478f1031dd5e276108d027f1b5240ba" - integrity sha512-v6S5L/myicZEy+jr6ielB0OR8h+EH/1QFx/YJ7c7Ua+7lqsjj/vW6fD5FR9hB/6y7mGbfT4vAURn3xqBxsUcdg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-jsx" "^7.2.0" - -"@babel/plugin-transform-react-jsx-source@^7.0.0": - version "7.5.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.5.0.tgz#583b10c49cf057e237085bcbd8cc960bd83bd96b" - integrity sha512-58Q+Jsy4IDCZx7kqEZuSDdam/1oW8OdDX8f+Loo6xyxdfg1yF0GE2XNJQSTZCaMol93+FBzpWiPEwtbMloAcPg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-jsx" "^7.2.0" - -"@babel/plugin-transform-react-jsx@^7.0.0": - version "7.3.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.3.0.tgz#f2cab99026631c767e2745a5368b331cfe8f5290" - integrity sha512-a/+aRb7R06WcKvQLOu4/TpjKOdvVEKRLWFpKcNuHhiREPgGRB4TQJxq07+EZLS8LFVYpfq1a5lDUnuMdcCpBKg== - dependencies: - "@babel/helper-builder-react-jsx" "^7.3.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-jsx" "^7.2.0" - -"@babel/plugin-transform-regenerator@^7.4.3", "@babel/plugin-transform-regenerator@^7.4.5": - version "7.4.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.4.5.tgz#629dc82512c55cee01341fb27bdfcb210354680f" - integrity sha512-gBKRh5qAaCWntnd09S8QC7r3auLCqq5DI6O0DlfoyDjslSBVqBibrMdsqO+Uhmx3+BlOmE/Kw1HFxmGbv0N9dA== - dependencies: - regenerator-transform "^0.14.0" - 
-"@babel/plugin-transform-reserved-words@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.2.0.tgz#4792af87c998a49367597d07fedf02636d2e1634" - integrity sha512-fz43fqW8E1tAB3DKF19/vxbpib1fuyCwSPE418ge5ZxILnBhWyhtPgz8eh1RCGGJlwvksHkyxMxh0eenFi+kFw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-runtime@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.4.3.tgz#4d6691690ecdc9f5cb8c3ab170a1576c1f556371" - integrity sha512-7Q61bU+uEI7bCUFReT1NKn7/X6sDQsZ7wL1sJ9IYMAO7cI+eg6x9re1cEw2fCRMbbTVyoeUKWSV1M6azEfKCfg== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - resolve "^1.8.1" - semver "^5.5.1" - -"@babel/plugin-transform-shorthand-properties@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.2.0.tgz#6333aee2f8d6ee7e28615457298934a3b46198f0" - integrity sha512-QP4eUM83ha9zmYtpbnyjTLAGKQritA5XW/iG9cjtuOI8s1RuL/3V6a3DeSHfKutJQ+ayUfeZJPcnCYEQzaPQqg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-spread@^7.2.0": - version "7.2.2" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.2.2.tgz#3103a9abe22f742b6d406ecd3cd49b774919b406" - integrity sha512-KWfky/58vubwtS0hLqEnrWJjsMGaOeSBn90Ezn5Jeg9Z8KKHmELbP1yGylMlm5N6TPKeY9A2+UaSYLdxahg01w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-sticky-regex@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.2.0.tgz#a1e454b5995560a9c1e0d537dfc15061fd2687e1" - integrity sha512-KKYCoGaRAf+ckH8gEL3JHUaFVyNHKe3ASNsZ+AlktgHevvxGigoIttrEJb8iKN03Q7Eazlv1s6cx2B2cQ3Jabw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.0.0" - -"@babel/plugin-transform-template-literals@^7.2.0", "@babel/plugin-transform-template-literals@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.4.4.tgz#9d28fea7bbce637fb7612a0750989d8321d4bcb0" - integrity sha512-mQrEC4TWkhLN0z8ygIvEL9ZEToPhG5K7KDW3pzGqOfIGZ28Jb0POUkeWcoz8HnHvhFy6dwAT1j8OzqN8s804+g== - dependencies: - "@babel/helper-annotate-as-pure" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-typeof-symbol@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.2.0.tgz#117d2bcec2fbf64b4b59d1f9819894682d29f2b2" - integrity sha512-2LNhETWYxiYysBtrBTqL8+La0jIoQQnIScUJc74OYvUGRmkskNY4EzLCnjHBzdmb38wqtTaixpo1NctEcvMDZw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - -"@babel/plugin-transform-typescript@^7.3.2": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.5.5.tgz#6d862766f09b2da1cb1f7d505fe2aedab6b7d4b8" - integrity sha512-pehKf4m640myZu5B2ZviLaiBlxMCjSZ1qTEO459AXKX5GnPueyulJeCqZFs1nz/Ya2dDzXQ1NxZ/kKNWyD4h6w== - dependencies: - "@babel/helper-create-class-features-plugin" "^7.5.5" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-syntax-typescript" "^7.2.0" - -"@babel/plugin-transform-unicode-regex@^7.4.3", 
"@babel/plugin-transform-unicode-regex@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.4.4.tgz#ab4634bb4f14d36728bf5978322b35587787970f" - integrity sha512-il+/XdNw01i93+M9J9u4T7/e/Ue/vWfNZE4IRUQjplu2Mqb/AFTDimkw2tdEdSH50wuQXZAbXSql0UphQke+vA== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/helper-regex" "^7.4.4" - regexpu-core "^4.5.4" - -"@babel/preset-env@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.4.3.tgz#e71e16e123dc0fbf65a52cbcbcefd072fbd02880" - integrity sha512-FYbZdV12yHdJU5Z70cEg0f6lvtpZ8jFSDakTm7WXeJbLXh4R0ztGEu/SW7G1nJ2ZvKwDhz8YrbA84eYyprmGqw== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-async-generator-functions" "^7.2.0" - "@babel/plugin-proposal-json-strings" "^7.2.0" - "@babel/plugin-proposal-object-rest-spread" "^7.4.3" - "@babel/plugin-proposal-optional-catch-binding" "^7.2.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.4.0" - "@babel/plugin-syntax-async-generators" "^7.2.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" - "@babel/plugin-transform-arrow-functions" "^7.2.0" - "@babel/plugin-transform-async-to-generator" "^7.4.0" - "@babel/plugin-transform-block-scoped-functions" "^7.2.0" - "@babel/plugin-transform-block-scoping" "^7.4.0" - "@babel/plugin-transform-classes" "^7.4.3" - "@babel/plugin-transform-computed-properties" "^7.2.0" - "@babel/plugin-transform-destructuring" "^7.4.3" - "@babel/plugin-transform-dotall-regex" "^7.4.3" - "@babel/plugin-transform-duplicate-keys" "^7.2.0" - "@babel/plugin-transform-exponentiation-operator" "^7.2.0" - "@babel/plugin-transform-for-of" "^7.4.3" - "@babel/plugin-transform-function-name" "^7.4.3" - "@babel/plugin-transform-literals" "^7.2.0" - "@babel/plugin-transform-member-expression-literals" "^7.2.0" - "@babel/plugin-transform-modules-amd" "^7.2.0" - "@babel/plugin-transform-modules-commonjs" "^7.4.3" - "@babel/plugin-transform-modules-systemjs" "^7.4.0" - "@babel/plugin-transform-modules-umd" "^7.2.0" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.4.2" - "@babel/plugin-transform-new-target" "^7.4.0" - "@babel/plugin-transform-object-super" "^7.2.0" - "@babel/plugin-transform-parameters" "^7.4.3" - "@babel/plugin-transform-property-literals" "^7.2.0" - "@babel/plugin-transform-regenerator" "^7.4.3" - "@babel/plugin-transform-reserved-words" "^7.2.0" - "@babel/plugin-transform-shorthand-properties" "^7.2.0" - "@babel/plugin-transform-spread" "^7.2.0" - "@babel/plugin-transform-sticky-regex" "^7.2.0" - "@babel/plugin-transform-template-literals" "^7.2.0" - "@babel/plugin-transform-typeof-symbol" "^7.2.0" - "@babel/plugin-transform-unicode-regex" "^7.4.3" - "@babel/types" "^7.4.0" - browserslist "^4.5.2" - core-js-compat "^3.0.0" - invariant "^2.2.2" - js-levenshtein "^1.1.3" - semver "^5.5.0" - -"@babel/preset-env@^7.1.6": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.5.5.tgz#bc470b53acaa48df4b8db24a570d6da1fef53c9a" - integrity sha512-GMZQka/+INwsMz1A5UEql8tG015h5j/qjptpKY2gJ7giy8ohzU710YciJB5rcKsWGWHiW3RUnHib0E5/m3Tp3A== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-proposal-async-generator-functions" "^7.2.0" - 
"@babel/plugin-proposal-dynamic-import" "^7.5.0" - "@babel/plugin-proposal-json-strings" "^7.2.0" - "@babel/plugin-proposal-object-rest-spread" "^7.5.5" - "@babel/plugin-proposal-optional-catch-binding" "^7.2.0" - "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" - "@babel/plugin-syntax-async-generators" "^7.2.0" - "@babel/plugin-syntax-dynamic-import" "^7.2.0" - "@babel/plugin-syntax-json-strings" "^7.2.0" - "@babel/plugin-syntax-object-rest-spread" "^7.2.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.2.0" - "@babel/plugin-transform-arrow-functions" "^7.2.0" - "@babel/plugin-transform-async-to-generator" "^7.5.0" - "@babel/plugin-transform-block-scoped-functions" "^7.2.0" - "@babel/plugin-transform-block-scoping" "^7.5.5" - "@babel/plugin-transform-classes" "^7.5.5" - "@babel/plugin-transform-computed-properties" "^7.2.0" - "@babel/plugin-transform-destructuring" "^7.5.0" - "@babel/plugin-transform-dotall-regex" "^7.4.4" - "@babel/plugin-transform-duplicate-keys" "^7.5.0" - "@babel/plugin-transform-exponentiation-operator" "^7.2.0" - "@babel/plugin-transform-for-of" "^7.4.4" - "@babel/plugin-transform-function-name" "^7.4.4" - "@babel/plugin-transform-literals" "^7.2.0" - "@babel/plugin-transform-member-expression-literals" "^7.2.0" - "@babel/plugin-transform-modules-amd" "^7.5.0" - "@babel/plugin-transform-modules-commonjs" "^7.5.0" - "@babel/plugin-transform-modules-systemjs" "^7.5.0" - "@babel/plugin-transform-modules-umd" "^7.2.0" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.4.5" - "@babel/plugin-transform-new-target" "^7.4.4" - "@babel/plugin-transform-object-super" "^7.5.5" - "@babel/plugin-transform-parameters" "^7.4.4" - "@babel/plugin-transform-property-literals" "^7.2.0" - "@babel/plugin-transform-regenerator" "^7.4.5" - "@babel/plugin-transform-reserved-words" "^7.2.0" - "@babel/plugin-transform-shorthand-properties" "^7.2.0" - "@babel/plugin-transform-spread" "^7.2.0" - "@babel/plugin-transform-sticky-regex" "^7.2.0" - "@babel/plugin-transform-template-literals" "^7.4.4" - "@babel/plugin-transform-typeof-symbol" "^7.2.0" - "@babel/plugin-transform-unicode-regex" "^7.4.4" - "@babel/types" "^7.5.5" - browserslist "^4.6.0" - core-js-compat "^3.1.1" - invariant "^2.2.2" - js-levenshtein "^1.1.3" - semver "^5.5.0" - -"@babel/preset-react@7.0.0", "@babel/preset-react@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.0.0.tgz#e86b4b3d99433c7b3e9e91747e2653958bc6b3c0" - integrity sha512-oayxyPS4Zj+hF6Et11BwuBkmpgT/zMxyuZgFrMeZID6Hdh3dGlk4sHCAhdBCpuCKW2ppBfl2uCCetlrUIJRY3w== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-transform-react-display-name" "^7.0.0" - "@babel/plugin-transform-react-jsx" "^7.0.0" - "@babel/plugin-transform-react-jsx-self" "^7.0.0" - "@babel/plugin-transform-react-jsx-source" "^7.0.0" - -"@babel/preset-typescript@7.3.3": - version "7.3.3" - resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.3.3.tgz#88669911053fa16b2b276ea2ede2ca603b3f307a" - integrity sha512-mzMVuIP4lqtn4du2ynEfdO0+RYcslwrZiJHXu4MGaC1ctJiW2fyaeDrtjJGs7R/KebZ1sgowcIoWf4uRpEfKEg== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - "@babel/plugin-transform-typescript" "^7.3.2" - -"@babel/runtime@7.4.3": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.4.3.tgz#79888e452034223ad9609187a0ad1fe0d2ad4bdc" - integrity sha512-9lsJwJLxDh/T3Q3SZszfWOTkk3pHbkmH+3KY+zwIDmsNlxsumuhS2TH3NIpktU4kNvfzy+k3eLT7aTJSPTo0OA== - 
dependencies: - regenerator-runtime "^0.13.2" - -"@babel/runtime@^7.0.0", "@babel/runtime@^7.1.2", "@babel/runtime@^7.3.4", "@babel/runtime@^7.4.0", "@babel/runtime@^7.4.2": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.5.5.tgz#74fba56d35efbeca444091c7850ccd494fd2f132" - integrity sha512-28QvEGyQyNkB0/m2B4FU7IEZGK2NUrcMtT6BZEFALTguLk+AUT6ofsHtPk5QyjAdUkpMJ+/Em+quwz4HOt30AQ== - dependencies: - regenerator-runtime "^0.13.2" - -"@babel/template@^7.1.0", "@babel/template@^7.4.0", "@babel/template@^7.4.4": - version "7.4.4" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.4.4.tgz#f4b88d1225689a08f5bc3a17483545be9e4ed237" - integrity sha512-CiGzLN9KgAvgZsnivND7rkA+AeJ9JB0ciPOD4U59GKbQP2iQl+olF1l76kJOupqidozfZ32ghwBEJDhnk9MEcw== - dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/parser" "^7.4.4" - "@babel/types" "^7.4.4" - -"@babel/traverse@^7.0.0", "@babel/traverse@^7.1.0", "@babel/traverse@^7.4.3", "@babel/traverse@^7.4.4", "@babel/traverse@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.5.5.tgz#f664f8f368ed32988cd648da9f72d5ca70f165bb" - integrity sha512-MqB0782whsfffYfSjH4TM+LMjrJnhCNEDMDIjeTpl+ASaUvxcjoiVCo/sM1GhS1pHOXYfWVCYneLjMckuUxDaQ== - dependencies: - "@babel/code-frame" "^7.5.5" - "@babel/generator" "^7.5.5" - "@babel/helper-function-name" "^7.1.0" - "@babel/helper-split-export-declaration" "^7.4.4" - "@babel/parser" "^7.5.5" - "@babel/types" "^7.5.5" - debug "^4.1.0" - globals "^11.1.0" - lodash "^4.17.13" - -"@babel/types@^7.0.0", "@babel/types@^7.2.0", "@babel/types@^7.3.0", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.5.5": - version "7.5.5" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.5.5.tgz#97b9f728e182785909aa4ab56264f090a028d18a" - integrity sha512-s63F9nJioLqOlW3UkyMd+BYhXt44YuaFm/VV0VwuteqjYwRrObkU7ra9pY4wAJR3oXi8hJrMcrcJdO/HH33vtw== - dependencies: - esutils "^2.0.2" - lodash "^4.17.13" - to-fast-properties "^2.0.0" - -"@cnakazawa/watch@^1.0.3": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.3.tgz#099139eaec7ebf07a27c1786a3ff64f39464d2ef" - integrity sha512-r5160ogAvGyHsal38Kux7YYtodEKOj89RGb28ht1jh3SJb08VwRwAKKJL0bGb04Zd/3r9FL3BFIc3bBidYffCA== - dependencies: - exec-sh "^0.3.2" - minimist "^1.2.0" - -"@csstools/convert-colors@^1.4.0": - version "1.4.0" - resolved "https://registry.yarnpkg.com/@csstools/convert-colors/-/convert-colors-1.4.0.tgz#ad495dc41b12e75d588c6db8b9834f08fa131eb7" - integrity sha512-5a6wqoJV/xEdbRNKVo6I4hO3VjyDq//8q2f9I6PBAvMesJHFauXDorcNCsr9RzvsZnaWi5NYCcfyqP1QeFHFbw== - -"@csstools/normalize.css@^9.0.1": - version "9.0.1" - resolved "https://registry.yarnpkg.com/@csstools/normalize.css/-/normalize.css-9.0.1.tgz#c27b391d8457d1e893f1eddeaf5e5412d12ffbb5" - integrity sha512-6It2EVfGskxZCQhuykrfnALg7oVeiI6KclWSmGDqB0AiInVrTGB9Jp9i4/Ad21u9Jde/voVQz6eFX/eSg/UsPA== - -"@hapi/address@2.x.x": - version "2.0.0" - resolved "https://registry.yarnpkg.com/@hapi/address/-/address-2.0.0.tgz#9f05469c88cb2fd3dcd624776b54ee95c312126a" - integrity sha512-mV6T0IYqb0xL1UALPFplXYQmR0twnXG0M6jUswpquqT2sD12BOiCiLy3EvMp/Fy7s3DZElC4/aPjEjo2jeZpvw== - -"@hapi/bourne@1.x.x": - version "1.3.2" - resolved "https://registry.yarnpkg.com/@hapi/bourne/-/bourne-1.3.2.tgz#0a7095adea067243ce3283e1b56b8a8f453b242a" - integrity sha512-1dVNHT76Uu5N3eJNTYcvxee+jzX4Z9lfciqRRHCU27ihbUcYi+iSc2iml5Ke1LXe1SyJCLA0+14Jh4tXJgOppA== - -"@hapi/hoek@8.x.x": - version "8.2.1" - resolved 
"https://registry.yarnpkg.com/@hapi/hoek/-/hoek-8.2.1.tgz#924af04cbb22e17359c620d2a9c946e63f58eb77" - integrity sha512-JPiBy+oSmsq3St7XlipfN5pNA6bDJ1kpa73PrK/zR29CVClDVqy04AanM/M/qx5bSF+I61DdCfAvRrujau+zRg== - -"@hapi/joi@^15.0.0": - version "15.1.1" - resolved "https://registry.yarnpkg.com/@hapi/joi/-/joi-15.1.1.tgz#c675b8a71296f02833f8d6d243b34c57b8ce19d7" - integrity sha512-entf8ZMOK8sc+8YfeOlM8pCfg3b5+WZIKBfUaaJT8UsjAAPjartzxIYm3TIbjvA4u+u++KbcXD38k682nVHDAQ== - dependencies: - "@hapi/address" "2.x.x" - "@hapi/bourne" "1.x.x" - "@hapi/hoek" "8.x.x" - "@hapi/topo" "3.x.x" - -"@hapi/topo@3.x.x": - version "3.1.3" - resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-3.1.3.tgz#c7a02e0d936596d29f184e6d7fdc07e8b5efce11" - integrity sha512-JmS9/vQK6dcUYn7wc2YZTqzIKubAQcJKu2KCKAru6es482U5RT5fP1EXCPtlXpiK7PR0On/kpQKI4fRKkzpZBQ== - dependencies: - "@hapi/hoek" "8.x.x" - -"@jest/console@^24.7.1", "@jest/console@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/console/-/console-24.9.0.tgz#79b1bc06fb74a8cfb01cbdedf945584b1b9707f0" - integrity sha512-Zuj6b8TnKXi3q4ymac8EQfc3ea/uhLeCGThFqXeC8H9/raaH8ARPUTdId+XyGd03Z4In0/VjD2OYFcBF09fNLQ== - dependencies: - "@jest/source-map" "^24.9.0" - chalk "^2.0.1" - slash "^2.0.0" - -"@jest/core@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/core/-/core-24.9.0.tgz#2ceccd0b93181f9c4850e74f2a9ad43d351369c4" - integrity sha512-Fogg3s4wlAr1VX7q+rhV9RVnUv5tD7VuWfYy1+whMiWUrvl7U3QJSJyWcDio9Lq2prqYsZaeTv2Rz24pWGkJ2A== - dependencies: - "@jest/console" "^24.7.1" - "@jest/reporters" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/transform" "^24.9.0" - "@jest/types" "^24.9.0" - ansi-escapes "^3.0.0" - chalk "^2.0.1" - exit "^0.1.2" - graceful-fs "^4.1.15" - jest-changed-files "^24.9.0" - jest-config "^24.9.0" - jest-haste-map "^24.9.0" - jest-message-util "^24.9.0" - jest-regex-util "^24.3.0" - jest-resolve "^24.9.0" - jest-resolve-dependencies "^24.9.0" - jest-runner "^24.9.0" - jest-runtime "^24.9.0" - jest-snapshot "^24.9.0" - jest-util "^24.9.0" - jest-validate "^24.9.0" - jest-watcher "^24.9.0" - micromatch "^3.1.10" - p-each-series "^1.0.0" - realpath-native "^1.1.0" - rimraf "^2.5.4" - slash "^2.0.0" - strip-ansi "^5.0.0" - -"@jest/environment@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-24.9.0.tgz#21e3afa2d65c0586cbd6cbefe208bafade44ab18" - integrity sha512-5A1QluTPhvdIPFYnO3sZC3smkNeXPVELz7ikPbhUj0bQjB07EoE9qtLrem14ZUYWdVayYbsjVwIiL4WBIMV4aQ== - dependencies: - "@jest/fake-timers" "^24.9.0" - "@jest/transform" "^24.9.0" - "@jest/types" "^24.9.0" - jest-mock "^24.9.0" - -"@jest/fake-timers@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-24.9.0.tgz#ba3e6bf0eecd09a636049896434d306636540c93" - integrity sha512-eWQcNa2YSwzXWIMC5KufBh3oWRIijrQFROsIqt6v/NS9Io/gknw1jsAC9c+ih/RQX4A3O7SeWAhQeN0goKhT9A== - dependencies: - "@jest/types" "^24.9.0" - jest-message-util "^24.9.0" - jest-mock "^24.9.0" - -"@jest/reporters@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-24.9.0.tgz#86660eff8e2b9661d042a8e98a028b8d631a5b43" - integrity sha512-mu4X0yjaHrffOsWmVLzitKmmmWSQ3GGuefgNscUSWNiUNcEOSEQk9k3pERKEQVBb0Cnn88+UESIsZEMH3o88Gw== - dependencies: - "@jest/environment" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/transform" "^24.9.0" - "@jest/types" "^24.9.0" - chalk "^2.0.1" - exit "^0.1.2" - glob "^7.1.2" - istanbul-lib-coverage "^2.0.2" - 
istanbul-lib-instrument "^3.0.1" - istanbul-lib-report "^2.0.4" - istanbul-lib-source-maps "^3.0.1" - istanbul-reports "^2.2.6" - jest-haste-map "^24.9.0" - jest-resolve "^24.9.0" - jest-runtime "^24.9.0" - jest-util "^24.9.0" - jest-worker "^24.6.0" - node-notifier "^5.4.2" - slash "^2.0.0" - source-map "^0.6.0" - string-length "^2.0.0" - -"@jest/source-map@^24.3.0", "@jest/source-map@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-24.9.0.tgz#0e263a94430be4b41da683ccc1e6bffe2a191714" - integrity sha512-/Xw7xGlsZb4MJzNDgB7PW5crou5JqWiBQaz6xyPd3ArOg2nfn/PunV8+olXbbEZzNl591o5rWKE9BRDaFAuIBg== - dependencies: - callsites "^3.0.0" - graceful-fs "^4.1.15" - source-map "^0.6.0" - -"@jest/test-result@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-24.9.0.tgz#11796e8aa9dbf88ea025757b3152595ad06ba0ca" - integrity sha512-XEFrHbBonBJ8dGp2JmF8kP/nQI/ImPpygKHwQ/SY+es59Z3L5PI4Qb9TQQMAEeYsThG1xF0k6tmG0tIKATNiiA== - dependencies: - "@jest/console" "^24.9.0" - "@jest/types" "^24.9.0" - "@types/istanbul-lib-coverage" "^2.0.0" - -"@jest/test-sequencer@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-24.9.0.tgz#f8f334f35b625a4f2f355f2fe7e6036dad2e6b31" - integrity sha512-6qqsU4o0kW1dvA95qfNog8v8gkRN9ph6Lz7r96IvZpHdNipP2cBcb07J1Z45mz/VIS01OHJ3pY8T5fUY38tg4A== - dependencies: - "@jest/test-result" "^24.9.0" - jest-haste-map "^24.9.0" - jest-runner "^24.9.0" - jest-runtime "^24.9.0" - -"@jest/transform@^24.7.1", "@jest/transform@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-24.9.0.tgz#4ae2768b296553fadab09e9ec119543c90b16c56" - integrity sha512-TcQUmyNRxV94S0QpMOnZl0++6RMiqpbH/ZMccFB/amku6Uwvyb1cjYX7xkp5nGNkbX4QPH/FcB6q1HBTHynLmQ== - dependencies: - "@babel/core" "^7.1.0" - "@jest/types" "^24.9.0" - babel-plugin-istanbul "^5.1.0" - chalk "^2.0.1" - convert-source-map "^1.4.0" - fast-json-stable-stringify "^2.0.0" - graceful-fs "^4.1.15" - jest-haste-map "^24.9.0" - jest-regex-util "^24.9.0" - jest-util "^24.9.0" - micromatch "^3.1.10" - pirates "^4.0.1" - realpath-native "^1.1.0" - slash "^2.0.0" - source-map "^0.6.1" - write-file-atomic "2.4.1" - -"@jest/types@^24.7.0", "@jest/types@^24.9.0": - version "24.9.0" - resolved "https://registry.yarnpkg.com/@jest/types/-/types-24.9.0.tgz#63cb26cb7500d069e5a389441a7c6ab5e909fc59" - integrity sha512-XKK7ze1apu5JWQ5eZjHITP66AX+QsLlbaJRBGYr8pNzwcAE2JVkwnf0yqjHTsDRcjR0mujy/NmZMXw5kl+kGBw== - dependencies: - "@types/istanbul-lib-coverage" "^2.0.0" - "@types/istanbul-reports" "^1.1.1" - "@types/yargs" "^13.0.0" - -"@mrmlnc/readdir-enhanced@^2.2.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz#524af240d1a360527b730475ecfa1344aa540dde" - integrity sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g== - dependencies: - call-me-maybe "^1.0.1" - glob-to-regexp "^0.3.0" - -"@nodelib/fs.stat@^1.1.2": - version "1.1.3" - resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-1.1.3.tgz#2b5a3ab3f918cca48a8c754c08168e3f03eba61b" - integrity sha512-shAmDyaQC4H92APFoIaVDHCx5bStIocgvbwQyxPRrbUY20V1EYTbSDchWbuwlMG3V17cprZhA6+78JfB+3DTPw== - -"@svgr/babel-plugin-add-jsx-attribute@^4.2.0": - version "4.2.0" - resolved 
"https://registry.yarnpkg.com/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-4.2.0.tgz#dadcb6218503532d6884b210e7f3c502caaa44b1" - integrity sha512-j7KnilGyZzYr/jhcrSYS3FGWMZVaqyCG0vzMCwzvei0coIkczuYMcniK07nI0aHJINciujjH11T72ICW5eL5Ig== - -"@svgr/babel-plugin-remove-jsx-attribute@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-4.2.0.tgz#297550b9a8c0c7337bea12bdfc8a80bb66f85abc" - integrity sha512-3XHLtJ+HbRCH4n28S7y/yZoEQnRpl0tvTZQsHqvaeNXPra+6vE5tbRliH3ox1yZYPCxrlqaJT/Mg+75GpDKlvQ== - -"@svgr/babel-plugin-remove-jsx-empty-expression@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-4.2.0.tgz#c196302f3e68eab6a05e98af9ca8570bc13131c7" - integrity sha512-yTr2iLdf6oEuUE9MsRdvt0NmdpMBAkgK8Bjhl6epb+eQWk6abBaX3d65UZ3E3FWaOwePyUgNyNCMVG61gGCQ7w== - -"@svgr/babel-plugin-replace-jsx-attribute-value@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-4.2.0.tgz#310ec0775de808a6a2e4fd4268c245fd734c1165" - integrity sha512-U9m870Kqm0ko8beHawRXLGLvSi/ZMrl89gJ5BNcT452fAjtF2p4uRzXkdzvGJJJYBgx7BmqlDjBN/eCp5AAX2w== - -"@svgr/babel-plugin-svg-dynamic-title@^4.3.1": - version "4.3.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-4.3.1.tgz#646c2f5b5770c2fe318d6e51492344c3d62ddb63" - integrity sha512-p6z6JJroP989jHWcuraeWpzdejehTmLUpyC9smhTBWyPN0VVGe2phbYxpPTV7Vh8XzmFrcG55idrnfWn/2oQEw== - -"@svgr/babel-plugin-svg-em-dimensions@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-4.2.0.tgz#9a94791c9a288108d20a9d2cc64cac820f141391" - integrity sha512-C0Uy+BHolCHGOZ8Dnr1zXy/KgpBOkEUYY9kI/HseHVPeMbluaX3CijJr7D4C5uR8zrc1T64nnq/k63ydQuGt4w== - -"@svgr/babel-plugin-transform-react-native-svg@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-4.2.0.tgz#151487322843359a1ca86b21a3815fd21a88b717" - integrity sha512-7YvynOpZDpCOUoIVlaaOUU87J4Z6RdD6spYN4eUb5tfPoKGSF9OG2NuhgYnq4jSkAxcpMaXWPf1cePkzmqTPNw== - -"@svgr/babel-plugin-transform-svg-component@^4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-4.2.0.tgz#5f1e2f886b2c85c67e76da42f0f6be1b1767b697" - integrity sha512-hYfYuZhQPCBVotABsXKSCfel2slf/yvJY8heTVX1PCTaq/IgASq1IyxPPKJ0chWREEKewIU/JMSsIGBtK1KKxw== - -"@svgr/babel-preset@^4.3.1": - version "4.3.1" - resolved "https://registry.yarnpkg.com/@svgr/babel-preset/-/babel-preset-4.3.1.tgz#62ffcb85d756580e8ce608e9d2ac3b9063be9e28" - integrity sha512-rPFKLmyhlh6oeBv3j2vEAj2nd2QbWqpoJLKzBLjwQVt+d9aeXajVaPNEqrES2spjXKR4OxfgSs7U0NtmAEkr0Q== - dependencies: - "@svgr/babel-plugin-add-jsx-attribute" "^4.2.0" - "@svgr/babel-plugin-remove-jsx-attribute" "^4.2.0" - "@svgr/babel-plugin-remove-jsx-empty-expression" "^4.2.0" - "@svgr/babel-plugin-replace-jsx-attribute-value" "^4.2.0" - "@svgr/babel-plugin-svg-dynamic-title" "^4.3.1" - "@svgr/babel-plugin-svg-em-dimensions" "^4.2.0" - "@svgr/babel-plugin-transform-react-native-svg" "^4.2.0" - "@svgr/babel-plugin-transform-svg-component" "^4.2.0" - -"@svgr/core@^4.1.0": - version "4.3.2" - resolved 
"https://registry.yarnpkg.com/@svgr/core/-/core-4.3.2.tgz#939c89be670ad79b762f4c063f213f0e02535f2e" - integrity sha512-N+tP5CLFd1hP9RpO83QJPZY3NL8AtrdqNbuhRgBkjE/49RnMrrRsFm1wY8pueUfAGvzn6tSXUq29o6ah8RuR5w== - dependencies: - "@svgr/plugin-jsx" "^4.3.2" - camelcase "^5.3.1" - cosmiconfig "^5.2.1" - -"@svgr/hast-util-to-babel-ast@^4.3.2": - version "4.3.2" - resolved "https://registry.yarnpkg.com/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-4.3.2.tgz#1d5a082f7b929ef8f1f578950238f630e14532b8" - integrity sha512-JioXclZGhFIDL3ddn4Kiq8qEqYM2PyDKV0aYno8+IXTLuYt6TOgHUbUAAFvqtb0Xn37NwP0BTHglejFoYr8RZg== - dependencies: - "@babel/types" "^7.4.4" - -"@svgr/plugin-jsx@^4.1.0", "@svgr/plugin-jsx@^4.3.2": - version "4.3.2" - resolved "https://registry.yarnpkg.com/@svgr/plugin-jsx/-/plugin-jsx-4.3.2.tgz#ce9ddafc8cdd74da884c9f7af014afcf37f93d3c" - integrity sha512-+1GW32RvmNmCsOkMoclA/TppNjHPLMnNZG3/Ecscxawp051XJ2MkO09Hn11VcotdC2EPrDfT8pELGRo+kbZ1Eg== - dependencies: - "@babel/core" "^7.4.5" - "@svgr/babel-preset" "^4.3.1" - "@svgr/hast-util-to-babel-ast" "^4.3.2" - svg-parser "^2.0.0" - -"@svgr/plugin-svgo@^4.0.3": - version "4.3.1" - resolved "https://registry.yarnpkg.com/@svgr/plugin-svgo/-/plugin-svgo-4.3.1.tgz#daac0a3d872e3f55935c6588dd370336865e9e32" - integrity sha512-PrMtEDUWjX3Ea65JsVCwTIXuSqa3CG9px+DluF1/eo9mlDrgrtFE7NE/DjdhjJgSM9wenlVBzkzneSIUgfUI/w== - dependencies: - cosmiconfig "^5.2.1" - merge-deep "^3.0.2" - svgo "^1.2.2" - -"@svgr/webpack@4.1.0": - version "4.1.0" - resolved "https://registry.yarnpkg.com/@svgr/webpack/-/webpack-4.1.0.tgz#20c88f32f731c7b1d4711045b2b993887d731c28" - integrity sha512-d09ehQWqLMywP/PT/5JvXwPskPK9QCXUjiSkAHehreB381qExXf5JFCBWhfEyNonRbkIneCeYM99w+Ud48YIQQ== - dependencies: - "@babel/core" "^7.1.6" - "@babel/plugin-transform-react-constant-elements" "^7.0.0" - "@babel/preset-env" "^7.1.6" - "@babel/preset-react" "^7.0.0" - "@svgr/core" "^4.1.0" - "@svgr/plugin-jsx" "^4.1.0" - "@svgr/plugin-svgo" "^4.0.3" - loader-utils "^1.1.0" - -"@types/babel__core@^7.1.0": - version "7.1.2" - resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.1.2.tgz#608c74f55928033fce18b99b213c16be4b3d114f" - integrity sha512-cfCCrFmiGY/yq0NuKNxIQvZFy9kY/1immpSpTngOnyIbD4+eJOG5mxphhHDv3CHL9GltO4GcKr54kGBg3RNdbg== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - "@types/babel__generator" "*" - "@types/babel__template" "*" - "@types/babel__traverse" "*" - -"@types/babel__generator@*": - version "7.0.2" - resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.0.2.tgz#d2112a6b21fad600d7674274293c85dce0cb47fc" - integrity sha512-NHcOfab3Zw4q5sEE2COkpfXjoE7o+PmqD9DQW4koUT3roNxwziUdXGnRndMat/LJNUtePwn1TlP4do3uoe3KZQ== - dependencies: - "@babel/types" "^7.0.0" - -"@types/babel__template@*": - version "7.0.2" - resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.0.2.tgz#4ff63d6b52eddac1de7b975a5223ed32ecea9307" - integrity sha512-/K6zCpeW7Imzgab2bLkLEbz0+1JlFSrUMdw7KoIIu+IUdu51GWaBZpd3y1VXGVXzynvGa4DaIaxNZHiON3GXUg== - dependencies: - "@babel/parser" "^7.1.0" - "@babel/types" "^7.0.0" - -"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": - version "7.0.7" - resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.0.7.tgz#2496e9ff56196cc1429c72034e07eab6121b6f3f" - integrity sha512-CeBpmX1J8kWLcDEnI3Cl2Eo6RfbGvzUctA+CjZUhOKDFbLfcr7fc4usEqLNWetrlJd7RhAkyYe2czXop4fICpw== - dependencies: - "@babel/types" "^7.3.0" - -"@types/history@*": - version "4.7.3" - 
resolved "https://registry.yarnpkg.com/@types/history/-/history-4.7.3.tgz#856c99cdc1551d22c22b18b5402719affec9839a" - integrity sha512-cS5owqtwzLN5kY+l+KgKdRJ/Cee8tlmQoGQuIE9tWnSmS3JMKzmxo2HIAk2wODMifGwO20d62xZQLYz+RLfXmw== - -"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.1.tgz#42995b446db9a48a11a07ec083499a860e9138ff" - integrity sha512-hRJD2ahnnpLgsj6KWMYSrmXkM3rm2Dl1qkx6IOFD5FnuNPXJIG5L0dhgKXCYTRMGzU4n0wImQ/xfmRc4POUFlg== - -"@types/istanbul-lib-report@*": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-1.1.1.tgz#e5471e7fa33c61358dd38426189c037a58433b8c" - integrity sha512-3BUTyMzbZa2DtDI2BkERNC6jJw2Mr2Y0oGI7mRxYNBPxppbtEK1F66u3bKwU2g+wxwWI7PAoRpJnOY1grJqzHg== - dependencies: - "@types/istanbul-lib-coverage" "*" - -"@types/istanbul-reports@^1.1.1": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-1.1.1.tgz#7a8cbf6a406f36c8add871625b278eaf0b0d255a" - integrity sha512-UpYjBi8xefVChsCoBpKShdxTllC9pwISirfoZsUa2AAdQg/Jd2KQGtSbw+ya7GPo7x/wAPlH6JBhKhAsXUEZNA== - dependencies: - "@types/istanbul-lib-coverage" "*" - "@types/istanbul-lib-report" "*" - -"@types/jest-diff@*": - version "20.0.1" - resolved "https://registry.yarnpkg.com/@types/jest-diff/-/jest-diff-20.0.1.tgz#35cc15b9c4f30a18ef21852e255fdb02f6d59b89" - integrity sha512-yALhelO3i0hqZwhjtcr6dYyaLoCHbAMshwtj6cGxTvHZAKXHsYGdff6E8EPw3xLKY0ELUTQ69Q1rQiJENnccMA== - -"@types/jest@24.0.12": - version "24.0.12" - resolved "https://registry.yarnpkg.com/@types/jest/-/jest-24.0.12.tgz#0553dd0a5ac744e7dc4e8700da6d3baedbde3e8f" - integrity sha512-60sjqMhat7i7XntZckcSGV8iREJyXXI6yFHZkSZvCPUeOnEJ/VP1rU/WpEWQ56mvoh8NhC+sfKAuJRTyGtCOow== - dependencies: - "@types/jest-diff" "*" - -"@types/node@11.13.9": - version "11.13.9" - resolved "https://registry.yarnpkg.com/@types/node/-/node-11.13.9.tgz#f80697caca7f7fb2526527a5c5a2743487f05ccc" - integrity sha512-NJ4yuEVw5podZbINp3tEqUIImMSAEHaCXRiWCf3KC32l6hIKf0iPJEh2uZdT0fELfRYk310yLmMXqy2leZQUbg== - -"@types/prop-types@*": - version "15.7.1" - resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.1.tgz#f1a11e7babb0c3cad68100be381d1e064c68f1f6" - integrity sha512-CFzn9idOEpHrgdw8JsoTkaDDyRWk1jrzIV8djzcgpq0y9tG4B4lFT+Nxh52DVpDXV+n4+NPNv7M1Dj5uMp6XFg== - -"@types/q@^1.5.1": - version "1.5.2" - resolved "https://registry.yarnpkg.com/@types/q/-/q-1.5.2.tgz#690a1475b84f2a884fd07cd797c00f5f31356ea8" - integrity sha512-ce5d3q03Ex0sy4R14722Rmt6MT07Ua+k4FwDfdcToYJcMKNtRVQvJ6JCAPdAmAnbRb6CsX6aYb9m96NGod9uTw== - -"@types/react-dom@16.8.4": - version "16.8.4" - resolved "https://registry.yarnpkg.com/@types/react-dom/-/react-dom-16.8.4.tgz#7fb7ba368857c7aa0f4e4511c4710ca2c5a12a88" - integrity sha512-eIRpEW73DCzPIMaNBDP5pPIpK1KXyZwNgfxiVagb5iGiz6da+9A5hslSX6GAQKdO7SayVCS/Fr2kjqprgAvkfA== - dependencies: - "@types/react" "*" - -"@types/react-router-dom@^4.3.3": - version "4.3.5" - resolved "https://registry.yarnpkg.com/@types/react-router-dom/-/react-router-dom-4.3.5.tgz#72f229967690c890d00f96e6b85e9ee5780db31f" - integrity sha512-eFajSUASYbPHg2BDM1G8Btx+YqGgvROPIg6sBhl3O4kbDdYXdFdfrgQFf/pcBuQVObjfT9AL/dd15jilR5DIEA== - dependencies: - "@types/history" "*" - "@types/react" "*" - "@types/react-router" "*" - -"@types/react-router@*": - version "5.0.3" - resolved 
"https://registry.yarnpkg.com/@types/react-router/-/react-router-5.0.3.tgz#855a1606e62de3f4d69ea34fb3c0e50e98e964d5" - integrity sha512-j2Gge5cvxca+5lK9wxovmGPgpVJMwjyu5lTA/Cd6fLGoPq7FXcUE1jFkEdxeyqGGz8VfHYSHCn5Lcn24BzaNKA== - dependencies: - "@types/history" "*" - "@types/react" "*" - -"@types/react-slick@^0.23.4": - version "0.23.4" - resolved "https://registry.yarnpkg.com/@types/react-slick/-/react-slick-0.23.4.tgz#c97e2a9e7e3d1933c68593b8e82752fab1e8ce53" - integrity sha512-vXoIy4GUfB7/YgqubR4H7RALo+pRdMYCeLgWwV3MPwl5pggTlEkFBTF19R7u+LJc85uMqC7RfsbkqPLMQ4ab+A== - dependencies: - "@types/react" "*" - -"@types/react@*": - version "16.9.2" - resolved "https://registry.yarnpkg.com/@types/react/-/react-16.9.2.tgz#6d1765431a1ad1877979013906731aae373de268" - integrity sha512-jYP2LWwlh+FTqGd9v7ynUKZzjj98T8x7Yclz479QdRhHfuW9yQ+0jjnD31eXSXutmBpppj5PYNLYLRfnZJvcfg== - dependencies: - "@types/prop-types" "*" - csstype "^2.2.0" - -"@types/react@16.8.15": - version "16.8.15" - resolved "https://registry.yarnpkg.com/@types/react/-/react-16.8.15.tgz#a76515fed5aa3e996603056f54427fec5f2a5122" - integrity sha512-dMhzw1rWK+wwJWvPp5Pk12ksSrm/z/C/+lOQbMZ7YfDQYnJ02bc0wtg4EJD9qrFhuxFrf/ywNgwTboucobJqQg== - dependencies: - "@types/prop-types" "*" - csstype "^2.2.0" - -"@types/stack-utils@^1.0.1": - version "1.0.1" - resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-1.0.1.tgz#0a851d3bd96498fa25c33ab7278ed3bd65f06c3e" - integrity sha512-l42BggppR6zLmpfU6fq9HEa2oGPEI8yrSPL3GITjfRInppYFahObbIQOQK3UGxEnyQpltZLaPe75046NOZQikw== - -"@types/yargs-parser@*": - version "13.0.0" - resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-13.0.0.tgz#453743c5bbf9f1bed61d959baab5b06be029b2d0" - integrity sha512-wBlsw+8n21e6eTd4yVv8YD/E3xq0O6nNnJIquutAsFGE7EyMKz7W6RNT6BRu1SmdgmlCZ9tb0X+j+D6HGr8pZw== - -"@types/yargs@^13.0.0": - version "13.0.2" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-13.0.2.tgz#a64674fc0149574ecd90ba746e932b5a5f7b3653" - integrity sha512-lwwgizwk/bIIU+3ELORkyuOgDjCh7zuWDFqRtPPhhVgq9N1F7CvLNKg1TX4f2duwtKQ0p044Au9r1PLIXHrIzQ== - dependencies: - "@types/yargs-parser" "*" - -"@typescript-eslint/eslint-plugin@1.6.0": - version "1.6.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-1.6.0.tgz#a5ff3128c692393fb16efa403ec7c8a5593dab0f" - integrity sha512-U224c29E2lo861TQZs6GSmyC0OYeRNg6bE9UVIiFBxN2MlA0nq2dCrgIVyyRbC05UOcrgf2Wk/CF2gGOPQKUSQ== - dependencies: - "@typescript-eslint/parser" "1.6.0" - "@typescript-eslint/typescript-estree" "1.6.0" - requireindex "^1.2.0" - tsutils "^3.7.0" - -"@typescript-eslint/parser@1.6.0": - version "1.6.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-1.6.0.tgz#f01189c8b90848e3b8e45a6cdad27870529d1804" - integrity sha512-VB9xmSbfafI+/kI4gUK3PfrkGmrJQfh0N4EScT1gZXSZyUxpsBirPL99EWZg9MmPG0pzq/gMtgkk7/rAHj4aQw== - dependencies: - "@typescript-eslint/typescript-estree" "1.6.0" - eslint-scope "^4.0.0" - eslint-visitor-keys "^1.0.0" - -"@typescript-eslint/typescript-estree@1.6.0": - version "1.6.0" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-1.6.0.tgz#6cf43a07fee08b8eb52e4513b428c8cdc9751ef0" - integrity sha512-A4CanUwfaG4oXobD5y7EXbsOHjCwn8tj1RDd820etpPAjH+Icjc2K9e/DQM1Hac5zH2BSy+u6bjvvF2wwREvYA== - dependencies: - lodash.unescape "4.0.1" - semver "5.5.0" - -"@webassemblyjs/ast@1.8.5": - version "1.8.5" - resolved 
"https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.8.5.tgz#51b1c5fe6576a34953bf4b253df9f0d490d9e359" - integrity sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ== - dependencies: - "@webassemblyjs/helper-module-context" "1.8.5" - "@webassemblyjs/helper-wasm-bytecode" "1.8.5" - "@webassemblyjs/wast-parser" "1.8.5" - -"@webassemblyjs/floating-point-hex-parser@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz#1ba926a2923613edce496fd5b02e8ce8a5f49721" - integrity sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ== - -"@webassemblyjs/helper-api-error@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz#c49dad22f645227c5edb610bdb9697f1aab721f7" - integrity sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA== - -"@webassemblyjs/helper-buffer@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz#fea93e429863dd5e4338555f42292385a653f204" - integrity sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q== - -"@webassemblyjs/helper-code-frame@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz#9a740ff48e3faa3022b1dff54423df9aa293c25e" - integrity sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ== - dependencies: - "@webassemblyjs/wast-printer" "1.8.5" - -"@webassemblyjs/helper-fsm@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz#ba0b7d3b3f7e4733da6059c9332275d860702452" - integrity sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow== - -"@webassemblyjs/helper-module-context@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz#def4b9927b0101dc8cbbd8d1edb5b7b9c82eb245" - integrity sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g== - dependencies: - "@webassemblyjs/ast" "1.8.5" - mamacro "^0.0.3" - -"@webassemblyjs/helper-wasm-bytecode@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz#537a750eddf5c1e932f3744206551c91c1b93e61" - integrity sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ== - -"@webassemblyjs/helper-wasm-section@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz#74ca6a6bcbe19e50a3b6b462847e69503e6bfcbf" - integrity sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-buffer" "1.8.5" - "@webassemblyjs/helper-wasm-bytecode" "1.8.5" - "@webassemblyjs/wasm-gen" "1.8.5" - -"@webassemblyjs/ieee754@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz#712329dbef240f36bf57bd2f7b8fb9bf4154421e" - integrity sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g== - dependencies: - "@xtuc/ieee754" "^1.2.0" - 
-"@webassemblyjs/leb128@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.8.5.tgz#044edeb34ea679f3e04cd4fd9824d5e35767ae10" - integrity sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A== - dependencies: - "@xtuc/long" "4.2.2" - -"@webassemblyjs/utf8@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.8.5.tgz#a8bf3b5d8ffe986c7c1e373ccbdc2a0915f0cedc" - integrity sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw== - -"@webassemblyjs/wasm-edit@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz#962da12aa5acc1c131c81c4232991c82ce56e01a" - integrity sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-buffer" "1.8.5" - "@webassemblyjs/helper-wasm-bytecode" "1.8.5" - "@webassemblyjs/helper-wasm-section" "1.8.5" - "@webassemblyjs/wasm-gen" "1.8.5" - "@webassemblyjs/wasm-opt" "1.8.5" - "@webassemblyjs/wasm-parser" "1.8.5" - "@webassemblyjs/wast-printer" "1.8.5" - -"@webassemblyjs/wasm-gen@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz#54840766c2c1002eb64ed1abe720aded714f98bc" - integrity sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-wasm-bytecode" "1.8.5" - "@webassemblyjs/ieee754" "1.8.5" - "@webassemblyjs/leb128" "1.8.5" - "@webassemblyjs/utf8" "1.8.5" - -"@webassemblyjs/wasm-opt@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz#b24d9f6ba50394af1349f510afa8ffcb8a63d264" - integrity sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-buffer" "1.8.5" - "@webassemblyjs/wasm-gen" "1.8.5" - "@webassemblyjs/wasm-parser" "1.8.5" - -"@webassemblyjs/wasm-parser@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz#21576f0ec88b91427357b8536383668ef7c66b8d" - integrity sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-api-error" "1.8.5" - "@webassemblyjs/helper-wasm-bytecode" "1.8.5" - "@webassemblyjs/ieee754" "1.8.5" - "@webassemblyjs/leb128" "1.8.5" - "@webassemblyjs/utf8" "1.8.5" - -"@webassemblyjs/wast-parser@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz#e10eecd542d0e7bd394f6827c49f3df6d4eefb8c" - integrity sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/floating-point-hex-parser" "1.8.5" - "@webassemblyjs/helper-api-error" "1.8.5" - "@webassemblyjs/helper-code-frame" "1.8.5" - "@webassemblyjs/helper-fsm" "1.8.5" - "@xtuc/long" "4.2.2" - -"@webassemblyjs/wast-printer@1.8.5": - version "1.8.5" - resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz#114bbc481fd10ca0e23b3560fa812748b0bae5bc" - integrity sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg== 
- dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/wast-parser" "1.8.5" - "@xtuc/long" "4.2.2" - -"@xtuc/ieee754@^1.2.0": - version "1.2.0" - resolved "https://registry.yarnpkg.com/@xtuc/ieee754/-/ieee754-1.2.0.tgz#eef014a3145ae477a1cbc00cd1e552336dceb790" - integrity sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA== - -"@xtuc/long@4.2.2": - version "4.2.2" - resolved "https://registry.yarnpkg.com/@xtuc/long/-/long-4.2.2.tgz#d291c6a4e97989b5c61d9acf396ae4fe133a718d" - integrity sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ== - -abab@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/abab/-/abab-2.0.0.tgz#aba0ab4c5eee2d4c79d3487d85450fb2376ebb0f" - integrity sha512-sY5AXXVZv4Y1VACTtR11UJCPHHudgY5i26Qj5TypE6DKlIApbwb5uqhXcJ5UUGbvZNRh7EeIoW+LrJumBsKp7w== - -abbrev@1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" - integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== - -accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7: - version "1.3.7" - resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.7.tgz#531bc726517a3b2b41f850021c6cc15eaab507cd" - integrity sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA== - dependencies: - mime-types "~2.1.24" - negotiator "0.6.2" - -acorn-dynamic-import@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/acorn-dynamic-import/-/acorn-dynamic-import-4.0.0.tgz#482210140582a36b83c3e342e1cfebcaa9240948" - integrity sha512-d3OEjQV4ROpoflsnUA8HozoIR504TFxNivYEUi6uwz0IYhBkTDXGuWlNdMtybRt3nqVx/L6XqMt0FxkXuWKZhw== - -acorn-globals@^4.1.0, acorn-globals@^4.3.0: - version "4.3.3" - resolved "https://registry.yarnpkg.com/acorn-globals/-/acorn-globals-4.3.3.tgz#a86f75b69680b8780d30edd21eee4e0ea170c05e" - integrity sha512-vkR40VwS2SYO98AIeFvzWWh+xyc2qi9s7OoXSFEGIP/rOJKzjnhykaZJNnHdoq4BL2gGxI5EZOU16z896EYnOQ== - dependencies: - acorn "^6.0.1" - acorn-walk "^6.0.1" - -acorn-jsx@^5.0.0: - version "5.0.2" - resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.0.2.tgz#84b68ea44b373c4f8686023a551f61a21b7c4a4f" - integrity sha512-tiNTrP1MP0QrChmD2DdupCr6HWSFeKVw5d/dHTu4Y7rkAkRhU/Dt7dphAfIUyxtHpl/eBVip5uTNSpQJHylpAw== - -acorn-walk@^6.0.1: - version "6.2.0" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-6.2.0.tgz#123cb8f3b84c2171f1f7fb252615b1c78a6b1a8c" - integrity sha512-7evsyfH1cLOCdAzZAd43Cic04yKydNx0cF+7tiA19p1XnLLPU4dpCQOqpjqwokFe//vS0QqfqqjCS2JkiIs0cA== - -acorn@^5.5.3: - version "5.7.3" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-5.7.3.tgz#67aa231bf8812974b85235a96771eb6bd07ea279" - integrity sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw== - -acorn@^6.0.1, acorn@^6.0.4, acorn@^6.0.5, acorn@^6.0.7: - version "6.3.0" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.3.0.tgz#0087509119ffa4fc0a0041d1e93a417e68cb856e" - integrity sha512-/czfa8BwS88b9gWQVhc8eknunSA2DoJpJyTQkhheIf5E48u1N0R4q/YxxsAeqRrmK9TQ/uYfgLDfZo91UlANIA== - -add-dom-event-listener@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/add-dom-event-listener/-/add-dom-event-listener-1.1.0.tgz#6a92db3a0dd0abc254e095c0f1dc14acbbaae310" - integrity sha512-WCxx1ixHT0GQU9hb0KI/mhgRQhnU+U3GvwY6ZvVjYq8rsihIGoaIOUbY0yMPBxLH5MDtr0kz3fisWGNcbWW7Jw== - dependencies: - object-assign "4.x" - -address@1.1.0: 
- version "1.1.0" - resolved "https://registry.yarnpkg.com/address/-/address-1.1.0.tgz#ef8e047847fcd2c5b6f50c16965f924fd99fe709" - integrity sha512-4diPfzWbLEIElVG4AnqP+00SULlPzNuyJFNnmMrLgyaxG6tZXJ1sn7mjBu4fHrJE+Yp/jgylOweJn2xsLMFggQ== - -address@^1.0.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/address/-/address-1.1.2.tgz#bf1116c9c758c51b7a933d296b72c221ed9428b6" - integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA== - -ajv-errors@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d" - integrity sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ== - -ajv-keywords@^3.1.0: - version "3.4.1" - resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.4.1.tgz#ef916e271c64ac12171fd8384eaae6b2345854da" - integrity sha512-RO1ibKvd27e6FEShVFfPALuHI3WjSVNeK5FIsmme/LYRNxjKuNj+Dt7bucLa6NdSv3JcVTyMlm9kGR84z1XpaQ== - -ajv@^6.1.0, ajv@^6.10.2, ajv@^6.5.5, ajv@^6.9.1: - version "6.10.2" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.10.2.tgz#d3cea04d6b017b2894ad69040fec8b623eb4bd52" - integrity sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw== - dependencies: - fast-deep-equal "^2.0.1" - fast-json-stable-stringify "^2.0.0" - json-schema-traverse "^0.4.1" - uri-js "^4.2.2" - -alphanum-sort@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/alphanum-sort/-/alphanum-sort-1.0.2.tgz#97a1119649b211ad33691d9f9f486a8ec9fbe0a3" - integrity sha1-l6ERlkmyEa0zaR2fn0hqjsn74KM= - -ansi-colors@^3.0.0: - version "3.2.4" - resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-3.2.4.tgz#e3a3da4bfbae6c86a9c285625de124a234026fbf" - integrity sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA== - -ansi-escapes@^3.0.0, ansi-escapes@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" - integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== - -ansi-html@0.0.7: - version "0.0.7" - resolved "https://registry.yarnpkg.com/ansi-html/-/ansi-html-0.0.7.tgz#813584021962a9e9e6fd039f940d12f56ca7859e" - integrity sha1-gTWEAhliqenm/QOflA0S9WynhZ4= - -ansi-regex@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" - integrity sha1-w7M6te42DYbg5ijwRorn7yfWVN8= - -ansi-regex@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-3.0.0.tgz#ed0317c322064f79466c02966bddb605ab37d998" - integrity sha1-7QMXwyIGT3lGbAKWa922Bas32Zg= - -ansi-regex@^4.0.0, ansi-regex@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" - integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== - -ansi-styles@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" - integrity sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4= - -ansi-styles@^3.2.0, ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity 
sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== - dependencies: - color-convert "^1.9.0" - -antd@^3.16.6: - version "3.22.2" - resolved "https://registry.yarnpkg.com/antd/-/antd-3.22.2.tgz#ae8279d940decf062a0708880bc4340783b7b270" - integrity sha512-IZz8yMUfi3qvVj0Y/zFMc759yOxzR0ihuCdrCh+3f/MgPVBrKR+u02/eJrDJBiSczrm4Qzb46NaNGQUR0GkxbA== - dependencies: - "@ant-design/create-react-context" "^0.2.4" - "@ant-design/icons" "~2.1.1" - "@ant-design/icons-react" "~2.0.1" - "@types/react-slick" "^0.23.4" - array-tree-filter "^2.1.0" - babel-runtime "6.x" - classnames "~2.2.6" - copy-to-clipboard "^3.2.0" - css-animation "^1.5.0" - dom-closest "^0.2.0" - enquire.js "^2.1.6" - lodash "^4.17.13" - moment "^2.24.0" - omit.js "^1.0.2" - prop-types "^15.7.2" - raf "^3.4.1" - rc-animate "^2.8.3" - rc-calendar "~9.15.5" - rc-cascader "~0.17.4" - rc-checkbox "~2.1.6" - rc-collapse "~1.11.3" - rc-dialog "~7.5.2" - rc-drawer "~2.0.1" - rc-dropdown "~2.4.1" - rc-editor-mention "^1.1.13" - rc-form "^2.4.5" - rc-input-number "~4.4.5" - rc-mentions "~0.4.0" - rc-menu "~7.4.23" - rc-notification "~3.3.1" - rc-pagination "~1.20.5" - rc-progress "~2.5.0" - rc-rate "~2.5.0" - rc-select "~9.2.0" - rc-slider "~8.6.11" - rc-steps "~3.5.0" - rc-switch "~1.9.0" - rc-table "~6.7.0" - rc-tabs "~9.6.4" - rc-time-picker "~3.7.1" - rc-tooltip "~3.7.3" - rc-tree "~2.1.0" - rc-tree-select "~2.9.1" - rc-trigger "^2.6.2" - rc-upload "~2.7.0" - rc-util "^4.10.0" - react-lazy-load "^3.0.13" - react-lifecycles-compat "^3.0.4" - react-slick "~0.25.2" - resize-observer-polyfill "^1.5.1" - shallowequal "^1.1.0" - warning "~4.0.3" - -anymatch@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" - integrity sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw== - dependencies: - micromatch "^3.1.4" - normalize-path "^2.1.1" - -aproba@^1.0.3, aproba@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a" - integrity sha512-Y9J6ZjXtoYh8RnXVCMOU/ttDmk1aBjunq9vO0ta5x85WDQiQfUF9sIPBITdbiiIVcBo03Hi3jMxigBtsddlXRw== - -are-we-there-yet@~1.1.2: - version "1.1.5" - resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.5.tgz#4b35c2944f062a8bfcda66410760350fe9ddfc21" - integrity sha512-5hYdAkZlcG8tOLujVDTgCT+uPX0VnpAH28gWsLfzpXYm7wP6mp5Q/gYyR7YQ0cKVJcXJnl3j2kpBan13PtQf6w== - dependencies: - delegates "^1.0.0" - readable-stream "^2.0.6" - -argparse@^1.0.7: - version "1.0.10" - resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" - integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== - dependencies: - sprintf-js "~1.0.2" - -aria-query@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/aria-query/-/aria-query-3.0.0.tgz#65b3fcc1ca1155a8c9ae64d6eee297f15d5133cc" - integrity sha1-ZbP8wcoRVajJrmTW7uKX8V1RM8w= - dependencies: - ast-types-flow "0.0.7" - commander "^2.11.0" - -arr-diff@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" - integrity sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA= - -arr-flatten@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.1.0.tgz#36048bbff4e7b47e136644316c99669ea5ae91f1" - integrity 
sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg== - -arr-union@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/arr-union/-/arr-union-3.1.0.tgz#e39b09aea9def866a8f206e288af63919bae39c4" - integrity sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ= - -array-equal@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93" - integrity sha1-jCpe8kcv2ep0KwTHenUJO6J1fJM= - -array-filter@~0.0.0: - version "0.0.1" - resolved "https://registry.yarnpkg.com/array-filter/-/array-filter-0.0.1.tgz#7da8cf2e26628ed732803581fd21f67cacd2eeec" - integrity sha1-fajPLiZijtcygDWB/SH2fKzS7uw= - -array-flatten@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" - integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= - -array-flatten@^2.1.0: - version "2.1.2" - resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-2.1.2.tgz#24ef80a28c1a893617e2149b0c6d0d788293b099" - integrity sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ== - -array-includes@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.0.3.tgz#184b48f62d92d7452bb31b323165c7f8bd02266d" - integrity sha1-GEtI9i2S10UrsxsyMWXH+L0CJm0= - dependencies: - define-properties "^1.1.2" - es-abstract "^1.7.0" - -array-map@~0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/array-map/-/array-map-0.0.0.tgz#88a2bab73d1cf7bcd5c1b118a003f66f665fa662" - integrity sha1-iKK6tz0c97zVwbEYoAP2b2ZfpmI= - -array-reduce@~0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/array-reduce/-/array-reduce-0.0.0.tgz#173899d3ffd1c7d9383e4479525dbe278cab5f2b" - integrity sha1-FziZ0//Rx9k4PkR5Ul2+J4yrXys= - -array-tree-filter@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz#873ac00fec83749f255ac8dd083814b4f6329190" - integrity sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw== - -array-union@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" - integrity sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk= - dependencies: - array-uniq "^1.0.1" - -array-uniq@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" - integrity sha1-r2rId6Jcx/dOBYiUdThY39sk/bY= - -array-unique@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428" - integrity sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg= - -arrify@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" - integrity sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0= - -asap@~2.0.3, asap@~2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" - integrity sha1-5QNHYR1+aQlDIIu9r+vLwvuGbUY= - -asn1.js@^4.0.0: - version "4.10.1" - resolved "https://registry.yarnpkg.com/asn1.js/-/asn1.js-4.10.1.tgz#b9c2bf5805f1e64aadeed6df3a2bfafb5a73f5a0" - integrity sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw== - dependencies: - bn.js "^4.0.0" - inherits "^2.0.1" - minimalistic-assert 
"^1.0.0" - -asn1@~0.2.3: - version "0.2.4" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" - integrity sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg== - dependencies: - safer-buffer "~2.1.0" - -assert-plus@1.0.0, assert-plus@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525" - integrity sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU= - -assert@^1.1.1: - version "1.5.0" - resolved "https://registry.yarnpkg.com/assert/-/assert-1.5.0.tgz#55c109aaf6e0aefdb3dc4b71240c70bf574b18eb" - integrity sha512-EDsgawzwoun2CZkCgtxJbv392v4nbk9XDD06zI+kQYoBM/3RBWLlEyJARDOmhAAosBjWACEkKL6S+lIZtcAubA== - dependencies: - object-assign "^4.1.1" - util "0.10.3" - -assign-symbols@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" - integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= - -ast-types-flow@0.0.7, ast-types-flow@^0.0.7: - version "0.0.7" - resolved "https://registry.yarnpkg.com/ast-types-flow/-/ast-types-flow-0.0.7.tgz#f70b735c6bca1a5c9c22d982c3e39e7feba3bdad" - integrity sha1-9wtzXGvKGlycItmCw+Oef+ujva0= - -astral-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-1.0.0.tgz#6c8c3fb827dd43ee3918f27b82782ab7658a6fd9" - integrity sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg== - -async-each@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/async-each/-/async-each-1.0.3.tgz#b727dbf87d7651602f06f4d4ac387f47d91b0cbf" - integrity sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ== - -async-limiter@~1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" - integrity sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== - -async-validator@~1.11.3: - version "1.11.5" - resolved "https://registry.yarnpkg.com/async-validator/-/async-validator-1.11.5.tgz#9d43cf49ef6bb76be5442388d19fb9a6e47597ea" - integrity sha512-XNtCsMAeAH1pdLMEg1z8/Bb3a8cdCbui9QbJATRFHHHW5kT6+NPI3zSVQUXgikTFITzsg+kYY5NTWhM2Orwt9w== - -async@^1.5.2: - version "1.5.2" - resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" - integrity sha1-7GphrlZIDAw8skHJVhjiCJL5Zyo= - -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= - -atob@^2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9" - integrity sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg== - -autoprefixer@^9.4.9: - version "9.6.1" - resolved "https://registry.yarnpkg.com/autoprefixer/-/autoprefixer-9.6.1.tgz#51967a02d2d2300bb01866c1611ec8348d355a47" - integrity sha512-aVo5WxR3VyvyJxcJC3h4FKfwCQvQWb1tSI5VHNibddCVWrcD1NvlxEweg3TSgiPztMnWfjpy2FURKA2kvDE+Tw== - dependencies: - browserslist "^4.6.3" - caniuse-lite "^1.0.30000980" - chalk "^2.4.2" - normalize-range "^0.1.2" - num2fraction "^1.2.2" - postcss "^7.0.17" - postcss-value-parser "^4.0.0" - -aws-sign2@~0.7.0: - version "0.7.0" - resolved 
"https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" - integrity sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg= - -aws4@^1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" - integrity sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ== - -axobject-query@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/axobject-query/-/axobject-query-2.0.2.tgz#ea187abe5b9002b377f925d8bf7d1c561adf38f9" - integrity sha512-MCeek8ZH7hKyO1rWUbKNQBbl4l2eY0ntk7OGi+q0RlafrCnfPxC06WZA+uebCfmYp4mNU9jRBP1AhGyf8+W3ww== - dependencies: - ast-types-flow "0.0.7" - -babel-code-frame@^6.22.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" - integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= - dependencies: - chalk "^1.1.3" - esutils "^2.0.2" - js-tokens "^3.0.2" - -babel-eslint@10.0.1: - version "10.0.1" - resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.0.1.tgz#919681dc099614cd7d31d45c8908695092a1faed" - integrity sha512-z7OT1iNV+TjOwHNLLyJk+HN+YVWX+CLE6fPD2SymJZOZQBs+QIexFjhm4keGTm8MW9xr4EC9Q0PbaLB24V5GoQ== - dependencies: - "@babel/code-frame" "^7.0.0" - "@babel/parser" "^7.0.0" - "@babel/traverse" "^7.0.0" - "@babel/types" "^7.0.0" - eslint-scope "3.7.1" - eslint-visitor-keys "^1.0.0" - -babel-extract-comments@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/babel-extract-comments/-/babel-extract-comments-1.0.0.tgz#0a2aedf81417ed391b85e18b4614e693a0351a21" - integrity sha512-qWWzi4TlddohA91bFwgt6zO/J0X+io7Qp184Fw0m2JYRSTZnJbFR8+07KmzudHCZgOiKRCrjhylwv9Xd8gfhVQ== - dependencies: - babylon "^6.18.0" - -babel-jest@24.7.1: - version "24.7.1" - resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-24.7.1.tgz#73902c9ff15a7dfbdc9994b0b17fcefd96042178" - integrity sha512-GPnLqfk8Mtt0i4OemjWkChi73A3ALs4w2/QbG64uAj8b5mmwzxc7jbJVRZt8NJkxi6FopVHog9S3xX6UJKb2qg== - dependencies: - "@jest/transform" "^24.7.1" - "@jest/types" "^24.7.0" - "@types/babel__core" "^7.1.0" - babel-plugin-istanbul "^5.1.0" - babel-preset-jest "^24.6.0" - chalk "^2.4.2" - slash "^2.0.0" - -babel-jest@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-24.9.0.tgz#3fc327cb8467b89d14d7bc70e315104a783ccd54" - integrity sha512-ntuddfyiN+EhMw58PTNL1ph4C9rECiQXjI4nMMBKBaNjXvqLdkXpPRcMSr4iyBrJg/+wz9brFUD6RhOAT6r4Iw== - dependencies: - "@jest/transform" "^24.9.0" - "@jest/types" "^24.9.0" - "@types/babel__core" "^7.1.0" - babel-plugin-istanbul "^5.1.0" - babel-preset-jest "^24.9.0" - chalk "^2.4.2" - slash "^2.0.0" - -babel-loader@8.0.5: - version "8.0.5" - resolved "https://registry.yarnpkg.com/babel-loader/-/babel-loader-8.0.5.tgz#225322d7509c2157655840bba52e46b6c2f2fe33" - integrity sha512-NTnHnVRd2JnRqPC0vW+iOQWU5pchDbYXsG2E6DMXEpMfUcQKclF9gmf3G3ZMhzG7IG9ji4coL0cm+FxeWxDpnw== - dependencies: - find-cache-dir "^2.0.0" - loader-utils "^1.0.2" - mkdirp "^0.5.1" - util.promisify "^1.0.0" - -babel-plugin-dynamic-import-node@2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.2.0.tgz#c0adfb07d95f4a4495e9aaac6ec386c4d7c2524e" - integrity sha512-fP899ELUnTaBcIzmrW7nniyqqdYWrWuJUyPWHxFa/c7r7hS6KC8FscNfLlBNIoPSc55kYMGEEKjPjJGCLbE1qA== - dependencies: - object.assign "^4.1.0" - -babel-plugin-dynamic-import-node@^2.3.0: - version "2.3.0" 
- resolved "https://registry.yarnpkg.com/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.0.tgz#f00f507bdaa3c3e3ff6e7e5e98d90a7acab96f7f" - integrity sha512-o6qFkpeQEBxcqt0XYlWzAVxNCSCZdUgcR8IRlhD/8DylxjjO4foPcvTW0GGKa/cVt3rvxZ7o5ippJ+/0nvLhlQ== - dependencies: - object.assign "^4.1.0" - -babel-plugin-import@^1.11.0: - version "1.12.1" - resolved "https://registry.yarnpkg.com/babel-plugin-import/-/babel-plugin-import-1.12.1.tgz#a63b0a6f8f7484db660c59665185aa3b0c2f9f3f" - integrity sha512-3BwVJFEByTUyqZWOxizr/YwYcqqre2EebmgSUtXyToJbHzJv6rTxA0LApDntvwERlmIvcM6lUktUN0snMTjOsA== - dependencies: - "@babel/helper-module-imports" "^7.0.0" - "@babel/runtime" "^7.0.0" - -babel-plugin-istanbul@^5.1.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-5.2.0.tgz#df4ade83d897a92df069c4d9a25cf2671293c854" - integrity sha512-5LphC0USA8t4i1zCtjbbNb6jJj/9+X6P37Qfirc/70EQ34xKlMW+a1RHGwxGI+SwWpNwZ27HqvzAobeqaXwiZw== - dependencies: - "@babel/helper-plugin-utils" "^7.0.0" - find-up "^3.0.0" - istanbul-lib-instrument "^3.3.0" - test-exclude "^5.2.3" - -babel-plugin-jest-hoist@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-24.9.0.tgz#4f837091eb407e01447c8843cbec546d0002d756" - integrity sha512-2EMA2P8Vp7lG0RAzr4HXqtYwacfMErOuv1U3wrvxHX6rD1sV6xS3WXG3r8TRQ2r6w8OhvSdWt+z41hQNwNm3Xw== - dependencies: - "@types/babel__traverse" "^7.0.6" - -babel-plugin-macros@2.5.1: - version "2.5.1" - resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.5.1.tgz#4a119ac2c2e19b458c259b9accd7ee34fd57ec6f" - integrity sha512-xN3KhAxPzsJ6OQTktCanNpIFnnMsCV+t8OloKxIL72D6+SUZYFn9qfklPgef5HyyDtzYZqqb+fs1S12+gQY82Q== - dependencies: - "@babel/runtime" "^7.4.2" - cosmiconfig "^5.2.0" - resolve "^1.10.0" - -babel-plugin-named-asset-import@^0.3.2: - version "0.3.3" - resolved "https://registry.yarnpkg.com/babel-plugin-named-asset-import/-/babel-plugin-named-asset-import-0.3.3.tgz#9ba2f3ac4dc78b042651654f07e847adfe50667c" - integrity sha512-1XDRysF4894BUdMChT+2HHbtJYiO7zx5Be7U6bT8dISy7OdyETMGIAQBMPQCsY1YRf0xcubwnKKaDr5bk15JTA== - -babel-plugin-syntax-object-rest-spread@^6.8.0: - version "6.13.0" - resolved "https://registry.yarnpkg.com/babel-plugin-syntax-object-rest-spread/-/babel-plugin-syntax-object-rest-spread-6.13.0.tgz#fd6536f2bce13836ffa3a5458c4903a597bb3bf5" - integrity sha1-/WU28rzhODb/o6VFjEkDpZe7O/U= - -babel-plugin-transform-object-rest-spread@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-object-rest-spread/-/babel-plugin-transform-object-rest-spread-6.26.0.tgz#0f36692d50fef6b7e2d4b3ac1478137a963b7b06" - integrity sha1-DzZpLVD+9rfi1LOsFHgTepY7ewY= - dependencies: - babel-plugin-syntax-object-rest-spread "^6.8.0" - babel-runtime "^6.26.0" - -babel-plugin-transform-react-remove-prop-types@0.4.24: - version "0.4.24" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-react-remove-prop-types/-/babel-plugin-transform-react-remove-prop-types-0.4.24.tgz#f2edaf9b4c6a5fbe5c1d678bfb531078c1555f3a" - integrity sha512-eqj0hVcJUR57/Ug2zE1Yswsw4LhuqqHhD+8v120T1cl3kjg76QwtyBrdIk4WVwK+lAhBJVYCd/v+4nc4y+8JsA== - -babel-preset-jest@^24.6.0, babel-preset-jest@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-24.9.0.tgz#192b521e2217fb1d1f67cf73f70c336650ad3cdc" - integrity 
sha512-izTUuhE4TMfTRPF92fFwD2QfdXaZW08qvWTFCI51V8rW5x00UuPgc3ajRoWofXOuxjfcOM5zzSYsQS3H8KGCAg== - dependencies: - "@babel/plugin-syntax-object-rest-spread" "^7.0.0" - babel-plugin-jest-hoist "^24.9.0" - -babel-preset-react-app@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/babel-preset-react-app/-/babel-preset-react-app-8.0.0.tgz#930b6e28cdcfdff97ddb8bef9226d504f244d326" - integrity sha512-6Dmj7e8l7eWE+R6sKKLRrGEQXMfcBqBYlphaAgT1ml8qT1NEP+CyTZyfjmgKGqHZfwH3RQCUOuP6y4mpGc7tgg== - dependencies: - "@babel/core" "7.4.3" - "@babel/plugin-proposal-class-properties" "7.4.0" - "@babel/plugin-proposal-decorators" "7.4.0" - "@babel/plugin-proposal-object-rest-spread" "7.4.3" - "@babel/plugin-syntax-dynamic-import" "7.2.0" - "@babel/plugin-transform-classes" "7.4.3" - "@babel/plugin-transform-destructuring" "7.4.3" - "@babel/plugin-transform-flow-strip-types" "7.4.0" - "@babel/plugin-transform-react-constant-elements" "7.2.0" - "@babel/plugin-transform-react-display-name" "7.2.0" - "@babel/plugin-transform-runtime" "7.4.3" - "@babel/preset-env" "7.4.3" - "@babel/preset-react" "7.0.0" - "@babel/preset-typescript" "7.3.3" - "@babel/runtime" "7.4.3" - babel-plugin-dynamic-import-node "2.2.0" - babel-plugin-macros "2.5.1" - babel-plugin-transform-react-remove-prop-types "0.4.24" - -babel-runtime@6.x, babel-runtime@^6.23.0, babel-runtime@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" - integrity sha1-llxwWGaOgrVde/4E/yM3vItWR/4= - dependencies: - core-js "^2.4.0" - regenerator-runtime "^0.11.0" - -babylon@^6.18.0: - version "6.18.0" - resolved "https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" - integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ== - -balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= - -base64-js@^1.0.2: - version "1.3.1" - resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.3.1.tgz#58ece8cb75dd07e71ed08c736abc5fac4dbf8df1" - integrity sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g== - -base@^0.11.1: - version "0.11.2" - resolved "https://registry.yarnpkg.com/base/-/base-0.11.2.tgz#7bde5ced145b6d551a90db87f83c558b4eb48a8f" - integrity sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg== - dependencies: - cache-base "^1.0.1" - class-utils "^0.3.5" - component-emitter "^1.2.1" - define-property "^1.0.0" - isobject "^3.0.1" - mixin-deep "^1.2.0" - pascalcase "^0.1.1" - -batch@0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/batch/-/batch-0.6.1.tgz#dc34314f4e679318093fc760272525f94bf25c16" - integrity sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY= - -bcrypt-pbkdf@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz#a4301d389b6a43f9b67ff3ca11a3f6637e360e9e" - integrity sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4= - dependencies: - tweetnacl "^0.14.3" - -big.js@^5.2.2: - version "5.2.2" - resolved "https://registry.yarnpkg.com/big.js/-/big.js-5.2.2.tgz#65f0af382f578bcdc742bd9c281e9cb2d7768328" - integrity sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ== - -binary-extensions@^1.0.0: - version "1.13.1" - resolved 
"https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-1.13.1.tgz#598afe54755b2868a5330d2aff9d4ebb53209b65" - integrity sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw== - -bluebird@^3.5.5: - version "3.5.5" - resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.5.tgz#a8d0afd73251effbbd5fe384a77d73003c17a71f" - integrity sha512-5am6HnnfN+urzt4yfg7IgTbotDjIT/u8AJpEt0sIU9FtXfVeezXAPKswrG+xKUCOYAINpSdgZVDU6QFh+cuH3w== - -bn.js@^4.0.0, bn.js@^4.1.0, bn.js@^4.1.1, bn.js@^4.4.0: - version "4.11.8" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.11.8.tgz#2cde09eb5ee341f484746bb0309b3253b1b1442f" - integrity sha512-ItfYfPLkWHUjckQCk8xC+LwxgK8NYcXywGigJgSwOP8Y2iyWT4f2vsZnoOXTTbo+o5yXmIUJ4gn5538SO5S3gA== - -body-parser@1.19.0: - version "1.19.0" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.19.0.tgz#96b2709e57c9c4e09a6fd66a8fd979844f69f08a" - integrity sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw== - dependencies: - bytes "3.1.0" - content-type "~1.0.4" - debug "2.6.9" - depd "~1.1.2" - http-errors "1.7.2" - iconv-lite "0.4.24" - on-finished "~2.3.0" - qs "6.7.0" - raw-body "2.4.0" - type-is "~1.6.17" - -bonjour@^3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/bonjour/-/bonjour-3.5.0.tgz#8e890a183d8ee9a2393b3844c691a42bcf7bc9f5" - integrity sha1-jokKGD2O6aI5OzhExpGkK897yfU= - dependencies: - array-flatten "^2.1.0" - deep-equal "^1.0.1" - dns-equal "^1.0.0" - dns-txt "^2.0.2" - multicast-dns "^6.0.1" - multicast-dns-service-types "^1.1.0" - -boolbase@^1.0.0, boolbase@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/boolbase/-/boolbase-1.0.0.tgz#68dff5fbe60c51eb37725ea9e3ed310dcc1e776e" - integrity sha1-aN/1++YMUes3cl6p4+0xDcwed24= - -brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== - dependencies: - balanced-match "^1.0.0" - concat-map "0.0.1" - -braces@^2.3.1, braces@^2.3.2: - version "2.3.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-2.3.2.tgz#5979fd3f14cd531565e5fa2df1abfff1dfaee729" - integrity sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w== - dependencies: - arr-flatten "^1.1.0" - array-unique "^0.3.2" - extend-shallow "^2.0.1" - fill-range "^4.0.0" - isobject "^3.0.1" - repeat-element "^1.1.2" - snapdragon "^0.8.1" - snapdragon-node "^2.0.1" - split-string "^3.0.2" - to-regex "^3.0.1" - -brorand@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" - integrity sha1-EsJe/kCkXjwyPrhnWgoM5XsiNx8= - -browser-process-hrtime@^0.1.2: - version "0.1.3" - resolved "https://registry.yarnpkg.com/browser-process-hrtime/-/browser-process-hrtime-0.1.3.tgz#616f00faef1df7ec1b5bf9cfe2bdc3170f26c7b4" - integrity sha512-bRFnI4NnjO6cnyLmOV/7PVoDEMJChlcfN0z4s1YMBY989/SvlfMI1lgCnkFUs53e9gQF+w7qu7XdllSTiSl8Aw== - -browser-resolve@^1.11.3: - version "1.11.3" - resolved "https://registry.yarnpkg.com/browser-resolve/-/browser-resolve-1.11.3.tgz#9b7cbb3d0f510e4cb86bdbd796124d28b5890af6" - integrity sha512-exDi1BYWB/6raKHmDTCicQfTkqwN5fioMFV4j8BsfMU4R2DK/QfZfK7kOVkmWCNANf0snkBzqGqAJBao9gZMdQ== - dependencies: - resolve "1.1.7" - -browserify-aes@^1.0.0, 
browserify-aes@^1.0.4: - version "1.2.0" - resolved "https://registry.yarnpkg.com/browserify-aes/-/browserify-aes-1.2.0.tgz#326734642f403dabc3003209853bb70ad428ef48" - integrity sha512-+7CHXqGuspUn/Sl5aO7Ea0xWGAtETPXNSAjHo48JfLdPWcMng33Xe4znFvQweqc/uzk5zSOI3H52CYnjCfb5hA== - dependencies: - buffer-xor "^1.0.3" - cipher-base "^1.0.0" - create-hash "^1.1.0" - evp_bytestokey "^1.0.3" - inherits "^2.0.1" - safe-buffer "^5.0.1" - -browserify-cipher@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/browserify-cipher/-/browserify-cipher-1.0.1.tgz#8d6474c1b870bfdabcd3bcfcc1934a10e94f15f0" - integrity sha512-sPhkz0ARKbf4rRQt2hTpAHqn47X3llLkUGn+xEJzLjwY8LRs2p0v7ljvI5EyoRO/mexrNunNECisZs+gw2zz1w== - dependencies: - browserify-aes "^1.0.4" - browserify-des "^1.0.0" - evp_bytestokey "^1.0.0" - -browserify-des@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/browserify-des/-/browserify-des-1.0.2.tgz#3af4f1f59839403572f1c66204375f7a7f703e9c" - integrity sha512-BioO1xf3hFwz4kc6iBhI3ieDFompMhrMlnDFC4/0/vd5MokpuAc3R+LYbwTA9A5Yc9pq9UYPqffKpW2ObuwX5A== - dependencies: - cipher-base "^1.0.1" - des.js "^1.0.0" - inherits "^2.0.1" - safe-buffer "^5.1.2" - -browserify-rsa@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/browserify-rsa/-/browserify-rsa-4.0.1.tgz#21e0abfaf6f2029cf2fafb133567a701d4135524" - integrity sha1-IeCr+vbyApzy+vsTNWenAdQTVSQ= - dependencies: - bn.js "^4.1.0" - randombytes "^2.0.1" - -browserify-sign@^4.0.0: - version "4.0.4" - resolved "https://registry.yarnpkg.com/browserify-sign/-/browserify-sign-4.0.4.tgz#aa4eb68e5d7b658baa6bf6a57e630cbd7a93d298" - integrity sha1-qk62jl17ZYuqa/alfmMMvXqT0pg= - dependencies: - bn.js "^4.1.1" - browserify-rsa "^4.0.0" - create-hash "^1.1.0" - create-hmac "^1.1.2" - elliptic "^6.0.0" - inherits "^2.0.1" - parse-asn1 "^5.0.0" - -browserify-zlib@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/browserify-zlib/-/browserify-zlib-0.2.0.tgz#2869459d9aa3be245fe8fe2ca1f46e2e7f54d73f" - integrity sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA== - dependencies: - pako "~1.0.5" - -browserslist@4.6.6, browserslist@^4.0.0, browserslist@^4.1.1, browserslist@^4.4.2, browserslist@^4.5.2, browserslist@^4.6.0, browserslist@^4.6.3, browserslist@^4.6.6: - version "4.6.6" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.6.6.tgz#6e4bf467cde520bc9dbdf3747dafa03531cec453" - integrity sha512-D2Nk3W9JL9Fp/gIcWei8LrERCS+eXu9AM5cfXA8WEZ84lFks+ARnZ0q/R69m2SV3Wjma83QDDPxsNKXUwdIsyA== - dependencies: - caniuse-lite "^1.0.30000984" - electron-to-chromium "^1.3.191" - node-releases "^1.1.25" - -bser@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.0.tgz#65fc784bf7f87c009b973c12db6546902fa9c7b5" - integrity sha512-8zsjWrQkkBoLK6uxASk1nJ2SKv97ltiGDo6A3wA0/yRPz+CwmEyDo0hUrhIuukG2JHpAl3bvFIixw2/3Hi0DOg== - dependencies: - node-int64 "^0.4.0" - -buffer-from@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" - integrity sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A== - -buffer-indexof@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/buffer-indexof/-/buffer-indexof-1.1.1.tgz#52fabcc6a606d1a00302802648ef68f639da268c" - integrity sha512-4/rOEg86jivtPTeOUUT61jJO1Ya1TrR/OkqCSZDyq84WJh3LuuiphBYJN+fm5xufIk4XAFcEwte/8WzC8If/1g== - -buffer-xor@^1.0.3: - version "1.0.3" - 
resolved "https://registry.yarnpkg.com/buffer-xor/-/buffer-xor-1.0.3.tgz#26e61ed1422fb70dd42e6e36729ed51d855fe8d9" - integrity sha1-JuYe0UIvtw3ULm42cp7VHYVf6Nk= - -buffer@^4.3.0: - version "4.9.1" - resolved "https://registry.yarnpkg.com/buffer/-/buffer-4.9.1.tgz#6d1bb601b07a4efced97094132093027c95bc298" - integrity sha1-bRu2AbB6TvztlwlBMgkwJ8lbwpg= - dependencies: - base64-js "^1.0.2" - ieee754 "^1.1.4" - isarray "^1.0.0" - -builtin-status-codes@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/builtin-status-codes/-/builtin-status-codes-3.0.0.tgz#85982878e21b98e1c66425e03d0174788f569ee8" - integrity sha1-hZgoeOIbmOHGZCXgPQF0eI9Wnug= - -bytes@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" - integrity sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg= - -bytes@3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.0.tgz#f6cf7933a360e0588fa9fde85651cdc7f805d1f6" - integrity sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg== - -cacache@^11.0.2: - version "11.3.3" - resolved "https://registry.yarnpkg.com/cacache/-/cacache-11.3.3.tgz#8bd29df8c6a718a6ebd2d010da4d7972ae3bbadc" - integrity sha512-p8WcneCytvzPxhDvYp31PD039vi77I12W+/KfR9S8AZbaiARFBCpsPJS+9uhWfeBfeAtW7o/4vt3MUqLkbY6nA== - dependencies: - bluebird "^3.5.5" - chownr "^1.1.1" - figgy-pudding "^3.5.1" - glob "^7.1.4" - graceful-fs "^4.1.15" - lru-cache "^5.1.1" - mississippi "^3.0.0" - mkdirp "^0.5.1" - move-concurrently "^1.0.1" - promise-inflight "^1.0.1" - rimraf "^2.6.3" - ssri "^6.0.1" - unique-filename "^1.1.1" - y18n "^4.0.0" - -cacache@^12.0.2: - version "12.0.3" - resolved "https://registry.yarnpkg.com/cacache/-/cacache-12.0.3.tgz#be99abba4e1bf5df461cd5a2c1071fc432573390" - integrity sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw== - dependencies: - bluebird "^3.5.5" - chownr "^1.1.1" - figgy-pudding "^3.5.1" - glob "^7.1.4" - graceful-fs "^4.1.15" - infer-owner "^1.0.3" - lru-cache "^5.1.1" - mississippi "^3.0.0" - mkdirp "^0.5.1" - move-concurrently "^1.0.1" - promise-inflight "^1.0.1" - rimraf "^2.6.3" - ssri "^6.0.1" - unique-filename "^1.1.1" - y18n "^4.0.0" - -cache-base@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/cache-base/-/cache-base-1.0.1.tgz#0a7f46416831c8b662ee36fe4e7c59d76f666ab2" - integrity sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ== - dependencies: - collection-visit "^1.0.0" - component-emitter "^1.2.1" - get-value "^2.0.6" - has-value "^1.0.0" - isobject "^3.0.1" - set-value "^2.0.0" - to-object-path "^0.3.0" - union-value "^1.0.0" - unset-value "^1.0.0" - -call-me-maybe@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b" - integrity sha1-JtII6onje1y95gJQoV8DHBak1ms= - -caller-callsite@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134" - integrity sha1-hH4PzgoiN1CpoCfFSzNzGtMVQTQ= - dependencies: - callsites "^2.0.0" - -caller-path@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4" - integrity sha1-Ro+DBE42mrIBD6xfBs7uFbsssfQ= - dependencies: - caller-callsite "^2.0.0" - -callsites@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50" - integrity sha1-BuuE8A7qQT2oav/vrL/7Ngk7PFA= - -callsites@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" - integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== - -camel-case@3.0.x: - version "3.0.0" - resolved "https://registry.yarnpkg.com/camel-case/-/camel-case-3.0.0.tgz#ca3c3688a4e9cf3a4cda777dc4dcbc713249cf73" - integrity sha1-yjw2iKTpzzpM2nd9xNy8cTJJz3M= - dependencies: - no-case "^2.2.0" - upper-case "^1.1.1" - -camelcase@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-4.1.0.tgz#d545635be1e33c542649c69173e5de6acfae34dd" - integrity sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0= - -camelcase@^5.0.0, camelcase@^5.2.0, camelcase@^5.3.1: - version "5.3.1" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" - integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== - -caniuse-api@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" - integrity sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw== - dependencies: - browserslist "^4.0.0" - caniuse-lite "^1.0.0" - lodash.memoize "^4.1.2" - lodash.uniq "^4.5.0" - -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000939, caniuse-lite@^1.0.30000980, caniuse-lite@^1.0.30000984: - version "1.0.30000989" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30000989.tgz#b9193e293ccf7e4426c5245134b8f2a56c0ac4b9" - integrity sha512-vrMcvSuMz16YY6GSVZ0dWDTJP8jqk3iFQ/Aq5iqblPwxSVVZI+zxDyTX0VPqtQsDnfdrBDcsmhgTEOh5R8Lbpw== - -capture-exit@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4" - integrity sha512-PiT/hQmTonHhl/HFGN+Lx3JJUznrVYJ3+AQsnthneZbvW7x+f08Tk7yLJTLEOUvBTbduLeeBkxEaYXUOUrRq6g== - dependencies: - rsvp "^4.8.4" - -case-sensitive-paths-webpack-plugin@2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/case-sensitive-paths-webpack-plugin/-/case-sensitive-paths-webpack-plugin-2.2.0.tgz#3371ef6365ef9c25fa4b81c16ace0e9c7dc58c3e" - integrity sha512-u5ElzokS8A1pm9vM3/iDgTcI3xqHxuCao94Oz8etI3cf0Tio0p8izkDYbTIn09uP3yUUr6+veaE6IkjnTYS46g== - -caseless@~0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.12.0.tgz#1b681c21ff84033c826543090689420d187151dc" - integrity sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw= - -chalk@2.4.2, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.4.1, chalk@^2.4.2: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - -chalk@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" - integrity sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg= - dependencies: - ansi-styles "^2.2.1" - escape-string-regexp "^1.0.2" - has-ansi "^2.0.0" - strip-ansi "^3.0.0" - supports-color "^2.0.0" - -chardet@^0.7.0: - version "0.7.0" - resolved 
"https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" - integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== - -chokidar@^2.0.0, chokidar@^2.0.2, chokidar@^2.0.4: - version "2.1.8" - resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-2.1.8.tgz#804b3a7b6a99358c3c5c61e71d8728f041cff917" - integrity sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg== - dependencies: - anymatch "^2.0.0" - async-each "^1.0.1" - braces "^2.3.2" - glob-parent "^3.1.0" - inherits "^2.0.3" - is-binary-path "^1.0.0" - is-glob "^4.0.0" - normalize-path "^3.0.0" - path-is-absolute "^1.0.0" - readdirp "^2.2.1" - upath "^1.1.1" - optionalDependencies: - fsevents "^1.2.7" - -chownr@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.2.tgz#a18f1e0b269c8a6a5d3c86eb298beb14c3dd7bf6" - integrity sha512-GkfeAQh+QNy3wquu9oIZr6SS5x7wGdSgNQvD10X3r+AZr1Oys22HW8kAmDMvNg2+Dm0TeGaEuO8gFwdBXxwO8A== - -chrome-trace-event@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/chrome-trace-event/-/chrome-trace-event-1.0.2.tgz#234090ee97c7d4ad1a2c4beae27505deffc608a4" - integrity sha512-9e/zx1jw7B4CO+c/RXoCsfg/x1AfUBioy4owYH0bJprEYAx5hRFLRhWBqHAG57D0ZM4H7vxbP7bPe0VwhQRYDQ== - dependencies: - tslib "^1.9.0" - -ci-info@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" - integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== - -cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" - integrity sha512-Kkht5ye6ZGmwv40uUDZztayT2ThLQGfnj/T71N/XzeZeo3nf8foyW7zGTsPYkEya3m5f3cAypH+qe7YOrM1U2Q== - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -class-utils@^0.3.5: - version "0.3.6" - resolved "https://registry.yarnpkg.com/class-utils/-/class-utils-0.3.6.tgz#f93369ae8b9a7ce02fd41faad0ca83033190c463" - integrity sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg== - dependencies: - arr-union "^3.1.0" - define-property "^0.2.5" - isobject "^3.0.0" - static-extend "^0.1.1" - -classnames@2.x, classnames@^2.2.0, classnames@^2.2.1, classnames@^2.2.3, classnames@^2.2.5, classnames@^2.2.6, classnames@~2.2.6: - version "2.2.6" - resolved "https://registry.yarnpkg.com/classnames/-/classnames-2.2.6.tgz#43935bffdd291f326dad0a205309b38d00f650ce" - integrity sha512-JR/iSQOSt+LQIWwrwEzJ9uk0xfN3mTVYMwt1Ir5mUcSN6pU+V4zQFFaJsclJbPuAUQH+yfWef6tm7l1quW3C8Q== - -clean-css@4.2.x: - version "4.2.1" - resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-4.2.1.tgz#2d411ef76b8569b6d0c84068dabe85b0aa5e5c17" - integrity sha512-4ZxI6dy4lrY6FHzfiy1aEOXgu4LIsW2MhwG0VBKdcoGoH/XLFgaHSdLTGr4O8Be6A8r3MOphEiI8Gc1n0ecf3g== - dependencies: - source-map "~0.6.0" - -cli-cursor@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" - integrity sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU= - dependencies: - restore-cursor "^2.0.0" - -cli-width@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-2.2.0.tgz#ff19ede8a9a5e579324147b0c11f0fbcbabed639" - integrity sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk= - -cliui@^4.0.0: - version "4.1.0" - resolved 
"https://registry.yarnpkg.com/cliui/-/cliui-4.1.0.tgz#348422dbe82d800b3022eef4f6ac10bf2e4d1b49" - integrity sha512-4FG+RSG9DL7uEwRUZXZn3SS34DiDPfzP0VOiEwtUWlE+AR2EIg+hSyvrIgUUfhdgR/UkAeW2QHgeP+hWrXs7jQ== - dependencies: - string-width "^2.1.1" - strip-ansi "^4.0.0" - wrap-ansi "^2.0.0" - -cliui@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/cliui/-/cliui-5.0.0.tgz#deefcfdb2e800784aa34f46fa08e06851c7bbbc5" - integrity sha512-PYeGSEmmHM6zvoef2w8TPzlrnNpXIjTipYK780YswmIP9vjxmd6Y2a3CB2Ks6/AU8NHjZugXvo8w3oWM2qnwXA== - dependencies: - string-width "^3.1.0" - strip-ansi "^5.2.0" - wrap-ansi "^5.1.0" - -clone-deep@^0.2.4: - version "0.2.4" - resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-0.2.4.tgz#4e73dd09e9fb971cc38670c5dced9c1896481cc6" - integrity sha1-TnPdCen7lxzDhnDF3O2cGJZIHMY= - dependencies: - for-own "^0.1.3" - is-plain-object "^2.0.1" - kind-of "^3.0.2" - lazy-cache "^1.0.3" - shallow-clone "^0.1.2" - -clone-deep@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-2.0.2.tgz#00db3a1e173656730d1188c3d6aced6d7ea97713" - integrity sha512-SZegPTKjCgpQH63E+eN6mVEEPdQBOUzjyJm5Pora4lrwWRFS8I0QAxV/KD6vV/i0WuijHZWQC1fMsPEdxfdVCQ== - dependencies: - for-own "^1.0.0" - is-plain-object "^2.0.4" - kind-of "^6.0.0" - shallow-clone "^1.0.0" - -clone@^2.1.1, clone@^2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f" - integrity sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18= - -co@^4.6.0: - version "4.6.0" - resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" - integrity sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ= - -coa@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/coa/-/coa-2.0.2.tgz#43f6c21151b4ef2bf57187db0d73de229e3e7ec3" - integrity sha512-q5/jG+YQnSy4nRTV4F7lPepBJZ8qBNJJDBuJdoejDyLXgmL7IEo+Le2JDZudFTFt7mrCqIRaSjws4ygRCTCAXA== - dependencies: - "@types/q" "^1.5.1" - chalk "^2.4.1" - q "^1.1.2" - -code-point-at@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" - integrity sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c= - -collection-visit@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/collection-visit/-/collection-visit-1.0.0.tgz#4bc0373c164bc3291b4d368c829cf1a80a59dca0" - integrity sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA= - dependencies: - map-visit "^1.0.0" - object-visit "^1.0.0" - -color-convert@^1.9.0, color-convert@^1.9.1: - version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" - integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== - dependencies: - color-name "1.1.3" - -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" - integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= - -color-name@^1.0.0: - version "1.1.4" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" - integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== - -color-string@^1.5.2: - version "1.5.3" - resolved "https://registry.yarnpkg.com/color-string/-/color-string-1.5.3.tgz#c9bbc5f01b58b5492f3d6857459cb6590ce204cc" - integrity 
sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw== - dependencies: - color-name "^1.0.0" - simple-swizzle "^0.2.2" - -color@^3.0.0: - version "3.1.2" - resolved "https://registry.yarnpkg.com/color/-/color-3.1.2.tgz#68148e7f85d41ad7649c5fa8c8106f098d229e10" - integrity sha512-vXTJhHebByxZn3lDvDJYw4lR5+uB3vuoHsuYA5AKuxRVn5wzzIfQKGLBmgdVRHKTJYeK5rvJcHnrd0Li49CFpg== - dependencies: - color-convert "^1.9.1" - color-string "^1.5.2" - -combined-stream@^1.0.6, combined-stream@~1.0.6: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -commander@2.17.x: - version "2.17.1" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.17.1.tgz#bd77ab7de6de94205ceacc72f1716d29f20a77bf" - integrity sha512-wPMUt6FnH2yzG95SA6mzjQOEKUU3aLaDEmzs1ti+1E9h+CsrZghRlqEM/EJ4KscsQVG8uNN4uVreUeT8+drlgg== - -commander@^2.11.0, commander@^2.19.0, commander@^2.20.0, commander@~2.20.0: - version "2.20.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.0.tgz#d58bb2b5c1ee8f87b0d340027e9e94e222c5a422" - integrity sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ== - -commander@~2.19.0: - version "2.19.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-2.19.0.tgz#f6198aa84e5b83c46054b94ddedbfed5ee9ff12a" - integrity sha512-6tvAOO+D6OENvRAh524Dh9jcfKTYDQAqvqezbCW82xj5X0pSrcpxtvRKHLG0yBY6SD7PSDrJaj+0AiOcKVd1Xg== - -common-tags@^1.8.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937" - integrity sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw== - -commondir@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" - integrity sha1-3dgA2gxmEnOTzKWVDqloo6rxJTs= - -component-classes@1.x, component-classes@^1.2.5, component-classes@^1.2.6: - version "1.2.6" - resolved "https://registry.yarnpkg.com/component-classes/-/component-classes-1.2.6.tgz#c642394c3618a4d8b0b8919efccbbd930e5cd691" - integrity sha1-xkI5TDYYpNiwuJGe/Mu9kw5c1pE= - dependencies: - component-indexof "0.0.3" - -component-emitter@^1.2.1: - version "1.3.0" - resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0" - integrity sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg== - -component-indexof@0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/component-indexof/-/component-indexof-0.0.3.tgz#11d091312239eb8f32c8f25ae9cb002ffe8d3c24" - integrity sha1-EdCRMSI5648yyPJa6csAL/6NPCQ= - -compressible@~2.0.16: - version "2.0.17" - resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.17.tgz#6e8c108a16ad58384a977f3a482ca20bff2f38c1" - integrity sha512-BGHeLCK1GV7j1bSmQQAi26X+GgWcTjLr/0tzSvMCl3LH1w1IJ4PFSPoV5316b30cneTziC+B1a+3OjoSUcQYmw== - dependencies: - mime-db ">= 1.40.0 < 2" - -compression@^1.5.2: - version "1.7.4" - resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" - integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ== - 
dependencies: - accepts "~1.3.5" - bytes "3.0.0" - compressible "~2.0.16" - debug "2.6.9" - on-headers "~1.0.2" - safe-buffer "5.1.2" - vary "~1.1.2" - -concat-map@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= - -concat-stream@^1.5.0: - version "1.6.2" - resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.6.2.tgz#904bdf194cd3122fc675c77fc4ac3d4ff0fd1a34" - integrity sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw== - dependencies: - buffer-from "^1.0.0" - inherits "^2.0.3" - readable-stream "^2.2.2" - typedarray "^0.0.6" - -confusing-browser-globals@^1.0.7: - version "1.0.8" - resolved "https://registry.yarnpkg.com/confusing-browser-globals/-/confusing-browser-globals-1.0.8.tgz#93ffec1f82a6e2bf2bc36769cc3a92fa20e502f3" - integrity sha512-lI7asCibVJ6Qd3FGU7mu4sfG4try4LX3+GVS+Gv8UlrEf2AeW57piecapnog2UHZSbcX/P/1UDWVaTsblowlZg== - -connect-history-api-fallback@^1.3.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc" - integrity sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg== - -console-browserify@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10" - integrity sha1-8CQcRXMKn8YyOyBtvzjtx0HQuxA= - dependencies: - date-now "^0.1.4" - -console-control-strings@^1.0.0, console-control-strings@~1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" - integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= - -constants-browserify@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/constants-browserify/-/constants-browserify-1.0.0.tgz#c20b96d8c617748aaf1c16021760cd27fcb8cb75" - integrity sha1-wguW2MYXdIqvHBYCF2DNJ/y4y3U= - -contains-path@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/contains-path/-/contains-path-0.1.0.tgz#fe8cf184ff6670b6baef01a9d4861a5cbec4120a" - integrity sha1-/ozxhP9mcLa67wGp1IYaXL7EEgo= - -content-disposition@0.5.3: - version "0.5.3" - resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.3.tgz#e130caf7e7279087c5616c2007d0485698984fbd" - integrity sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g== - dependencies: - safe-buffer "5.1.2" - -content-type@~1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b" - integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA== - -convert-source-map@^1.1.0, convert-source-map@^1.4.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.6.0.tgz#51b537a8c43e0f04dec1993bffcdd504e758ac20" - integrity sha512-eFu7XigvxdZ1ETfbgPBohgyQ/Z++C0eEhTor0qRwBw9unw+L0/6V8wkSuGgzdThkiS5lSpdptOQPD8Ak40a+7A== - dependencies: - safe-buffer "~5.1.1" - -cookie-signature@1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" - integrity sha1-4wOogrNCzD7oylE6eZmXNNqzriw= - -cookie@0.4.0: - version "0.4.0" 
- resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.4.0.tgz#beb437e7022b3b6d49019d088665303ebe9c14ba" - integrity sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg== - -copy-concurrently@^1.0.0: - version "1.0.5" - resolved "https://registry.yarnpkg.com/copy-concurrently/-/copy-concurrently-1.0.5.tgz#92297398cae34937fcafd6ec8139c18051f0b5e0" - integrity sha512-f2domd9fsVDFtaFcbaRZuYXwtdmnzqbADSwhSWYxYB/Q8zsdUUFMXVRwXGDMWmbEzAn1kdRrtI1T/KTFOL4X2A== - dependencies: - aproba "^1.1.1" - fs-write-stream-atomic "^1.0.8" - iferr "^0.1.5" - mkdirp "^0.5.1" - rimraf "^2.5.4" - run-queue "^1.0.0" - -copy-descriptor@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/copy-descriptor/-/copy-descriptor-0.1.1.tgz#676f6eb3c39997c2ee1ac3a924fd6124748f578d" - integrity sha1-Z29us8OZl8LuGsOpJP1hJHSPV40= - -copy-to-clipboard@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/copy-to-clipboard/-/copy-to-clipboard-3.2.0.tgz#d2724a3ccbfed89706fac8a894872c979ac74467" - integrity sha512-eOZERzvCmxS8HWzugj4Uxl8OJxa7T2k1Gi0X5qavwydHIfuSHq2dTD09LOg/XyGq4Zpb5IsR/2OJ5lbOegz78w== - dependencies: - toggle-selection "^1.0.6" - -core-js-compat@^3.0.0, core-js-compat@^3.1.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.2.1.tgz#0cbdbc2e386e8e00d3b85dc81c848effec5b8150" - integrity sha512-MwPZle5CF9dEaMYdDeWm73ao/IflDH+FjeJCWEADcEgFSE9TLimFKwJsfmkwzI8eC0Aj0mgvMDjeQjrElkz4/A== - dependencies: - browserslist "^4.6.6" - semver "^6.3.0" - -core-js@3.1.4: - version "3.1.4" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.1.4.tgz#3a2837fc48e582e1ae25907afcd6cf03b0cc7a07" - integrity sha512-YNZN8lt82XIMLnLirj9MhKDFZHalwzzrL9YLt6eb0T5D0EDl4IQ90IGkua8mHbnxNrkj1d8hbdizMc0Qmg1WnQ== - -core-js@^1.0.0: - version "1.2.7" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636" - integrity sha1-ZSKUwUZR2yj6k70tX/KYOk8IxjY= - -core-js@^2.4.0: - version "2.6.9" - resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.9.tgz#6b4b214620c834152e179323727fc19741b084f2" - integrity sha512-HOpZf6eXmnl7la+cUdMnLvUxKNqLUzJvgIziQ0DiF3JwSImNphIqdGqzj6hIKyX04MmV0poclQ7+wjWvxQyR2A== - -core-util-is@1.0.2, core-util-is@~1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7" - integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= - -cosmiconfig@^5.0.0, cosmiconfig@^5.2.0, cosmiconfig@^5.2.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.2.1.tgz#040f726809c591e77a17c0a3626ca45b4f168b1a" - integrity sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA== - dependencies: - import-fresh "^2.0.0" - is-directory "^0.3.1" - js-yaml "^3.13.1" - parse-json "^4.0.0" - -create-ecdh@^4.0.0: - version "4.0.3" - resolved "https://registry.yarnpkg.com/create-ecdh/-/create-ecdh-4.0.3.tgz#c9111b6f33045c4697f144787f9254cdc77c45ff" - integrity sha512-GbEHQPMOswGpKXM9kCWVrremUcBmjteUaQ01T9rkKCPDXfUHX0IoP9LpHYo2NPFampa4e+/pFDc3jQdxrxQLaw== - dependencies: - bn.js "^4.1.0" - elliptic "^6.0.0" - -create-hash@^1.1.0, create-hash@^1.1.2: - version "1.2.0" - resolved "https://registry.yarnpkg.com/create-hash/-/create-hash-1.2.0.tgz#889078af11a63756bcfb59bd221996be3a9ef196" - integrity sha512-z00bCGNHDG8mHAkP7CtT1qVu+bFQUPjYq/4Iv3C3kWjTFV10zIjfSoeqXo9Asws8gwSHDGj/hl2u4OGIjapeCg== - dependencies: - cipher-base "^1.0.1" - 
inherits "^2.0.1" - md5.js "^1.3.4" - ripemd160 "^2.0.1" - sha.js "^2.4.0" - -create-hmac@^1.1.0, create-hmac@^1.1.2, create-hmac@^1.1.4: - version "1.1.7" - resolved "https://registry.yarnpkg.com/create-hmac/-/create-hmac-1.1.7.tgz#69170c78b3ab957147b2b8b04572e47ead2243ff" - integrity sha512-MJG9liiZ+ogc4TzUwuvbER1JRdgvUFSB5+VR/g5h82fGaIRWMWddtKBHi7/sVhfjQZ6SehlyhvQYrcYkaUIpLg== - dependencies: - cipher-base "^1.0.3" - create-hash "^1.1.0" - inherits "^2.0.1" - ripemd160 "^2.0.0" - safe-buffer "^5.0.1" - sha.js "^2.4.8" - -create-react-class@^15.5.3: - version "15.6.3" - resolved "https://registry.yarnpkg.com/create-react-class/-/create-react-class-15.6.3.tgz#2d73237fb3f970ae6ebe011a9e66f46dbca80036" - integrity sha512-M+/3Q6E6DLO6Yx3OwrWjwHBnvfXXYA7W+dFjt/ZDBemHO1DDZhsalX/NUtnTYclN6GfnBDRh4qRHjcDHmlJBJg== - dependencies: - fbjs "^0.8.9" - loose-envify "^1.3.1" - object-assign "^4.1.1" - -cross-spawn@6.0.5, cross-spawn@^6.0.0, cross-spawn@^6.0.5: - version "6.0.5" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" - integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== - dependencies: - nice-try "^1.0.4" - path-key "^2.0.1" - semver "^5.5.0" - shebang-command "^1.2.0" - which "^1.2.9" - -crypto-browserify@^3.11.0: - version "3.12.0" - resolved "https://registry.yarnpkg.com/crypto-browserify/-/crypto-browserify-3.12.0.tgz#396cf9f3137f03e4b8e532c58f698254e00f80ec" - integrity sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg== - dependencies: - browserify-cipher "^1.0.0" - browserify-sign "^4.0.0" - create-ecdh "^4.0.0" - create-hash "^1.1.0" - create-hmac "^1.1.0" - diffie-hellman "^5.0.0" - inherits "^2.0.1" - pbkdf2 "^3.0.3" - public-encrypt "^4.0.0" - randombytes "^2.0.0" - randomfill "^1.0.3" - -css-animation@1.x, css-animation@^1.3.2, css-animation@^1.5.0: - version "1.6.1" - resolved "https://registry.yarnpkg.com/css-animation/-/css-animation-1.6.1.tgz#162064a3b0d51f958b7ff37b3d6d4de18e17039e" - integrity sha512-/48+/BaEaHRY6kNQ2OIPzKf9A6g8WjZYjhiNDNuIVbsm5tXCGIAsHDjB4Xu1C4vXJtUWZo26O68OQkDpNBaPog== - dependencies: - babel-runtime "6.x" - component-classes "^1.2.5" - -css-blank-pseudo@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/css-blank-pseudo/-/css-blank-pseudo-0.1.4.tgz#dfdefd3254bf8a82027993674ccf35483bfcb3c5" - integrity sha512-LHz35Hr83dnFeipc7oqFDmsjHdljj3TQtxGGiNWSOsTLIAubSm4TEz8qCaKFpk7idaQ1GfWscF4E6mgpBysA1w== - dependencies: - postcss "^7.0.5" - -css-color-names@0.0.4, css-color-names@^0.0.4: - version "0.0.4" - resolved "https://registry.yarnpkg.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" - integrity sha1-gIrcLnnPhHOAabZGyyDsJ762KeA= - -css-declaration-sorter@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-4.0.1.tgz#c198940f63a76d7e36c1e71018b001721054cb22" - integrity sha512-BcxQSKTSEEQUftYpBVnsH4SF05NTuBokb19/sBt6asXGKZ/6VP7PLG1CBCkFDYOnhXhPh0jMhO6xZ71oYHXHBA== - dependencies: - postcss "^7.0.1" - timsort "^0.3.0" - -css-has-pseudo@^0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/css-has-pseudo/-/css-has-pseudo-0.10.0.tgz#3c642ab34ca242c59c41a125df9105841f6966ee" - integrity sha512-Z8hnfsZu4o/kt+AuFzeGpLVhFOGO9mluyHBaA2bA8aCGTwah5sT3WV/fTHH8UNZUytOIImuGPrl/prlb4oX4qQ== - dependencies: - postcss "^7.0.6" - postcss-selector-parser "^5.0.0-rc.4" - 
-css-loader@2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-2.1.1.tgz#d8254f72e412bb2238bb44dd674ffbef497333ea" - integrity sha512-OcKJU/lt232vl1P9EEDamhoO9iKY3tIjY5GU+XDLblAykTdgs6Ux9P1hTHve8nFKy5KPpOXOsVI/hIwi3841+w== - dependencies: - camelcase "^5.2.0" - icss-utils "^4.1.0" - loader-utils "^1.2.3" - normalize-path "^3.0.0" - postcss "^7.0.14" - postcss-modules-extract-imports "^2.0.0" - postcss-modules-local-by-default "^2.0.6" - postcss-modules-scope "^2.1.0" - postcss-modules-values "^2.0.0" - postcss-value-parser "^3.3.0" - schema-utils "^1.0.0" - -css-prefers-color-scheme@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/css-prefers-color-scheme/-/css-prefers-color-scheme-3.1.1.tgz#6f830a2714199d4f0d0d0bb8a27916ed65cff1f4" - integrity sha512-MTu6+tMs9S3EUqzmqLXEcgNRbNkkD/TGFvowpeoWJn5Vfq7FMgsmRQs9X5NXAURiOBmOxm/lLjsDNXDE6k9bhg== - dependencies: - postcss "^7.0.5" - -css-select-base-adapter@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz#3b2ff4972cc362ab88561507a95408a1432135d7" - integrity sha512-jQVeeRG70QI08vSTwf1jHxp74JoZsr2XSgETae8/xC8ovSnL2WF87GTLO86Sbwdt2lK4Umg4HnnwMO4YF3Ce7w== - -css-select@^1.1.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/css-select/-/css-select-1.2.0.tgz#2b3a110539c5355f1cd8d314623e870b121ec858" - integrity sha1-KzoRBTnFNV8c2NMUYj6HCxIeyFg= - dependencies: - boolbase "~1.0.0" - css-what "2.1" - domutils "1.5.1" - nth-check "~1.0.1" - -css-select@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/css-select/-/css-select-2.0.2.tgz#ab4386cec9e1f668855564b17c3733b43b2a5ede" - integrity sha512-dSpYaDVoWaELjvZ3mS6IKZM/y2PMPa/XYoEfYNZePL4U/XgyxZNroHEHReDx/d+VgXh9VbCTtFqLkFbmeqeaRQ== - dependencies: - boolbase "^1.0.0" - css-what "^2.1.2" - domutils "^1.7.0" - nth-check "^1.0.2" - -css-tree@1.0.0-alpha.29: - version "1.0.0-alpha.29" - resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.29.tgz#3fa9d4ef3142cbd1c301e7664c1f352bd82f5a39" - integrity sha512-sRNb1XydwkW9IOci6iB2xmy8IGCj6r/fr+JWitvJ2JxQRPzN3T4AGGVWCMlVmVwM1gtgALJRmGIlWv5ppnGGkg== - dependencies: - mdn-data "~1.1.0" - source-map "^0.5.3" - -css-tree@1.0.0-alpha.33: - version "1.0.0-alpha.33" - resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.0.0-alpha.33.tgz#970e20e5a91f7a378ddd0fc58d0b6c8d4f3be93e" - integrity sha512-SPt57bh5nQnpsTBsx/IXbO14sRc9xXu5MtMAVuo0BaQQmyf0NupNPPSoMaqiAF5tDFafYsTkfeH4Q/HCKXkg4w== - dependencies: - mdn-data "2.0.4" - source-map "^0.5.3" - -css-unit-converter@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/css-unit-converter/-/css-unit-converter-1.1.1.tgz#d9b9281adcfd8ced935bdbaba83786897f64e996" - integrity sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY= - -css-what@2.1, css-what@^2.1.2: - version "2.1.3" - resolved "https://registry.yarnpkg.com/css-what/-/css-what-2.1.3.tgz#a6d7604573365fe74686c3f311c56513d88285f2" - integrity sha512-a+EPoD+uZiNfh+5fxw2nO9QwFa6nJe2Or35fGY6Ipw1R3R4AGz1d1TEZrCegvw2YTmZ0jXirGYlzxxpYSHwpEg== - -cssdb@^4.3.0: - version "4.4.0" - resolved "https://registry.yarnpkg.com/cssdb/-/cssdb-4.4.0.tgz#3bf2f2a68c10f5c6a08abd92378331ee803cddb0" - integrity sha512-LsTAR1JPEM9TpGhl/0p3nQecC2LJ0kD8X5YARu1hk/9I1gril5vDtMZyNxcEpxxDj34YNck/ucjuoUd66K03oQ== - -cssesc@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-2.0.0.tgz#3b13bd1bb1cb36e1bcb5a4dcd27f54c5dcb35703" - integrity 
sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg== - -cssesc@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" - integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== - -cssnano-preset-default@^4.0.7: - version "4.0.7" - resolved "https://registry.yarnpkg.com/cssnano-preset-default/-/cssnano-preset-default-4.0.7.tgz#51ec662ccfca0f88b396dcd9679cdb931be17f76" - integrity sha512-x0YHHx2h6p0fCl1zY9L9roD7rnlltugGu7zXSKQx6k2rYw0Hi3IqxcoAGF7u9Q5w1nt7vK0ulxV8Lo+EvllGsA== - dependencies: - css-declaration-sorter "^4.0.1" - cssnano-util-raw-cache "^4.0.1" - postcss "^7.0.0" - postcss-calc "^7.0.1" - postcss-colormin "^4.0.3" - postcss-convert-values "^4.0.1" - postcss-discard-comments "^4.0.2" - postcss-discard-duplicates "^4.0.2" - postcss-discard-empty "^4.0.1" - postcss-discard-overridden "^4.0.1" - postcss-merge-longhand "^4.0.11" - postcss-merge-rules "^4.0.3" - postcss-minify-font-values "^4.0.2" - postcss-minify-gradients "^4.0.2" - postcss-minify-params "^4.0.2" - postcss-minify-selectors "^4.0.2" - postcss-normalize-charset "^4.0.1" - postcss-normalize-display-values "^4.0.2" - postcss-normalize-positions "^4.0.2" - postcss-normalize-repeat-style "^4.0.2" - postcss-normalize-string "^4.0.2" - postcss-normalize-timing-functions "^4.0.2" - postcss-normalize-unicode "^4.0.1" - postcss-normalize-url "^4.0.1" - postcss-normalize-whitespace "^4.0.2" - postcss-ordered-values "^4.1.2" - postcss-reduce-initial "^4.0.3" - postcss-reduce-transforms "^4.0.2" - postcss-svgo "^4.0.2" - postcss-unique-selectors "^4.0.1" - -cssnano-util-get-arguments@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/cssnano-util-get-arguments/-/cssnano-util-get-arguments-4.0.0.tgz#ed3a08299f21d75741b20f3b81f194ed49cc150f" - integrity sha1-7ToIKZ8h11dBsg87gfGU7UnMFQ8= - -cssnano-util-get-match@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/cssnano-util-get-match/-/cssnano-util-get-match-4.0.0.tgz#c0e4ca07f5386bb17ec5e52250b4f5961365156d" - integrity sha1-wOTKB/U4a7F+xeUiULT1lhNlFW0= - -cssnano-util-raw-cache@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/cssnano-util-raw-cache/-/cssnano-util-raw-cache-4.0.1.tgz#b26d5fd5f72a11dfe7a7846fb4c67260f96bf282" - integrity sha512-qLuYtWK2b2Dy55I8ZX3ky1Z16WYsx544Q0UWViebptpwn/xDBmog2TLg4f+DBMg1rJ6JDWtn96WHbOKDWt1WQA== - dependencies: - postcss "^7.0.0" - -cssnano-util-same-parent@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/cssnano-util-same-parent/-/cssnano-util-same-parent-4.0.1.tgz#574082fb2859d2db433855835d9a8456ea18bbf3" - integrity sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q== - -cssnano@^4.1.0: - version "4.1.10" - resolved "https://registry.yarnpkg.com/cssnano/-/cssnano-4.1.10.tgz#0ac41f0b13d13d465487e111b778d42da631b8b2" - integrity sha512-5wny+F6H4/8RgNlaqab4ktc3e0/blKutmq8yNlBFXA//nSFFAqAngjNVRzUvCgYROULmZZUoosL/KSoZo5aUaQ== - dependencies: - cosmiconfig "^5.0.0" - cssnano-preset-default "^4.0.7" - is-resolvable "^1.0.0" - postcss "^7.0.0" - -csso@^3.5.1: - version "3.5.1" - resolved "https://registry.yarnpkg.com/csso/-/csso-3.5.1.tgz#7b9eb8be61628973c1b261e169d2f024008e758b" - integrity sha512-vrqULLffYU1Q2tLdJvaCYbONStnfkfimRxXNaGjxMldI0C7JPBC4rB1RyjhfdZ4m1frm8pM9uRPKH3d2knZ8gg== - dependencies: - css-tree "1.0.0-alpha.29" - -cssom@0.3.x, "cssom@>= 0.3.2 < 0.4.0", 
cssom@^0.3.4: - version "0.3.8" - resolved "https://registry.yarnpkg.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a" - integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg== - -cssstyle@^1.0.0, cssstyle@^1.1.1: - version "1.4.0" - resolved "https://registry.yarnpkg.com/cssstyle/-/cssstyle-1.4.0.tgz#9d31328229d3c565c61e586b02041a28fccdccf1" - integrity sha512-GBrLZYZ4X4x6/QEoBnIrqb8B/f5l4+8me2dkom/j1Gtbxy0kBv6OGzKuAsGM75bkGwGAFkt56Iwg28S3XTZgSA== - dependencies: - cssom "0.3.x" - -csstype@^2.2.0: - version "2.6.6" - resolved "https://registry.yarnpkg.com/csstype/-/csstype-2.6.6.tgz#c34f8226a94bbb10c32cc0d714afdf942291fc41" - integrity sha512-RpFbQGUE74iyPgvr46U9t1xoQBM8T4BL8SxrN66Le2xYAPSaDJJKeztV3awugusb3g3G9iL8StmkBBXhcbbXhg== - -customize-cra@^0.2.12: - version "0.2.14" - resolved "https://registry.yarnpkg.com/customize-cra/-/customize-cra-0.2.14.tgz#41f9b2d96d9a318bec760c4c9b3dc9c26d5a7594" - integrity sha512-LtEMXNzkhnnqGPc1dP5fnPlF1ic1dj34hDbRVJIzfMQgOaGByHhx51fTR7fv7sTPEbCPrOBP777MkCo0GPV57g== - dependencies: - lodash.flow "^3.5.0" - -cyclist@~0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/cyclist/-/cyclist-0.2.2.tgz#1b33792e11e914a2fd6d6ed6447464444e5fa640" - integrity sha1-GzN5LhHpFKL9bW7WRHRkRE5fpkA= - -damerau-levenshtein@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.5.tgz#780cf7144eb2e8dbd1c3bb83ae31100ccc31a414" - integrity sha512-CBCRqFnpu715iPmw1KrdOrzRqbdFwQTwAWyyyYS42+iAgHCuXZ+/TdMgQkUENPomxEz9z1BEzuQU2Xw0kUuAgA== - -dashdash@^1.12.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0" - integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA= - dependencies: - assert-plus "^1.0.0" - -data-urls@^1.0.0, data-urls@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/data-urls/-/data-urls-1.1.0.tgz#15ee0582baa5e22bb59c77140da8f9c76963bbfe" - integrity sha512-YTWYI9se1P55u58gL5GkQHW4P6VJBJ5iBT+B5a7i2Tjadhv52paJG0qHX4A0OR6/t52odI64KP2YvFpkDOi3eQ== - dependencies: - abab "^2.0.0" - whatwg-mimetype "^2.2.0" - whatwg-url "^7.0.0" - -date-now@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b" - integrity sha1-6vQ5/U1ISK105cx9vvIAZyueNFs= - -debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8, debug@^2.6.9: - version "2.6.9" - resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" - integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== - dependencies: - ms "2.0.0" - -debug@^3.0.0, debug@^3.2.5, debug@^3.2.6: - version "3.2.6" - resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b" - integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ== - dependencies: - ms "^2.1.1" - -debug@^4.0.1, debug@^4.1.0, debug@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" - integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== - dependencies: - ms "^2.1.1" - -decamelize@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" - integrity 
sha1-9lNNFRSCabIDUue+4m9QH5oZEpA= - -decamelize@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-2.0.0.tgz#656d7bbc8094c4c788ea53c5840908c9c7d063c7" - integrity sha512-Ikpp5scV3MSYxY39ymh45ZLEecsTdv/Xj2CaQfI8RLMuwi7XvjX9H/fhraiSuU+C5w5NTDu4ZU72xNiZnurBPg== - dependencies: - xregexp "4.0.0" - -decode-uri-component@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" - integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= - -deep-equal@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/deep-equal/-/deep-equal-1.1.0.tgz#3103cdf8ab6d32cf4a8df7865458f2b8d33f3745" - integrity sha512-ZbfWJq/wN1Z273o7mUSjILYqehAktR2NVoSrOukDkU9kg2v/Uv89yU4Cvz8seJeAmtN5oqiefKq8FPuXOboqLw== - dependencies: - is-arguments "^1.0.4" - is-date-object "^1.0.1" - is-regex "^1.0.4" - object-is "^1.0.1" - object-keys "^1.1.1" - regexp.prototype.flags "^1.2.0" - -deep-extend@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" - integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== - -deep-is@~0.1.3: - version "0.1.3" - resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" - integrity sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ= - -default-gateway@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/default-gateway/-/default-gateway-4.2.0.tgz#167104c7500c2115f6dd69b0a536bb8ed720552b" - integrity sha512-h6sMrVB1VMWVrW13mSc6ia/DwYYw5MN6+exNu1OaJeFac5aSAvwM7lZ0NVfTABuSkQelr4h5oebg3KB1XPdjgA== - dependencies: - execa "^1.0.0" - ip-regex "^2.1.0" - -define-properties@^1.1.2, define-properties@^1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.1.3.tgz#cf88da6cbee26fe6db7094f61d870cbd84cee9f1" - integrity sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ== - dependencies: - object-keys "^1.0.12" - -define-property@^0.2.5: - version "0.2.5" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-0.2.5.tgz#c35b1ef918ec3c990f9a5bc57be04aacec5c8116" - integrity sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY= - dependencies: - is-descriptor "^0.1.0" - -define-property@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-1.0.0.tgz#769ebaaf3f4a63aad3af9e8d304c9bbe79bfb0e6" - integrity sha1-dp66rz9KY6rTr56NMEybvnm/sOY= - dependencies: - is-descriptor "^1.0.0" - -define-property@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/define-property/-/define-property-2.0.2.tgz#d459689e8d654ba77e02a817f8710d702cb16e9d" - integrity sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ== - dependencies: - is-descriptor "^1.0.2" - isobject "^3.0.1" - -del@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/del/-/del-3.0.0.tgz#53ecf699ffcbcb39637691ab13baf160819766e5" - integrity sha1-U+z2mf/LyzljdpGrE7rxYIGXZuU= - dependencies: - globby "^6.1.0" - is-path-cwd "^1.0.0" - is-path-in-cwd "^1.0.0" - p-map "^1.1.1" - pify "^3.0.0" - rimraf "^2.2.8" - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= - -delegates@^1.0.0: - 
version "1.0.0" - resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a" - integrity sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o= - -depd@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" - integrity sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak= - -des.js@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/des.js/-/des.js-1.0.0.tgz#c074d2e2aa6a8a9a07dbd61f9a15c2cd83ec8ecc" - integrity sha1-wHTS4qpqipoH29YfmhXCzYPsjsw= - dependencies: - inherits "^2.0.1" - minimalistic-assert "^1.0.0" - -destroy@~1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80" - integrity sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA= - -detect-libc@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b" - integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups= - -detect-newline@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-2.1.0.tgz#f41f1c10be4b00e87b5f13da680759f2c5bfd3e2" - integrity sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I= - -detect-node@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/detect-node/-/detect-node-2.0.4.tgz#014ee8f8f669c5c58023da64b8179c083a28c46c" - integrity sha512-ZIzRpLJrOj7jjP2miAtgqIfmzbxa4ZOr5jJc601zklsfEx9oTzmmj2nVpIPRpNlRTIh8lc1kyViIY7BWSGNmKw== - -detect-port-alt@1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/detect-port-alt/-/detect-port-alt-1.1.6.tgz#24707deabe932d4a3cf621302027c2b266568275" - integrity sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q== - dependencies: - address "^1.0.1" - debug "^2.6.0" - -diff-sequences@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-24.9.0.tgz#5715d6244e2aa65f48bba0bc972db0b0b11e95b5" - integrity sha512-Dj6Wk3tWyTE+Fo1rW8v0Xhwk80um6yFYKbuAxc9c3EZxIHFDYwbi34Uk42u1CdnIiVorvt4RmlSDjIPyzGC2ew== - -diffie-hellman@^5.0.0: - version "5.0.3" - resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" - integrity sha512-kqag/Nl+f3GwyK25fhUMYj81BUOrZ9IuJsjIcDE5icNM9FJHAVm3VcUDxdLPoQtTuUylWm6ZIknYJwwaPxsUzg== - dependencies: - bn.js "^4.1.0" - miller-rabin "^4.0.0" - randombytes "^2.0.0" - -dir-glob@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-2.0.0.tgz#0b205d2b6aef98238ca286598a8204d29d0a0034" - integrity sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag== - dependencies: - arrify "^1.0.1" - path-type "^3.0.0" - -dns-equal@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/dns-equal/-/dns-equal-1.0.0.tgz#b39e7f1da6eb0a75ba9c17324b34753c47e0654d" - integrity sha1-s55/HabrCnW6nBcySzR1PEfgZU0= - -dns-packet@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/dns-packet/-/dns-packet-1.3.1.tgz#12aa426981075be500b910eedcd0b47dd7deda5a" - integrity sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg== - dependencies: - ip "^1.1.0" - safe-buffer "^5.0.1" - -dns-txt@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/dns-txt/-/dns-txt-2.0.2.tgz#b91d806f5d27188e4ab3e7d107d881a1cc4642b6" - integrity sha1-uR2Ab10nGI5Ks+fRB9iBocxGQrY= - dependencies: - buffer-indexof "^1.0.0" - 
-doctrine@1.5.0: - version "1.5.0" - resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-1.5.0.tgz#379dce730f6166f76cefa4e6707a159b02c5a6fa" - integrity sha1-N53Ocw9hZvds76TmcHoVmwLFpvo= - dependencies: - esutils "^2.0.2" - isarray "^1.0.0" - -doctrine@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" - integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw== - dependencies: - esutils "^2.0.2" - -doctrine@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" - integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== - dependencies: - esutils "^2.0.2" - -dom-align@^1.7.0: - version "1.10.2" - resolved "https://registry.yarnpkg.com/dom-align/-/dom-align-1.10.2.tgz#540ea1c9e20462bd11b9fc28c561dc8351ece4c6" - integrity sha512-AYZUzLepy05E9bCY4ExoqHrrIlM49PEak9oF93JEFoibqKL0F7w5DLM70/rosLOawerWZ3MlepQcl+EmHskOyw== - -dom-closest@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/dom-closest/-/dom-closest-0.2.0.tgz#ebd9f91d1bf22e8d6f477876bbcd3ec90216c0cf" - integrity sha1-69n5HRvyLo1vR3h2u80+yQIWwM8= - dependencies: - dom-matches ">=1.0.1" - -dom-converter@^0.2: - version "0.2.0" - resolved "https://registry.yarnpkg.com/dom-converter/-/dom-converter-0.2.0.tgz#6721a9daee2e293682955b6afe416771627bb768" - integrity sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA== - dependencies: - utila "~0.4" - -dom-matches@>=1.0.1: - version "2.0.0" - resolved "https://registry.yarnpkg.com/dom-matches/-/dom-matches-2.0.0.tgz#d2728b416a87533980eb089b848d253cf23a758c" - integrity sha1-0nKLQWqHUzmA6wibhI0lPPI6dYw= - -dom-scroll-into-view@1.x, dom-scroll-into-view@^1.2.0, dom-scroll-into-view@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/dom-scroll-into-view/-/dom-scroll-into-view-1.2.1.tgz#e8f36732dd089b0201a88d7815dc3f88e6d66c7e" - integrity sha1-6PNnMt0ImwIBqI14Fdw/iObWbH4= - -dom-serializer@0: - version "0.2.1" - resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.2.1.tgz#13650c850daffea35d8b626a4cfc4d3a17643fdb" - integrity sha512-sK3ujri04WyjwQXVoK4PU3y8ula1stq10GJZpqHIUgoGZdsGzAGu65BnU3d08aTVSvO7mGPZUc0wTEDL+qGE0Q== - dependencies: - domelementtype "^2.0.1" - entities "^2.0.0" - -domain-browser@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/domain-browser/-/domain-browser-1.2.0.tgz#3d31f50191a6749dd1375a7f522e823d42e54eda" - integrity sha512-jnjyiM6eRyZl2H+W8Q/zLMA481hzi0eszAaBUzIVnmYVDBbnLxVNnfu1HgEBvCbL+71FrxMl3E6lpKH7Ge3OXA== - -domelementtype@1, domelementtype@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.1.tgz#d048c44b37b0d10a7f2a3d5fee3f4333d790481f" - integrity sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w== - -domelementtype@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.0.1.tgz#1f8bdfe91f5a78063274e803b4bdcedf6e94f94d" - integrity sha512-5HOHUDsYZWV8FGWN0Njbr/Rn7f/eWSQi1v7+HsUVwXgn8nWWlL64zKDkS0n8ZmQ3mlWOMuXOnR+7Nx/5tMO5AQ== - -domexception@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/domexception/-/domexception-1.0.1.tgz#937442644ca6a31261ef36e3ec677fe805582c90" - integrity 
sha512-raigMkn7CJNNo6Ihro1fzG7wr3fHuYVytzquZKX5n0yizGsTcYgzdIUwj1X9pK0VvjeihV+XiclP+DjwbsSKug== - dependencies: - webidl-conversions "^4.0.2" - -domhandler@^2.3.0: - version "2.4.2" - resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.4.2.tgz#8805097e933d65e85546f726d60f5eb88b44f803" - integrity sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA== - dependencies: - domelementtype "1" - -domutils@1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf" - integrity sha1-3NhIiib1Y9YQeeSMn3t+Mjc2gs8= - dependencies: - dom-serializer "0" - domelementtype "1" - -domutils@^1.5.1, domutils@^1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.7.0.tgz#56ea341e834e06e6748af7a1cb25da67ea9f8c2a" - integrity sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg== - dependencies: - dom-serializer "0" - domelementtype "1" - -dot-prop@^4.1.1: - version "4.2.0" - resolved "https://registry.yarnpkg.com/dot-prop/-/dot-prop-4.2.0.tgz#1f19e0c2e1aa0e32797c49799f2837ac6af69c57" - integrity sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ== - dependencies: - is-obj "^1.0.0" - -dotenv-expand@4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/dotenv-expand/-/dotenv-expand-4.2.0.tgz#def1f1ca5d6059d24a766e587942c21106ce1275" - integrity sha1-3vHxyl1gWdJKdm5YeULCEQbOEnU= - -dotenv@6.2.0, dotenv@^6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-6.2.0.tgz#941c0410535d942c8becf28d3f357dbd9d476064" - integrity sha512-HygQCKUBSFl8wKQZBSemMywRWcEDNidvNbjGVyZu3nbZ8qq9ubiPoGLMdRDpfSrpkkm9BXYFkpKxxFX38o/76w== - -draft-js@^0.10.0, draft-js@~0.10.0: - version "0.10.5" - resolved "https://registry.yarnpkg.com/draft-js/-/draft-js-0.10.5.tgz#bfa9beb018fe0533dbb08d6675c371a6b08fa742" - integrity sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg== - dependencies: - fbjs "^0.8.15" - immutable "~3.7.4" - object-assign "^4.1.0" - -duplexer@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.1.tgz#ace6ff808c1ce66b57d1ebf97977acb02334cfc1" - integrity sha1-rOb/gIwc5mtX0ev5eXessCM0z8E= - -duplexify@^3.4.2, duplexify@^3.6.0: - version "3.7.1" - resolved "https://registry.yarnpkg.com/duplexify/-/duplexify-3.7.1.tgz#2a4df5317f6ccfd91f86d6fd25d8d8a103b88309" - integrity sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g== - dependencies: - end-of-stream "^1.0.0" - inherits "^2.0.1" - readable-stream "^2.0.0" - stream-shift "^1.0.0" - -ecc-jsbn@~0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" - integrity sha1-OoOpBOVDUyh4dMVkt1SThoSamMk= - dependencies: - jsbn "~0.1.0" - safer-buffer "^2.1.0" - -ee-first@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" - integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= - -electron-to-chromium@^1.3.191: - version "1.3.243" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.243.tgz#32f64f00fa121532d1d49f5c0a15fd77f52ae889" - integrity sha512-+edFdHGxLSmAKftXa5xZIg19rHkkJLiW+tRu0VMVG3RKztyeKX7d3pXf707lS6+BxB9uBun3RShbxCI1PtBAgQ== - -elliptic@^6.0.0: - version "6.5.0" - resolved 
"https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.0.tgz#2b8ed4c891b7de3200e14412a5b8248c7af505ca" - integrity sha512-eFOJTMyCYb7xtE/caJ6JJu+bhi67WCYNbkGSknu20pmM8Ke/bqOfdnZWxyoGN26JgfxTbXrsCkEw4KheCT/KGg== - dependencies: - bn.js "^4.4.0" - brorand "^1.0.1" - hash.js "^1.0.0" - hmac-drbg "^1.0.0" - inherits "^2.0.1" - minimalistic-assert "^1.0.0" - minimalistic-crypto-utils "^1.0.0" - -emoji-regex@^7.0.1, emoji-regex@^7.0.2: - version "7.0.3" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" - integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== - -emojis-list@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/emojis-list/-/emojis-list-2.1.0.tgz#4daa4d9db00f9819880c79fa457ae5b09a1fd389" - integrity sha1-TapNnbAPmBmIDHn6RXrlsJof04k= - -encodeurl@~1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" - integrity sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k= - -encoding@^0.1.11: - version "0.1.12" - resolved "https://registry.yarnpkg.com/encoding/-/encoding-0.1.12.tgz#538b66f3ee62cd1ab51ec323829d1f9480c74beb" - integrity sha1-U4tm8+5izRq1HsMjgp0flIDHS+s= - dependencies: - iconv-lite "~0.4.13" - -end-of-stream@^1.0.0, end-of-stream@^1.1.0: - version "1.4.1" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43" - integrity sha512-1MkrZNvWTKCaigbn+W15elq2BB/L22nqrSY5DKlo3X6+vclJm8Bb5djXJBmEX6fS3+zCh/F4VBK5Z2KxJt4s2Q== - dependencies: - once "^1.4.0" - -enhanced-resolve@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.1.0.tgz#41c7e0bfdfe74ac1ffe1e57ad6a5c6c9f3742a7f" - integrity sha512-F/7vkyTtyc/llOIn8oWclcB25KdRaiPBpZYDgJHgh/UHtpgT2p2eldQgtQnLtUvfMKPKxbRaQM/hHkvLHt1Vng== - dependencies: - graceful-fs "^4.1.2" - memory-fs "^0.4.0" - tapable "^1.0.0" - -enquire.js@^2.1.6: - version "2.1.6" - resolved "https://registry.yarnpkg.com/enquire.js/-/enquire.js-2.1.6.tgz#3e8780c9b8b835084c3f60e166dbc3c2a3c89814" - integrity sha1-PoeAybi4NQhMP2DhZtvDwqPImBQ= - -entities@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56" - integrity sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w== - -entities@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/entities/-/entities-2.0.0.tgz#68d6084cab1b079767540d80e56a39b423e4abf4" - integrity sha512-D9f7V0JSRwIxlRI2mjMqufDrRDnx8p+eEOz7aUM9SuvF8gsBzra0/6tbjl1m8eQHrZlYj6PxqE00hZ1SAIKPLw== - -errno@^0.1.1, errno@^0.1.3, errno@~0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.7.tgz#4684d71779ad39af177e3f007996f7c67c852618" - integrity sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg== - dependencies: - prr "~1.0.1" - -error-ex@^1.2.0, error-ex@^1.3.1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" - integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== - dependencies: - is-arrayish "^0.2.1" - -es-abstract@^1.11.0, es-abstract@^1.12.0, es-abstract@^1.5.1, es-abstract@^1.7.0: - version "1.13.0" - resolved 
"https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.13.0.tgz#ac86145fdd5099d8dd49558ccba2eaf9b88e24e9" - integrity sha512-vDZfg/ykNxQVwup/8E1BZhVzFfBxs9NqMzGcvIJrqg5k2/5Za2bWo40dK2J1pgLngZ7c+Shh8lwYtLGyrwPutg== - dependencies: - es-to-primitive "^1.2.0" - function-bind "^1.1.1" - has "^1.0.3" - is-callable "^1.1.4" - is-regex "^1.0.4" - object-keys "^1.0.12" - -es-to-primitive@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.0.tgz#edf72478033456e8dda8ef09e00ad9650707f377" - integrity sha512-qZryBOJjV//LaxLTV6UC//WewneB3LcXOL9NP++ozKVXsIIIpm/2c13UDiD9Jp2eThsecw9m3jPqDwTyobcdbg== - dependencies: - is-callable "^1.1.4" - is-date-object "^1.0.1" - is-symbol "^1.0.2" - -escape-html@~1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" - integrity sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg= - -escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= - -escodegen@^1.11.0, escodegen@^1.9.1: - version "1.12.0" - resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-1.12.0.tgz#f763daf840af172bb3a2b6dd7219c0e17f7ff541" - integrity sha512-TuA+EhsanGcme5T3R0L80u4t8CpbXQjegRmf7+FPTJrtCTErXFeelblRgHQa1FofEzqYYJmJ/OqjTwREp9qgmg== - dependencies: - esprima "^3.1.3" - estraverse "^4.2.0" - esutils "^2.0.2" - optionator "^0.8.1" - optionalDependencies: - source-map "~0.6.1" - -eslint-config-react-app@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/eslint-config-react-app/-/eslint-config-react-app-4.0.1.tgz#23fd0fd7ea89442ef1e733f66a7207674b23c8db" - integrity sha512-ZsaoXUIGsK8FCi/x4lT2bZR5mMkL/Kgj+Lnw690rbvvUr/uiwgFiD8FcfAhkCycm7Xte6O5lYz4EqMx2vX7jgw== - dependencies: - confusing-browser-globals "^1.0.7" - -eslint-import-resolver-node@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.2.tgz#58f15fb839b8d0576ca980413476aab2472db66a" - integrity sha512-sfmTqJfPSizWu4aymbPr4Iidp5yKm8yDkHp+Ir3YiTHiiDfxh69mOUsmiqW6RZ9zRXFaF64GtYmN7e+8GHBv6Q== - dependencies: - debug "^2.6.9" - resolve "^1.5.0" - -eslint-loader@2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/eslint-loader/-/eslint-loader-2.1.2.tgz#453542a1230d6ffac90e4e7cb9cadba9d851be68" - integrity sha512-rA9XiXEOilLYPOIInvVH5S/hYfyTPyxag6DZhoQOduM+3TkghAEQ3VcFO8VnX4J4qg/UIBzp72aOf/xvYmpmsg== - dependencies: - loader-fs-cache "^1.0.0" - loader-utils "^1.0.2" - object-assign "^4.0.1" - object-hash "^1.1.4" - rimraf "^2.6.1" - -eslint-module-utils@^2.3.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.4.1.tgz#7b4675875bf96b0dbf1b21977456e5bb1f5e018c" - integrity sha512-H6DOj+ejw7Tesdgbfs4jeS4YMFrT8uI8xwd1gtQqXssaR0EQ26L+2O/w6wkYFy2MymON0fTwHmXBvvfLNZVZEw== - dependencies: - debug "^2.6.8" - pkg-dir "^2.0.0" - -eslint-plugin-flowtype@2.50.1: - version "2.50.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-flowtype/-/eslint-plugin-flowtype-2.50.1.tgz#36d4c961ac8b9e9e1dc091d3fba0537dad34ae8a" - integrity sha512-9kRxF9hfM/O6WGZcZPszOVPd2W0TLHBtceulLTsGfwMPtiCCLnCW0ssRiOOiXyqrCA20pm1iXdXm7gQeN306zQ== - dependencies: - lodash "^4.17.10" - -eslint-plugin-import@2.16.0: - version "2.16.0" - resolved 
"https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.16.0.tgz#97ac3e75d0791c4fac0e15ef388510217be7f66f" - integrity sha512-z6oqWlf1x5GkHIFgrSvtmudnqM6Q60KM4KvpWi5ubonMjycLjndvd5+8VAZIsTlHC03djdgJuyKG6XO577px6A== - dependencies: - contains-path "^0.1.0" - debug "^2.6.9" - doctrine "1.5.0" - eslint-import-resolver-node "^0.3.2" - eslint-module-utils "^2.3.0" - has "^1.0.3" - lodash "^4.17.11" - minimatch "^3.0.4" - read-pkg-up "^2.0.0" - resolve "^1.9.0" - -eslint-plugin-jsx-a11y@6.2.1: - version "6.2.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.2.1.tgz#4ebba9f339b600ff415ae4166e3e2e008831cf0c" - integrity sha512-cjN2ObWrRz0TTw7vEcGQrx+YltMvZoOEx4hWU8eEERDnBIU00OTq7Vr+jA7DFKxiwLNv4tTh5Pq2GUNEa8b6+w== - dependencies: - aria-query "^3.0.0" - array-includes "^3.0.3" - ast-types-flow "^0.0.7" - axobject-query "^2.0.2" - damerau-levenshtein "^1.0.4" - emoji-regex "^7.0.2" - has "^1.0.3" - jsx-ast-utils "^2.0.1" - -eslint-plugin-react-hooks@^1.5.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-1.7.0.tgz#6210b6d5a37205f0b92858f895a4e827020a7d04" - integrity sha512-iXTCFcOmlWvw4+TOE8CLWj6yX1GwzT0Y6cUfHHZqWnSk144VmVIRcVGtUAzrLES7C798lmvnt02C7rxaOX1HNA== - -eslint-plugin-react@7.12.4: - version "7.12.4" - resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.12.4.tgz#b1ecf26479d61aee650da612e425c53a99f48c8c" - integrity sha512-1puHJkXJY+oS1t467MjbqjvX53uQ05HXwjqDgdbGBqf5j9eeydI54G3KwiJmWciQ0HTBacIKw2jgwSBSH3yfgQ== - dependencies: - array-includes "^3.0.3" - doctrine "^2.1.0" - has "^1.0.3" - jsx-ast-utils "^2.0.1" - object.fromentries "^2.0.0" - prop-types "^15.6.2" - resolve "^1.9.0" - -eslint-scope@3.7.1: - version "3.7.1" - resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-3.7.1.tgz#3d63c3edfda02e06e01a452ad88caacc7cdcb6e8" - integrity sha1-PWPD7f2gLgbgGkUq2IyqzHzctug= - dependencies: - esrecurse "^4.1.0" - estraverse "^4.1.1" - -eslint-scope@^4.0.0, eslint-scope@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" - integrity sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg== - dependencies: - esrecurse "^4.1.0" - estraverse "^4.1.1" - -eslint-utils@^1.3.1: - version "1.4.2" - resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-1.4.2.tgz#166a5180ef6ab7eb462f162fd0e6f2463d7309ab" - integrity sha512-eAZS2sEUMlIeCjBeubdj45dmBHQwPHWyBcT1VSYB7o9x9WRRqKxyUoiXlRjyAwzN7YEzHJlYg0NmzDRWx6GP4Q== - dependencies: - eslint-visitor-keys "^1.0.0" - -eslint-visitor-keys@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz#e2a82cea84ff246ad6fb57f9bde5b46621459ec2" - integrity sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A== - -eslint@^5.16.0: - version "5.16.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-5.16.0.tgz#a1e3ac1aae4a3fbd8296fcf8f7ab7314cbb6abea" - integrity sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg== - dependencies: - "@babel/code-frame" "^7.0.0" - ajv "^6.9.1" - chalk "^2.1.0" - cross-spawn "^6.0.5" - debug "^4.0.1" - doctrine "^3.0.0" - eslint-scope "^4.0.3" - eslint-utils "^1.3.1" - eslint-visitor-keys "^1.0.0" - espree "^5.0.1" - esquery "^1.0.1" - esutils "^2.0.2" - file-entry-cache "^5.0.1" 
- functional-red-black-tree "^1.0.1" - glob "^7.1.2" - globals "^11.7.0" - ignore "^4.0.6" - import-fresh "^3.0.0" - imurmurhash "^0.1.4" - inquirer "^6.2.2" - js-yaml "^3.13.0" - json-stable-stringify-without-jsonify "^1.0.1" - levn "^0.3.0" - lodash "^4.17.11" - minimatch "^3.0.4" - mkdirp "^0.5.1" - natural-compare "^1.4.0" - optionator "^0.8.2" - path-is-inside "^1.0.2" - progress "^2.0.0" - regexpp "^2.0.1" - semver "^5.5.1" - strip-ansi "^4.0.0" - strip-json-comments "^2.0.1" - table "^5.2.3" - text-table "^0.2.0" - -espree@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/espree/-/espree-5.0.1.tgz#5d6526fa4fc7f0788a5cf75b15f30323e2f81f7a" - integrity sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A== - dependencies: - acorn "^6.0.7" - acorn-jsx "^5.0.0" - eslint-visitor-keys "^1.0.0" - -esprima@^3.1.3: - version "3.1.3" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633" - integrity sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM= - -esprima@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" - integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== - -esquery@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.0.1.tgz#406c51658b1f5991a5f9b62b1dc25b00e3e5c708" - integrity sha512-SmiyZ5zIWH9VM+SRUReLS5Q8a7GxtRdxEBVZpm98rJM7Sb+A9DVCndXfkeFUd3byderg+EbDkfnevfCwynWaNA== - dependencies: - estraverse "^4.0.0" - -esrecurse@^4.1.0: - version "4.2.1" - resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.2.1.tgz#007a3b9fdbc2b3bb87e4879ea19c92fdbd3942cf" - integrity sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ== - dependencies: - estraverse "^4.1.0" - -estraverse@^4.0.0, estraverse@^4.1.0, estraverse@^4.1.1, estraverse@^4.2.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" - integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== - -esutils@^2.0.0, esutils@^2.0.2: - version "2.0.3" - resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" - integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== - -etag@~1.8.1: - version "1.8.1" - resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" - integrity sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc= - -eventemitter3@^3.0.0: - version "3.1.2" - resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-3.1.2.tgz#2d3d48f9c346698fce83a85d7d664e98535df6e7" - integrity sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q== - -eventlistener@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/eventlistener/-/eventlistener-0.0.1.tgz#ed2baabb852227af2bcf889152c72c63ca532eb8" - integrity sha1-7Suqu4UiJ68rz4iRUscsY8pTLrg= - -events@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/events/-/events-3.0.0.tgz#9a0a0dfaf62893d92b875b8f2698ca4114973e88" - integrity sha512-Dc381HFWJzEOhQ+d8pkNon++bk9h6cdAoAj4iE6Q4y6xgTzySWXlKn05/TVNpjnfRqi/X0EpJEJohPjNI3zpVA== - -eventsource@^1.0.7: - version "1.0.7" - resolved 
"https://registry.yarnpkg.com/eventsource/-/eventsource-1.0.7.tgz#8fbc72c93fcd34088090bc0a4e64f4b5cee6d8d0" - integrity sha512-4Ln17+vVT0k8aWq+t/bF5arcS3EpT9gYtW66EPacdj/mAFevznsnyoHLPy2BA8gbIQeIHoPsvwmfBftfcG//BQ== - dependencies: - original "^1.0.0" - -evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" - integrity sha512-/f2Go4TognH/KvCISP7OUsHn85hT9nUkxxA9BEWxFn+Oj9o8ZNLm/40hdlgSLyuOimsrTKLUMEorQexp/aPQeA== - dependencies: - md5.js "^1.3.4" - safe-buffer "^5.1.1" - -exec-sh@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.3.2.tgz#6738de2eb7c8e671d0366aea0b0db8c6f7d7391b" - integrity sha512-9sLAvzhI5nc8TpuQUh4ahMdCrWT00wPWz7j47/emR5+2qEfoZP5zzUXvx+vdx+H6ohhnsYC31iX04QLYJK8zTg== - -execa@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/execa/-/execa-1.0.0.tgz#c6236a5bb4df6d6f15e88e7f017798216749ddd8" - integrity sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA== - dependencies: - cross-spawn "^6.0.0" - get-stream "^4.0.0" - is-stream "^1.1.0" - npm-run-path "^2.0.0" - p-finally "^1.0.0" - signal-exit "^3.0.0" - strip-eof "^1.0.0" - -exit@^0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" - integrity sha1-BjJjj42HfMghB9MKD/8aF8uhzQw= - -expand-brackets@^2.1.4: - version "2.1.4" - resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-2.1.4.tgz#b77735e315ce30f6b6eff0f83b04151a22449622" - integrity sha1-t3c14xXOMPa27/D4OwQVGiJEliI= - dependencies: - debug "^2.3.3" - define-property "^0.2.5" - extend-shallow "^2.0.1" - posix-character-classes "^0.1.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" - -expect@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/expect/-/expect-24.9.0.tgz#b75165b4817074fa4a157794f46fe9f1ba15b6ca" - integrity sha512-wvVAx8XIol3Z5m9zvZXiyZOQ+sRJqNTIm6sGjdWlaZIeupQGO3WbYI+15D/AmEwZywL6wtJkbAbJtzkOfBuR0Q== - dependencies: - "@jest/types" "^24.9.0" - ansi-styles "^3.2.0" - jest-get-type "^24.9.0" - jest-matcher-utils "^24.9.0" - jest-message-util "^24.9.0" - jest-regex-util "^24.9.0" - -express@^4.16.2: - version "4.17.1" - resolved "https://registry.yarnpkg.com/express/-/express-4.17.1.tgz#4491fc38605cf51f8629d39c2b5d026f98a4c134" - integrity sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g== - dependencies: - accepts "~1.3.7" - array-flatten "1.1.1" - body-parser "1.19.0" - content-disposition "0.5.3" - content-type "~1.0.4" - cookie "0.4.0" - cookie-signature "1.0.6" - debug "2.6.9" - depd "~1.1.2" - encodeurl "~1.0.2" - escape-html "~1.0.3" - etag "~1.8.1" - finalhandler "~1.1.2" - fresh "0.5.2" - merge-descriptors "1.0.1" - methods "~1.1.2" - on-finished "~2.3.0" - parseurl "~1.3.3" - path-to-regexp "0.1.7" - proxy-addr "~2.0.5" - qs "6.7.0" - range-parser "~1.2.1" - safe-buffer "5.1.2" - send "0.17.1" - serve-static "1.14.1" - setprototypeof "1.1.1" - statuses "~1.5.0" - type-is "~1.6.18" - utils-merge "1.0.1" - vary "~1.1.2" - -extend-shallow@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-2.0.1.tgz#51af7d614ad9a9f610ea1bafbb989d6b1c56890f" - integrity sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8= - dependencies: - is-extendable "^0.1.0" - -extend-shallow@^3.0.0, extend-shallow@^3.0.2: - version "3.0.2" - resolved 
"https://registry.yarnpkg.com/extend-shallow/-/extend-shallow-3.0.2.tgz#26a71aaf073b39fb2127172746131c2704028db8" - integrity sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg= - dependencies: - assign-symbols "^1.0.0" - is-extendable "^1.0.1" - -extend@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" - integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g== - -external-editor@^3.0.3: - version "3.1.0" - resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" - integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== - dependencies: - chardet "^0.7.0" - iconv-lite "^0.4.24" - tmp "^0.0.33" - -extglob@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/extglob/-/extglob-2.0.4.tgz#ad00fe4dc612a9232e8718711dc5cb5ab0285543" - integrity sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw== - dependencies: - array-unique "^0.3.2" - define-property "^1.0.0" - expand-brackets "^2.1.4" - extend-shallow "^2.0.1" - fragment-cache "^0.2.1" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" - -extsprintf@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" - integrity sha1-lpGEQOMEGnpBT4xS48V06zw+HgU= - -extsprintf@^1.2.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" - integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= - -fast-deep-equal@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" - integrity sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk= - -fast-glob@^2.0.2: - version "2.2.7" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.7.tgz#6953857c3afa475fff92ee6015d52da70a4cd39d" - integrity sha512-g1KuQwHOZAmOZMuBtHdxDtju+T2RT8jgCC9aANsbpdiDDTSnjgfuVsIBNKbUeJI3oKMRExcfNDtJl4OhbffMsw== - dependencies: - "@mrmlnc/readdir-enhanced" "^2.2.1" - "@nodelib/fs.stat" "^1.1.2" - glob-parent "^3.1.0" - is-glob "^4.0.0" - merge2 "^1.2.3" - micromatch "^3.1.10" - -fast-json-stable-stringify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz#d5142c0caee6b1189f87d3a76111064f86c8bbf2" - integrity sha1-1RQsDK7msRifh9OnYREGT4bIu/I= - -fast-levenshtein@~2.0.4: - version "2.0.6" - resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" - integrity sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc= - -faye-websocket@^0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" - integrity sha1-TkkvjQTftviQA1B/btvy1QHnxvQ= - dependencies: - websocket-driver ">=0.5.1" - -faye-websocket@~0.11.1: - version "0.11.3" - resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.3.tgz#5c0e9a8968e8912c286639fde977a8b209f2508e" - integrity sha512-D2y4bovYpzziGgbHYtGCMjlJM36vAl/y+xUyn1C+FVx8szd1E+86KwVw6XvYSzOP8iMpm1X0I4xJD+QtUb36OA== - dependencies: - websocket-driver ">=0.5.1" - -fb-watchman@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.0.tgz#54e9abf7dfa2f26cd9b1636c588c1afc05de5d58" - integrity sha1-VOmr99+i8mzZsWNsWIwa/AXeXVg= - dependencies: - bser "^2.0.0" - -fbjs@^0.8.15, fbjs@^0.8.16, fbjs@^0.8.9: - version "0.8.17" - resolved "https://registry.yarnpkg.com/fbjs/-/fbjs-0.8.17.tgz#c4d598ead6949112653d6588b01a5cdcd9f90fdd" - integrity sha1-xNWY6taUkRJlPWWIsBpc3Nn5D90= - dependencies: - core-js "^1.0.0" - isomorphic-fetch "^2.1.1" - loose-envify "^1.0.0" - object-assign "^4.1.0" - promise "^7.1.1" - setimmediate "^1.0.5" - ua-parser-js "^0.7.18" - -figgy-pudding@^3.5.1: - version "3.5.1" - resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.1.tgz#862470112901c727a0e495a80744bd5baa1d6790" - integrity sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w== - -figures@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" - integrity sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI= - dependencies: - escape-string-regexp "^1.0.5" - -file-entry-cache@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-5.0.1.tgz#ca0f6efa6dd3d561333fb14515065c2fafdf439c" - integrity sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g== - dependencies: - flat-cache "^2.0.1" - -file-loader@3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/file-loader/-/file-loader-3.0.1.tgz#f8e0ba0b599918b51adfe45d66d1e771ad560faa" - integrity sha512-4sNIOXgtH/9WZq4NvlfU3Opn5ynUsqBwSLyM+I7UOwdGigTBYfVVQEwe/msZNX/j4pCJTIM14Fsw66Svo1oVrw== - dependencies: - loader-utils "^1.0.2" - schema-utils "^1.0.0" - -filesize@3.6.1: - version "3.6.1" - resolved "https://registry.yarnpkg.com/filesize/-/filesize-3.6.1.tgz#090bb3ee01b6f801a8a8be99d31710b3422bb317" - integrity sha512-7KjR1vv6qnicaPMi1iiTcI85CyYwRO/PSFCu6SvqL8jN2Wjt/NIYQTFtFs7fSDCYOstUkEWIQGFUg5YZQfjlcg== - -fill-range@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-4.0.0.tgz#d544811d428f98eb06a63dc402d2403c328c38f7" - integrity sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc= - dependencies: - extend-shallow "^2.0.1" - is-number "^3.0.0" - repeat-string "^1.6.1" - to-regex-range "^2.1.0" - -finalhandler@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d" - integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA== - dependencies: - debug "2.6.9" - encodeurl "~1.0.2" - escape-html "~1.0.3" - on-finished "~2.3.0" - parseurl "~1.3.3" - statuses "~1.5.0" - unpipe "~1.0.0" - -find-cache-dir@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-0.1.1.tgz#c8defae57c8a52a8a784f9e31c57c742e993a0b9" - integrity sha1-yN765XyKUqinhPnjHFfHQumToLk= - dependencies: - commondir "^1.0.1" - mkdirp "^0.5.1" - pkg-dir "^1.0.0" - -find-cache-dir@^2.0.0, find-cache-dir@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7" - integrity sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ== - dependencies: - commondir "^1.0.1" - make-dir "^2.0.0" - pkg-dir "^3.0.0" - -find-up@3.0.0, find-up@^3.0.0: - version "3.0.0" - resolved 
"https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" - integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== - dependencies: - locate-path "^3.0.0" - -find-up@^1.0.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-1.1.2.tgz#6b2e9822b1a2ce0a60ab64d610eccad53cb24d0f" - integrity sha1-ay6YIrGizgpgq2TWEOzK1TyyTQ8= - dependencies: - path-exists "^2.0.0" - pinkie-promise "^2.0.0" - -find-up@^2.0.0, find-up@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-2.1.0.tgz#45d1b7e506c717ddd482775a2b77920a3c0c57a7" - integrity sha1-RdG35QbHF93UgndaK3eSCjwMV6c= - dependencies: - locate-path "^2.0.0" - -flat-cache@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-2.0.1.tgz#5d296d6f04bda44a4630a301413bdbc2ec085ec0" - integrity sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA== - dependencies: - flatted "^2.0.0" - rimraf "2.6.3" - write "1.0.3" - -flatted@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/flatted/-/flatted-2.0.1.tgz#69e57caa8f0eacbc281d2e2cb458d46fdb449e08" - integrity sha512-a1hQMktqW9Nmqr5aktAux3JMNqaucxGcjtjWnZLHX7yyPCmlSV3M54nGYbqT8K+0GhF3NBgmJCc3ma+WOgX8Jg== - -flatten@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/flatten/-/flatten-1.0.2.tgz#dae46a9d78fbe25292258cc1e780a41d95c03782" - integrity sha1-2uRqnXj74lKSJYzB54CkHZXAN4I= - -flush-write-stream@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/flush-write-stream/-/flush-write-stream-1.1.1.tgz#8dd7d873a1babc207d94ead0c2e0e44276ebf2e8" - integrity sha512-3Z4XhFZ3992uIq0XOqb9AreonueSYphE6oYbpt5+3u06JWklbsPkNv3ZKkP9Bz/r+1MWCaMoSQ28P85+1Yc77w== - dependencies: - inherits "^2.0.3" - readable-stream "^2.3.6" - -follow-redirects@^1.0.0: - version "1.8.1" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.8.1.tgz#24804f9eaab67160b0e840c085885d606371a35b" - integrity sha512-micCIbldHioIegeKs41DoH0KS3AXfFzgS30qVkM6z/XOE/GJgvmsoc839NUqa1B9udYe9dQxgv7KFwng6+p/dw== - dependencies: - debug "^3.0.0" - -for-in@^0.1.3: - version "0.1.8" - resolved "https://registry.yarnpkg.com/for-in/-/for-in-0.1.8.tgz#d8773908e31256109952b1fdb9b3fa867d2775e1" - integrity sha1-2Hc5COMSVhCZUrH9ubP6hn0ndeE= - -for-in@^1.0.1, for-in@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80" - integrity sha1-gQaNKVqBQuwKxybG4iAMMPttXoA= - -for-own@^0.1.3: - version "0.1.5" - resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce" - integrity sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4= - dependencies: - for-in "^1.0.1" - -for-own@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/for-own/-/for-own-1.0.0.tgz#c63332f415cedc4b04dbfe70cf836494c53cb44b" - integrity sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs= - dependencies: - for-in "^1.0.1" - -forever-agent@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91" - integrity sha1-+8cfDEGt6zf5bFd60e1C2P2sypE= - -fork-ts-checker-webpack-plugin@1.5.0: - version "1.5.0" - resolved "https://registry.yarnpkg.com/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-1.5.0.tgz#ce1d77190b44d81a761b10b6284a373795e41f0c" - integrity 
sha512-zEhg7Hz+KhZlBhILYpXy+Beu96gwvkROWJiTXOCyOOMMrdBIRPvsBpBqgTI4jfJGrJXcqGwJR8zsBGDmzY0jsA== - dependencies: - babel-code-frame "^6.22.0" - chalk "^2.4.1" - chokidar "^2.0.4" - micromatch "^3.1.10" - minimatch "^3.0.4" - semver "^5.6.0" - tapable "^1.0.0" - worker-rpc "^0.1.0" - -form-data@~2.3.2: - version "2.3.3" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-2.3.3.tgz#dcce52c05f644f298c6a7ab936bd724ceffbf3a6" - integrity sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.6" - mime-types "^2.1.12" - -forwarded@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.2.tgz#98c23dab1175657b8c0573e8ceccd91b0ff18c84" - integrity sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ= - -fragment-cache@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/fragment-cache/-/fragment-cache-0.2.1.tgz#4290fad27f13e89be7f33799c6bc5a0abfff0d19" - integrity sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk= - dependencies: - map-cache "^0.2.2" - -fresh@0.5.2: - version "0.5.2" - resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" - integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= - -from2@^2.1.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" - integrity sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8= - dependencies: - inherits "^2.0.1" - readable-stream "^2.0.0" - -fs-extra@7.0.1, fs-extra@^7.0.0: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-7.0.1.tgz#4f189c44aa123b895f722804f55ea23eadc348e9" - integrity sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw== - dependencies: - graceful-fs "^4.1.2" - jsonfile "^4.0.0" - universalify "^0.1.0" - -fs-extra@^4.0.2: - version "4.0.3" - resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-4.0.3.tgz#0d852122e5bc5beb453fb028e9c0c9bf36340c94" - integrity sha512-q6rbdDd1o2mAnQreO7YADIxf/Whx4AHBiRf6d+/cVT8h44ss+lHgxf1FemcqDnQt9X3ct4McHr+JMGlYSsK7Cg== - dependencies: - graceful-fs "^4.1.2" - jsonfile "^4.0.0" - universalify "^0.1.0" - -fs-minipass@^1.2.5: - version "1.2.6" - resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-1.2.6.tgz#2c5cc30ded81282bfe8a0d7c7c1853ddeb102c07" - integrity sha512-crhvyXcMejjv3Z5d2Fa9sf5xLYVCF5O1c71QxbVnbLsmYMBEvDAftewesN/HhY03YRoA7zOMxjNGrF5svGaaeQ== - dependencies: - minipass "^2.2.1" - -fs-write-stream-atomic@^1.0.8: - version "1.0.10" - resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9" - integrity sha1-tH31NJPvkR33VzHnCp3tAYnbQMk= - dependencies: - graceful-fs "^4.1.2" - iferr "^0.1.5" - imurmurhash "^0.1.4" - readable-stream "1 || 2" - -fs.realpath@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= - -fsevents@2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.0.6.tgz#87b19df0bfb4a1a51d7ddb51b01b5f3bedb40c33" - integrity sha512-vfmKZp3XPM36DNF0qhW+Cdxk7xm7gTEHY1clv1Xq1arwRQuKZgAhw+NZNWbJBtuaNxzNXwhfdPYRrvIbjfS33A== - -fsevents@^1.2.7: - version "1.2.9" - resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-1.2.9.tgz#3f5ed66583ccd6f400b5a00db6f7e861363e388f" - integrity 
sha512-oeyj2H3EjjonWcFjD5NvZNE9Rqe4UW+nQBU2HNeKw0koVLEFIhtyETyAakeAM3de7Z/SW5kcA+fZUait9EApnw== - dependencies: - nan "^2.12.1" - node-pre-gyp "^0.12.0" - -function-bind@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" - integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A== - -functional-red-black-tree@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" - integrity sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc= - -gauge@~2.7.3: - version "2.7.4" - resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" - integrity sha1-LANAXHU4w51+s3sxcCLjJfsBi/c= - dependencies: - aproba "^1.0.3" - console-control-strings "^1.0.0" - has-unicode "^2.0.0" - object-assign "^4.1.0" - signal-exit "^3.0.0" - string-width "^1.0.1" - strip-ansi "^3.0.1" - wide-align "^1.1.0" - -get-caller-file@^1.0.1: - version "1.0.3" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-1.0.3.tgz#f978fa4c90d1dfe7ff2d6beda2a515e713bdcf4a" - integrity sha512-3t6rVToeoZfYSGd8YoLFR2DJkiQrIiUrGcjvFX2mDw3bn6k2OtwHN0TNCLbBO+w8qTvimhDkv+LSscbJY1vE6w== - -get-caller-file@^2.0.1: - version "2.0.5" - resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" - integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== - -get-own-enumerable-property-symbols@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.0.tgz#b877b49a5c16aefac3655f2ed2ea5b684df8d203" - integrity sha512-CIJYJC4GGF06TakLg8z4GQKvDsx9EMspVxOYih7LerEL/WosUnFIww45CGfxfeKHqlg3twgUrYRT1O3WQqjGCg== - -get-stream@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" - integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== - dependencies: - pump "^3.0.0" - -get-value@^2.0.3, get-value@^2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/get-value/-/get-value-2.0.6.tgz#dc15ca1c672387ca76bd37ac0a395ba2042a2c28" - integrity sha1-3BXKHGcjh8p2vTesCjlbogQqLCg= - -getpass@^0.1.1: - version "0.1.7" - resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.7.tgz#5eff8e3e684d569ae4cb2b1282604e8ba62149fa" - integrity sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo= - dependencies: - assert-plus "^1.0.0" - -glob-parent@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-3.1.0.tgz#9e6af6299d8d3bd2bd40430832bd113df906c5ae" - integrity sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4= - dependencies: - is-glob "^3.1.0" - path-dirname "^1.0.0" - -glob-to-regexp@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" - integrity sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs= - -glob@^7.0.3, glob@^7.1.1, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4: - version "7.1.4" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255" - integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A== - dependencies: - fs.realpath "^1.0.0" - inflight 
"^1.0.4" - inherits "2" - minimatch "^3.0.4" - once "^1.3.0" - path-is-absolute "^1.0.0" - -global-modules@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/global-modules/-/global-modules-2.0.0.tgz#997605ad2345f27f51539bea26574421215c7780" - integrity sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A== - dependencies: - global-prefix "^3.0.0" - -global-prefix@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/global-prefix/-/global-prefix-3.0.0.tgz#fc85f73064df69f50421f47f883fe5b913ba9b97" - integrity sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg== - dependencies: - ini "^1.3.5" - kind-of "^6.0.2" - which "^1.3.1" - -globals@^11.1.0, globals@^11.7.0: - version "11.12.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" - integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== - -globby@8.0.2: - version "8.0.2" - resolved "https://registry.yarnpkg.com/globby/-/globby-8.0.2.tgz#5697619ccd95c5275dbb2d6faa42087c1a941d8d" - integrity sha512-yTzMmKygLp8RUpG1Ymu2VXPSJQZjNAZPD4ywgYEaG7e4tBJeUQBO8OpXrf1RCNcEs5alsoJYPAMiIHP0cmeC7w== - dependencies: - array-union "^1.0.1" - dir-glob "2.0.0" - fast-glob "^2.0.2" - glob "^7.1.2" - ignore "^3.3.5" - pify "^3.0.0" - slash "^1.0.0" - -globby@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" - integrity sha1-9abXDoOV4hyFj7BInWTfAkJNUGw= - dependencies: - array-union "^1.0.1" - glob "^7.0.3" - object-assign "^4.0.1" - pify "^2.0.0" - pinkie-promise "^2.0.0" - -graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6: - version "4.2.2" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.2.tgz#6f0952605d0140c1cfdb138ed005775b92d67b02" - integrity sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q== - -growly@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/growly/-/growly-1.3.0.tgz#f10748cbe76af964b7c96c93c6bcc28af120c081" - integrity sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE= - -gud@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0" - integrity sha512-zGEOVKFM5sVPPrYs7J5/hYEw2Pof8KCyOwyhG8sAF26mCAeUFAcYPu1mwB7hhpIP29zOIBaDqwuHdLp0jvZXjw== - -gzip-size@5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/gzip-size/-/gzip-size-5.1.1.tgz#cb9bee692f87c0612b232840a873904e4c135274" - integrity sha512-FNHi6mmoHvs1mxZAds4PpdCS6QG8B4C1krxJsMutgxl5t3+GlRTzzI3NEkifXx2pVsOvJdOGSmIgDhQ55FwdPA== - dependencies: - duplexer "^0.1.1" - pify "^4.0.1" - -hammerjs@^2.0.8: - version "2.0.8" - resolved "https://registry.yarnpkg.com/hammerjs/-/hammerjs-2.0.8.tgz#04ef77862cff2bb79d30f7692095930222bf60f1" - integrity sha1-BO93hiz/K7edMPdpIJWTAiK/YPE= - -handle-thing@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/handle-thing/-/handle-thing-2.0.0.tgz#0e039695ff50c93fc288557d696f3c1dc6776754" - integrity sha512-d4sze1JNC454Wdo2fkuyzCr6aHcbL6PGGuFAz0Li/NcOm1tCHGnWDRmJP85dh9IhQErTc2svWFEX5xHIOo//kQ== - -handlebars@^4.1.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-4.1.2.tgz#b6b37c1ced0306b221e094fc7aca3ec23b131b67" - integrity sha512-nvfrjqvt9xQ8Z/w0ijewdD/vvWDTOweBUm96NTr66Wfvo1mJenBLwcYmPs3TIBP5ruzYGD7Hx/DaM9RmhroGPw== - 
dependencies: - neo-async "^2.6.0" - optimist "^0.6.1" - source-map "^0.6.1" - optionalDependencies: - uglify-js "^3.1.4" - -har-schema@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" - integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= - -har-validator@~5.1.0: - version "5.1.3" - resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" - integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== - dependencies: - ajv "^6.5.5" - har-schema "^2.0.0" - -harmony-reflect@^1.4.6: - version "1.6.1" - resolved "https://registry.yarnpkg.com/harmony-reflect/-/harmony-reflect-1.6.1.tgz#c108d4f2bb451efef7a37861fdbdae72c9bdefa9" - integrity sha512-WJTeyp0JzGtHcuMsi7rw2VwtkvLa+JyfEKJCFyfcS0+CDkjQ5lHPu7zEhFZP+PDSRrEgXa5Ah0l1MbgbE41XjA== - -has-ansi@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" - integrity sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE= - dependencies: - ansi-regex "^2.0.0" - -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= - -has-symbols@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" - integrity sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q= - -has-unicode@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9" - integrity sha1-4Ob+aijPUROIVeCG0Wkedx3iqLk= - -has-value@^0.3.1: - version "0.3.1" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-0.3.1.tgz#7b1f58bada62ca827ec0a2078025654845995e1f" - integrity sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8= - dependencies: - get-value "^2.0.3" - has-values "^0.1.4" - isobject "^2.0.0" - -has-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-value/-/has-value-1.0.0.tgz#18b281da585b1c5c51def24c930ed29a0be6b177" - integrity sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc= - dependencies: - get-value "^2.0.6" - has-values "^1.0.0" - isobject "^3.0.0" - -has-values@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-0.1.4.tgz#6d61de95d91dfca9b9a02089ad384bff8f62b771" - integrity sha1-bWHeldkd/Km5oCCJrThL/49it3E= - -has-values@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/has-values/-/has-values-1.0.0.tgz#95b0b63fec2146619a6fe57fe75628d5a39efe4f" - integrity sha1-lbC2P+whRmGab+V/51Yo1aOe/k8= - dependencies: - is-number "^3.0.0" - kind-of "^4.0.0" - -has@^1.0.0, has@^1.0.1, has@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796" - integrity sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw== - dependencies: - function-bind "^1.1.1" - -hash-base@^3.0.0: - version "3.0.4" - resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918" - integrity sha1-X8hoaEfs1zSZQDMZprCj8/auSRg= - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -hash.js@^1.0.0, hash.js@^1.0.3: - version "1.1.7" - resolved "https://registry.yarnpkg.com/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42" - integrity 
sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA== - dependencies: - inherits "^2.0.3" - minimalistic-assert "^1.0.1" - -he@1.2.x: - version "1.2.0" - resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" - integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== - -hex-color-regex@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" - integrity sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ== - -history@^4.9.0: - version "4.9.0" - resolved "https://registry.yarnpkg.com/history/-/history-4.9.0.tgz#84587c2068039ead8af769e9d6a6860a14fa1bca" - integrity sha512-H2DkjCjXf0Op9OAr6nJ56fcRkTSNrUiv41vNJ6IswJjif6wlpZK0BTfFbi7qK9dXLSYZxkq5lBsj3vUjlYBYZA== - dependencies: - "@babel/runtime" "^7.1.2" - loose-envify "^1.2.0" - resolve-pathname "^2.2.0" - tiny-invariant "^1.0.2" - tiny-warning "^1.0.0" - value-equal "^0.4.0" - -hmac-drbg@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/hmac-drbg/-/hmac-drbg-1.0.1.tgz#d2745701025a6c775a6c545793ed502fc0c649a1" - integrity sha1-0nRXAQJabHdabFRXk+1QL8DGSaE= - dependencies: - hash.js "^1.0.3" - minimalistic-assert "^1.0.0" - minimalistic-crypto-utils "^1.0.1" - -hoist-non-react-statics@^2.3.1: - version "2.5.5" - resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-2.5.5.tgz#c5903cf409c0dfd908f388e619d86b9c1174cb47" - integrity sha512-rqcy4pJo55FTTLWt+bU8ukscqHeE/e9KWvsOW2b/a3afxQZhwkQdT1rPPCJ0rYXdj4vNcasY8zHTH+jF/qStxw== - -hoist-non-react-statics@^3.1.0, hoist-non-react-statics@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/hoist-non-react-statics/-/hoist-non-react-statics-3.3.0.tgz#b09178f0122184fb95acf525daaecb4d8f45958b" - integrity sha512-0XsbTXxgiaCDYDIWFcwkmerZPSwywfUqYmwT4jzewKTQSWoE6FCMoUVOeBJWK3E/CrWbxRG3m5GzY4lnIwGRBA== - dependencies: - react-is "^16.7.0" - -hosted-git-info@^2.1.4: - version "2.8.4" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.4.tgz#44119abaf4bc64692a16ace34700fed9c03e2546" - integrity sha512-pzXIvANXEFrc5oFFXRMkbLPQ2rXRoDERwDLyrcUxGhaZhgP54BBSl9Oheh7Vv0T090cszWBxPjkQQ5Sq1PbBRQ== - -hpack.js@^2.1.6: - version "2.1.6" - resolved "https://registry.yarnpkg.com/hpack.js/-/hpack.js-2.1.6.tgz#87774c0949e513f42e84575b3c45681fade2a0b2" - integrity sha1-h3dMCUnlE/QuhFdbPEVoH63ioLI= - dependencies: - inherits "^2.0.1" - obuf "^1.0.0" - readable-stream "^2.0.1" - wbuf "^1.1.0" - -hsl-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e" - integrity sha1-1JMwx4ntgZ4nakwNJy3/owsY/m4= - -hsla-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38" - integrity sha1-wc56MWjIxmFAM6S194d/OyJfnDg= - -html-comment-regex@^1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/html-comment-regex/-/html-comment-regex-1.1.2.tgz#97d4688aeb5c81886a364faa0cad1dda14d433a7" - integrity sha512-P+M65QY2JQ5Y0G9KKdlDpo0zK+/OHptU5AaBwUfAIDJZk1MYf32Frm84EcOytfJE0t5JvkAnKlmjsXDnWzCJmQ== - -html-encoding-sniffer@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-1.0.2.tgz#e70d84b94da53aa375e11fe3a351be6642ca46f8" - integrity 
sha512-71lZziiDnsuabfdYiUeWdCVyKuqwWi23L8YeIgV9jSSZHCtb6wB1BKWooH7L3tn4/FuZJMVWyNaIDr4RGmaSYw== - dependencies: - whatwg-encoding "^1.0.1" - -html-entities@^1.2.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/html-entities/-/html-entities-1.2.1.tgz#0df29351f0721163515dfb9e5543e5f6eed5162f" - integrity sha1-DfKTUfByEWNRXfueVUPl9u7VFi8= - -html-minifier@^3.5.20: - version "3.5.21" - resolved "https://registry.yarnpkg.com/html-minifier/-/html-minifier-3.5.21.tgz#d0040e054730e354db008463593194015212d20c" - integrity sha512-LKUKwuJDhxNa3uf/LPR/KVjm/l3rBqtYeCOAekvG8F1vItxMUpueGd94i/asDDr8/1u7InxzFA5EeGjhhG5mMA== - dependencies: - camel-case "3.0.x" - clean-css "4.2.x" - commander "2.17.x" - he "1.2.x" - param-case "2.1.x" - relateurl "0.2.x" - uglify-js "3.4.x" - -html-webpack-plugin@4.0.0-beta.5: - version "4.0.0-beta.5" - resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-4.0.0-beta.5.tgz#2c53083c1151bfec20479b1f8aaf0039e77b5513" - integrity sha512-y5l4lGxOW3pz3xBTFdfB9rnnrWRPVxlAhX6nrBYIcW+2k2zC3mSp/3DxlWVCMBfnO6UAnoF8OcFn0IMy6kaKAQ== - dependencies: - html-minifier "^3.5.20" - loader-utils "^1.1.0" - lodash "^4.17.11" - pretty-error "^2.1.1" - tapable "^1.1.0" - util.promisify "1.0.0" - -htmlparser2@^3.3.0: - version "3.10.1" - resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-3.10.1.tgz#bd679dc3f59897b6a34bb10749c855bb53a9392f" - integrity sha512-IgieNijUMbkDovyoKObU1DUhm1iwNYE/fuifEoEHfd1oZKZDaONBSkal7Y01shxsM49R4XaMdGez3WnF9UfiCQ== - dependencies: - domelementtype "^1.3.1" - domhandler "^2.3.0" - domutils "^1.5.1" - entities "^1.1.1" - inherits "^2.0.1" - readable-stream "^3.1.1" - -http-deceiver@^1.2.7: - version "1.2.7" - resolved "https://registry.yarnpkg.com/http-deceiver/-/http-deceiver-1.2.7.tgz#fa7168944ab9a519d337cb0bec7284dc3e723d87" - integrity sha1-+nFolEq5pRnTN8sL7HKE3D5yPYc= - -http-errors@1.7.2: - version "1.7.2" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f" - integrity sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg== - dependencies: - depd "~1.1.2" - inherits "2.0.3" - setprototypeof "1.1.1" - statuses ">= 1.5.0 < 2" - toidentifier "1.0.0" - -http-errors@~1.6.2: - version "1.6.3" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.6.3.tgz#8b55680bb4be283a0b5bf4ea2e38580be1d9320d" - integrity sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0= - dependencies: - depd "~1.1.2" - inherits "2.0.3" - setprototypeof "1.1.0" - statuses ">= 1.4.0 < 2" - -http-errors@~1.7.2: - version "1.7.3" - resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.3.tgz#6c619e4f9c60308c38519498c14fbb10aacebb06" - integrity sha512-ZTTX0MWrsQ2ZAhA1cejAwDLycFsd7I7nVtnkT3Ol0aqodaKW+0CTZDQ1uBv5whptCnc8e8HeRRJxRs0kmm/Qfw== - dependencies: - depd "~1.1.2" - inherits "2.0.4" - setprototypeof "1.1.1" - statuses ">= 1.5.0 < 2" - toidentifier "1.0.0" - -"http-parser-js@>=0.4.0 <0.4.11": - version "0.4.10" - resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.4.10.tgz#92c9c1374c35085f75db359ec56cc257cbb93fa4" - integrity sha1-ksnBN0w1CF912zWexWzCV8u5P6Q= - -http-proxy-middleware@^0.19.1: - version "0.19.1" - resolved "https://registry.yarnpkg.com/http-proxy-middleware/-/http-proxy-middleware-0.19.1.tgz#183c7dc4aa1479150306498c210cdaf96080a43a" - integrity sha512-yHYTgWMQO8VvwNS22eLLloAkvungsKdKTLO8AJlftYIKNfJr3GK3zK0ZCfzDDGUBttdGc8xFy1mCitvNKQtC3Q== - dependencies: - http-proxy "^1.17.0" - is-glob 
"^4.0.0" - lodash "^4.17.11" - micromatch "^3.1.10" - -http-proxy@^1.17.0: - version "1.17.0" - resolved "https://registry.yarnpkg.com/http-proxy/-/http-proxy-1.17.0.tgz#7ad38494658f84605e2f6db4436df410f4e5be9a" - integrity sha512-Taqn+3nNvYRfJ3bGvKfBSRwy1v6eePlm3oc/aWVxZp57DQr5Eq3xhKJi7Z4hZpS8PC3H4qI+Yly5EmFacGuA/g== - dependencies: - eventemitter3 "^3.0.0" - follow-redirects "^1.0.0" - requires-port "^1.0.0" - -http-signature@~1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/http-signature/-/http-signature-1.2.0.tgz#9aecd925114772f3d95b65a60abb8f7c18fbace1" - integrity sha1-muzZJRFHcvPZW2WmCruPfBj7rOE= - dependencies: - assert-plus "^1.0.0" - jsprim "^1.2.2" - sshpk "^1.7.0" - -https-browserify@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73" - integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM= - -iconv-lite@0.4.24, iconv-lite@^0.4.24, iconv-lite@^0.4.4, iconv-lite@~0.4.13: - version "0.4.24" - resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" - integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== - dependencies: - safer-buffer ">= 2.1.2 < 3" - -icss-replace-symbols@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/icss-replace-symbols/-/icss-replace-symbols-1.1.0.tgz#06ea6f83679a7749e386cfe1fe812ae5db223ded" - integrity sha1-Bupvg2ead0njhs/h/oEq5dsiPe0= - -icss-utils@^4.1.0: - version "4.1.1" - resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-4.1.1.tgz#21170b53789ee27447c2f47dd683081403f9a467" - integrity sha512-4aFq7wvWyMHKgxsH8QQtGpvbASCf+eM3wPRLI6R+MgAnTCZ6STYsRvttLvRWK0Nfif5piF394St3HeJDaljGPA== - dependencies: - postcss "^7.0.14" - -identity-obj-proxy@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/identity-obj-proxy/-/identity-obj-proxy-3.0.0.tgz#94d2bda96084453ef36fbc5aaec37e0f79f1fc14" - integrity sha1-lNK9qWCERT7zb7xarsN+D3nx/BQ= - dependencies: - harmony-reflect "^1.4.6" - -ieee754@^1.1.4: - version "1.1.13" - resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.13.tgz#ec168558e95aa181fd87d37f55c32bbcb6708b84" - integrity sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg== - -iferr@^0.1.5: - version "0.1.5" - resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501" - integrity sha1-xg7taebY/bazEEofy8ocGS3FtQE= - -ignore-walk@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.1.tgz#a83e62e7d272ac0e3b551aaa82831a19b69f82f8" - integrity sha512-DTVlMx3IYPe0/JJcYP7Gxg7ttZZu3IInhuEhbchuqneY9wWe5Ojy2mXLBaQFUQmo0AW2r3qG7m1mg86js+gnlQ== - dependencies: - minimatch "^3.0.4" - -ignore@^3.3.5: - version "3.3.10" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043" - integrity sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug== - -ignore@^4.0.6: - version "4.0.6" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" - integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== - -image-size@~0.5.0: - version "0.5.5" - resolved "https://registry.yarnpkg.com/image-size/-/image-size-0.5.5.tgz#09dfd4ab9d20e29eb1c3e80b8990378df9e3cb9c" - integrity 
sha1-Cd/Uq50g4p6xw+gLiZA3jfnjy5w= - -immer@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/immer/-/immer-1.10.0.tgz#bad67605ba9c810275d91e1c2a47d4582e98286d" - integrity sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg== - -immutable@^3.7.4: - version "3.8.2" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.8.2.tgz#c2439951455bb39913daf281376f1530e104adf3" - integrity sha1-wkOZUUVbs5kT2vKBN28VMOEErfM= - -immutable@~3.7.4: - version "3.7.6" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-3.7.6.tgz#13b4d3cb12befa15482a26fe1b2ebae640071e4b" - integrity sha1-E7TTyxK++hVIKib+Gy665kAHHks= - -import-cwd@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/import-cwd/-/import-cwd-2.1.0.tgz#aa6cf36e722761285cb371ec6519f53e2435b0a9" - integrity sha1-qmzzbnInYShcs3HsZRn1PiQ1sKk= - dependencies: - import-from "^2.1.0" - -import-fresh@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" - integrity sha1-2BNVwVYS04bGH53dOSLUMEgipUY= - dependencies: - caller-path "^2.0.0" - resolve-from "^3.0.0" - -import-fresh@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.1.0.tgz#6d33fa1dcef6df930fae003446f33415af905118" - integrity sha512-PpuksHKGt8rXfWEr9m9EHIpgyyaltBy8+eF6GJM0QCAxMgxCfucMF3mjecK2QsJr0amJW7gTqh5/wht0z2UhEQ== - dependencies: - parent-module "^1.0.0" - resolve-from "^4.0.0" - -import-from@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/import-from/-/import-from-2.1.0.tgz#335db7f2a7affd53aaa471d4b8021dee36b7f3b1" - integrity sha1-M1238qev/VOqpHHUuAId7ja387E= - dependencies: - resolve-from "^3.0.0" - -import-local@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/import-local/-/import-local-2.0.0.tgz#55070be38a5993cf18ef6db7e961f5bee5c5a09d" - integrity sha512-b6s04m3O+s3CGSbqDIyP4R6aAwAeYlVq9+WUWep6iHa8ETRf9yei1U48C5MmfJmV9AiLYYBKPMq/W+/WRpQmCQ== - dependencies: - pkg-dir "^3.0.0" - resolve-cwd "^2.0.0" - -imurmurhash@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" - integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= - -indexes-of@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" - integrity sha1-8w9xbI4r00bHtn0985FVZqfAVgc= - -infer-owner@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" - integrity sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A== - -inflight@^1.0.4: - version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= - dependencies: - once "^1.3.0" - wrappy "1" - -inherits@2, inherits@2.0.4, inherits@^2.0.1, inherits@^2.0.3, inherits@~2.0.1, inherits@~2.0.3: - version "2.0.4" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" - integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== - -inherits@2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.1.tgz#b17d08d326b4423e568eff719f91b0b1cbdf69f1" - integrity 
sha1-sX0I0ya0Qj5Wjv9xn5GwscvfafE= - -inherits@2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.3.tgz#633c2c83e3da42a502f52466022480f4208261de" - integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4= - -ini@^1.3.5, ini@~1.3.0: - version "1.3.5" - resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" - integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw== - -inquirer@6.5.0: - version "6.5.0" - resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.0.tgz#2303317efc9a4ea7ec2e2df6f86569b734accf42" - integrity sha512-scfHejeG/lVZSpvCXpsB4j/wQNPM5JC8kiElOI0OUTwmc1RTpXr4H32/HOlQHcZiYl2z2VElwuCVDRG8vFmbnA== - dependencies: - ansi-escapes "^3.2.0" - chalk "^2.4.2" - cli-cursor "^2.1.0" - cli-width "^2.0.0" - external-editor "^3.0.3" - figures "^2.0.0" - lodash "^4.17.12" - mute-stream "0.0.7" - run-async "^2.2.0" - rxjs "^6.4.0" - string-width "^2.1.0" - strip-ansi "^5.1.0" - through "^2.3.6" - -inquirer@^6.2.2: - version "6.5.2" - resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-6.5.2.tgz#ad50942375d036d327ff528c08bd5fab089928ca" - integrity sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ== - dependencies: - ansi-escapes "^3.2.0" - chalk "^2.4.2" - cli-cursor "^2.1.0" - cli-width "^2.0.0" - external-editor "^3.0.3" - figures "^2.0.0" - lodash "^4.17.12" - mute-stream "0.0.7" - run-async "^2.2.0" - rxjs "^6.4.0" - string-width "^2.1.0" - strip-ansi "^5.1.0" - through "^2.3.6" - -internal-ip@^4.2.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/internal-ip/-/internal-ip-4.3.0.tgz#845452baad9d2ca3b69c635a137acb9a0dad0907" - integrity sha512-S1zBo1D6zcsyuC6PMmY5+55YMILQ9av8lotMx447Bq6SAgo/sDK6y6uUKmuYhW7eacnIhFfsPmCNYdDzsnnDCg== - dependencies: - default-gateway "^4.2.0" - ipaddr.js "^1.9.0" - -invariant@^2.2.2, invariant@^2.2.4: - version "2.2.4" - resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" - integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== - dependencies: - loose-envify "^1.0.0" - -invert-kv@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/invert-kv/-/invert-kv-2.0.0.tgz#7393f5afa59ec9ff5f67a27620d11c226e3eec02" - integrity sha512-wPVv/y/QQ/Uiirj/vh3oP+1Ww+AWehmi1g5fFWGPF6IpCBCDVrhgHRMvrLfdYcwDh3QJbGXDW4JAuzxElLSqKA== - -ip-regex@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ip-regex/-/ip-regex-2.1.0.tgz#fa78bf5d2e6913c911ce9f819ee5146bb6d844e9" - integrity sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk= - -ip@^1.1.0, ip@^1.1.5: - version "1.1.5" - resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.5.tgz#bdded70114290828c0a039e72ef25f5aaec4354a" - integrity sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo= - -ipaddr.js@1.9.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.0.tgz#37df74e430a0e47550fe54a2defe30d8acd95f65" - integrity sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA== - -ipaddr.js@^1.9.0: - version "1.9.1" - resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3" - integrity sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g== - -is-absolute-url@^2.0.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/is-absolute-url/-/is-absolute-url-2.1.0.tgz#50530dfb84fcc9aa7dbe7852e83a37b93b9f2aa6" - integrity sha1-UFMN+4T8yap9vnhS6Do3uTufKqY= - -is-accessor-descriptor@^0.1.6: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6" - integrity sha1-qeEss66Nh2cn7u84Q/igiXtcmNY= - dependencies: - kind-of "^3.0.2" - -is-accessor-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz#169c2f6d3df1f992618072365c9b0ea1f6878656" - integrity sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ== - dependencies: - kind-of "^6.0.0" - -is-arguments@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.0.4.tgz#3faf966c7cba0ff437fb31f6250082fcf0448cf3" - integrity sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA== - -is-arrayish@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" - integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= - -is-arrayish@^0.3.1: - version "0.3.2" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" - integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== - -is-binary-path@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-binary-path/-/is-binary-path-1.0.1.tgz#75f16642b480f187a711c814161fd3a4a7655898" - integrity sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg= - dependencies: - binary-extensions "^1.0.0" - -is-buffer@^1.0.2, is-buffer@^1.1.5: - version "1.1.6" - resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-1.1.6.tgz#efaa2ea9daa0d7ab2ea13a97b2b8ad51fefbe8be" - integrity sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w== - -is-callable@^1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.1.4.tgz#1e1adf219e1eeb684d691f9d6a05ff0d30a24d75" - integrity sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA== - -is-ci@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" - integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== - dependencies: - ci-info "^2.0.0" - -is-color-stop@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345" - integrity sha1-z/9HGu5N1cnhWFmPvhKWe1za00U= - dependencies: - css-color-names "^0.0.4" - hex-color-regex "^1.1.0" - hsl-regex "^1.0.0" - hsla-regex "^1.0.0" - rgb-regex "^1.0.1" - rgba-regex "^1.0.0" - -is-data-descriptor@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz#0b5ee648388e2c860282e793f1856fec3f301b56" - integrity sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y= - dependencies: - kind-of "^3.0.2" - -is-data-descriptor@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz#d84876321d0e7add03990406abbbbd36ba9268c7" - integrity sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ== - dependencies: - kind-of 
"^6.0.0" - -is-date-object@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.1.tgz#9aa20eb6aeebbff77fbd33e74ca01b33581d3a16" - integrity sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY= - -is-descriptor@^0.1.0: - version "0.1.6" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-0.1.6.tgz#366d8240dde487ca51823b1ab9f07a10a78251ca" - integrity sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg== - dependencies: - is-accessor-descriptor "^0.1.6" - is-data-descriptor "^0.1.4" - kind-of "^5.0.0" - -is-descriptor@^1.0.0, is-descriptor@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-descriptor/-/is-descriptor-1.0.2.tgz#3b159746a66604b04f8c81524ba365c5f14d86ec" - integrity sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg== - dependencies: - is-accessor-descriptor "^1.0.0" - is-data-descriptor "^1.0.0" - kind-of "^6.0.2" - -is-directory@^0.3.1: - version "0.3.1" - resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" - integrity sha1-YTObbyR1/Hcv2cnYP1yFddwVSuE= - -is-extendable@^0.1.0, is-extendable@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-0.1.1.tgz#62b110e289a471418e3ec36a617d472e301dfc89" - integrity sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik= - -is-extendable@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-extendable/-/is-extendable-1.0.1.tgz#a7470f9e426733d81bd81e1155264e3a3507cab4" - integrity sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA== - dependencies: - is-plain-object "^2.0.4" - -is-extglob@^2.1.0, is-extglob@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" - integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= - -is-fullwidth-code-point@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" - integrity sha1-754xOG8DGn8NZDr4L95QxFfvAMs= - dependencies: - number-is-nan "^1.0.0" - -is-fullwidth-code-point@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" - integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= - -is-generator-fn@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" - integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ== - -is-glob@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-3.1.0.tgz#7ba5ae24217804ac70707b96922567486cc3e84a" - integrity sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo= - dependencies: - is-extglob "^2.1.0" - -is-glob@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" - integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== - dependencies: - is-extglob "^2.1.1" - -is-number@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-3.0.0.tgz#24fd6201a4782cf50561c810276afc7d12d71195" - integrity sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU= - dependencies: - kind-of "^3.0.2" - 
-is-obj@^1.0.0, is-obj@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" - integrity sha1-PkcprB9f3gJc19g6iW2rn09n2w8= - -is-path-cwd@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-path-cwd/-/is-path-cwd-1.0.0.tgz#d225ec23132e89edd38fda767472e62e65f1106d" - integrity sha1-0iXsIxMuie3Tj9p2dHLmLmXxEG0= - -is-path-in-cwd@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz#5ac48b345ef675339bd6c7a48a912110b241cf52" - integrity sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ== - dependencies: - is-path-inside "^1.0.0" - -is-path-inside@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-1.0.1.tgz#8ef5b7de50437a3fdca6b4e865ef7aa55cb48036" - integrity sha1-jvW33lBDej/cprToZe96pVy0gDY= - dependencies: - path-is-inside "^1.0.1" - -is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" - integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== - dependencies: - isobject "^3.0.1" - -is-promise@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-promise/-/is-promise-2.1.0.tgz#79a2a9ece7f096e80f36d2b2f3bc16c1ff4bf3fa" - integrity sha1-eaKp7OfwlugPNtKy87wWwf9L8/o= - -is-regex@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.0.4.tgz#5517489b547091b0930e095654ced25ee97e9491" - integrity sha1-VRdIm1RwkbCTDglWVM7SXul+lJE= - dependencies: - has "^1.0.1" - -is-regexp@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" - integrity sha1-/S2INUXEa6xaYz57mgnof6LLUGk= - -is-resolvable@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-resolvable/-/is-resolvable-1.1.0.tgz#fb18f87ce1feb925169c9a407c19318a3206ed88" - integrity sha512-qgDYXFSR5WvEfuS5dMj6oTMEbrrSaM0CrFk2Yiq/gXnBvD9pMa2jGXxyhGLfvhZpuMZe18CJpFxAt3CRs42NMg== - -is-root@2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" - integrity sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg== - -is-stream@^1.0.1, is-stream@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" - integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= - -is-svg@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/is-svg/-/is-svg-3.0.0.tgz#9321dbd29c212e5ca99c4fa9794c714bcafa2f75" - integrity sha512-gi4iHK53LR2ujhLVVj+37Ykh9GLqYHX6JOVXbLAucaG/Cqw9xwdFOjDM2qeifLs1sF1npXXFvDu0r5HNgCMrzQ== - dependencies: - html-comment-regex "^1.1.0" - -is-symbol@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.2.tgz#a055f6ae57192caee329e7a860118b497a950f38" - integrity sha512-HS8bZ9ox60yCJLH9snBpIwv9pYUAkcuLhSA1oero1UB5y9aiQpRA8y2ex945AOtCZL1lJDeIk3G5LthswI46Lw== - dependencies: - has-symbols "^1.0.0" - -is-typedarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= - 
-is-windows@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" - integrity sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA== - -is-wsl@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" - integrity sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0= - -isarray@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-0.0.1.tgz#8a18acfca9a8f4177e09abfc6038939b05d1eedf" - integrity sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8= - -isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" - integrity sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= - -isexe@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= - -ismobilejs@^0.5.1: - version "0.5.2" - resolved "https://registry.yarnpkg.com/ismobilejs/-/ismobilejs-0.5.2.tgz#e81bacf6187c532ad8348355f4fecd6e6adfdce1" - integrity sha512-ta9UdV60xVZk/ZafFtSFslQaE76SvNkcs1r73d2PVR21zVzx9xuYv9tNe4MxA1NN7WoeCc2RjGot3Bz1eHDx3Q== - -isobject@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-2.1.0.tgz#f065561096a3f1da2ef46272f815c840d87e0c89" - integrity sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk= - dependencies: - isarray "1.0.0" - -isobject@^3.0.0, isobject@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" - integrity sha1-TkMekrEalzFjaqH5yNHMvP2reN8= - -isomorphic-fetch@^2.1.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/isomorphic-fetch/-/isomorphic-fetch-2.2.1.tgz#611ae1acf14f5e81f729507472819fe9733558a9" - integrity sha1-YRrhrPFPXoH3KVB0coGf6XM1WKk= - dependencies: - node-fetch "^1.0.1" - whatwg-fetch ">=0.10.0" - -isstream@~0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" - integrity sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo= - -istanbul-lib-coverage@^2.0.2, istanbul-lib-coverage@^2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz#675f0ab69503fad4b1d849f736baaca803344f49" - integrity sha512-8aXznuEPCJvGnMSRft4udDRDtb1V3pkQkMMI5LI+6HuQz5oQ4J2UFn1H82raA3qJtyOLkkwVqICBQkjnGtn5mA== - -istanbul-lib-instrument@^3.0.1, istanbul-lib-instrument@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-3.3.0.tgz#a5f63d91f0bbc0c3e479ef4c5de027335ec6d630" - integrity sha512-5nnIN4vo5xQZHdXno/YDXJ0G+I3dAm4XgzfSVTPLQpj/zAV2dV6Juy0yaf10/zrJOJeHoN3fraFe+XRq2bFVZA== - dependencies: - "@babel/generator" "^7.4.0" - "@babel/parser" "^7.4.3" - "@babel/template" "^7.4.0" - "@babel/traverse" "^7.4.3" - "@babel/types" "^7.4.0" - istanbul-lib-coverage "^2.0.5" - semver "^6.0.0" - -istanbul-lib-report@^2.0.4: - version "2.0.8" - resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-2.0.8.tgz#5a8113cd746d43c4889eba36ab10e7d50c9b4f33" - integrity sha512-fHBeG573EIihhAblwgxrSenp0Dby6tJMFR/HvlerBsrCTD5bkUuoNtn3gVh29ZCS824cGGBPn7Sg7cNk+2xUsQ== - dependencies: - istanbul-lib-coverage "^2.0.5" - make-dir "^2.1.0" - supports-color "^6.1.0" - -istanbul-lib-source-maps@^3.0.1: - 
version "3.0.6" - resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-3.0.6.tgz#284997c48211752ec486253da97e3879defba8c8" - integrity sha512-R47KzMtDJH6X4/YW9XTx+jrLnZnscW4VpNN+1PViSYTejLVPWv7oov+Duf8YQSPyVRUvueQqz1TcsC6mooZTXw== - dependencies: - debug "^4.1.1" - istanbul-lib-coverage "^2.0.5" - make-dir "^2.1.0" - rimraf "^2.6.3" - source-map "^0.6.1" - -istanbul-reports@^2.2.6: - version "2.2.6" - resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-2.2.6.tgz#7b4f2660d82b29303a8fe6091f8ca4bf058da1af" - integrity sha512-SKi4rnMyLBKe0Jy2uUdx28h8oG7ph2PPuQPvIAh31d+Ci+lSiEu4C+h3oBPuJ9+mPKhOyW0M8gY4U5NM1WLeXA== - dependencies: - handlebars "^4.1.2" - -jest-changed-files@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-24.9.0.tgz#08d8c15eb79a7fa3fc98269bc14b451ee82f8039" - integrity sha512-6aTWpe2mHF0DhL28WjdkO8LyGjs3zItPET4bMSeXU6T3ub4FPMw+mcOcbdGXQOAfmLcxofD23/5Bl9Z4AkFwqg== - dependencies: - "@jest/types" "^24.9.0" - execa "^1.0.0" - throat "^4.0.0" - -jest-cli@^24.7.1: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-24.9.0.tgz#ad2de62d07472d419c6abc301fc432b98b10d2af" - integrity sha512-+VLRKyitT3BWoMeSUIHRxV/2g8y9gw91Jh5z2UmXZzkZKpbC08CSehVxgHUwTpy+HwGcns/tqafQDJW7imYvGg== - dependencies: - "@jest/core" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - chalk "^2.0.1" - exit "^0.1.2" - import-local "^2.0.0" - is-ci "^2.0.0" - jest-config "^24.9.0" - jest-util "^24.9.0" - jest-validate "^24.9.0" - prompts "^2.0.1" - realpath-native "^1.1.0" - yargs "^13.3.0" - -jest-config@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-24.9.0.tgz#fb1bbc60c73a46af03590719efa4825e6e4dd1b5" - integrity sha512-RATtQJtVYQrp7fvWg6f5y3pEFj9I+H8sWw4aKxnDZ96mob5i5SD6ZEGWgMLXQ4LE8UurrjbdlLWdUeo+28QpfQ== - dependencies: - "@babel/core" "^7.1.0" - "@jest/test-sequencer" "^24.9.0" - "@jest/types" "^24.9.0" - babel-jest "^24.9.0" - chalk "^2.0.1" - glob "^7.1.1" - jest-environment-jsdom "^24.9.0" - jest-environment-node "^24.9.0" - jest-get-type "^24.9.0" - jest-jasmine2 "^24.9.0" - jest-regex-util "^24.3.0" - jest-resolve "^24.9.0" - jest-util "^24.9.0" - jest-validate "^24.9.0" - micromatch "^3.1.10" - pretty-format "^24.9.0" - realpath-native "^1.1.0" - -jest-diff@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-24.9.0.tgz#931b7d0d5778a1baf7452cb816e325e3724055da" - integrity sha512-qMfrTs8AdJE2iqrTp0hzh7kTd2PQWrsFyj9tORoKmu32xjPjeE4NyjVRDz8ybYwqS2ik8N4hsIpiVTyFeo2lBQ== - dependencies: - chalk "^2.0.1" - diff-sequences "^24.9.0" - jest-get-type "^24.9.0" - pretty-format "^24.9.0" - -jest-docblock@^24.3.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-24.9.0.tgz#7970201802ba560e1c4092cc25cbedf5af5a8ce2" - integrity sha512-F1DjdpDMJMA1cN6He0FNYNZlo3yYmOtRUnktrT9Q37njYzC5WEaDdmbynIgy0L/IvXvvgsG8OsqhLPXTpfmZAA== - dependencies: - detect-newline "^2.1.0" - -jest-each@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-24.9.0.tgz#eb2da602e2a610898dbc5f1f6df3ba86b55f8b05" - integrity sha512-ONi0R4BvW45cw8s2Lrx8YgbeXL1oCQ/wIDwmsM3CqM/nlblNCPmnC3IPQlMbRFZu3wKdQ2U8BqM6lh3LJ5Bsog== - dependencies: - "@jest/types" "^24.9.0" - chalk "^2.0.1" - jest-get-type "^24.9.0" - jest-util "^24.9.0" - pretty-format "^24.9.0" - -jest-environment-jsdom-fourteen@0.1.0: - version "0.1.0" - resolved 
"https://registry.yarnpkg.com/jest-environment-jsdom-fourteen/-/jest-environment-jsdom-fourteen-0.1.0.tgz#aad6393a9d4b565b69a609109bf469f62bf18ccc" - integrity sha512-4vtoRMg7jAstitRzL4nbw83VmGH8Rs13wrND3Ud2o1fczDhMUF32iIrNKwYGgeOPUdfvZU4oy8Bbv+ni1fgVCA== - dependencies: - jest-mock "^24.5.0" - jest-util "^24.5.0" - jsdom "^14.0.0" - -jest-environment-jsdom@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-environment-jsdom/-/jest-environment-jsdom-24.9.0.tgz#4b0806c7fc94f95edb369a69cc2778eec2b7375b" - integrity sha512-Zv9FV9NBRzLuALXjvRijO2351DRQeLYXtpD4xNvfoVFw21IOKNhZAEUKcbiEtjTkm2GsJ3boMVgkaR7rN8qetA== - dependencies: - "@jest/environment" "^24.9.0" - "@jest/fake-timers" "^24.9.0" - "@jest/types" "^24.9.0" - jest-mock "^24.9.0" - jest-util "^24.9.0" - jsdom "^11.5.1" - -jest-environment-node@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-24.9.0.tgz#333d2d2796f9687f2aeebf0742b519f33c1cbfd3" - integrity sha512-6d4V2f4nxzIzwendo27Tr0aFm+IXWa0XEUnaH6nU0FMaozxovt+sfRvh4J47wL1OvF83I3SSTu0XK+i4Bqe7uA== - dependencies: - "@jest/environment" "^24.9.0" - "@jest/fake-timers" "^24.9.0" - "@jest/types" "^24.9.0" - jest-mock "^24.9.0" - jest-util "^24.9.0" - -jest-get-type@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-24.9.0.tgz#1684a0c8a50f2e4901b6644ae861f579eed2ef0e" - integrity sha512-lUseMzAley4LhIcpSP9Jf+fTrQ4a1yHQwLNeeVa2cEmbCGeoZAtYPOIv8JaxLD/sUpKxetKGP+gsHl8f8TSj8Q== - -jest-haste-map@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-24.9.0.tgz#b38a5d64274934e21fa417ae9a9fbeb77ceaac7d" - integrity sha512-kfVFmsuWui2Sj1Rp1AJ4D9HqJwE4uwTlS/vO+eRUaMmd54BFpli2XhMQnPC2k4cHFVbB2Q2C+jtI1AGLgEnCjQ== - dependencies: - "@jest/types" "^24.9.0" - anymatch "^2.0.0" - fb-watchman "^2.0.0" - graceful-fs "^4.1.15" - invariant "^2.2.4" - jest-serializer "^24.9.0" - jest-util "^24.9.0" - jest-worker "^24.9.0" - micromatch "^3.1.10" - sane "^4.0.3" - walker "^1.0.7" - optionalDependencies: - fsevents "^1.2.7" - -jest-jasmine2@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-jasmine2/-/jest-jasmine2-24.9.0.tgz#1f7b1bd3242c1774e62acabb3646d96afc3be6a0" - integrity sha512-Cq7vkAgaYKp+PsX+2/JbTarrk0DmNhsEtqBXNwUHkdlbrTBLtMJINADf2mf5FkowNsq8evbPc07/qFO0AdKTzw== - dependencies: - "@babel/traverse" "^7.1.0" - "@jest/environment" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - chalk "^2.0.1" - co "^4.6.0" - expect "^24.9.0" - is-generator-fn "^2.0.0" - jest-each "^24.9.0" - jest-matcher-utils "^24.9.0" - jest-message-util "^24.9.0" - jest-runtime "^24.9.0" - jest-snapshot "^24.9.0" - jest-util "^24.9.0" - pretty-format "^24.9.0" - throat "^4.0.0" - -jest-leak-detector@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-24.9.0.tgz#b665dea7c77100c5c4f7dfcb153b65cf07dcf96a" - integrity sha512-tYkFIDsiKTGwb2FG1w8hX9V0aUb2ot8zY/2nFg087dUageonw1zrLMP4W6zsRO59dPkTSKie+D4rhMuP9nRmrA== - dependencies: - jest-get-type "^24.9.0" - pretty-format "^24.9.0" - -jest-matcher-utils@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-24.9.0.tgz#f5b3661d5e628dffe6dd65251dfdae0e87c3a073" - integrity sha512-OZz2IXsu6eaiMAwe67c1T+5tUAtQyQx27/EMEkbFAGiw52tB9em+uGbzpcgYVpA8wl0hlxKPZxrly4CXU/GjHA== - dependencies: - chalk "^2.0.1" - jest-diff "^24.9.0" - jest-get-type "^24.9.0" - 
pretty-format "^24.9.0" - -jest-message-util@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-24.9.0.tgz#527f54a1e380f5e202a8d1149b0ec872f43119e3" - integrity sha512-oCj8FiZ3U0hTP4aSui87P4L4jC37BtQwUMqk+zk/b11FR19BJDeZsZAvIHutWnmtw7r85UmR3CEWZ0HWU2mAlw== - dependencies: - "@babel/code-frame" "^7.0.0" - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - "@types/stack-utils" "^1.0.1" - chalk "^2.0.1" - micromatch "^3.1.10" - slash "^2.0.0" - stack-utils "^1.0.1" - -jest-mock@^24.5.0, jest-mock@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-24.9.0.tgz#c22835541ee379b908673ad51087a2185c13f1c6" - integrity sha512-3BEYN5WbSq9wd+SyLDES7AHnjH9A/ROBwmz7l2y+ol+NtSFO8DYiEBzoO1CeFc9a8DYy10EO4dDFVv/wN3zl1w== - dependencies: - "@jest/types" "^24.9.0" - -jest-pnp-resolver@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.1.tgz#ecdae604c077a7fbc70defb6d517c3c1c898923a" - integrity sha512-pgFw2tm54fzgYvc/OHrnysABEObZCUNFnhjoRjaVOCN8NYc032/gVjPaHD4Aq6ApkSieWtfKAFQtmDKAmhupnQ== - -jest-regex-util@^24.3.0, jest-regex-util@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-24.9.0.tgz#c13fb3380bde22bf6575432c493ea8fe37965636" - integrity sha512-05Cmb6CuxaA+Ys6fjr3PhvV3bGQmO+2p2La4hFbU+W5uOc479f7FdLXUWXw4pYMAhhSZIuKHwSXSu6CsSBAXQA== - -jest-resolve-dependencies@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-24.9.0.tgz#ad055198959c4cfba8a4f066c673a3f0786507ab" - integrity sha512-Fm7b6AlWnYhT0BXy4hXpactHIqER7erNgIsIozDXWl5dVm+k8XdGVe1oTg1JyaFnOxarMEbax3wyRJqGP2Pq+g== - dependencies: - "@jest/types" "^24.9.0" - jest-regex-util "^24.3.0" - jest-snapshot "^24.9.0" - -jest-resolve@24.7.1: - version "24.7.1" - resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-24.7.1.tgz#e4150198299298380a75a9fd55043fa3b9b17fde" - integrity sha512-Bgrc+/UUZpGJ4323sQyj85hV9d+ANyPNu6XfRDUcyFNX1QrZpSoM0kE4Mb2vZMAYTJZsBFzYe8X1UaOkOELSbw== - dependencies: - "@jest/types" "^24.7.0" - browser-resolve "^1.11.3" - chalk "^2.0.1" - jest-pnp-resolver "^1.2.1" - realpath-native "^1.1.0" - -jest-resolve@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-24.9.0.tgz#dff04c7687af34c4dd7e524892d9cf77e5d17321" - integrity sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ== - dependencies: - "@jest/types" "^24.9.0" - browser-resolve "^1.11.3" - chalk "^2.0.1" - jest-pnp-resolver "^1.2.1" - realpath-native "^1.1.0" - -jest-runner@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-24.9.0.tgz#574fafdbd54455c2b34b4bdf4365a23857fcdf42" - integrity sha512-KksJQyI3/0mhcfspnxxEOBueGrd5E4vV7ADQLT9ESaCzz02WnbdbKWIf5Mkaucoaj7obQckYPVX6JJhgUcoWWg== - dependencies: - "@jest/console" "^24.7.1" - "@jest/environment" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - chalk "^2.4.2" - exit "^0.1.2" - graceful-fs "^4.1.15" - jest-config "^24.9.0" - jest-docblock "^24.3.0" - jest-haste-map "^24.9.0" - jest-jasmine2 "^24.9.0" - jest-leak-detector "^24.9.0" - jest-message-util "^24.9.0" - jest-resolve "^24.9.0" - jest-runtime "^24.9.0" - jest-util "^24.9.0" - jest-worker "^24.6.0" - source-map-support "^0.5.6" - throat "^4.0.0" - -jest-runtime@^24.9.0: - version "24.9.0" - resolved 
"https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-24.9.0.tgz#9f14583af6a4f7314a6a9d9f0226e1a781c8e4ac" - integrity sha512-8oNqgnmF3v2J6PVRM2Jfuj8oX3syKmaynlDMMKQ4iyzbQzIG6th5ub/lM2bCMTmoTKM3ykcUYI2Pw9xwNtjMnw== - dependencies: - "@jest/console" "^24.7.1" - "@jest/environment" "^24.9.0" - "@jest/source-map" "^24.3.0" - "@jest/transform" "^24.9.0" - "@jest/types" "^24.9.0" - "@types/yargs" "^13.0.0" - chalk "^2.0.1" - exit "^0.1.2" - glob "^7.1.3" - graceful-fs "^4.1.15" - jest-config "^24.9.0" - jest-haste-map "^24.9.0" - jest-message-util "^24.9.0" - jest-mock "^24.9.0" - jest-regex-util "^24.3.0" - jest-resolve "^24.9.0" - jest-snapshot "^24.9.0" - jest-util "^24.9.0" - jest-validate "^24.9.0" - realpath-native "^1.1.0" - slash "^2.0.0" - strip-bom "^3.0.0" - yargs "^13.3.0" - -jest-serializer@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-serializer/-/jest-serializer-24.9.0.tgz#e6d7d7ef96d31e8b9079a714754c5d5c58288e73" - integrity sha512-DxYipDr8OvfrKH3Kel6NdED3OXxjvxXZ1uIY2I9OFbGg+vUkkg7AGvi65qbhbWNPvDckXmzMPbK3u3HaDO49bQ== - -jest-snapshot@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-24.9.0.tgz#ec8e9ca4f2ec0c5c87ae8f925cf97497b0e951ba" - integrity sha512-uI/rszGSs73xCM0l+up7O7a40o90cnrk429LOiK3aeTvfC0HHmldbd81/B7Ix81KSFe1lwkbl7GnBGG4UfuDew== - dependencies: - "@babel/types" "^7.0.0" - "@jest/types" "^24.9.0" - chalk "^2.0.1" - expect "^24.9.0" - jest-diff "^24.9.0" - jest-get-type "^24.9.0" - jest-matcher-utils "^24.9.0" - jest-message-util "^24.9.0" - jest-resolve "^24.9.0" - mkdirp "^0.5.1" - natural-compare "^1.4.0" - pretty-format "^24.9.0" - semver "^6.2.0" - -jest-util@^24.5.0, jest-util@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-24.9.0.tgz#7396814e48536d2e85a37de3e4c431d7cb140162" - integrity sha512-x+cZU8VRmOJxbA1K5oDBdxQmdq0OIdADarLxk0Mq+3XS4jgvhG/oKGWcIDCtPG0HgjxOYvF+ilPJQsAyXfbNOg== - dependencies: - "@jest/console" "^24.9.0" - "@jest/fake-timers" "^24.9.0" - "@jest/source-map" "^24.9.0" - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - callsites "^3.0.0" - chalk "^2.0.1" - graceful-fs "^4.1.15" - is-ci "^2.0.0" - mkdirp "^0.5.1" - slash "^2.0.0" - source-map "^0.6.0" - -jest-validate@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-24.9.0.tgz#0775c55360d173cd854e40180756d4ff52def8ab" - integrity sha512-HPIt6C5ACwiqSiwi+OfSSHbK8sG7akG8eATl+IPKaeIjtPOeBUd/g3J7DghugzxrGjI93qS/+RPKe1H6PqvhRQ== - dependencies: - "@jest/types" "^24.9.0" - camelcase "^5.3.1" - chalk "^2.0.1" - jest-get-type "^24.9.0" - leven "^3.1.0" - pretty-format "^24.9.0" - -jest-watch-typeahead@0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/jest-watch-typeahead/-/jest-watch-typeahead-0.3.0.tgz#f56d9ee17ea71ecbf8253fed213df3185a1584c9" - integrity sha512-+uOtlppt9ysST6k6ZTqsPI0WNz2HLa8bowiZylZoQCQaAVn7XsVmHhZREkz73FhKelrFrpne4hQQjdq42nFEmA== - dependencies: - ansi-escapes "^3.0.0" - chalk "^2.4.1" - jest-watcher "^24.3.0" - slash "^2.0.0" - string-length "^2.0.0" - strip-ansi "^5.0.0" - -jest-watcher@^24.3.0, jest-watcher@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-24.9.0.tgz#4b56e5d1ceff005f5b88e528dc9afc8dd4ed2b3b" - integrity sha512-+/fLOfKPXXYJDYlks62/4R4GoT+GU1tYZed99JSCOsmzkkF7727RqKrjNAxtfO4YpGv11wybgRvCjR73lK2GZw== - dependencies: - "@jest/test-result" "^24.9.0" - "@jest/types" "^24.9.0" - "@types/yargs" "^13.0.0" - ansi-escapes "^3.0.0" 
- chalk "^2.0.1" - jest-util "^24.9.0" - string-length "^2.0.0" - -jest-worker@^24.6.0, jest-worker@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-24.9.0.tgz#5dbfdb5b2d322e98567898238a9697bcce67b3e5" - integrity sha512-51PE4haMSXcHohnSMdM42anbvZANYTqMrr52tVKPqqsPJMzoP6FYYDVqahX/HrAoKEKz3uUPzSvKs9A3qR4iVw== - dependencies: - merge-stream "^2.0.0" - supports-color "^6.1.0" - -jest@24.7.1: - version "24.7.1" - resolved "https://registry.yarnpkg.com/jest/-/jest-24.7.1.tgz#0d94331cf510c75893ee32f87d7321d5bf8f2501" - integrity sha512-AbvRar5r++izmqo5gdbAjTeA6uNRGoNRuj5vHB0OnDXo2DXWZJVuaObiGgtlvhKb+cWy2oYbQSfxv7Q7GjnAtA== - dependencies: - import-local "^2.0.0" - jest-cli "^24.7.1" - -js-levenshtein@^1.1.3: - version "1.1.6" - resolved "https://registry.yarnpkg.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" - integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g== - -"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" - integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== - -js-tokens@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" - integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= - -js-yaml@^3.13.0, js-yaml@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.13.1.tgz#aff151b30bfdfa8e49e05da22e7415e9dfa37847" - integrity sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw== - dependencies: - argparse "^1.0.7" - esprima "^4.0.0" - -jsbn@~0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-0.1.1.tgz#a5e654c2e5a2deb5f201d96cefbca80c0ef2f513" - integrity sha1-peZUwuWi3rXyAdls77yoDA7y9RM= - -jsdom@^11.5.1: - version "11.12.0" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-11.12.0.tgz#1a80d40ddd378a1de59656e9e6dc5a3ba8657bc8" - integrity sha512-y8Px43oyiBM13Zc1z780FrfNLJCXTL40EWlty/LXUtcjykRBNgLlCjWXpfSPBl2iv+N7koQN+dvqszHZgT/Fjw== - dependencies: - abab "^2.0.0" - acorn "^5.5.3" - acorn-globals "^4.1.0" - array-equal "^1.0.0" - cssom ">= 0.3.2 < 0.4.0" - cssstyle "^1.0.0" - data-urls "^1.0.0" - domexception "^1.0.1" - escodegen "^1.9.1" - html-encoding-sniffer "^1.0.2" - left-pad "^1.3.0" - nwsapi "^2.0.7" - parse5 "4.0.0" - pn "^1.1.0" - request "^2.87.0" - request-promise-native "^1.0.5" - sax "^1.2.4" - symbol-tree "^3.2.2" - tough-cookie "^2.3.4" - w3c-hr-time "^1.0.1" - webidl-conversions "^4.0.2" - whatwg-encoding "^1.0.3" - whatwg-mimetype "^2.1.0" - whatwg-url "^6.4.1" - ws "^5.2.0" - xml-name-validator "^3.0.0" - -jsdom@^14.0.0: - version "14.1.0" - resolved "https://registry.yarnpkg.com/jsdom/-/jsdom-14.1.0.tgz#916463b6094956b0a6c1782c94e380cd30e1981b" - integrity sha512-O901mfJSuTdwU2w3Sn+74T+RnDVP+FuV5fH8tcPWyqrseRAb0s5xOtPgCFiPOtLcyK7CLIJwPyD83ZqQWvA5ng== - dependencies: - abab "^2.0.0" - acorn "^6.0.4" - acorn-globals "^4.3.0" - array-equal "^1.0.0" - cssom "^0.3.4" - cssstyle "^1.1.1" - data-urls "^1.1.0" - domexception "^1.0.1" - escodegen "^1.11.0" - html-encoding-sniffer "^1.0.2" - nwsapi "^2.1.3" - parse5 "5.1.0" - pn "^1.1.0" - request "^2.88.0" - request-promise-native "^1.0.5" - saxes "^3.1.9" - symbol-tree "^3.2.2" - tough-cookie "^2.5.0" - w3c-hr-time "^1.0.1" - 
w3c-xmlserializer "^1.1.2" - webidl-conversions "^4.0.2" - whatwg-encoding "^1.0.5" - whatwg-mimetype "^2.3.0" - whatwg-url "^7.0.0" - ws "^6.1.2" - xml-name-validator "^3.0.0" - -jsesc@^2.5.1: - version "2.5.2" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" - integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== - -jsesc@~0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" - integrity sha1-597mbjXW/Bb3EP6R1c9p9w8IkR0= - -json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" - integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== - -json-schema-traverse@^0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" - integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== - -json-schema@0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/json-schema/-/json-schema-0.2.3.tgz#b480c892e59a2f05954ce727bd3f2a4e882f9e13" - integrity sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM= - -json-stable-stringify-without-jsonify@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" - integrity sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE= - -json-stable-stringify@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz#9a759d39c5f2ff503fd5300646ed445f88c4f9af" - integrity sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8= - dependencies: - jsonify "~0.0.0" - -json-stringify-safe@~5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" - integrity sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus= - -json2mq@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/json2mq/-/json2mq-0.2.0.tgz#b637bd3ba9eabe122c83e9720483aeb10d2c904a" - integrity sha1-tje9O6nqvhIsg+lyBIOusQ0skEo= - dependencies: - string-convert "^0.2.0" - -json3@^3.3.2: - version "3.3.3" - resolved "https://registry.yarnpkg.com/json3/-/json3-3.3.3.tgz#7fc10e375fc5ae42c4705a5cc0aa6f62be305b81" - integrity sha512-c7/8mbUsKigAbLkD5B010BK4D9LZm7A1pNItkEwiUZRpIN66exu/e7YQWysGun+TRKaJp8MhemM+VkfWv42aCA== - -json5@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" - integrity sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow== - dependencies: - minimist "^1.2.0" - -json5@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/json5/-/json5-2.1.0.tgz#e7a0c62c48285c628d20a10b85c89bb807c32850" - integrity sha512-8Mh9h6xViijj36g7Dxi+Y4S6hNGV96vcJZr/SrlHh1LR/pEn/8j/+qIBbs44YKl69Lrfctp4QD+AdWLTMqEZAQ== - dependencies: - minimist "^1.2.0" - -jsonfile@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" - integrity sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss= - optionalDependencies: - graceful-fs "^4.1.6" - 
-jsonify@~0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" - integrity sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM= - -jsprim@^1.2.2: - version "1.4.1" - resolved "https://registry.yarnpkg.com/jsprim/-/jsprim-1.4.1.tgz#313e66bc1e5cc06e438bc1b7499c2e5c56acb6a2" - integrity sha1-MT5mvB5cwG5Di8G3SZwuXFastqI= - dependencies: - assert-plus "1.0.0" - extsprintf "1.3.0" - json-schema "0.2.3" - verror "1.10.0" - -jsx-ast-utils@^2.0.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-2.2.1.tgz#4d4973ebf8b9d2837ee91a8208cc66f3a2776cfb" - integrity sha512-v3FxCcAf20DayI+uxnCuw795+oOIkVu6EnJ1+kSzhqqTZHNkTZ7B66ZgLp4oLJ/gbA64cI0B7WRoHZMSRdyVRQ== - dependencies: - array-includes "^3.0.3" - object.assign "^4.1.0" - -killable@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/killable/-/killable-1.0.1.tgz#4c8ce441187a061c7474fb87ca08e2a638194892" - integrity sha512-LzqtLKlUwirEUyl/nicirVmNiPvYs7l5n8wOPP7fyJVpUPkvCnW/vuiXGpylGUlnPDnB7311rARzAt3Mhswpjg== - -kind-of@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-2.0.1.tgz#018ec7a4ce7e3a86cb9141be519d24c8faa981b5" - integrity sha1-AY7HpM5+OobLkUG+UZ0kyPqpgbU= - dependencies: - is-buffer "^1.0.2" - -kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: - version "3.2.2" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" - integrity sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ= - dependencies: - is-buffer "^1.1.5" - -kind-of@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-4.0.0.tgz#20813df3d712928b207378691a45066fae72dd57" - integrity sha1-IIE989cSkosgc3hpGkUGb65y3Vc= - dependencies: - is-buffer "^1.1.5" - -kind-of@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-5.1.0.tgz#729c91e2d857b7a419a1f9aa65685c4c33f5845d" - integrity sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw== - -kind-of@^6.0.0, kind-of@^6.0.2: - version "6.0.2" - resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.2.tgz#01146b36a6218e64e58f3a8d66de5d7fc6f6d051" - integrity sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA== - -kleur@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" - integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== - -last-call-webpack-plugin@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/last-call-webpack-plugin/-/last-call-webpack-plugin-3.0.0.tgz#9742df0e10e3cf46e5c0381c2de90d3a7a2d7555" - integrity sha512-7KI2l2GIZa9p2spzPIVZBYyNKkN+e/SQPpnjlTiPhdbDW3F86tdKKELxKpzJ5sgU19wQWsACULZmpTPYHeWO5w== - dependencies: - lodash "^4.17.5" - webpack-sources "^1.1.0" - -lazy-cache@^0.2.3: - version "0.2.7" - resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-0.2.7.tgz#7feddf2dcb6edb77d11ef1d117ab5ffdf0ab1b65" - integrity sha1-f+3fLctu23fRHvHRF6tf/fCrG2U= - -lazy-cache@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e" - integrity sha1-odePw6UEdMuAhF07O24dpJpEbo4= - -lcid@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lcid/-/lcid-2.0.0.tgz#6ef5d2df60e52f82eb228a4c373e8d1f397253cf" - integrity 
sha512-avPEb8P8EGnwXKClwsNUgryVjllcRqtMYa49NTsbQagYuT1DcXnl1915oxWjoyGrXR6zH/Y0Zc96xWsPcoDKeA== - dependencies: - invert-kv "^2.0.0" - -left-pad@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/left-pad/-/left-pad-1.3.0.tgz#5b8a3a7765dfe001261dde915589e782f8c94d1e" - integrity sha512-XI5MPzVNApjAyhQzphX8BkmKsKUxD4LdyK24iZeQGinBN9yTQT3bFlCBy/aVx2HrNcqQGsdot8ghrjyrvMCoEA== - -less-loader@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/less-loader/-/less-loader-5.0.0.tgz#498dde3a6c6c4f887458ee9ed3f086a12ad1b466" - integrity sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg== - dependencies: - clone "^2.1.1" - loader-utils "^1.1.0" - pify "^4.0.1" - -less@^3.9.0: - version "3.10.3" - resolved "https://registry.yarnpkg.com/less/-/less-3.10.3.tgz#417a0975d5eeecc52cff4bcfa3c09d35781e6792" - integrity sha512-vz32vqfgmoxF1h3K4J+yKCtajH0PWmjkIFgbs5d78E/c/e+UQTnI+lWK+1eQRE95PXM2mC3rJlLSSP9VQHnaow== - dependencies: - clone "^2.1.2" - optionalDependencies: - errno "^0.1.1" - graceful-fs "^4.1.2" - image-size "~0.5.0" - mime "^1.4.1" - mkdirp "^0.5.0" - promise "^7.1.1" - request "^2.83.0" - source-map "~0.6.0" - -leven@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" - integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== - -levn@^0.3.0, levn@~0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" - integrity sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4= - dependencies: - prelude-ls "~1.1.2" - type-check "~0.3.2" - -load-json-file@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-2.0.0.tgz#7947e42149af80d696cbf797bcaabcfe1fe29ca8" - integrity sha1-eUfkIUmvgNaWy/eXvKq8/h/inKg= - dependencies: - graceful-fs "^4.1.2" - parse-json "^2.2.0" - pify "^2.0.0" - strip-bom "^3.0.0" - -load-json-file@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-4.0.0.tgz#2f5f45ab91e33216234fd53adab668eb4ec0993b" - integrity sha1-L19Fq5HjMhYjT9U62rZo607AmTs= - dependencies: - graceful-fs "^4.1.2" - parse-json "^4.0.0" - pify "^3.0.0" - strip-bom "^3.0.0" - -loader-fs-cache@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/loader-fs-cache/-/loader-fs-cache-1.0.2.tgz#54cedf6b727e1779fd8f01205f05f6e88706f086" - integrity sha512-70IzT/0/L+M20jUlEqZhZyArTU6VKLRTYRDAYN26g4jfzpJqjipLL3/hgYpySqI9PwsVRHHFja0LfEmsx9X2Cw== - dependencies: - find-cache-dir "^0.1.1" - mkdirp "0.5.1" - -loader-runner@^2.3.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357" - integrity sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw== - -loader-utils@1.2.3, loader-utils@^1.0.1, loader-utils@^1.0.2, loader-utils@^1.1.0, loader-utils@^1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-1.2.3.tgz#1ff5dc6911c9f0a062531a4c04b609406108c2c7" - integrity sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA== - dependencies: - big.js "^5.2.2" - emojis-list "^2.0.0" - json5 "^1.0.1" - -locate-path@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-2.0.0.tgz#2b568b265eec944c6d9c0de9c3dbbbca0354cd8e" - integrity 
sha1-K1aLJl7slExtnA3pw9u7ygNUzY4= - dependencies: - p-locate "^2.0.0" - path-exists "^3.0.0" - -locate-path@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" - integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== - dependencies: - p-locate "^3.0.0" - path-exists "^3.0.0" - -lodash._getnative@^3.0.0: - version "3.9.1" - resolved "https://registry.yarnpkg.com/lodash._getnative/-/lodash._getnative-3.9.1.tgz#570bc7dede46d61cdcde687d65d3eecbaa3aaff5" - integrity sha1-VwvH3t5G1hzc3mh9ZdPuy6o6r/U= - -lodash._reinterpolate@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz#0ccf2d89166af03b3663c796538b75ac6e114d9d" - integrity sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0= - -lodash.debounce@^4.0.0, lodash.debounce@^4.0.8: - version "4.0.8" - resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" - integrity sha1-gteb/zCmfEAF/9XiUVMArZyk168= - -lodash.flow@^3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/lodash.flow/-/lodash.flow-3.5.0.tgz#87bf40292b8cf83e4e8ce1a3ae4209e20071675a" - integrity sha1-h79AKSuM+D5OjOGjrkIJ4gBxZ1o= - -lodash.isarguments@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz#2f573d85c6a24289ff00663b491c1d338ff3458a" - integrity sha1-L1c9hcaiQon/AGY7SRwdM4/zRYo= - -lodash.isarray@^3.0.0: - version "3.0.4" - resolved "https://registry.yarnpkg.com/lodash.isarray/-/lodash.isarray-3.0.4.tgz#79e4eb88c36a8122af86f844aa9bcd851b5fbb55" - integrity sha1-eeTriMNqgSKvhvhEqpvNhRtfu1U= - -lodash.keys@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/lodash.keys/-/lodash.keys-3.1.2.tgz#4dbc0472b156be50a0b286855d1bd0b0c656098a" - integrity sha1-TbwEcrFWvlCgsoaFXRvQsMZWCYo= - dependencies: - lodash._getnative "^3.0.0" - lodash.isarguments "^3.0.0" - lodash.isarray "^3.0.0" - -lodash.memoize@^4.1.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/lodash.memoize/-/lodash.memoize-4.1.2.tgz#bcc6c49a42a2840ed997f323eada5ecd182e0bfe" - integrity sha1-vMbEmkKihA7Zl/Mj6tpezRguC/4= - -lodash.sortby@^4.7.0: - version "4.7.0" - resolved "https://registry.yarnpkg.com/lodash.sortby/-/lodash.sortby-4.7.0.tgz#edd14c824e2cc9c1e0b0a1b42bb5210516a42438" - integrity sha1-7dFMgk4sycHgsKG0K7UhBRakJDg= - -lodash.tail@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/lodash.tail/-/lodash.tail-4.1.1.tgz#d2333a36d9e7717c8ad2f7cacafec7c32b444664" - integrity sha1-0jM6NtnncXyK0vfKyv7HwytERmQ= - -lodash.template@^4.4.0, lodash.template@^4.5.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/lodash.template/-/lodash.template-4.5.0.tgz#f976195cf3f347d0d5f52483569fe8031ccce8ab" - integrity sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A== - dependencies: - lodash._reinterpolate "^3.0.0" - lodash.templatesettings "^4.0.0" - -lodash.templatesettings@^4.0.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz#e481310f049d3cf6d47e912ad09313b154f0fb33" - integrity sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ== - dependencies: - lodash._reinterpolate "^3.0.0" - -lodash.throttle@^4.0.0: - version "4.1.1" - resolved 
"https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4" - integrity sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ= - -lodash.unescape@4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/lodash.unescape/-/lodash.unescape-4.0.1.tgz#bf2249886ce514cda112fae9218cdc065211fc9c" - integrity sha1-vyJJiGzlFM2hEvrpIYzcBlIR/Jw= - -lodash.uniq@^4.5.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773" - integrity sha1-0CJTc662Uq3BvILklFM5qEJ1R3M= - -"lodash@>=3.5 <5", lodash@^4.16.5, lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.13, lodash@^4.17.14, lodash@^4.17.4, lodash@^4.17.5: - version "4.17.15" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.15.tgz#b447f6670a0455bbfeedd11392eff330ea097548" - integrity sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A== - -loglevel@^1.4.1: - version "1.6.3" - resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.6.3.tgz#77f2eb64be55a404c9fd04ad16d57c1d6d6b1280" - integrity sha512-LoEDv5pgpvWgPF4kNYuIp0qqSJVWak/dML0RY74xlzMZiT9w77teNAwKYKWBTYjlokMirg+o3jBwp+vlLrcfAA== - -loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.2.0, loose-envify@^1.3.1, loose-envify@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" - integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== - dependencies: - js-tokens "^3.0.0 || ^4.0.0" - -lower-case@^1.1.1: - version "1.1.4" - resolved "https://registry.yarnpkg.com/lower-case/-/lower-case-1.1.4.tgz#9a2cabd1b9e8e0ae993a4bf7d5875c39c42e8eac" - integrity sha1-miyr0bno4K6ZOkv31YdcOcQujqw= - -lru-cache@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" - integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== - dependencies: - yallist "^3.0.2" - -make-dir@^2.0.0, make-dir@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" - integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== - dependencies: - pify "^4.0.1" - semver "^5.6.0" - -makeerror@1.0.x: - version "1.0.11" - resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.11.tgz#e01a5c9109f2af79660e4e8b9587790184f5a96c" - integrity sha1-4BpckQnyr3lmDk6LlYd5AYT1qWw= - dependencies: - tmpl "1.0.x" - -mamacro@^0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/mamacro/-/mamacro-0.0.3.tgz#ad2c9576197c9f1abf308d0787865bd975a3f3e4" - integrity sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA== - -map-age-cleaner@^0.1.1: - version "0.1.3" - resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" - integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w== - dependencies: - p-defer "^1.0.0" - -map-cache@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" - integrity sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8= - -map-visit@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/map-visit/-/map-visit-1.0.0.tgz#ecdca8f13144e660f1b5bd41f12f3479d98dfb8f" - integrity sha1-7Nyo8TFE5mDxtb1B8S80edmN+48= - dependencies: - object-visit "^1.0.0" - -md5.js@^1.3.4: - version "1.3.5" - resolved "https://registry.yarnpkg.com/md5.js/-/md5.js-1.3.5.tgz#b5d07b8e3216e3e27cd728d72f70d1e6a342005f" - integrity sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg== - dependencies: - hash-base "^3.0.0" - inherits "^2.0.1" - safe-buffer "^5.1.2" - -mdn-data@2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.4.tgz#699b3c38ac6f1d728091a64650b65d388502fd5b" - integrity sha512-iV3XNKw06j5Q7mi6h+9vbx23Tv7JkjEVgKHW4pimwyDGWm0OIQntJJ+u1C6mg6mK1EaTv42XQ7w76yuzH7M2cA== - -mdn-data@~1.1.0: - version "1.1.4" - resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-1.1.4.tgz#50b5d4ffc4575276573c4eedb8780812a8419f01" - integrity sha512-FSYbp3lyKjyj3E7fMl6rYvUdX0FBXaluGqlFoYESWQlyUTq8R+wp0rkFxoYFqZlHCvsUXGjyJmLQSnXToYhOSA== - -media-typer@0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" - integrity sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g= - -mem@^4.0.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/mem/-/mem-4.3.0.tgz#461af497bc4ae09608cdb2e60eefb69bff744178" - integrity sha512-qX2bG48pTqYRVmDB37rn/6PT7LcR8T7oAX3bf99u1Tt1nzxYfxkgqDwUwolPlXweM0XzBOBFzSx4kfp7KP1s/w== - dependencies: - map-age-cleaner "^0.1.1" - mimic-fn "^2.0.0" - p-is-promise "^2.0.0" - -memory-fs@^0.4.0, memory-fs@^0.4.1, memory-fs@~0.4.1: - version "0.4.1" - resolved "https://registry.yarnpkg.com/memory-fs/-/memory-fs-0.4.1.tgz#3a9a20b8462523e447cfbc7e8bb80ed667bfc552" - integrity sha1-OpoguEYlI+RHz7x+i7gO1me/xVI= - dependencies: - errno "^0.1.3" - readable-stream "^2.0.1" - -merge-deep@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/merge-deep/-/merge-deep-3.0.2.tgz#f39fa100a4f1bd34ff29f7d2bf4508fbb8d83ad2" - integrity sha512-T7qC8kg4Zoti1cFd8Cr0M+qaZfOwjlPDEdZIIPPB2JZctjaPM4fX+i7HOId69tAti2fvO6X5ldfYUONDODsrkA== - dependencies: - arr-union "^3.1.0" - clone-deep "^0.2.4" - kind-of "^3.0.2" - -merge-descriptors@1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" - integrity sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E= - -merge-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" - integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== - -merge2@^1.2.3: - version "1.2.4" - resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.2.4.tgz#c9269589e6885a60cf80605d9522d4b67ca646e3" - integrity sha512-FYE8xI+6pjFOhokZu0We3S5NKCirLbCzSh2Usf3qEyr4X8U+0jNg9P8RZ4qz+V2UoECLVwSyzU3LxXBaLGtD3A== - -methods@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" - integrity sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4= - -microevent.ts@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/microevent.ts/-/microevent.ts-0.1.1.tgz#70b09b83f43df5172d0205a63025bce0f7357fa0" - integrity sha512-jo1OfR4TaEwd5HOrt5+tAZ9mqT4jmpNAusXtyfNzqVm9uiSYFZlKM1wYL4oU7azZW/PxQW53wM0S6OR1JHNa2g== - -micromatch@^3.1.10, micromatch@^3.1.4, micromatch@^3.1.8: - version "3.1.10" - resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-3.1.10.tgz#70859bc95c9840952f359a068a3fc49f9ecfac23" - integrity sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - braces "^2.3.1" - define-property "^2.0.2" - extend-shallow "^3.0.2" - extglob "^2.0.4" - fragment-cache "^0.2.1" - kind-of "^6.0.2" - nanomatch "^1.2.9" - object.pick "^1.3.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.2" - -miller-rabin@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/miller-rabin/-/miller-rabin-4.0.1.tgz#f080351c865b0dc562a8462966daa53543c78a4d" - integrity sha512-115fLhvZVqWwHPbClyntxEVfVDfl9DLLTuJvq3g2O/Oxi8AiNouAHvDSzHS0viUJc+V5vm3eq91Xwqn9dp4jRA== - dependencies: - bn.js "^4.0.0" - brorand "^1.0.1" - -mime-db@1.40.0, "mime-db@>= 1.40.0 < 2": - version "1.40.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.40.0.tgz#a65057e998db090f732a68f6c276d387d4126c32" - integrity sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA== - -mime-types@^2.1.12, mime-types@~2.1.17, mime-types@~2.1.19, mime-types@~2.1.24: - version "2.1.24" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.24.tgz#b6f8d0b3e951efb77dedeca194cff6d16f676f81" - integrity sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ== - dependencies: - mime-db "1.40.0" - -mime@1.6.0, mime@^1.4.1: - version "1.6.0" - resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" - integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== - -mime@^2.0.3, mime@^2.4.2: - version "2.4.4" - resolved "https://registry.yarnpkg.com/mime/-/mime-2.4.4.tgz#bd7b91135fc6b01cde3e9bae33d659b63d8857e5" - integrity sha512-LRxmNwziLPT828z+4YkNzloCFC2YM4wrB99k+AV5ZbEyfGNWfG8SO1FUXLmLDBSo89NrJZ4DIWeLjy1CHGhMGA== - -mimic-fn@^1.0.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" - integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== - -mimic-fn@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" - integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== - -mini-create-react-context@^0.3.0: - version "0.3.2" - resolved "https://registry.yarnpkg.com/mini-create-react-context/-/mini-create-react-context-0.3.2.tgz#79fc598f283dd623da8e088b05db8cddab250189" - integrity sha512-2v+OeetEyliMt5VHMXsBhABoJ0/M4RCe7fatd/fBy6SMiKazUSEt3gxxypfnk2SHMkdBYvorHRoQxuGoiwbzAw== - dependencies: - "@babel/runtime" "^7.4.0" - gud "^1.0.0" - tiny-warning "^1.0.2" - -mini-css-extract-plugin@0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-0.5.0.tgz#ac0059b02b9692515a637115b0cc9fed3a35c7b0" - integrity sha512-IuaLjruM0vMKhUUT51fQdQzBYTX49dLj8w68ALEAe2A4iYNpIC4eMac67mt3NzycvjOlf07/kYxJDc0RTl1Wqw== - dependencies: - loader-utils "^1.1.0" - schema-utils "^1.0.0" - webpack-sources "^1.1.0" - -mini-store@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/mini-store/-/mini-store-2.0.0.tgz#0843c048d6942ce55e3e78b1b67fc063022b5488" - integrity 
sha512-EG0CuwpQmX+XL4QVS0kxNwHW5ftSbhygu1qxQH0pipugjnPkbvkalCdQbEihMwtQY6d3MTN+MS0q+aurs+RfLQ== - dependencies: - hoist-non-react-statics "^2.3.1" - prop-types "^15.6.0" - react-lifecycles-compat "^3.0.4" - shallowequal "^1.0.2" - -minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" - integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A== - -minimalistic-crypto-utils@^1.0.0, minimalistic-crypto-utils@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" - integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= - -minimatch@3.0.4, minimatch@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== - dependencies: - brace-expansion "^1.1.7" - -minimist@0.0.8: - version "0.0.8" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.8.tgz#857fcabfc3397d2625b8228262e86aa7a011b05d" - integrity sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0= - -minimist@^1.1.1, minimist@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.0.tgz#a35008b20f41383eec1fb914f4cd5df79a264284" - integrity sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ= - -minimist@~0.0.1: - version "0.0.10" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-0.0.10.tgz#de3f98543dbf96082be48ad1a0c7cda836301dcf" - integrity sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8= - -minipass@^2.2.1, minipass@^2.3.5: - version "2.4.0" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-2.4.0.tgz#38f0af94f42fb6f34d3d7d82a90e2c99cd3ff485" - integrity sha512-6PmOuSP4NnZXzs2z6rbwzLJu/c5gdzYg1mRI/WIYdx45iiX7T+a4esOzavD6V/KmBzAaopFSTZPZcUx73bqKWA== - dependencies: - safe-buffer "^5.1.2" - yallist "^3.0.0" - -minizlib@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/minizlib/-/minizlib-1.2.1.tgz#dd27ea6136243c7c880684e8672bb3a45fd9b614" - integrity sha512-7+4oTUOWKg7AuL3vloEWekXY2/D20cevzsrNT2kGWm+39J9hGTCBv8VI5Pm5lXZ/o3/mdR4f8rflAPhnQb8mPA== - dependencies: - minipass "^2.2.1" - -mississippi@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/mississippi/-/mississippi-3.0.0.tgz#ea0a3291f97e0b5e8776b363d5f0a12d94c67022" - integrity sha512-x471SsVjUtBRtcvd4BzKE9kFC+/2TeWgKCgw0bZcw1b9l2X3QX5vCWgF+KaZaYm87Ss//rHnWryupDrgLvmSkA== - dependencies: - concat-stream "^1.5.0" - duplexify "^3.4.2" - end-of-stream "^1.1.0" - flush-write-stream "^1.0.0" - from2 "^2.1.0" - parallel-transform "^1.1.0" - pump "^3.0.0" - pumpify "^1.3.3" - stream-each "^1.1.0" - through2 "^2.0.0" - -mixin-deep@^1.2.0: - version "1.3.2" - resolved "https://registry.yarnpkg.com/mixin-deep/-/mixin-deep-1.3.2.tgz#1120b43dc359a785dce65b55b82e257ccf479566" - integrity sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA== - dependencies: - for-in "^1.0.2" - is-extendable "^1.0.1" - -mixin-object@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/mixin-object/-/mixin-object-2.0.1.tgz#4fb949441dab182540f1fe035ba60e1947a5e57e" - integrity sha1-T7lJRB2rGCVA8f4DW6YOGUel5X4= - dependencies: - for-in "^0.1.3" - is-extendable "^0.1.1" - -mkdirp@0.5.1, mkdirp@0.5.x, mkdirp@^0.5.0, mkdirp@^0.5.1, 
mkdirp@~0.5.0, mkdirp@~0.5.1: - version "0.5.1" - resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.1.tgz#30057438eac6cf7f8c4767f38648d6697d75c903" - integrity sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM= - dependencies: - minimist "0.0.8" - -moment@2.x, moment@^2.24.0: - version "2.24.0" - resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b" - integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg== - -move-concurrently@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/move-concurrently/-/move-concurrently-1.0.1.tgz#be2c005fda32e0b29af1f05d7c4b33214c701f92" - integrity sha1-viwAX9oy4LKa8fBdfEszIUxwH5I= - dependencies: - aproba "^1.1.1" - copy-concurrently "^1.0.0" - fs-write-stream-atomic "^1.0.8" - mkdirp "^0.5.1" - rimraf "^2.5.4" - run-queue "^1.0.3" - -ms@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" - integrity sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g= - -ms@2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a" - integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg== - -ms@^2.1.1: - version "2.1.2" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" - integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== - -multicast-dns-service-types@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/multicast-dns-service-types/-/multicast-dns-service-types-1.1.0.tgz#899f11d9686e5e05cb91b35d5f0e63b773cfc901" - integrity sha1-iZ8R2WhuXgXLkbNdXw5jt3PPyQE= - -multicast-dns@^6.0.1: - version "6.2.3" - resolved "https://registry.yarnpkg.com/multicast-dns/-/multicast-dns-6.2.3.tgz#a0ec7bd9055c4282f790c3c82f4e28db3b31b229" - integrity sha512-ji6J5enbMyGRHIAkAOu3WdV8nggqviKCEKtXcOqfphZZtQrmHKycfynJ2V7eVPUA4NhJ6V7Wf4TmGbTwKE9B6g== - dependencies: - dns-packet "^1.3.1" - thunky "^1.0.2" - -mutationobserver-shim@^0.3.2: - version "0.3.3" - resolved "https://registry.yarnpkg.com/mutationobserver-shim/-/mutationobserver-shim-0.3.3.tgz#65869630bc89d7bf8c9cd9cb82188cd955aacd2b" - integrity sha512-gciOLNN8Vsf7YzcqRjKzlAJ6y7e+B86u7i3KXes0xfxx/nfLmozlW1Vn+Sc9x3tPIePFgc1AeIFhtRgkqTjzDQ== - -mute-stream@0.0.7: - version "0.0.7" - resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" - integrity sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s= - -nan@^2.12.1: - version "2.14.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.0.tgz#7818f722027b2459a86f0295d434d1fc2336c52c" - integrity sha512-INOFj37C7k3AfaNTtX8RhsTw7qRy7eLET14cROi9+5HAVbbHuIWUHEauBv5qT4Av2tWasiTY1Jw6puUNqRJXQg== - -nanomatch@^1.2.9: - version "1.2.13" - resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" - integrity sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA== - dependencies: - arr-diff "^4.0.0" - array-unique "^0.3.2" - define-property "^2.0.2" - extend-shallow "^3.0.2" - fragment-cache "^0.2.1" - is-windows "^1.0.2" - kind-of "^6.0.2" - object.pick "^1.3.0" - regex-not "^1.0.0" - snapdragon "^0.8.1" - to-regex "^3.0.1" - -natural-compare@^1.4.0: - version "1.4.0" - resolved 
"https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" - integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc= - -needle@^2.2.1: - version "2.4.0" - resolved "https://registry.yarnpkg.com/needle/-/needle-2.4.0.tgz#6833e74975c444642590e15a750288c5f939b57c" - integrity sha512-4Hnwzr3mi5L97hMYeNl8wRW/Onhy4nUKR/lVemJ8gJedxxUyBLm9kkrDColJvoSfwi0jCNhD+xCdOtiGDQiRZg== - dependencies: - debug "^3.2.6" - iconv-lite "^0.4.4" - sax "^1.2.4" - -negotiator@0.6.2: - version "0.6.2" - resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" - integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== - -neo-async@^2.5.0, neo-async@^2.6.0: - version "2.6.1" - resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.1.tgz#ac27ada66167fa8849a6addd837f6b189ad2081c" - integrity sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw== - -nice-try@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" - integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== - -no-case@^2.2.0: - version "2.3.2" - resolved "https://registry.yarnpkg.com/no-case/-/no-case-2.3.2.tgz#60b813396be39b3f1288a4c1ed5d1e7d28b464ac" - integrity sha512-rmTZ9kz+f3rCvK2TD1Ue/oZlns7OGoIWP4fc3llxxRXlOkHKoWPPWJOfFYpITabSow43QJbRIoHQXtt10VldyQ== - dependencies: - lower-case "^1.1.1" - -node-fetch@^1.0.1: - version "1.7.3" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-1.7.3.tgz#980f6f72d85211a5347c6b2bc18c5b84c3eb47ef" - integrity sha512-NhZ4CsKx7cYm2vSrBAr2PvFOe6sWDf0UYLRqA6svUYg7+/TSfVAu49jYC4BvQ4Sms9SZgdqGBgroqfDhJdTyKQ== - dependencies: - encoding "^0.1.11" - is-stream "^1.0.1" - -node-forge@0.7.5: - version "0.7.5" - resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-0.7.5.tgz#6c152c345ce11c52f465c2abd957e8639cd674df" - integrity sha512-MmbQJ2MTESTjt3Gi/3yG1wGpIMhUfcIypUCGtTizFR9IiccFwxSpfp0vtIZlkFclEqERemxfnSdZEMR9VqqEFQ== - -node-int64@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" - integrity sha1-h6kGXNs1XTGC2PlM4RGIuCXGijs= - -node-libs-browser@^2.0.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/node-libs-browser/-/node-libs-browser-2.2.1.tgz#b64f513d18338625f90346d27b0d235e631f6425" - integrity sha512-h/zcD8H9kaDZ9ALUWwlBUDo6TKF8a7qBSCSEGfjTVIYeqsioSKaAX+BN7NgiMGp6iSIXZ3PxgCu8KS3b71YK5Q== - dependencies: - assert "^1.1.1" - browserify-zlib "^0.2.0" - buffer "^4.3.0" - console-browserify "^1.1.0" - constants-browserify "^1.0.0" - crypto-browserify "^3.11.0" - domain-browser "^1.1.1" - events "^3.0.0" - https-browserify "^1.0.0" - os-browserify "^0.3.0" - path-browserify "0.0.1" - process "^0.11.10" - punycode "^1.2.4" - querystring-es3 "^0.2.0" - readable-stream "^2.3.3" - stream-browserify "^2.0.1" - stream-http "^2.7.2" - string_decoder "^1.0.0" - timers-browserify "^2.0.4" - tty-browserify "0.0.0" - url "^0.11.0" - util "^0.11.0" - vm-browserify "^1.0.1" - -node-modules-regexp@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz#8d9dbe28964a4ac5712e9131642107c71e90ec40" - integrity sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA= - -node-notifier@^5.4.2: - version "5.4.3" - resolved 
"https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.4.3.tgz#cb72daf94c93904098e28b9c590fd866e464bd50" - integrity sha512-M4UBGcs4jeOK9CjTsYwkvH6/MzuUmGCyTW+kCY7uO+1ZVr0+FHGdPdIf5CCLqAaxnRrWidyoQlNkMIIVwbKB8Q== - dependencies: - growly "^1.3.0" - is-wsl "^1.1.0" - semver "^5.5.0" - shellwords "^0.1.1" - which "^1.3.0" - -node-pre-gyp@^0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.12.0.tgz#39ba4bb1439da030295f899e3b520b7785766149" - integrity sha512-4KghwV8vH5k+g2ylT+sLTjy5wmUOb9vPhnM8NHvRf9dHmnW/CndrFXy2aRPaPST6dugXSdHXfeaHQm77PIz/1A== - dependencies: - detect-libc "^1.0.2" - mkdirp "^0.5.1" - needle "^2.2.1" - nopt "^4.0.1" - npm-packlist "^1.1.6" - npmlog "^4.0.2" - rc "^1.2.7" - rimraf "^2.6.1" - semver "^5.3.0" - tar "^4" - -node-releases@^1.1.25: - version "1.1.28" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.28.tgz#503c3c70d0e4732b84e7aaa2925fbdde10482d4a" - integrity sha512-AQw4emh6iSXnCpDiFe0phYcThiccmkNWMZnFZ+lDJjAP8J0m2fVd59duvUUyuTirQOhIAajTFkzG6FHCLBO59g== - dependencies: - semver "^5.3.0" - -nopt@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.1.tgz#d0d4685afd5415193c8c7505602d0d17cd64474d" - integrity sha1-0NRoWv1UFRk8jHUFYC0NF81kR00= - dependencies: - abbrev "1" - osenv "^0.1.4" - -normalize-package-data@^2.3.2: - version "2.5.0" - resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8" - integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA== - dependencies: - hosted-git-info "^2.1.4" - resolve "^1.10.0" - semver "2 || 3 || 4 || 5" - validate-npm-package-license "^3.0.1" - -normalize-path@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-2.1.1.tgz#1ab28b556e198363a8c1a6f7e6fa20137fe6aed9" - integrity sha1-GrKLVW4Zg2Oowab35vogE3/mrtk= - dependencies: - remove-trailing-separator "^1.0.1" - -normalize-path@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" - integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== - -normalize-range@^0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" - integrity sha1-LRDAa9/TEuqXd2laTShDlFa3WUI= - -normalize-url@^3.0.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-3.3.0.tgz#b2e1c4dc4f7c6d57743df733a4f5978d18650559" - integrity sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg== - -npm-bundled@^1.0.1: - version "1.0.6" - resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.6.tgz#e7ba9aadcef962bb61248f91721cd932b3fe6bdd" - integrity sha512-8/JCaftHwbd//k6y2rEWp6k1wxVfpFzB6t1p825+cUb7Ym2XQfhwIC5KwhrvzZRJu+LtDE585zVaS32+CGtf0g== - -npm-packlist@^1.1.6: - version "1.4.4" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.4.tgz#866224233850ac534b63d1a6e76050092b5d2f44" - integrity sha512-zTLo8UcVYtDU3gdeaFu2Xu0n0EvelfHDGuqtNIn5RO7yQj4H1TqNdBc/yZjxnWA0PVB8D3Woyp0i5B43JwQ6Vw== - dependencies: - ignore-walk "^3.0.1" - npm-bundled "^1.0.1" - -npm-run-path@^2.0.0: - version "2.0.2" - resolved 
"https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" - integrity sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8= - dependencies: - path-key "^2.0.0" - -npmlog@^4.0.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b" - integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg== - dependencies: - are-we-there-yet "~1.1.2" - console-control-strings "~1.1.0" - gauge "~2.7.3" - set-blocking "~2.0.0" - -nth-check@^1.0.2, nth-check@~1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/nth-check/-/nth-check-1.0.2.tgz#b2bd295c37e3dd58a3bf0700376663ba4d9cf05c" - integrity sha512-WeBOdju8SnzPN5vTUJYxYUxLeXpCaVP5i5e0LF8fg7WORF2Wd7wFX/pk0tYZk7s8T+J7VLy0Da6J1+wCT0AtHg== - dependencies: - boolbase "~1.0.0" - -num2fraction@^1.2.2: - version "1.2.2" - resolved "https://registry.yarnpkg.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" - integrity sha1-b2gragJ6Tp3fpFZM0lidHU5mnt4= - -number-is-nan@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" - integrity sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0= - -nwsapi@^2.0.7, nwsapi@^2.1.3: - version "2.1.4" - resolved "https://registry.yarnpkg.com/nwsapi/-/nwsapi-2.1.4.tgz#e006a878db23636f8e8a67d33ca0e4edf61a842f" - integrity sha512-iGfd9Y6SFdTNldEy2L0GUhcarIutFmk+MPWIn9dmj8NMIup03G08uUF2KGbbmv/Ux4RT0VZJoP/sVbWA6d/VIw== - -oauth-sign@~0.9.0: - version "0.9.0" - resolved "https://registry.yarnpkg.com/oauth-sign/-/oauth-sign-0.9.0.tgz#47a7b016baa68b5fa0ecf3dee08a85c679ac6455" - integrity sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ== - -object-assign@4.1.1, object-assign@4.x, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" - integrity sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM= - -object-copy@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/object-copy/-/object-copy-0.1.0.tgz#7e7d858b781bd7c991a41ba975ed3812754e998c" - integrity sha1-fn2Fi3gb18mRpBupde04EnVOmYw= - dependencies: - copy-descriptor "^0.1.0" - define-property "^0.2.5" - kind-of "^3.0.3" - -object-hash@^1.1.4: - version "1.3.1" - resolved "https://registry.yarnpkg.com/object-hash/-/object-hash-1.3.1.tgz#fde452098a951cb145f039bb7d455449ddc126df" - integrity sha512-OSuu/pU4ENM9kmREg0BdNrUDIl1heYa4mBZacJc+vVWz4GtAwu7jO8s4AIt2aGRUTqxykpWzI3Oqnsm13tTMDA== - -object-is@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/object-is/-/object-is-1.0.1.tgz#0aa60ec9989a0b3ed795cf4d06f62cf1ad6539b6" - integrity sha1-CqYOyZiaCz7Xlc9NBvYs8a1lObY= - -object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" - integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== - -object-visit@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/object-visit/-/object-visit-1.0.1.tgz#f79c4493af0c5377b59fe39d395e41042dd045bb" - integrity sha1-95xEk68MU3e1n+OdOV5BBC3QRbs= - dependencies: - isobject "^3.0.0" - -object.assign@^4.1.0: - version "4.1.0" - resolved 
"https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" - integrity sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w== - dependencies: - define-properties "^1.1.2" - function-bind "^1.1.1" - has-symbols "^1.0.0" - object-keys "^1.0.11" - -object.fromentries@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.0.tgz#49a543d92151f8277b3ac9600f1e930b189d30ab" - integrity sha512-9iLiI6H083uiqUuvzyY6qrlmc/Gz8hLQFOcb/Ri/0xXFkSNS3ctV+CbE6yM2+AnkYfOB3dGjdzC0wrMLIhQICA== - dependencies: - define-properties "^1.1.2" - es-abstract "^1.11.0" - function-bind "^1.1.1" - has "^1.0.1" - -object.getownpropertydescriptors@^2.0.3: - version "2.0.3" - resolved "https://registry.yarnpkg.com/object.getownpropertydescriptors/-/object.getownpropertydescriptors-2.0.3.tgz#8758c846f5b407adab0f236e0986f14b051caa16" - integrity sha1-h1jIRvW0B62rDyNuCYbxSwUcqhY= - dependencies: - define-properties "^1.1.2" - es-abstract "^1.5.1" - -object.pick@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/object.pick/-/object.pick-1.3.0.tgz#87a10ac4c1694bd2e1cbf53591a66141fb5dd747" - integrity sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c= - dependencies: - isobject "^3.0.1" - -object.values@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.0.tgz#bf6810ef5da3e5325790eaaa2be213ea84624da9" - integrity sha512-8mf0nKLAoFX6VlNVdhGj31SVYpaNFtUnuoOXWyFEstsWRgU837AK+JYM0iAxwkSzGRbwn8cbFmgbyxj1j4VbXg== - dependencies: - define-properties "^1.1.3" - es-abstract "^1.12.0" - function-bind "^1.1.1" - has "^1.0.3" - -obuf@^1.0.0, obuf@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/obuf/-/obuf-1.1.2.tgz#09bea3343d41859ebd446292d11c9d4db619084e" - integrity sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg== - -omit.js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/omit.js/-/omit.js-1.0.2.tgz#91a14f0eba84066dfa015bf30e474c47f30bc858" - integrity sha512-/QPc6G2NS+8d4L/cQhbk6Yit1WTB6Us2g84A7A/1+w9d/eRGHyEqC5kkQtHVoHZ5NFWGG7tUGgrhVZwgZanKrQ== - dependencies: - babel-runtime "^6.23.0" - -on-finished@~2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" - integrity sha1-IPEzZIGwg811M3mSoWlxqi2QaUc= - dependencies: - ee-first "1.1.1" - -on-headers@~1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" - integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== - -once@^1.3.0, once@^1.3.1, once@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= - dependencies: - wrappy "1" - -onetime@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" - integrity sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ= - dependencies: - mimic-fn "^1.0.0" - -open@^6.3.0: - version "6.4.0" - resolved "https://registry.yarnpkg.com/open/-/open-6.4.0.tgz#5c13e96d0dc894686164f18965ecfe889ecfc8a9" - integrity sha512-IFenVPgF70fSm1keSd2iDBIDIBZkroLeuffXq+wKTzTJlBpesFWojV9lb8mzOfaAzM1sr7HQHuO0vtV0zYekGg== - dependencies: - is-wsl "^1.1.0" - -opn@^5.1.0: - version "5.5.0" - 
resolved "https://registry.yarnpkg.com/opn/-/opn-5.5.0.tgz#fc7164fab56d235904c51c3b27da6758ca3b9bfc" - integrity sha512-PqHpggC9bLV0VeWcdKhkpxY+3JTzetLSqTCWL/z/tFIbI6G8JCjondXklT1JinczLz2Xib62sSp0T/gKT4KksA== - dependencies: - is-wsl "^1.1.0" - -optimist@^0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/optimist/-/optimist-0.6.1.tgz#da3ea74686fa21a19a111c326e90eb15a0196686" - integrity sha1-2j6nRob6IaGaERwybpDrFaAZZoY= - dependencies: - minimist "~0.0.1" - wordwrap "~0.0.2" - -optimize-css-assets-webpack-plugin@5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/optimize-css-assets-webpack-plugin/-/optimize-css-assets-webpack-plugin-5.0.1.tgz#9eb500711d35165b45e7fd60ba2df40cb3eb9159" - integrity sha512-Rqm6sSjWtx9FchdP0uzTQDc7GXDKnwVEGoSxjezPkzMewx7gEWE9IMUYKmigTRC4U3RaNSwYVnUDLuIdtTpm0A== - dependencies: - cssnano "^4.1.0" - last-call-webpack-plugin "^3.0.0" - -optionator@^0.8.1, optionator@^0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.8.2.tgz#364c5e409d3f4d6301d6c0b4c05bba50180aeb64" - integrity sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q= - dependencies: - deep-is "~0.1.3" - fast-levenshtein "~2.0.4" - levn "~0.3.0" - prelude-ls "~1.1.2" - type-check "~0.3.2" - wordwrap "~1.0.0" - -original@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/original/-/original-1.0.2.tgz#e442a61cffe1c5fd20a65f3261c26663b303f25f" - integrity sha512-hyBVl6iqqUOJ8FqRe+l/gS8H+kKYjrEndd5Pm1MfBtsEKA038HkkdbAl/72EAXGyonD/PFsvmVG+EvcIpliMBg== - dependencies: - url-parse "^1.4.3" - -os-browserify@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/os-browserify/-/os-browserify-0.3.0.tgz#854373c7f5c2315914fc9bfc6bd8238fdda1ec27" - integrity sha1-hUNzx/XCMVkU/Jv8a9gjj92h7Cc= - -os-homedir@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" - integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= - -os-locale@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/os-locale/-/os-locale-3.1.0.tgz#a802a6ee17f24c10483ab9935719cef4ed16bf1a" - integrity sha512-Z8l3R4wYWM40/52Z+S265okfFj8Kt2cC2MKY+xNi3kFs+XGI7WXu/I309QQQYbRW4ijiZ+yxs9pqEhJh0DqW3Q== - dependencies: - execa "^1.0.0" - lcid "^2.0.0" - mem "^4.0.0" - -os-tmpdir@^1.0.0, os-tmpdir@~1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" - integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= - -osenv@^0.1.4: - version "0.1.5" - resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410" - integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g== - dependencies: - os-homedir "^1.0.0" - os-tmpdir "^1.0.0" - -p-defer@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" - integrity sha1-n26xgvbJqozXQwBKfU+WsZaw+ww= - -p-each-series@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-each-series/-/p-each-series-1.0.0.tgz#930f3d12dd1f50e7434457a22cd6f04ac6ad7f71" - integrity sha1-kw89Et0fUOdDRFeiLNbwSsatf3E= - dependencies: - p-reduce "^1.0.0" - -p-finally@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" - integrity sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4= - -p-is-promise@^2.0.0: - version "2.1.0" - resolved 
"https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-2.1.0.tgz#918cebaea248a62cf7ffab8e3bca8c5f882fc42e" - integrity sha512-Y3W0wlRPK8ZMRbNq97l4M5otioeA5lm1z7bkNkxCka8HSPjR0xRWmpCmc9utiaLP9Jb1eD8BgeIxTW4AIF45Pg== - -p-limit@^1.1.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" - integrity sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q== - dependencies: - p-try "^1.0.0" - -p-limit@^2.0.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.2.1.tgz#aa07a788cc3151c939b5131f63570f0dd2009537" - integrity sha512-85Tk+90UCVWvbDavCLKPOLC9vvY8OwEX/RtKF+/1OADJMVlFfEHOiMTPVyxg7mk/dKa+ipdHm0OUkTvCpMTuwg== - dependencies: - p-try "^2.0.0" - -p-locate@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-2.0.0.tgz#20a0103b222a70c8fd39cc2e580680f3dde5ec43" - integrity sha1-IKAQOyIqcMj9OcwuWAaA893l7EM= - dependencies: - p-limit "^1.1.0" - -p-locate@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" - integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== - dependencies: - p-limit "^2.0.0" - -p-map@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/p-map/-/p-map-1.2.0.tgz#e4e94f311eabbc8633a1e79908165fca26241b6b" - integrity sha512-r6zKACMNhjPJMTl8KcFH4li//gkrXWfbD6feV8l6doRHlzljFWGJ2AP6iKaCJXyZmAUMOPtvbW7EXkbWO/pLEA== - -p-reduce@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-reduce/-/p-reduce-1.0.0.tgz#18c2b0dd936a4690a529f8231f58a0fdb6a47dfa" - integrity sha1-GMKw3ZNqRpClKfgjH1ig/bakffo= - -p-try@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-1.0.0.tgz#cbc79cdbaf8fd4228e13f621f2b1a237c1b207b3" - integrity sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M= - -p-try@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" - integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== - -pako@~1.0.5: - version "1.0.10" - resolved "https://registry.yarnpkg.com/pako/-/pako-1.0.10.tgz#4328badb5086a426aa90f541977d4955da5c9732" - integrity sha512-0DTvPVU3ed8+HNXOu5Bs+o//Mbdj9VNQMUOe9oKCwh8l0GNwpTDMKCWbRjgtD291AWnkAgkqA/LOnQS8AmS1tw== - -parallel-transform@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/parallel-transform/-/parallel-transform-1.1.0.tgz#d410f065b05da23081fcd10f28854c29bda33b06" - integrity sha1-1BDwZbBdojCB/NEPKIVMKb2jOwY= - dependencies: - cyclist "~0.2.2" - inherits "^2.0.3" - readable-stream "^2.1.5" - -param-case@2.1.x: - version "2.1.1" - resolved "https://registry.yarnpkg.com/param-case/-/param-case-2.1.1.tgz#df94fd8cf6531ecf75e6bef9a0858fbc72be2247" - integrity sha1-35T9jPZTHs915r75oIWPvHK+Ikc= - dependencies: - no-case "^2.2.0" - -parent-module@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" - integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== - dependencies: - callsites "^3.0.0" - -parse-asn1@^5.0.0: - version "5.1.4" - resolved "https://registry.yarnpkg.com/parse-asn1/-/parse-asn1-5.1.4.tgz#37f6628f823fbdeb2273b4d540434a22f3ef1fcc" - integrity 
sha512-Qs5duJcuvNExRfFZ99HDD3z4mAi3r9Wl/FOjEOijlxwCZs7E7mW2vjTpgQ4J8LpTF8x5v+1Vn5UQFejmWT11aw== - dependencies: - asn1.js "^4.0.0" - browserify-aes "^1.0.0" - create-hash "^1.1.0" - evp_bytestokey "^1.0.0" - pbkdf2 "^3.0.3" - safe-buffer "^5.1.1" - -parse-json@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-2.2.0.tgz#f480f40434ef80741f8469099f8dea18f55a4dc9" - integrity sha1-9ID0BDTvgHQfhGkJn43qGPVaTck= - dependencies: - error-ex "^1.2.0" - -parse-json@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" - integrity sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA= - dependencies: - error-ex "^1.3.1" - json-parse-better-errors "^1.0.1" - -parse5@4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-4.0.0.tgz#6d78656e3da8d78b4ec0b906f7c08ef1dfe3f608" - integrity sha512-VrZ7eOd3T1Fk4XWNXMgiGBK/z0MG48BWG2uQNU4I72fkQuKUTZpl+u9k+CxEG0twMVzSmXEEz12z5Fnw1jIQFA== - -parse5@5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/parse5/-/parse5-5.1.0.tgz#c59341c9723f414c452975564c7c00a68d58acd2" - integrity sha512-fxNG2sQjHvlVAYmzBZS9YlDp6PTSSDwa98vkD4QgVDDCAo84z5X1t5XyJQ62ImdLXx5NdIIfihey6xpum9/gRQ== - -parseurl@~1.3.2, parseurl@~1.3.3: - version "1.3.3" - resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" - integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== - -pascalcase@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/pascalcase/-/pascalcase-0.1.1.tgz#b363e55e8006ca6fe21784d2db22bd15d7917f14" - integrity sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ= - -path-browserify@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-0.0.1.tgz#e6c4ddd7ed3aa27c68a20cc4e50e1a4ee83bbc4a" - integrity sha512-BapA40NHICOS+USX9SN4tyhq+A2RrN/Ws5F0Z5aMHDp98Fl86lX8Oti8B7uN93L4Ifv4fHOEA+pQw87gmMO/lQ== - -path-dirname@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/path-dirname/-/path-dirname-1.0.2.tgz#cc33d24d525e099a5388c0336c6e32b9160609e0" - integrity sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA= - -path-exists@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" - integrity sha1-D+tsZPD8UY2adU3V77YscCJ2H0s= - dependencies: - pinkie-promise "^2.0.0" - -path-exists@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" - integrity sha1-zg6+ql94yxiSXqfYENe1mwEP1RU= - -path-is-absolute@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= - -path-is-inside@^1.0.1, path-is-inside@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/path-is-inside/-/path-is-inside-1.0.2.tgz#365417dede44430d1c11af61027facf074bdfc53" - integrity sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM= - -path-key@^2.0.0, path-key@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" - integrity sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A= - -path-parse@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" - integrity 
sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== - -path-to-regexp@0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity sha1-32BBeABfUi8V60SQ5yR6G/qmf4w= - -path-to-regexp@^1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-1.7.0.tgz#59fde0f435badacba103a84e9d3bc64e96b9937d" - integrity sha1-Wf3g9DW62suhA6hOnTvGTpa5k30= - dependencies: - isarray "0.0.1" - -path-type@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/path-type/-/path-type-2.0.0.tgz#f012ccb8415b7096fc2daa1054c3d72389594c73" - integrity sha1-8BLMuEFbcJb8LaoQVMPXI4lZTHM= - dependencies: - pify "^2.0.0" - -path-type@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/path-type/-/path-type-3.0.0.tgz#cef31dc8e0a1a3bb0d105c0cd97cf3bf47f4e36f" - integrity sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg== - dependencies: - pify "^3.0.0" - -pbkdf2@^3.0.3: - version "3.0.17" - resolved "https://registry.yarnpkg.com/pbkdf2/-/pbkdf2-3.0.17.tgz#976c206530617b14ebb32114239f7b09336e93a6" - integrity sha512-U/il5MsrZp7mGg3mSQfn742na2T+1/vHDCG5/iTI3X9MKUuYUZVLQhyRsg06mCgDBTd57TxzgZt7P+fYfjRLtA== - dependencies: - create-hash "^1.1.2" - create-hmac "^1.1.4" - ripemd160 "^2.0.1" - safe-buffer "^5.0.1" - sha.js "^2.4.8" - -performance-now@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/performance-now/-/performance-now-2.1.0.tgz#6309f4e0e5fa913ec1c69307ae364b4b377c9e7b" - integrity sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns= - -pify@^2.0.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" - integrity sha1-7RQaasBDqEnqWISY59yosVMw6Qw= - -pify@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" - integrity sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY= - -pify@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" - integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== - -pinkie-promise@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" - integrity sha1-ITXW36ejWMBprJsXh3YogihFD/o= - dependencies: - pinkie "^2.0.0" - -pinkie@^2.0.0: - version "2.0.4" - resolved "https://registry.yarnpkg.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" - integrity sha1-clVrgM+g1IqXToDnckjoDtT3+HA= - -pirates@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/pirates/-/pirates-4.0.1.tgz#643a92caf894566f91b2b986d2c66950a8e2fb87" - integrity sha512-WuNqLTbMI3tmfef2TKxlQmAiLHKtFhlsCZnPIpuv2Ow0RDVO8lfy1Opf4NUzlMXLjPl+Men7AuVdX6TA+s+uGA== - dependencies: - node-modules-regexp "^1.0.0" - -pkg-dir@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-1.0.0.tgz#7a4b508a8d5bb2d629d447056ff4e9c9314cf3d4" - integrity sha1-ektQio1bstYp1EcFb/TpyTFM89Q= - dependencies: - find-up "^1.0.0" - -pkg-dir@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-2.0.0.tgz#f6d5d1109e19d63edf428e0bd57e12777615334b" - integrity sha1-9tXREJ4Z1j7fQo4L1X4Sd3YVM0s= - dependencies: - find-up "^2.1.0" - -pkg-dir@^3.0.0: - version "3.0.0" - resolved 
"https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3" - integrity sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw== - dependencies: - find-up "^3.0.0" - -pkg-up@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/pkg-up/-/pkg-up-2.0.0.tgz#c819ac728059a461cab1c3889a2be3c49a004d7f" - integrity sha1-yBmscoBZpGHKscOImivjxJoATX8= - dependencies: - find-up "^2.1.0" - -pn@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/pn/-/pn-1.1.0.tgz#e2f4cef0e219f463c179ab37463e4e1ecdccbafb" - integrity sha512-2qHaIQr2VLRFoxe2nASzsV6ef4yOOH+Fi9FBOVH6cqeSgUnoyySPZkxzLuzd+RYOQTRpROA0ztTMqxROKSb/nA== - -pnp-webpack-plugin@1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/pnp-webpack-plugin/-/pnp-webpack-plugin-1.2.1.tgz#cd9d698df2a6fcf7255093c1c9511adf65b9421b" - integrity sha512-W6GctK7K2qQiVR+gYSv/Gyt6jwwIH4vwdviFqx+Y2jAtVf5eZyYIDf5Ac2NCDMBiX5yWscBLZElPTsyA1UtVVA== - dependencies: - ts-pnp "^1.0.0" - -portfinder@^1.0.9: - version "1.0.23" - resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.23.tgz#894db4bcc5daf02b6614517ce89cd21a38226b82" - integrity sha512-B729mL/uLklxtxuiJKfQ84WPxNw5a7Yhx3geQZdcA4GjNjZSTSSMMWyoennMVnTWSmAR0lMdzWYN0JLnHrg1KQ== - dependencies: - async "^1.5.2" - debug "^2.2.0" - mkdirp "0.5.x" - -posix-character-classes@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab" - integrity sha1-AerA/jta9xoqbAL+q7jB/vfgDqs= - -postcss-attribute-case-insensitive@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-4.0.1.tgz#b2a721a0d279c2f9103a36331c88981526428cc7" - integrity sha512-L2YKB3vF4PetdTIthQVeT+7YiSzMoNMLLYxPXXppOOP7NoazEAy45sh2LvJ8leCQjfBcfkYQs8TtCcQjeZTp8A== - dependencies: - postcss "^7.0.2" - postcss-selector-parser "^5.0.0" - -postcss-browser-comments@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/postcss-browser-comments/-/postcss-browser-comments-2.0.0.tgz#dc48d6a8ddbff188a80a000b7393436cb18aed88" - integrity sha512-xGG0UvoxwBc4Yx4JX3gc0RuDl1kc4bVihCzzk6UC72YPfq5fu3c717Nu8Un3nvnq1BJ31gBnFXIG/OaUTnpHgA== - dependencies: - postcss "^7.0.2" - -postcss-calc@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/postcss-calc/-/postcss-calc-7.0.1.tgz#36d77bab023b0ecbb9789d84dcb23c4941145436" - integrity sha512-oXqx0m6tb4N3JGdmeMSc/i91KppbYsFZKdH0xMOqK8V1rJlzrKlTdokz8ozUXLVejydRN6u2IddxpcijRj2FqQ== - dependencies: - css-unit-converter "^1.1.1" - postcss "^7.0.5" - postcss-selector-parser "^5.0.0-rc.4" - postcss-value-parser "^3.3.1" - -postcss-color-functional-notation@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/postcss-color-functional-notation/-/postcss-color-functional-notation-2.0.1.tgz#5efd37a88fbabeb00a2966d1e53d98ced93f74e0" - integrity sha512-ZBARCypjEDofW4P6IdPVTLhDNXPRn8T2s1zHbZidW6rPaaZvcnCS2soYFIQJrMZSxiePJ2XIYTlcb2ztr/eT2g== - dependencies: - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-color-gray@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/postcss-color-gray/-/postcss-color-gray-5.0.0.tgz#532a31eb909f8da898ceffe296fdc1f864be8547" - integrity sha512-q6BuRnAGKM/ZRpfDascZlIZPjvwsRye7UDNalqVz3s7GDxMtqPY6+Q871liNxsonUw8oC61OG+PSaysYpl1bnw== - dependencies: - "@csstools/convert-colors" "^1.4.0" - postcss "^7.0.5" - postcss-values-parser 
"^2.0.0" - -postcss-color-hex-alpha@^5.0.2: - version "5.0.3" - resolved "https://registry.yarnpkg.com/postcss-color-hex-alpha/-/postcss-color-hex-alpha-5.0.3.tgz#a8d9ca4c39d497c9661e374b9c51899ef0f87388" - integrity sha512-PF4GDel8q3kkreVXKLAGNpHKilXsZ6xuu+mOQMHWHLPNyjiUBOr75sp5ZKJfmv1MCus5/DWUGcK9hm6qHEnXYw== - dependencies: - postcss "^7.0.14" - postcss-values-parser "^2.0.1" - -postcss-color-mod-function@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/postcss-color-mod-function/-/postcss-color-mod-function-3.0.3.tgz#816ba145ac11cc3cb6baa905a75a49f903e4d31d" - integrity sha512-YP4VG+xufxaVtzV6ZmhEtc+/aTXH3d0JLpnYfxqTvwZPbJhWqp8bSY3nfNzNRFLgB4XSaBA82OE4VjOOKpCdVQ== - dependencies: - "@csstools/convert-colors" "^1.4.0" - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-color-rebeccapurple@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-4.0.1.tgz#c7a89be872bb74e45b1e3022bfe5748823e6de77" - integrity sha512-aAe3OhkS6qJXBbqzvZth2Au4V3KieR5sRQ4ptb2b2O8wgvB3SJBsdG+jsn2BZbbwekDG8nTfcCNKcSfe/lEy8g== - dependencies: - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-colormin@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/postcss-colormin/-/postcss-colormin-4.0.3.tgz#ae060bce93ed794ac71264f08132d550956bd381" - integrity sha512-WyQFAdDZpExQh32j0U0feWisZ0dmOtPl44qYmJKkq9xFWY3p+4qnRzCHeNrkeRhwPHz9bQ3mo0/yVkaply0MNw== - dependencies: - browserslist "^4.0.0" - color "^3.0.0" - has "^1.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-convert-values@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-convert-values/-/postcss-convert-values-4.0.1.tgz#ca3813ed4da0f812f9d43703584e449ebe189a7f" - integrity sha512-Kisdo1y77KUC0Jmn0OXU/COOJbzM8cImvw1ZFsBgBgMgb1iL23Zs/LXRe3r+EZqM3vGYKdQ2YJVQ5VkJI+zEJQ== - dependencies: - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-custom-media@^7.0.7: - version "7.0.8" - resolved "https://registry.yarnpkg.com/postcss-custom-media/-/postcss-custom-media-7.0.8.tgz#fffd13ffeffad73621be5f387076a28b00294e0c" - integrity sha512-c9s5iX0Ge15o00HKbuRuTqNndsJUbaXdiNsksnVH8H4gdc+zbLzr/UasOwNG6CTDpLFekVY4672eWdiiWu2GUg== - dependencies: - postcss "^7.0.14" - -postcss-custom-properties@^8.0.9: - version "8.0.11" - resolved "https://registry.yarnpkg.com/postcss-custom-properties/-/postcss-custom-properties-8.0.11.tgz#2d61772d6e92f22f5e0d52602df8fae46fa30d97" - integrity sha512-nm+o0eLdYqdnJ5abAJeXp4CEU1c1k+eB2yMCvhgzsds/e0umabFrN6HoTy/8Q4K5ilxERdl/JD1LO5ANoYBeMA== - dependencies: - postcss "^7.0.17" - postcss-values-parser "^2.0.1" - -postcss-custom-selectors@^5.1.2: - version "5.1.2" - resolved "https://registry.yarnpkg.com/postcss-custom-selectors/-/postcss-custom-selectors-5.1.2.tgz#64858c6eb2ecff2fb41d0b28c9dd7b3db4de7fba" - integrity sha512-DSGDhqinCqXqlS4R7KGxL1OSycd1lydugJ1ky4iRXPHdBRiozyMHrdu0H3o7qNOCiZwySZTUI5MV0T8QhCLu+w== - dependencies: - postcss "^7.0.2" - postcss-selector-parser "^5.0.0-rc.3" - -postcss-dir-pseudo-class@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-5.0.0.tgz#6e3a4177d0edb3abcc85fdb6fbb1c26dabaeaba2" - integrity sha512-3pm4oq8HYWMZePJY+5ANriPs3P07q+LW6FAdTlkFH2XqDdP4HeeJYMOzn0HYLhRSjBO3fhiqSwwU9xEULSrPgw== - dependencies: - postcss "^7.0.2" - postcss-selector-parser "^5.0.0-rc.3" - -postcss-discard-comments@^4.0.2: - version "4.0.2" - resolved 
"https://registry.yarnpkg.com/postcss-discard-comments/-/postcss-discard-comments-4.0.2.tgz#1fbabd2c246bff6aaad7997b2b0918f4d7af4033" - integrity sha512-RJutN259iuRf3IW7GZyLM5Sw4GLTOH8FmsXBnv8Ab/Tc2k4SR4qbV4DNbyyY4+Sjo362SyDmW2DQ7lBSChrpkg== - dependencies: - postcss "^7.0.0" - -postcss-discard-duplicates@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-discard-duplicates/-/postcss-discard-duplicates-4.0.2.tgz#3fe133cd3c82282e550fc9b239176a9207b784eb" - integrity sha512-ZNQfR1gPNAiXZhgENFfEglF93pciw0WxMkJeVmw8eF+JZBbMD7jp6C67GqJAXVZP2BWbOztKfbsdmMp/k8c6oQ== - dependencies: - postcss "^7.0.0" - -postcss-discard-empty@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-discard-empty/-/postcss-discard-empty-4.0.1.tgz#c8c951e9f73ed9428019458444a02ad90bb9f765" - integrity sha512-B9miTzbznhDjTfjvipfHoqbWKwd0Mj+/fL5s1QOz06wufguil+Xheo4XpOnc4NqKYBCNqqEzgPv2aPBIJLox0w== - dependencies: - postcss "^7.0.0" - -postcss-discard-overridden@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-discard-overridden/-/postcss-discard-overridden-4.0.1.tgz#652aef8a96726f029f5e3e00146ee7a4e755ff57" - integrity sha512-IYY2bEDD7g1XM1IDEsUT4//iEYCxAmP5oDSFMVU/JVvT7gh+l4fmjciLqGgwjdWpQIdb0Che2VX00QObS5+cTg== - dependencies: - postcss "^7.0.0" - -postcss-double-position-gradients@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/postcss-double-position-gradients/-/postcss-double-position-gradients-1.0.0.tgz#fc927d52fddc896cb3a2812ebc5df147e110522e" - integrity sha512-G+nV8EnQq25fOI8CH/B6krEohGWnF5+3A6H/+JEpOncu5dCnkS1QQ6+ct3Jkaepw1NGVqqOZH6lqrm244mCftA== - dependencies: - postcss "^7.0.5" - postcss-values-parser "^2.0.0" - -postcss-env-function@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/postcss-env-function/-/postcss-env-function-2.0.2.tgz#0f3e3d3c57f094a92c2baf4b6241f0b0da5365d7" - integrity sha512-rwac4BuZlITeUbiBq60h/xbLzXY43qOsIErngWa4l7Mt+RaSkT7QBjXVGTcBHupykkblHMDrBFh30zchYPaOUw== - dependencies: - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-flexbugs-fixes@4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/postcss-flexbugs-fixes/-/postcss-flexbugs-fixes-4.1.0.tgz#e094a9df1783e2200b7b19f875dcad3b3aff8b20" - integrity sha512-jr1LHxQvStNNAHlgco6PzY308zvLklh7SJVYuWUwyUQncofaAlD2l+P/gxKHOdqWKe7xJSkVLFF/2Tp+JqMSZA== - dependencies: - postcss "^7.0.0" - -postcss-focus-visible@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-focus-visible/-/postcss-focus-visible-4.0.0.tgz#477d107113ade6024b14128317ade2bd1e17046e" - integrity sha512-Z5CkWBw0+idJHSV6+Bgf2peDOFf/x4o+vX/pwcNYrWpXFrSfTkQ3JQ1ojrq9yS+upnAlNRHeg8uEwFTgorjI8g== - dependencies: - postcss "^7.0.2" - -postcss-focus-within@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/postcss-focus-within/-/postcss-focus-within-3.0.0.tgz#763b8788596cee9b874c999201cdde80659ef680" - integrity sha512-W0APui8jQeBKbCGZudW37EeMCjDeVxKgiYfIIEo8Bdh5SpB9sxds/Iq8SEuzS0Q4YFOlG7EPFulbbxujpkrV2w== - dependencies: - postcss "^7.0.2" - -postcss-font-variant@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-font-variant/-/postcss-font-variant-4.0.0.tgz#71dd3c6c10a0d846c5eda07803439617bbbabacc" - integrity sha512-M8BFYKOvCrI2aITzDad7kWuXXTm0YhGdP9Q8HanmN4EF1Hmcgs1KK5rSHylt/lUJe8yLxiSwWAHdScoEiIxztg== - dependencies: - postcss "^7.0.2" - -postcss-gap-properties@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/postcss-gap-properties/-/postcss-gap-properties-2.0.0.tgz#431c192ab3ed96a3c3d09f2ff615960f902c1715" - integrity sha512-QZSqDaMgXCHuHTEzMsS2KfVDOq7ZFiknSpkrPJY6jmxbugUPTuSzs/vuE5I3zv0WAS+3vhrlqhijiprnuQfzmg== - dependencies: - postcss "^7.0.2" - -postcss-image-set-function@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/postcss-image-set-function/-/postcss-image-set-function-3.0.1.tgz#28920a2f29945bed4c3198d7df6496d410d3f288" - integrity sha512-oPTcFFip5LZy8Y/whto91L9xdRHCWEMs3e1MdJxhgt4jy2WYXfhkng59fH5qLXSCPN8k4n94p1Czrfe5IOkKUw== - dependencies: - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-initial@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/postcss-initial/-/postcss-initial-3.0.1.tgz#99d319669a13d6c06ef8e70d852f68cb1b399b61" - integrity sha512-I2Sz83ZSHybMNh02xQDK609lZ1/QOyYeuizCjzEhlMgeV/HcDJapQiH4yTqLjZss0X6/6VvKFXUeObaHpJoINw== - dependencies: - lodash.template "^4.5.0" - postcss "^7.0.2" - -postcss-lab-function@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/postcss-lab-function/-/postcss-lab-function-2.0.1.tgz#bb51a6856cd12289ab4ae20db1e3821ef13d7d2e" - integrity sha512-whLy1IeZKY+3fYdqQFuDBf8Auw+qFuVnChWjmxm/UhHWqNHZx+B99EwxTvGYmUBqe3Fjxs4L1BoZTJmPu6usVg== - dependencies: - "@csstools/convert-colors" "^1.4.0" - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-load-config@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/postcss-load-config/-/postcss-load-config-2.1.0.tgz#c84d692b7bb7b41ddced94ee62e8ab31b417b003" - integrity sha512-4pV3JJVPLd5+RueiVVB+gFOAa7GWc25XQcMp86Zexzke69mKf6Nx9LRcQywdz7yZI9n1udOxmLuAwTBypypF8Q== - dependencies: - cosmiconfig "^5.0.0" - import-cwd "^2.0.0" - -postcss-loader@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-3.0.0.tgz#6b97943e47c72d845fa9e03f273773d4e8dd6c2d" - integrity sha512-cLWoDEY5OwHcAjDnkyRQzAXfs2jrKjXpO/HQFcc5b5u/r7aa471wdmChmwfnv7x2u840iat/wi0lQ5nbRgSkUA== - dependencies: - loader-utils "^1.1.0" - postcss "^7.0.0" - postcss-load-config "^2.0.0" - schema-utils "^1.0.0" - -postcss-logical@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/postcss-logical/-/postcss-logical-3.0.0.tgz#2495d0f8b82e9f262725f75f9401b34e7b45d5b5" - integrity sha512-1SUKdJc2vuMOmeItqGuNaC+N8MzBWFWEkAnRnLpFYj1tGGa7NqyVBujfRtgNa2gXR+6RkGUiB2O5Vmh7E2RmiA== - dependencies: - postcss "^7.0.2" - -postcss-media-minmax@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-media-minmax/-/postcss-media-minmax-4.0.0.tgz#b75bb6cbc217c8ac49433e12f22048814a4f5ed5" - integrity sha512-fo9moya6qyxsjbFAYl97qKO9gyre3qvbMnkOZeZwlsW6XYFsvs2DMGDlchVLfAd8LHPZDxivu/+qW2SMQeTHBw== - dependencies: - postcss "^7.0.2" - -postcss-merge-longhand@^4.0.11: - version "4.0.11" - resolved "https://registry.yarnpkg.com/postcss-merge-longhand/-/postcss-merge-longhand-4.0.11.tgz#62f49a13e4a0ee04e7b98f42bb16062ca2549e24" - integrity sha512-alx/zmoeXvJjp7L4mxEMjh8lxVlDFX1gqWHzaaQewwMZiVhLo42TEClKaeHbRf6J7j82ZOdTJ808RtN0ZOZwvw== - dependencies: - css-color-names "0.0.4" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - stylehacks "^4.0.0" - -postcss-merge-rules@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/postcss-merge-rules/-/postcss-merge-rules-4.0.3.tgz#362bea4ff5a1f98e4075a713c6cb25aefef9a650" - integrity sha512-U7e3r1SbvYzO0Jr3UT/zKBVgYYyhAz0aitvGIYOYK5CPmkNih+WDSsS5tvPrJ8YMQYlEMvsZIiqmn7HdFUaeEQ== - dependencies: - browserslist "^4.0.0" - caniuse-api 
"^3.0.0" - cssnano-util-same-parent "^4.0.0" - postcss "^7.0.0" - postcss-selector-parser "^3.0.0" - vendors "^1.0.0" - -postcss-minify-font-values@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-minify-font-values/-/postcss-minify-font-values-4.0.2.tgz#cd4c344cce474343fac5d82206ab2cbcb8afd5a6" - integrity sha512-j85oO6OnRU9zPf04+PZv1LYIYOprWm6IA6zkXkrJXyRveDEuQggG6tvoy8ir8ZwjLxLuGfNkCZEQG7zan+Hbtg== - dependencies: - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-minify-gradients@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-minify-gradients/-/postcss-minify-gradients-4.0.2.tgz#93b29c2ff5099c535eecda56c4aa6e665a663471" - integrity sha512-qKPfwlONdcf/AndP1U8SJ/uzIJtowHlMaSioKzebAXSG4iJthlWC9iSWznQcX4f66gIWX44RSA841HTHj3wK+Q== - dependencies: - cssnano-util-get-arguments "^4.0.0" - is-color-stop "^1.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-minify-params@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-minify-params/-/postcss-minify-params-4.0.2.tgz#6b9cef030c11e35261f95f618c90036d680db874" - integrity sha512-G7eWyzEx0xL4/wiBBJxJOz48zAKV2WG3iZOqVhPet/9geefm/Px5uo1fzlHu+DOjT+m0Mmiz3jkQzVHe6wxAWg== - dependencies: - alphanum-sort "^1.0.0" - browserslist "^4.0.0" - cssnano-util-get-arguments "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - uniqs "^2.0.0" - -postcss-minify-selectors@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-minify-selectors/-/postcss-minify-selectors-4.0.2.tgz#e2e5eb40bfee500d0cd9243500f5f8ea4262fbd8" - integrity sha512-D5S1iViljXBj9kflQo4YutWnJmwm8VvIsU1GeXJGiG9j8CIg9zs4voPMdQDUmIxetUOh60VilsNzCiAFTOqu3g== - dependencies: - alphanum-sort "^1.0.0" - has "^1.0.0" - postcss "^7.0.0" - postcss-selector-parser "^3.0.0" - -postcss-modules-extract-imports@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-2.0.0.tgz#818719a1ae1da325f9832446b01136eeb493cd7e" - integrity sha512-LaYLDNS4SG8Q5WAWqIJgdHPJrDDr/Lv775rMBFUbgjTz6j34lUznACHcdRWroPvXANP2Vj7yNK57vp9eFqzLWQ== - dependencies: - postcss "^7.0.5" - -postcss-modules-local-by-default@^2.0.6: - version "2.0.6" - resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-2.0.6.tgz#dd9953f6dd476b5fd1ef2d8830c8929760b56e63" - integrity sha512-oLUV5YNkeIBa0yQl7EYnxMgy4N6noxmiwZStaEJUSe2xPMcdNc8WmBQuQCx18H5psYbVxz8zoHk0RAAYZXP9gA== - dependencies: - postcss "^7.0.6" - postcss-selector-parser "^6.0.0" - postcss-value-parser "^3.3.1" - -postcss-modules-scope@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-2.1.0.tgz#ad3f5bf7856114f6fcab901b0502e2a2bc39d4eb" - integrity sha512-91Rjps0JnmtUB0cujlc8KIKCsJXWjzuxGeT/+Q2i2HXKZ7nBUeF9YQTZZTNvHVoNYj1AthsjnGLtqDUE0Op79A== - dependencies: - postcss "^7.0.6" - postcss-selector-parser "^6.0.0" - -postcss-modules-values@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-2.0.0.tgz#479b46dc0c5ca3dc7fa5270851836b9ec7152f64" - integrity sha512-Ki7JZa7ff1N3EIMlPnGTZfUMe69FFwiQPnVSXC9mnn3jozCRBYIxiZd44yJOV2AmabOo4qFf8s0dC/+lweG7+w== - dependencies: - icss-replace-symbols "^1.1.0" - postcss "^7.0.6" - -postcss-nesting@^7.0.0: - version "7.0.1" - resolved "https://registry.yarnpkg.com/postcss-nesting/-/postcss-nesting-7.0.1.tgz#b50ad7b7f0173e5b5e3880c3501344703e04c052" - integrity 
sha512-FrorPb0H3nuVq0Sff7W2rnc3SmIcruVC6YwpcS+k687VxyxO33iE1amna7wHuRVzM8vfiYofXSBHNAZ3QhLvYg== - dependencies: - postcss "^7.0.2" - -postcss-normalize-charset@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-charset/-/postcss-normalize-charset-4.0.1.tgz#8b35add3aee83a136b0471e0d59be58a50285dd4" - integrity sha512-gMXCrrlWh6G27U0hF3vNvR3w8I1s2wOBILvA87iNXaPvSNo5uZAMYsZG7XjCUf1eVxuPfyL4TJ7++SGZLc9A3g== - dependencies: - postcss "^7.0.0" - -postcss-normalize-display-values@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-normalize-display-values/-/postcss-normalize-display-values-4.0.2.tgz#0dbe04a4ce9063d4667ed2be476bb830c825935a" - integrity sha512-3F2jcsaMW7+VtRMAqf/3m4cPFhPD3EFRgNs18u+k3lTJJlVe7d0YPO+bnwqo2xg8YiRpDXJI2u8A0wqJxMsQuQ== - dependencies: - cssnano-util-get-match "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-positions@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-normalize-positions/-/postcss-normalize-positions-4.0.2.tgz#05f757f84f260437378368a91f8932d4b102917f" - integrity sha512-Dlf3/9AxpxE+NF1fJxYDeggi5WwV35MXGFnnoccP/9qDtFrTArZ0D0R+iKcg5WsUd8nUYMIl8yXDCtcrT8JrdA== - dependencies: - cssnano-util-get-arguments "^4.0.0" - has "^1.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-repeat-style@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-4.0.2.tgz#c4ebbc289f3991a028d44751cbdd11918b17910c" - integrity sha512-qvigdYYMpSuoFs3Is/f5nHdRLJN/ITA7huIoCyqqENJe9PvPmLhNLMu7QTjPdtnVf6OcYYO5SHonx4+fbJE1+Q== - dependencies: - cssnano-util-get-arguments "^4.0.0" - cssnano-util-get-match "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-string@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-normalize-string/-/postcss-normalize-string-4.0.2.tgz#cd44c40ab07a0c7a36dc5e99aace1eca4ec2690c" - integrity sha512-RrERod97Dnwqq49WNz8qo66ps0swYZDSb6rM57kN2J+aoyEAJfZ6bMx0sx/F9TIEX0xthPGCmeyiam/jXif0eA== - dependencies: - has "^1.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-timing-functions@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-4.0.2.tgz#8e009ca2a3949cdaf8ad23e6b6ab99cb5e7d28d9" - integrity sha512-acwJY95edP762e++00Ehq9L4sZCEcOPyaHwoaFOhIwWCDfik6YvqsYNxckee65JHLKzuNSSmAdxwD2Cud1Z54A== - dependencies: - cssnano-util-get-match "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-unicode@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-unicode/-/postcss-normalize-unicode-4.0.1.tgz#841bd48fdcf3019ad4baa7493a3d363b52ae1cfb" - integrity sha512-od18Uq2wCYn+vZ/qCOeutvHjB5jm57ToxRaMeNuf0nWVHaP9Hua56QyMF6fs/4FSUnVIw0CBPsU0K4LnBPwYwg== - dependencies: - browserslist "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-url@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-normalize-url/-/postcss-normalize-url-4.0.1.tgz#10e437f86bc7c7e58f7b9652ed878daaa95faae1" - integrity sha512-p5oVaF4+IHwu7VpMan/SSpmpYxcJMtkGppYf0VbdH5B6hN8YNmVyJLuY9FmLQTzY3fag5ESUUHDqM+heid0UVA== - dependencies: - is-absolute-url "^2.0.0" - normalize-url "^3.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize-whitespace@^4.0.2: - version "4.0.2" - resolved 
"https://registry.yarnpkg.com/postcss-normalize-whitespace/-/postcss-normalize-whitespace-4.0.2.tgz#bf1d4070fe4fcea87d1348e825d8cc0c5faa7d82" - integrity sha512-tO8QIgrsI3p95r8fyqKV+ufKlSHh9hMJqACqbv2XknufqEDhDvbguXGBBqxw9nsQoXWf0qOqppziKJKHMD4GtA== - dependencies: - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-normalize@7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/postcss-normalize/-/postcss-normalize-7.0.1.tgz#eb51568d962b8aa61a8318383c8bb7e54332282e" - integrity sha512-NOp1fwrG+6kVXWo7P9SizCHX6QvioxFD/hZcI2MLxPmVnFJFC0j0DDpIuNw2tUDeCFMni59gCVgeJ1/hYhj2OQ== - dependencies: - "@csstools/normalize.css" "^9.0.1" - browserslist "^4.1.1" - postcss "^7.0.2" - postcss-browser-comments "^2.0.0" - -postcss-ordered-values@^4.1.2: - version "4.1.2" - resolved "https://registry.yarnpkg.com/postcss-ordered-values/-/postcss-ordered-values-4.1.2.tgz#0cf75c820ec7d5c4d280189559e0b571ebac0eee" - integrity sha512-2fCObh5UanxvSxeXrtLtlwVThBvHn6MQcu4ksNT2tsaV2Fg76R2CV98W7wNSlX+5/pFwEyaDwKLLoEV7uRybAw== - dependencies: - cssnano-util-get-arguments "^4.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-overflow-shorthand@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/postcss-overflow-shorthand/-/postcss-overflow-shorthand-2.0.0.tgz#31ecf350e9c6f6ddc250a78f0c3e111f32dd4c30" - integrity sha512-aK0fHc9CBNx8jbzMYhshZcEv8LtYnBIRYQD5i7w/K/wS9c2+0NSR6B3OVMu5y0hBHYLcMGjfU+dmWYNKH0I85g== - dependencies: - postcss "^7.0.2" - -postcss-page-break@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/postcss-page-break/-/postcss-page-break-2.0.0.tgz#add52d0e0a528cabe6afee8b46e2abb277df46bf" - integrity sha512-tkpTSrLpfLfD9HvgOlJuigLuk39wVTbbd8RKcy8/ugV2bNBUW3xU+AIqyxhDrQr1VUj1RmyJrBn1YWrqUm9zAQ== - dependencies: - postcss "^7.0.2" - -postcss-place@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-place/-/postcss-place-4.0.1.tgz#e9f39d33d2dc584e46ee1db45adb77ca9d1dcc62" - integrity sha512-Zb6byCSLkgRKLODj/5mQugyuj9bvAAw9LqJJjgwz5cYryGeXfFZfSXoP1UfveccFmeq0b/2xxwcTEVScnqGxBg== - dependencies: - postcss "^7.0.2" - postcss-values-parser "^2.0.0" - -postcss-preset-env@6.6.0: - version "6.6.0" - resolved "https://registry.yarnpkg.com/postcss-preset-env/-/postcss-preset-env-6.6.0.tgz#642e7d962e2bdc2e355db117c1eb63952690ed5b" - integrity sha512-I3zAiycfqXpPIFD6HXhLfWXIewAWO8emOKz+QSsxaUZb9Dp8HbF5kUf+4Wy/AxR33o+LRoO8blEWCHth0ZsCLA== - dependencies: - autoprefixer "^9.4.9" - browserslist "^4.4.2" - caniuse-lite "^1.0.30000939" - css-blank-pseudo "^0.1.4" - css-has-pseudo "^0.10.0" - css-prefers-color-scheme "^3.1.1" - cssdb "^4.3.0" - postcss "^7.0.14" - postcss-attribute-case-insensitive "^4.0.1" - postcss-color-functional-notation "^2.0.1" - postcss-color-gray "^5.0.0" - postcss-color-hex-alpha "^5.0.2" - postcss-color-mod-function "^3.0.3" - postcss-color-rebeccapurple "^4.0.1" - postcss-custom-media "^7.0.7" - postcss-custom-properties "^8.0.9" - postcss-custom-selectors "^5.1.2" - postcss-dir-pseudo-class "^5.0.0" - postcss-double-position-gradients "^1.0.0" - postcss-env-function "^2.0.2" - postcss-focus-visible "^4.0.0" - postcss-focus-within "^3.0.0" - postcss-font-variant "^4.0.0" - postcss-gap-properties "^2.0.0" - postcss-image-set-function "^3.0.1" - postcss-initial "^3.0.0" - postcss-lab-function "^2.0.1" - postcss-logical "^3.0.0" - postcss-media-minmax "^4.0.0" - postcss-nesting "^7.0.0" - postcss-overflow-shorthand "^2.0.0" - postcss-page-break "^2.0.0" - postcss-place "^4.0.1" - postcss-pseudo-class-any-link 
"^6.0.0" - postcss-replace-overflow-wrap "^3.0.0" - postcss-selector-matches "^4.0.0" - postcss-selector-not "^4.0.0" - -postcss-pseudo-class-any-link@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-6.0.0.tgz#2ed3eed393b3702879dec4a87032b210daeb04d1" - integrity sha512-lgXW9sYJdLqtmw23otOzrtbDXofUdfYzNm4PIpNE322/swES3VU9XlXHeJS46zT2onFO7V1QFdD4Q9LiZj8mew== - dependencies: - postcss "^7.0.2" - postcss-selector-parser "^5.0.0-rc.3" - -postcss-reduce-initial@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/postcss-reduce-initial/-/postcss-reduce-initial-4.0.3.tgz#7fd42ebea5e9c814609639e2c2e84ae270ba48df" - integrity sha512-gKWmR5aUulSjbzOfD9AlJiHCGH6AEVLaM0AV+aSioxUDd16qXP1PCh8d1/BGVvpdWn8k/HiK7n6TjeoXN1F7DA== - dependencies: - browserslist "^4.0.0" - caniuse-api "^3.0.0" - has "^1.0.0" - postcss "^7.0.0" - -postcss-reduce-transforms@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-reduce-transforms/-/postcss-reduce-transforms-4.0.2.tgz#17efa405eacc6e07be3414a5ca2d1074681d4e29" - integrity sha512-EEVig1Q2QJ4ELpJXMZR8Vt5DQx8/mo+dGWSR7vWXqcob2gQLyQGsionYcGKATXvQzMPn6DSN1vTN7yFximdIAg== - dependencies: - cssnano-util-get-match "^4.0.0" - has "^1.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - -postcss-replace-overflow-wrap@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-3.0.0.tgz#61b360ffdaedca84c7c918d2b0f0d0ea559ab01c" - integrity sha512-2T5hcEHArDT6X9+9dVSPQdo7QHzG4XKclFT8rU5TzJPDN7RIRTbO9c4drUISOVemLj03aezStHCR2AIcr8XLpw== - dependencies: - postcss "^7.0.2" - -postcss-safe-parser@4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-safe-parser/-/postcss-safe-parser-4.0.1.tgz#8756d9e4c36fdce2c72b091bbc8ca176ab1fcdea" - integrity sha512-xZsFA3uX8MO3yAda03QrG3/Eg1LN3EPfjjf07vke/46HERLZyHrTsQ9E1r1w1W//fWEhtYNndo2hQplN2cVpCQ== - dependencies: - postcss "^7.0.0" - -postcss-selector-matches@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-selector-matches/-/postcss-selector-matches-4.0.0.tgz#71c8248f917ba2cc93037c9637ee09c64436fcff" - integrity sha512-LgsHwQR/EsRYSqlwdGzeaPKVT0Ml7LAT6E75T8W8xLJY62CE4S/l03BWIt3jT8Taq22kXP08s2SfTSzaraoPww== - dependencies: - balanced-match "^1.0.0" - postcss "^7.0.2" - -postcss-selector-not@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/postcss-selector-not/-/postcss-selector-not-4.0.0.tgz#c68ff7ba96527499e832724a2674d65603b645c0" - integrity sha512-W+bkBZRhqJaYN8XAnbbZPLWMvZD1wKTu0UxtFKdhtGjWYmxhkUneoeOhRJKdAE5V7ZTlnbHfCR+6bNwK9e1dTQ== - dependencies: - balanced-match "^1.0.0" - postcss "^7.0.2" - -postcss-selector-parser@^3.0.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz#4f875f4afb0c96573d5cf4d74011aee250a7e865" - integrity sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU= - dependencies: - dot-prop "^4.1.1" - indexes-of "^1.0.1" - uniq "^1.0.1" - -postcss-selector-parser@^5.0.0, postcss-selector-parser@^5.0.0-rc.3, postcss-selector-parser@^5.0.0-rc.4: - version "5.0.0" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz#249044356697b33b64f1a8f7c80922dddee7195c" - integrity sha512-w+zLE5Jhg6Liz8+rQOWEAwtwkyqpfnmsinXjXg6cY7YIONZZtgvE0v2O0uhQBs0peNomOJwWRKt6JBfTdTd3OQ== - dependencies: - cssesc "^2.0.0" - indexes-of "^1.0.1" - uniq "^1.0.1" - -postcss-selector-parser@^6.0.0: - 
version "6.0.2" - resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.2.tgz#934cf799d016c83411859e09dcecade01286ec5c" - integrity sha512-36P2QR59jDTOAiIkqEprfJDsoNrvwFei3eCqKd1Y0tUsBimsq39BLp7RD+JWny3WgB1zGhJX8XVePwm9k4wdBg== - dependencies: - cssesc "^3.0.0" - indexes-of "^1.0.1" - uniq "^1.0.1" - -postcss-svgo@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-svgo/-/postcss-svgo-4.0.2.tgz#17b997bc711b333bab143aaed3b8d3d6e3d38258" - integrity sha512-C6wyjo3VwFm0QgBy+Fu7gCYOkCmgmClghO+pjcxvrcBKtiKt0uCF+hvbMO1fyv5BMImRK90SMb+dwUnfbGd+jw== - dependencies: - is-svg "^3.0.0" - postcss "^7.0.0" - postcss-value-parser "^3.0.0" - svgo "^1.0.0" - -postcss-unique-selectors@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/postcss-unique-selectors/-/postcss-unique-selectors-4.0.1.tgz#9446911f3289bfd64c6d680f073c03b1f9ee4bac" - integrity sha512-+JanVaryLo9QwZjKrmJgkI4Fn8SBgRO6WXQBJi7KiAVPlmxikB5Jzc4EvXMT2H0/m0RjrVVm9rGNhZddm/8Spg== - dependencies: - alphanum-sort "^1.0.0" - postcss "^7.0.0" - uniqs "^2.0.0" - -postcss-value-parser@^3.0.0, postcss-value-parser@^3.3.0, postcss-value-parser@^3.3.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281" - integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ== - -postcss-value-parser@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz#482282c09a42706d1fc9a069b73f44ec08391dc9" - integrity sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ== - -postcss-values-parser@^2.0.0, postcss-values-parser@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/postcss-values-parser/-/postcss-values-parser-2.0.1.tgz#da8b472d901da1e205b47bdc98637b9e9e550e5f" - integrity sha512-2tLuBsA6P4rYTNKCXYG/71C7j1pU6pK503suYOmn4xYrQIzW+opD+7FAFNuGSdZC/3Qfy334QbeMu7MEb8gOxg== - dependencies: - flatten "^1.0.2" - indexes-of "^1.0.1" - uniq "^1.0.1" - -postcss@^7.0.0, postcss@^7.0.1, postcss@^7.0.14, postcss@^7.0.17, postcss@^7.0.2, postcss@^7.0.5, postcss@^7.0.6: - version "7.0.17" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-7.0.17.tgz#4da1bdff5322d4a0acaab4d87f3e782436bad31f" - integrity sha512-546ZowA+KZ3OasvQZHsbuEpysvwTZNGJv9EfyCQdsIDltPSWHAeTQ5fQy/Npi2ZDtLI3zs7Ps/p6wThErhm9fQ== - dependencies: - chalk "^2.4.2" - source-map "^0.6.1" - supports-color "^6.1.0" - -prelude-ls@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" - integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= - -pretty-bytes@^5.1.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/pretty-bytes/-/pretty-bytes-5.3.0.tgz#f2849e27db79fb4d6cfe24764fc4134f165989f2" - integrity sha512-hjGrh+P926p4R4WbaB6OckyRtO0F0/lQBiT+0gnxjV+5kjPBrfVBFCsCLbMqVQeydvIoouYTCmmEURiH3R1Bdg== - -pretty-error@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" - integrity sha1-X0+HyPkeWuPzuoerTPXgOxoX8aM= - dependencies: - renderkid "^2.0.1" - utila "~0.4" - -pretty-format@^24.9.0: - version "24.9.0" - resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-24.9.0.tgz#12fac31b37019a4eea3c11aa9a959eb7628aa7c9" - integrity 
sha512-00ZMZUiHaJrNfk33guavqgvfJS30sLYf0f8+Srklv0AMPodGGHcoHgksZ3OThYnIvOd+8yMCn0YiEOogjlgsnA== - dependencies: - "@jest/types" "^24.9.0" - ansi-regex "^4.0.0" - ansi-styles "^3.2.0" - react-is "^16.8.4" - -private@^0.1.6: - version "0.1.8" - resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" - integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg== - -process-nextick-args@~2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" - integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== - -process@^0.11.10: - version "0.11.10" - resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" - integrity sha1-czIwDoQBYb2j5podHZGn1LwW8YI= - -progress@^2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" - integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== - -promise-inflight@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/promise-inflight/-/promise-inflight-1.0.1.tgz#98472870bf228132fcbdd868129bad12c3c029e3" - integrity sha1-mEcocL8igTL8vdhoEputEsPAKeM= - -promise@8.0.3: - version "8.0.3" - resolved "https://registry.yarnpkg.com/promise/-/promise-8.0.3.tgz#f592e099c6cddc000d538ee7283bb190452b0bf6" - integrity sha512-HeRDUL1RJiLhyA0/grn+PTShlBAcLuh/1BJGtrvjwbvRDCTLLMEz9rOGCV+R3vHY4MixIuoMEd9Yq/XvsTPcjw== - dependencies: - asap "~2.0.6" - -promise@^7.1.1: - version "7.3.1" - resolved "https://registry.yarnpkg.com/promise/-/promise-7.3.1.tgz#064b72602b18f90f29192b8b1bc418ffd1ebd3bf" - integrity sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg== - dependencies: - asap "~2.0.3" - -prompts@^2.0.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.2.1.tgz#f901dd2a2dfee080359c0e20059b24188d75ad35" - integrity sha512-VObPvJiWPhpZI6C5m60XOzTfnYg/xc/an+r9VYymj9WJW3B/DIH+REzjpAACPf8brwPeP+7vz3bIim3S+AaMjw== - dependencies: - kleur "^3.0.3" - sisteransi "^1.0.3" - -prop-types@15.x, prop-types@^15.5.10, prop-types@^15.5.4, prop-types@^15.5.6, prop-types@^15.5.7, prop-types@^15.5.8, prop-types@^15.5.9, prop-types@^15.6.0, prop-types@^15.6.2, prop-types@^15.7.2: - version "15.7.2" - resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.7.2.tgz#52c41e75b8c87e72b9d9360e0206b99dcbffa6c5" - integrity sha512-8QQikdH7//R2vurIJSutZ1smHYTcLpRWEOlHnzcWHmBYrOGUysKwSsrC89BCiFj3CbrfJ/nXFdJepOVrY1GCHQ== - dependencies: - loose-envify "^1.4.0" - object-assign "^4.1.1" - react-is "^16.8.1" - -proxy-addr@~2.0.5: - version "2.0.5" - resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.5.tgz#34cbd64a2d81f4b1fd21e76f9f06c8a45299ee34" - integrity sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ== - dependencies: - forwarded "~0.1.2" - ipaddr.js "1.9.0" - -prr@~1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/prr/-/prr-1.0.1.tgz#d3fc114ba06995a45ec6893f484ceb1d78f5f476" - integrity sha1-0/wRS6BplaRexok/SEzrHXj19HY= - -psl@^1.1.24, psl@^1.1.28: - version "1.3.0" - resolved "https://registry.yarnpkg.com/psl/-/psl-1.3.0.tgz#e1ebf6a3b5564fa8376f3da2275da76d875ca1bd" - integrity 
sha512-avHdspHO+9rQTLbv1RO+MPYeP/SzsCoxofjVnHanETfQhTJrmB0HlDoW+EiN/R+C0BZ+gERab9NY0lPN2TxNag== - -public-encrypt@^4.0.0: - version "4.0.3" - resolved "https://registry.yarnpkg.com/public-encrypt/-/public-encrypt-4.0.3.tgz#4fcc9d77a07e48ba7527e7cbe0de33d0701331e0" - integrity sha512-zVpa8oKZSz5bTMTFClc1fQOnyyEzpl5ozpi1B5YcvBrdohMjH2rfsBtyXcuNuwjsDIXmBYlF2N5FlJYhR29t8Q== - dependencies: - bn.js "^4.1.0" - browserify-rsa "^4.0.0" - create-hash "^1.1.0" - parse-asn1 "^5.0.0" - randombytes "^2.0.1" - safe-buffer "^5.1.2" - -pump@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/pump/-/pump-2.0.1.tgz#12399add6e4cf7526d973cbc8b5ce2e2908b3909" - integrity sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: - end-of-stream "^1.1.0" - once "^1.3.1" - -pumpify@^1.3.3: - version "1.5.1" - resolved "https://registry.yarnpkg.com/pumpify/-/pumpify-1.5.1.tgz#36513be246ab27570b1a374a5ce278bfd74370ce" - integrity sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ== - dependencies: - duplexify "^3.6.0" - inherits "^2.0.3" - pump "^2.0.0" - -punycode@1.3.2: - version "1.3.2" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.3.2.tgz#9653a036fb7c1ee42342f2325cceefea3926c48d" - integrity sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0= - -punycode@^1.2.4, punycode@^1.4.1: - version "1.4.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" - integrity sha1-wNWmOycYgArY4esPpSachN1BhF4= - -punycode@^2.1.0, punycode@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.1.1.tgz#b58b010ac40c22c5657616c8d2c2c02c7bf479ec" - integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== - -q@^1.1.2: - version "1.5.1" - resolved "https://registry.yarnpkg.com/q/-/q-1.5.1.tgz#7e32f75b41381291d04611f1bf14109ac00651d7" - integrity sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc= - -qs@6.7.0: - version "6.7.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.7.0.tgz#41dc1a015e3d581f1621776be31afb2876a9b1bc" - integrity sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ== - -qs@~6.5.2: - version "6.5.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" - integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== - -querystring-es3@^0.2.0: - version "0.2.1" - resolved "https://registry.yarnpkg.com/querystring-es3/-/querystring-es3-0.2.1.tgz#9ec61f79049875707d69414596fd907a4d711e73" - integrity sha1-nsYfeQSYdXB9aUFFlv2Qek1xHnM= - -querystring@0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.0.tgz#b209849203bb25df820da756e747005878521620" - integrity sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA= - -querystringify@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/querystringify/-/querystringify-2.1.1.tgz#60e5a5fd64a7f8bfa4d2ab2ed6fdf4c85bad154e" - integrity sha512-w7fLxIRCRT7U8Qu53jQnJyPkYZIaR4n5151KMfcJlO/A9397Wxb1amJvROTK6TOnp7PfoAmg/qXiNHI+08jRfA== - -raf@3.4.1, raf@^3.4.0, raf@^3.4.1: - version 
"3.4.1" - resolved "https://registry.yarnpkg.com/raf/-/raf-3.4.1.tgz#0742e99a4a6552f445d73e3ee0328af0ff1ede39" - integrity sha512-Sq4CW4QhwOHE8ucn6J34MqtZCeWFP2aQSmrlroYgqAV1PjStIhJXxYuTgUIfkEk7zTLjmIjLmU5q+fbD1NnOJA== - dependencies: - performance-now "^2.1.0" - -randombytes@^2.0.0, randombytes@^2.0.1, randombytes@^2.0.5: - version "2.1.0" - resolved "https://registry.yarnpkg.com/randombytes/-/randombytes-2.1.0.tgz#df6f84372f0270dc65cdf6291349ab7a473d4f2a" - integrity sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ== - dependencies: - safe-buffer "^5.1.0" - -randomfill@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/randomfill/-/randomfill-1.0.4.tgz#c92196fc86ab42be983f1bf31778224931d61458" - integrity sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw== - dependencies: - randombytes "^2.0.5" - safe-buffer "^5.1.0" - -range-parser@^1.2.1, range-parser@~1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" - integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== - -raw-body@2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.4.0.tgz#a1ce6fb9c9bc356ca52e89256ab59059e13d0332" - integrity sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q== - dependencies: - bytes "3.1.0" - http-errors "1.7.2" - iconv-lite "0.4.24" - unpipe "1.0.0" - -rc-align@^2.4.0, rc-align@^2.4.1: - version "2.4.5" - resolved "https://registry.yarnpkg.com/rc-align/-/rc-align-2.4.5.tgz#c941a586f59d1017f23a428f0b468663fb7102ab" - integrity sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw== - dependencies: - babel-runtime "^6.26.0" - dom-align "^1.7.0" - prop-types "^15.5.8" - rc-util "^4.0.4" - -rc-animate@2.x, rc-animate@^2.3.0, rc-animate@^2.6.0, rc-animate@^2.8.2, rc-animate@^2.8.3: - version "2.10.0" - resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-2.10.0.tgz#d2224cee4700cc9e9836700eb47af6b6e41a080c" - integrity sha512-gZM3WteZO0e3X8B71KP0bs95EY2tAPRuiZyKnlhdLpOjTX/64SrhDZM3pT2Z8mJjKWNiiB5q2SSSf+BD8ljwVw== - dependencies: - babel-runtime "6.x" - classnames "^2.2.6" - css-animation "^1.3.2" - prop-types "15.x" - raf "^3.4.0" - rc-util "^4.8.0" - react-lifecycles-compat "^3.0.4" - -rc-animate@^3.0.0-rc.1: - version "3.0.0-rc.6" - resolved "https://registry.yarnpkg.com/rc-animate/-/rc-animate-3.0.0-rc.6.tgz#04288eefa118e0cae214536c8a903ffaac1bc3fb" - integrity sha512-oBLPpiT6Q4t6YvD/pkLcmofBP1p01TX0Otse8Q4+Mxt8J+VSDflLZGIgf62EwkvRwsQUkLPjZVFBsldnPKLzjg== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - component-classes "^1.2.6" - fbjs "^0.8.16" - prop-types "15.x" - raf "^3.4.0" - rc-util "^4.5.0" - react-lifecycles-compat "^3.0.4" - -rc-calendar@~9.15.5: - version "9.15.5" - resolved "https://registry.yarnpkg.com/rc-calendar/-/rc-calendar-9.15.5.tgz#21ab0cb2a5659d85fe6faead13d3ed764dd47c01" - integrity sha512-nvoEXk5P0DADt5b7FHlKiXKj+IhoWawQGSkb5soa6gXQIfoqQJ5+zB2Ogy7k1RxNbxQu4iIkEW/a3+HObVRDdA== - dependencies: - babel-runtime "6.x" - classnames "2.x" - moment "2.x" - prop-types "^15.5.8" - rc-trigger "^2.2.0" - rc-util "^4.1.1" - react-lifecycles-compat "^3.0.4" - -rc-cascader@~0.17.4: - version "0.17.4" - resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-0.17.4.tgz#bb38ba3ed0990bfaa5ee547467d85ecc0d152f96" 
- integrity sha512-CeFQJIMzY7x++uPqlx4Xl/cH8iTs8nRoW522+DLb21kdL5kWqKlK+3iHXExoxcAymjwo5ScIiXi+NY4m8Pgq9w== - dependencies: - array-tree-filter "^2.1.0" - prop-types "^15.5.8" - rc-trigger "^2.2.0" - rc-util "^4.0.4" - react-lifecycles-compat "^3.0.4" - shallow-equal "^1.0.0" - warning "^4.0.1" - -rc-checkbox@~2.1.6: - version "2.1.7" - resolved "https://registry.yarnpkg.com/rc-checkbox/-/rc-checkbox-2.1.7.tgz#ae6785525cf35fa4c62d706c4a1ff7b2b1fcb821" - integrity sha512-8L+0XuucUOMUM6F/7qH+hnQpEHPZfW1Um02lUHEVdpZNor5mC0Fj4x8GvTtwcM1pAl5tD3I6lHYD8cE1W8RZJw== - dependencies: - babel-runtime "^6.23.0" - classnames "2.x" - prop-types "15.x" - react-lifecycles-compat "^3.0.4" - -rc-collapse@~1.11.3: - version "1.11.6" - resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-1.11.6.tgz#9a70ac2bc2715e1ef7bae8e308bc0e844618d119" - integrity sha512-qckXftNVD7fawl/yrQYoMcKF9e8TFP9lJGrAQ1K6xA1xhSq6T9I++lsRRF57D1flxALkjXESpJpiSpVjM7sblA== - dependencies: - classnames "2.x" - css-animation "1.x" - prop-types "^15.5.6" - rc-animate "2.x" - react-is "^16.7.0" - react-lifecycles-compat "^3.0.4" - shallowequal "^1.1.0" - -rc-dialog@~7.5.2: - version "7.5.5" - resolved "https://registry.yarnpkg.com/rc-dialog/-/rc-dialog-7.5.5.tgz#67bf2657a239d29fdd21e06c9b13017dbb110a75" - integrity sha512-WbGCPuibf4VDiKfx0+vJPQecJFiQtweJgvhXEXDQl8460bdME2TB9SJB7YVht8tzNS/5fDUbkDfYeO7VFLb5Wg== - dependencies: - babel-runtime "6.x" - rc-animate "2.x" - rc-util "^4.8.1" - -rc-drawer@~2.0.1: - version "2.0.8" - resolved "https://registry.yarnpkg.com/rc-drawer/-/rc-drawer-2.0.8.tgz#ba0500590804283308f77acc22fff35f395e979a" - integrity sha512-BRX+pvC3aqNA/uuKJCHku7X5NBNdpQdewhnY/lrf2XmdpYXNKdHbf0TKsmM3P3N+eyqd/2/wePLCru++vbbBpg== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - rc-util "^4.7.0" - react-lifecycles-compat "^3.0.4" - -rc-dropdown@~2.4.1: - version "2.4.1" - resolved "https://registry.yarnpkg.com/rc-dropdown/-/rc-dropdown-2.4.1.tgz#aaef6eb3a5152cdd9982895c2a78d9b5f046cdec" - integrity sha512-p0XYn0wrOpAZ2fUGE6YJ6U8JBNc5ASijznZ6dkojdaEfQJAeZtV9KMEewhxkVlxGSbbdXe10ptjBlTEW9vEwEg== - dependencies: - babel-runtime "^6.26.0" - classnames "^2.2.6" - prop-types "^15.5.8" - rc-trigger "^2.5.1" - react-lifecycles-compat "^3.0.2" - -rc-editor-core@~0.8.3: - version "0.8.10" - resolved "https://registry.yarnpkg.com/rc-editor-core/-/rc-editor-core-0.8.10.tgz#6f215bc5df9c33ffa9f6c5b30ca73a7dabe8ab7c" - integrity sha512-T3aHpeMCIYA1sdAI7ynHHjXy5fqp83uPlD68ovZ0oClTSc3tbHmyCxXlA+Ti4YgmcpCYv7avF6a+TIbAka53kw== - dependencies: - babel-runtime "^6.26.0" - classnames "^2.2.5" - draft-js "^0.10.0" - immutable "^3.7.4" - lodash "^4.16.5" - prop-types "^15.5.8" - setimmediate "^1.0.5" - -rc-editor-mention@^1.1.13: - version "1.1.13" - resolved "https://registry.yarnpkg.com/rc-editor-mention/-/rc-editor-mention-1.1.13.tgz#9f1cab1065f86b01523840321790c2ab12ac5e8b" - integrity sha512-3AOmGir91Fi2ogfRRaXLtqlNuIwQpvla7oUnGHS1+3eo7b+fUp5IlKcagqtwUBB5oDNofoySXkLBxzWvSYNp/Q== - dependencies: - babel-runtime "^6.23.0" - classnames "^2.2.5" - dom-scroll-into-view "^1.2.0" - draft-js "~0.10.0" - immutable "~3.7.4" - prop-types "^15.5.8" - rc-animate "^2.3.0" - rc-editor-core "~0.8.3" - -rc-form@^2.4.5: - version "2.4.8" - resolved "https://registry.yarnpkg.com/rc-form/-/rc-form-2.4.8.tgz#79a1f124d4fa81dff2af2992e94aa3e58cad683c" - integrity sha512-hlHajcYg51pFQf+B6neAbhy2ZA+8DmxnDxiOYZRAXCLhPN788ZnrtZq5/iADDWcZqjHFnXiThoZE/Fu8syciDQ== - dependencies: - async-validator "~1.11.3" - babel-runtime "6.x" - create-react-class 
"^15.5.3" - dom-scroll-into-view "1.x" - hoist-non-react-statics "^3.3.0" - lodash "^4.17.4" - warning "^4.0.3" - -rc-hammerjs@~0.6.0: - version "0.6.9" - resolved "https://registry.yarnpkg.com/rc-hammerjs/-/rc-hammerjs-0.6.9.tgz#9a4ddbda1b2ec8f9b9596091a6a989842a243907" - integrity sha512-4llgWO3RgLyVbEqUdGsDfzUDqklRlQW5VEhE3x35IvhV+w//VPRG34SBavK3D2mD/UaLKaohgU41V4agiftC8g== - dependencies: - babel-runtime "6.x" - hammerjs "^2.0.8" - prop-types "^15.5.9" - -rc-input-number@~4.4.5: - version "4.4.5" - resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-4.4.5.tgz#81473d2885a6b312e486c4f2ba3f441c1ab88520" - integrity sha512-Dt20e8Ylc/N/6oXiPUlwDVdx3fz7W5umUOa4z5pBuWFG7NPlBVXRWkq7+nbnTyaK24UxN67PVpmD3+Omo+QRZQ== - dependencies: - babel-runtime "6.x" - classnames "^2.2.0" - prop-types "^15.5.7" - rc-util "^4.5.1" - rmc-feedback "^2.0.0" - -rc-mentions@~0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-0.4.0.tgz#483552c088290dbcddd059a0846e9f207ecc3539" - integrity sha512-xnkQBTUFp4llaJuDOLVFKX9ELrXFHk1FuUdIIC/ijQ6cLjDhCUu+jpHNcXWuQ/yIFzF376VlXkmT57iqxSnZzw== - dependencies: - "@ant-design/create-react-context" "^0.2.4" - classnames "^2.2.6" - rc-menu "^7.4.22" - rc-trigger "^2.6.2" - rc-util "^4.6.0" - react-lifecycles-compat "^3.0.4" - -rc-menu@^7.3.0, rc-menu@^7.4.22, rc-menu@~7.4.23: - version "7.4.23" - resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-7.4.23.tgz#e07d497864274076299d7d8a84d14fc86b6bd30d" - integrity sha512-d0pUMN0Zr3GCFxNpas8p7AUTeX8viItUOQXku4AsyX82ZzUz79HgGul2Nk17BIFTtLzqdB7/NT6WVb5PAOOILw== - dependencies: - babel-runtime "6.x" - classnames "2.x" - dom-scroll-into-view "1.x" - ismobilejs "^0.5.1" - mini-store "^2.0.0" - mutationobserver-shim "^0.3.2" - prop-types "^15.5.6" - rc-animate "2.x" - rc-trigger "^2.3.0" - rc-util "^4.1.0" - resize-observer-polyfill "^1.5.0" - -rc-notification@~3.3.1: - version "3.3.1" - resolved "https://registry.yarnpkg.com/rc-notification/-/rc-notification-3.3.1.tgz#0baa3e70f8d40ab015ce8fa78c260c490fc7beb4" - integrity sha512-U5+f4BmBVfMSf3OHSLyRagsJ74yKwlrQAtbbL5ijoA0F2C60BufwnOcHG18tVprd7iaIjzZt1TKMmQSYSvgrig== - dependencies: - babel-runtime "6.x" - classnames "2.x" - prop-types "^15.5.8" - rc-animate "2.x" - rc-util "^4.0.4" - -rc-pagination@~1.20.5: - version "1.20.5" - resolved "https://registry.yarnpkg.com/rc-pagination/-/rc-pagination-1.20.5.tgz#b64395a702e9fc452e26d0e491ccf7d9345309f7" - integrity sha512-gnVAowVIbRilW6bXYWCEpTsrtmAWTpM3qO/bltYfqTVKxgb6/sDqjRvCksJGy/D81pYkEkKeA9foWsgUgbUsQw== - dependencies: - babel-runtime "6.x" - classnames "^2.2.6" - prop-types "^15.5.7" - react-lifecycles-compat "^3.0.4" - -rc-progress@~2.5.0: - version "2.5.2" - resolved "https://registry.yarnpkg.com/rc-progress/-/rc-progress-2.5.2.tgz#ab01ba4e5d2fa36fc9f6f058b10b720e7315560c" - integrity sha512-ajI+MJkbBz9zYDuE9GQsY5gsyqPF7HFioZEDZ9Fmc+ebNZoiSeSJsTJImPFCg0dW/5WiRGUy2F69SX1aPtSJgA== - dependencies: - babel-runtime "6.x" - prop-types "^15.5.8" - -rc-rate@~2.5.0: - version "2.5.0" - resolved "https://registry.yarnpkg.com/rc-rate/-/rc-rate-2.5.0.tgz#72d4984a03d0a7a0e6779c7a79efcea27626abf6" - integrity sha512-aXX5klRqbVZxvLghcKnLqqo7LvLVCHswEDteWsm5Gb7NBIPa1YKTcAbvb5SZ4Z4i4EeRoZaPwygRAWsQgGtbKw== - dependencies: - classnames "^2.2.5" - prop-types "^15.5.8" - rc-util "^4.3.0" - react-lifecycles-compat "^3.0.4" - -rc-select@~9.2.0: - version "9.2.1" - resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-9.2.1.tgz#0fbf47a933c36e1e6ef76c88fab32f58029e6a01" - integrity 
sha512-nW/Zr2OCgxN26OX8ff3xcO1wK0e1l5ixnEfyN15Rbdk7TNI/rIPJIjPCQAoihRpk9A2C/GH8pahjlvKV1Vj++g== - dependencies: - babel-runtime "^6.23.0" - classnames "2.x" - component-classes "1.x" - dom-scroll-into-view "1.x" - prop-types "^15.5.8" - raf "^3.4.0" - rc-animate "2.x" - rc-menu "^7.3.0" - rc-trigger "^2.5.4" - rc-util "^4.0.4" - react-lifecycles-compat "^3.0.2" - warning "^4.0.2" - -rc-slider@~8.6.11: - version "8.6.13" - resolved "https://registry.yarnpkg.com/rc-slider/-/rc-slider-8.6.13.tgz#88a8150c2dda6709f3f119135de11fba80af765b" - integrity sha512-fCUe8pPn8n9pq1ARX44nN2nzJoATtna4x/PdskUrxIvZXN8ja7HuceN/hq6kokZjo3FBD2B1yMZvZh6oi68l6Q== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - prop-types "^15.5.4" - rc-tooltip "^3.7.0" - rc-util "^4.0.4" - shallowequal "^1.0.1" - warning "^4.0.3" - -rc-steps@~3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/rc-steps/-/rc-steps-3.5.0.tgz#36b2a7f1f49907b0d90363884b18623caf9fb600" - integrity sha512-2Vkkrpa7PZbg7qPsqTNzVDov4u78cmxofjjnIHiGB9+9rqKS8oTLPzbW2uiWDr3Lk+yGwh8rbpGO1E6VAgBCOg== - dependencies: - babel-runtime "^6.23.0" - classnames "^2.2.3" - lodash "^4.17.5" - prop-types "^15.5.7" - -rc-switch@~1.9.0: - version "1.9.0" - resolved "https://registry.yarnpkg.com/rc-switch/-/rc-switch-1.9.0.tgz#ab2b878f2713c681358a453391976c9b95b290f7" - integrity sha512-Isas+egaK6qSk64jaEw4GgPStY4umYDbT7ZY93bZF1Af+b/JEsKsJdNOU2qG3WI0Z6tXo2DDq0kJCv8Yhu0zww== - dependencies: - classnames "^2.2.1" - prop-types "^15.5.6" - react-lifecycles-compat "^3.0.4" - -rc-table@~6.7.0: - version "6.7.0" - resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-6.7.0.tgz#8aca002f84a43a2d51a4fcda0f7a51694154286d" - integrity sha512-zzu7UtEHLTzZibB1EOoeKQejH21suoxRQx3evlGGLwz5NUh2HDUHobSr12z5Kd8EPr1+y/LPzXJdX1ctFPC+hA== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - component-classes "^1.2.6" - lodash "^4.17.5" - mini-store "^2.0.0" - prop-types "^15.5.8" - rc-util "^4.0.4" - react-lifecycles-compat "^3.0.2" - shallowequal "^1.0.2" - warning "^3.0.0" - -rc-tabs@~9.6.4: - version "9.6.6" - resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-9.6.6.tgz#1378aae0e4a04d6c3f2bd61bfcb7f28a7ef3807f" - integrity sha512-8Vs4tLZKQODl72RetTNm+yVOuboAhtJlvf9fbxWJ4WiYuzMxU7Y8RZ8yVNDGt3+4WzCJUI53CtobptBWwcUkDA== - dependencies: - "@ant-design/create-react-context" "^0.2.4" - babel-runtime "6.x" - classnames "2.x" - lodash "^4.17.5" - prop-types "15.x" - raf "^3.4.1" - rc-hammerjs "~0.6.0" - rc-util "^4.0.4" - react-lifecycles-compat "^3.0.4" - resize-observer-polyfill "^1.5.1" - warning "^4.0.3" - -rc-time-picker@~3.7.1: - version "3.7.2" - resolved "https://registry.yarnpkg.com/rc-time-picker/-/rc-time-picker-3.7.2.tgz#fabe5501adf1374d31a2d3b47f1ba89fc2dc2467" - integrity sha512-UVWO9HXGyZoM4I2THlJsEAFcZQz+tYwdcpoHXCEFZsRLz9L2+7vV4EMp9Wa3UrtzMFEt83qSAX/90dCJeKl9sg== - dependencies: - classnames "2.x" - moment "2.x" - prop-types "^15.5.8" - raf "^3.4.1" - rc-trigger "^2.2.0" - react-lifecycles-compat "^3.0.4" - -rc-tooltip@^3.7.0, rc-tooltip@~3.7.3: - version "3.7.3" - resolved "https://registry.yarnpkg.com/rc-tooltip/-/rc-tooltip-3.7.3.tgz#280aec6afcaa44e8dff0480fbaff9e87fc00aecc" - integrity sha512-dE2ibukxxkrde7wH9W8ozHKUO4aQnPZ6qBHtrTH9LoO836PjDdiaWO73fgPB05VfJs9FbZdmGPVEbXCeOP99Ww== - dependencies: - babel-runtime "6.x" - prop-types "^15.5.8" - rc-trigger "^2.2.2" - -rc-tree-select@~2.9.1: - version "2.9.1" - resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-2.9.1.tgz#d076b8ce5bf432df3fdd8a6a01cdd9c93c8e7399" - 
integrity sha512-AfJQC1ZzaeH+Onmx84TtVLUL2guBZe7exA8XSfj1RRB1doDbYGTtybzpP3CEw/tuSftSRnz+iPt+iaxRTrgXRw== - dependencies: - classnames "^2.2.1" - dom-scroll-into-view "^1.2.1" - prop-types "^15.5.8" - raf "^3.4.0" - rc-animate "^2.8.2" - rc-tree "~2.0.0" - rc-trigger "^3.0.0-rc.2" - rc-util "^4.5.0" - react-lifecycles-compat "^3.0.4" - shallowequal "^1.0.2" - warning "^4.0.1" - -rc-tree@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-2.0.0.tgz#68fc4c9ab696943b279a143619e2ecf05918fb53" - integrity sha512-DAT/jsbnFbHqG9Df9OaVG93CAVtTsJVnJiwKX+wqsG8TChpty3s6QX3zJZ+gBgjkq4ikLbu1kuFJtX63EKhSAA== - dependencies: - babel-runtime "^6.23.0" - classnames "2.x" - prop-types "^15.5.8" - rc-animate "^2.6.0" - rc-util "^4.5.1" - react-lifecycles-compat "^3.0.4" - warning "^3.0.0" - -rc-tree@~2.1.0: - version "2.1.2" - resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-2.1.2.tgz#c70546cf1081e1c27bffa314a966e2e4d06b3f2b" - integrity sha512-IQG0bkY4bfK11oVIF44Y4V3IuIOAmIIc5j8b8XGkRjsnUOElRr/BNqKCvg9h2UsNJm1J2xv4OA0HfEIv70765Q== - dependencies: - "@ant-design/create-react-context" "^0.2.4" - classnames "2.x" - prop-types "^15.5.8" - rc-animate "^2.6.0" - rc-util "^4.5.1" - react-lifecycles-compat "^3.0.4" - warning "^4.0.3" - -rc-trigger@^2.2.0, rc-trigger@^2.2.2, rc-trigger@^2.3.0, rc-trigger@^2.5.1, rc-trigger@^2.5.4, rc-trigger@^2.6.2: - version "2.6.5" - resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-2.6.5.tgz#140a857cf28bd0fa01b9aecb1e26a50a700e9885" - integrity sha512-m6Cts9hLeZWsTvWnuMm7oElhf+03GOjOLfTuU0QmdB9ZrW7jR2IpI5rpNM7i9MvAAlMAmTx5Zr7g3uu/aMvZAw== - dependencies: - babel-runtime "6.x" - classnames "^2.2.6" - prop-types "15.x" - rc-align "^2.4.0" - rc-animate "2.x" - rc-util "^4.4.0" - react-lifecycles-compat "^3.0.4" - -rc-trigger@^3.0.0-rc.2: - version "3.0.0-rc.3" - resolved "https://registry.yarnpkg.com/rc-trigger/-/rc-trigger-3.0.0-rc.3.tgz#35842df1674d25315e1426a44882a4c97652258b" - integrity sha512-4vB6cpxcUdm2qO5VtB9q1TZz0MoWm9BzFLvGknulphGrl1qI6uxUsPDCvqnmujdpDdAKGGfjxntFpA7RtAwkFQ== - dependencies: - babel-runtime "6.x" - classnames "^2.2.6" - prop-types "15.x" - raf "^3.4.0" - rc-align "^2.4.1" - rc-animate "^3.0.0-rc.1" - rc-util "^4.4.0" - -rc-upload@~2.7.0: - version "2.7.0" - resolved "https://registry.yarnpkg.com/rc-upload/-/rc-upload-2.7.0.tgz#f279b758655eb5f99ebf82a5a2648d80d88e0ff4" - integrity sha512-Oh9EJB4xE8MQUZ2D0OUST3UMIBjHjnO2IjPNW/cbPredxZz+lzbLPCZxcxRwUwu1gt0LA968UWXAgT1EvZdFfA== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - prop-types "^15.5.7" - warning "4.x" - -rc-util@^4.0.4, rc-util@^4.1.0, rc-util@^4.1.1, rc-util@^4.10.0, rc-util@^4.3.0, rc-util@^4.4.0, rc-util@^4.5.0, rc-util@^4.5.1, rc-util@^4.6.0, rc-util@^4.7.0, rc-util@^4.8.0, rc-util@^4.8.1: - version "4.11.0" - resolved "https://registry.yarnpkg.com/rc-util/-/rc-util-4.11.0.tgz#cf437dcff74ca08a8565ae14f0368acb3a650796" - integrity sha512-nB29kXOXsSVjBkWfH+Z1GVh6tRg7XGZtZ0Yfie+OI0stCDixGQ1cPrS6iYxlg+AV2St6COCK5MFrCmpTgghh0w== - dependencies: - add-dom-event-listener "^1.1.0" - babel-runtime "6.x" - prop-types "^15.5.10" - react-lifecycles-compat "^3.0.4" - shallowequal "^0.2.2" - -rc@^1.2.7: - version "1.2.8" - resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" - integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== - dependencies: - deep-extend "^0.6.0" - ini "~1.3.0" - minimist "^1.2.0" - strip-json-comments "~2.0.1" - 
-react-app-polyfill@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/react-app-polyfill/-/react-app-polyfill-1.0.2.tgz#2a51175885c88245a2a356dc46df29f38ec9f060" - integrity sha512-yZcpLnIr0FOIzrOOz9JC37NWAWEuCaQWmYn9EWjEzlCW4cOmA5MkT5L3iP8QuUeFnoqVCTJgjIWYbXEJgNXhGA== - dependencies: - core-js "3.1.4" - object-assign "4.1.1" - promise "8.0.3" - raf "3.4.1" - regenerator-runtime "0.13.3" - whatwg-fetch "3.0.0" - -react-app-rewired@^2.1.3: - version "2.1.3" - resolved "https://registry.yarnpkg.com/react-app-rewired/-/react-app-rewired-2.1.3.tgz#5ae8583ecc9f9f968d40b735d2abbe871378a52f" - integrity sha512-NXC2EsQrnEMV7xD70rHcBq0B4PSEzjY/K2m/e+GRgit2jZO/uZApnpCZSKvIX2leLRN69Sqf2id0VXZ1F62CDw== - dependencies: - cross-spawn "^6.0.5" - dotenv "^6.2.0" - semver "^5.6.0" - -react-dev-utils@^9.0.0: - version "9.0.3" - resolved "https://registry.yarnpkg.com/react-dev-utils/-/react-dev-utils-9.0.3.tgz#7607455587abb84599451460eb37cef0b684131a" - integrity sha512-OyInhcwsvycQ3Zr2pQN+HV4gtRXrky5mJXIy4HnqrWa+mI624xfYfqGuC9dYbxp4Qq3YZzP8GSGQjv0AgNU15w== - dependencies: - "@babel/code-frame" "7.5.5" - address "1.1.0" - browserslist "4.6.6" - chalk "2.4.2" - cross-spawn "6.0.5" - detect-port-alt "1.1.6" - escape-string-regexp "1.0.5" - filesize "3.6.1" - find-up "3.0.0" - fork-ts-checker-webpack-plugin "1.5.0" - global-modules "2.0.0" - globby "8.0.2" - gzip-size "5.1.1" - immer "1.10.0" - inquirer "6.5.0" - is-root "2.1.0" - loader-utils "1.2.3" - open "^6.3.0" - pkg-up "2.0.0" - react-error-overlay "^6.0.1" - recursive-readdir "2.2.2" - shell-quote "1.6.1" - sockjs-client "1.3.0" - strip-ansi "5.2.0" - text-table "0.2.0" - -react-dom@^16.8.6: - version "16.9.0" - resolved "https://registry.yarnpkg.com/react-dom/-/react-dom-16.9.0.tgz#5e65527a5e26f22ae3701131bcccaee9fb0d3962" - integrity sha512-YFT2rxO9hM70ewk9jq0y6sQk8cL02xm4+IzYBz75CQGlClQQ1Bxq0nhHF6OtSbit+AIahujJgb/CPRibFkMNJQ== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - prop-types "^15.6.2" - scheduler "^0.15.0" - -react-error-overlay@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/react-error-overlay/-/react-error-overlay-6.0.1.tgz#b8d3cf9bb991c02883225c48044cb3ee20413e0f" - integrity sha512-V9yoTr6MeZXPPd4nV/05eCBvGH9cGzc52FN8fs0O0TVQ3HYYf1n7EgZVtHbldRq5xU9zEzoXIITjYNIfxDDdUw== - -react-is@^16.6.0, react-is@^16.7.0, react-is@^16.8.1, react-is@^16.8.4: - version "16.9.0" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.9.0.tgz#21ca9561399aad0ff1a7701c01683e8ca981edcb" - integrity sha512-tJBzzzIgnnRfEm046qRcURvwQnZVXmuCbscxUO5RWrGTXpon2d4c8mI0D8WE6ydVIm29JiLB6+RslkIvym9Rjw== - -react-lazy-load@^3.0.13: - version "3.0.13" - resolved "https://registry.yarnpkg.com/react-lazy-load/-/react-lazy-load-3.0.13.tgz#3b0a92d336d43d3f0d73cbe6f35b17050b08b824" - integrity sha1-OwqS0zbUPT8Nc8vm81sXBQsIuCQ= - dependencies: - eventlistener "0.0.1" - lodash.debounce "^4.0.0" - lodash.throttle "^4.0.0" - prop-types "^15.5.8" - -react-lifecycles-compat@^3.0.2, react-lifecycles-compat@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/react-lifecycles-compat/-/react-lifecycles-compat-3.0.4.tgz#4f1a273afdfc8f3488a8c516bfda78f872352362" - integrity sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA== - -react-router-dom@^5.0.0: - version "5.0.1" - resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-5.0.1.tgz#ee66f4a5d18b6089c361958e443489d6bab714be" - integrity 
sha512-zaVHSy7NN0G91/Bz9GD4owex5+eop+KvgbxXsP/O+iW1/Ln+BrJ8QiIR5a6xNPtrdTvLkxqlDClx13QO1uB8CA== - dependencies: - "@babel/runtime" "^7.1.2" - history "^4.9.0" - loose-envify "^1.3.1" - prop-types "^15.6.2" - react-router "5.0.1" - tiny-invariant "^1.0.2" - tiny-warning "^1.0.0" - -react-router@5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/react-router/-/react-router-5.0.1.tgz#04ee77df1d1ab6cb8939f9f01ad5702dbadb8b0f" - integrity sha512-EM7suCPNKb1NxcTZ2LEOWFtQBQRQXecLxVpdsP4DW4PbbqYWeRiLyV/Tt1SdCrvT2jcyXAXmVTmzvSzrPR63Bg== - dependencies: - "@babel/runtime" "^7.1.2" - history "^4.9.0" - hoist-non-react-statics "^3.1.0" - loose-envify "^1.3.1" - mini-create-react-context "^0.3.0" - path-to-regexp "^1.7.0" - prop-types "^15.6.2" - react-is "^16.6.0" - tiny-invariant "^1.0.2" - tiny-warning "^1.0.0" - -react-scripts@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/react-scripts/-/react-scripts-3.0.0.tgz#a715613ef3eace025907b409cec8505096e0233e" - integrity sha512-F4HegoBuUKZvEzXYksQu05Y6vJqallhHkQUEL6M7OQ5rYLBQC/4MTK6km9ZZvEK9TqMy1XA8SSEJGJgTEr6bSQ== - dependencies: - "@babel/core" "7.4.3" - "@svgr/webpack" "4.1.0" - "@typescript-eslint/eslint-plugin" "1.6.0" - "@typescript-eslint/parser" "1.6.0" - babel-eslint "10.0.1" - babel-jest "24.7.1" - babel-loader "8.0.5" - babel-plugin-named-asset-import "^0.3.2" - babel-preset-react-app "^8.0.0" - case-sensitive-paths-webpack-plugin "2.2.0" - css-loader "2.1.1" - dotenv "6.2.0" - dotenv-expand "4.2.0" - eslint "^5.16.0" - eslint-config-react-app "^4.0.0" - eslint-loader "2.1.2" - eslint-plugin-flowtype "2.50.1" - eslint-plugin-import "2.16.0" - eslint-plugin-jsx-a11y "6.2.1" - eslint-plugin-react "7.12.4" - eslint-plugin-react-hooks "^1.5.0" - file-loader "3.0.1" - fs-extra "7.0.1" - html-webpack-plugin "4.0.0-beta.5" - identity-obj-proxy "3.0.0" - is-wsl "^1.1.0" - jest "24.7.1" - jest-environment-jsdom-fourteen "0.1.0" - jest-resolve "24.7.1" - jest-watch-typeahead "0.3.0" - mini-css-extract-plugin "0.5.0" - optimize-css-assets-webpack-plugin "5.0.1" - pnp-webpack-plugin "1.2.1" - postcss-flexbugs-fixes "4.1.0" - postcss-loader "3.0.0" - postcss-normalize "7.0.1" - postcss-preset-env "6.6.0" - postcss-safe-parser "4.0.1" - react-app-polyfill "^1.0.0" - react-dev-utils "^9.0.0" - resolve "1.10.0" - sass-loader "7.1.0" - semver "6.0.0" - style-loader "0.23.1" - terser-webpack-plugin "1.2.3" - url-loader "1.1.2" - webpack "4.29.6" - webpack-dev-server "3.2.1" - webpack-manifest-plugin "2.0.4" - workbox-webpack-plugin "4.2.0" - optionalDependencies: - fsevents "2.0.6" - -react-slick@~0.25.2: - version "0.25.2" - resolved "https://registry.yarnpkg.com/react-slick/-/react-slick-0.25.2.tgz#56331b67d47d8bcfe2dceb6acab1c8fd5bd1f6bc" - integrity sha512-8MNH/NFX/R7zF6W/w+FS5VXNyDusF+XDW1OU0SzODEU7wqYB+ZTGAiNJ++zVNAVqCAHdyCybScaUB+FCZOmBBw== - dependencies: - classnames "^2.2.5" - enquire.js "^2.1.6" - json2mq "^0.2.0" - lodash.debounce "^4.0.8" - resize-observer-polyfill "^1.5.0" - -react@^16.8.6: - version "16.9.0" - resolved "https://registry.yarnpkg.com/react/-/react-16.9.0.tgz#40ba2f9af13bc1a38d75dbf2f4359a5185c4f7aa" - integrity sha512-+7LQnFBwkiw+BobzOF6N//BdoNw0ouwmSJTEm9cglOOmsg/TMiFHZLe2sEoN5M7LgJTj9oHH0gxklfnQe66S1w== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - prop-types "^15.6.2" - -read-pkg-up@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-2.0.0.tgz#6b72a8048984e0c41e79510fd5e9fa99b3b549be" - integrity sha1-a3KoBImE4MQeeVEP1en6mbO1Sb4= - 
dependencies: - find-up "^2.0.0" - read-pkg "^2.0.0" - -read-pkg-up@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-4.0.0.tgz#1b221c6088ba7799601c808f91161c66e58f8978" - integrity sha512-6etQSH7nJGsK0RbG/2TeDzZFa8shjQ1um+SwQQ5cwKy0dhSXdOncEhb1CPpvQG4h7FyOV6EB6YlV0yJvZQNAkA== - dependencies: - find-up "^3.0.0" - read-pkg "^3.0.0" - -read-pkg@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-2.0.0.tgz#8ef1c0623c6a6db0dc6713c4bfac46332b2368f8" - integrity sha1-jvHAYjxqbbDcZxPEv6xGMysjaPg= - dependencies: - load-json-file "^2.0.0" - normalize-package-data "^2.3.2" - path-type "^2.0.0" - -read-pkg@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-3.0.0.tgz#9cbc686978fee65d16c00e2b19c237fcf6e38389" - integrity sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k= - dependencies: - load-json-file "^4.0.0" - normalize-package-data "^2.3.2" - path-type "^3.0.0" - -"readable-stream@1 || 2", readable-stream@^2.0.0, readable-stream@^2.0.1, readable-stream@^2.0.2, readable-stream@^2.0.6, readable-stream@^2.1.5, readable-stream@^2.2.2, readable-stream@^2.3.3, readable-stream@^2.3.6, readable-stream@~2.3.6: - version "2.3.6" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.6.tgz#b11c27d88b8ff1fbe070643cf94b0c79ae1b0aaf" - integrity sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw== - dependencies: - core-util-is "~1.0.0" - inherits "~2.0.3" - isarray "~1.0.0" - process-nextick-args "~2.0.0" - safe-buffer "~5.1.1" - string_decoder "~1.1.1" - util-deprecate "~1.0.1" - -readable-stream@^3.0.6, readable-stream@^3.1.1: - version "3.4.0" - resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.4.0.tgz#a51c26754658e0a3c21dbf59163bd45ba6f447fc" - integrity sha512-jItXPLmrSR8jmTRmRWJXCnGJsfy85mB3Wd/uINMXA65yrnFo0cPClFIUWzo2najVNSl+mx7/4W8ttlLWJe99pQ== - dependencies: - inherits "^2.0.3" - string_decoder "^1.1.1" - util-deprecate "^1.0.1" - -readdirp@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-2.2.1.tgz#0e87622a3325aa33e892285caf8b4e846529a525" - integrity sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ== - dependencies: - graceful-fs "^4.1.11" - micromatch "^3.1.10" - readable-stream "^2.0.2" - -realpath-native@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/realpath-native/-/realpath-native-1.1.0.tgz#2003294fea23fb0672f2476ebe22fcf498a2d65c" - integrity sha512-wlgPA6cCIIg9gKz0fgAPjnzh4yR/LnXovwuo9hvyGvx3h8nX4+/iLZplfUWasXpqD8BdnGnP5njOFjkUwPzvjA== - dependencies: - util.promisify "^1.0.0" - -recursive-readdir@2.2.2: - version "2.2.2" - resolved "https://registry.yarnpkg.com/recursive-readdir/-/recursive-readdir-2.2.2.tgz#9946fb3274e1628de6e36b2f6714953b4845094f" - integrity sha512-nRCcW9Sj7NuZwa2XvH9co8NPeXUBhZP7CRKJtU+cS6PW9FpCIFoI5ib0NT1ZrbNuPoRy0ylyCaUL8Gih4LSyFg== - dependencies: - minimatch "3.0.4" - -regenerate-unicode-properties@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz#ef51e0f0ea4ad424b77bf7cb41f3e015c70a3f0e" - integrity sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA== - dependencies: - regenerate "^1.4.0" - -regenerate@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.0.tgz#4a856ec4b56e4077c557589cae85e7a4c8869a11" - 
integrity sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg== - -regenerator-runtime@0.13.3, regenerator-runtime@^0.13.2: - version "0.13.3" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz#7cf6a77d8f5c6f60eb73c5fc1955b2ceb01e6bf5" - integrity sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw== - -regenerator-runtime@^0.11.0: - version "0.11.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" - integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg== - -regenerator-transform@^0.14.0: - version "0.14.1" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.1.tgz#3b2fce4e1ab7732c08f665dfdb314749c7ddd2fb" - integrity sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ== - dependencies: - private "^0.1.6" - -regex-not@^1.0.0, regex-not@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/regex-not/-/regex-not-1.0.2.tgz#1f4ece27e00b0b65e0247a6810e6a85d83a5752c" - integrity sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A== - dependencies: - extend-shallow "^3.0.2" - safe-regex "^1.1.0" - -regexp-tree@^0.1.6: - version "0.1.12" - resolved "https://registry.yarnpkg.com/regexp-tree/-/regexp-tree-0.1.12.tgz#28eaaa6e66eeb3527c15108a3ff740d9e574e420" - integrity sha512-TsXZ8+cv2uxMEkLfgwO0E068gsNMLfuYwMMhiUxf0Kw2Vcgzq93vgl6wIlIYuPmfMqMjfQ9zAporiozqCnwLuQ== - -regexp.prototype.flags@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.2.0.tgz#6b30724e306a27833eeb171b66ac8890ba37e41c" - integrity sha512-ztaw4M1VqgMwl9HlPpOuiYgItcHlunW0He2fE6eNfT6E/CF2FtYi9ofOYe4mKntstYk0Fyh/rDRBdS3AnxjlrA== - dependencies: - define-properties "^1.1.2" - -regexpp@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-2.0.1.tgz#8d19d31cf632482b589049f8281f93dbcba4d07f" - integrity sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw== - -regexpu-core@^4.5.4: - version "4.5.5" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.5.5.tgz#aaffe61c2af58269b3e516b61a73790376326411" - integrity sha512-FpI67+ky9J+cDizQUJlIlNZFKual/lUkFr1AG6zOCpwZ9cLrg8UUVakyUQJD7fCDIe9Z2nwTQJNPyonatNmDFQ== - dependencies: - regenerate "^1.4.0" - regenerate-unicode-properties "^8.1.0" - regjsgen "^0.5.0" - regjsparser "^0.6.0" - unicode-match-property-ecmascript "^1.0.4" - unicode-match-property-value-ecmascript "^1.1.0" - -regjsgen@^0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.0.tgz#a7634dc08f89209c2049adda3525711fb97265dd" - integrity sha512-RnIrLhrXCX5ow/E5/Mh2O4e/oa1/jW0eaBKTSy3LaCj+M3Bqvm97GWDp2yUtzIs4LEn65zR2yiYGFqb2ApnzDA== - -regjsparser@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.0.tgz#f1e6ae8b7da2bae96c99399b868cd6c933a2ba9c" - integrity sha512-RQ7YyokLiQBomUJuUG8iGVvkgOLxwyZM8k6d3q5SAXpg4r5TZJZigKFvC6PpD+qQ98bCDC5YelPeA3EucDoNeQ== - dependencies: - jsesc "~0.5.0" - -relateurl@0.2.x: - version "0.2.7" - resolved "https://registry.yarnpkg.com/relateurl/-/relateurl-0.2.7.tgz#54dbf377e51440aca90a4cd274600d3ff2d888a9" - integrity sha1-VNvzd+UUQKypCkzSdGANP/LYiKk= - 
-remove-trailing-separator@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz#c24bce2a283adad5bc3f58e0d48249b92379d8ef" - integrity sha1-wkvOKig62tW8P1jg1IJJuSN52O8= - -renderkid@^2.0.1: - version "2.0.3" - resolved "https://registry.yarnpkg.com/renderkid/-/renderkid-2.0.3.tgz#380179c2ff5ae1365c522bf2fcfcff01c5b74149" - integrity sha512-z8CLQp7EZBPCwCnncgf9C4XAi3WR0dv+uWu/PjIyhhAb5d6IJ/QZqlHFprHeKT+59//V6BNUsLbvN8+2LarxGA== - dependencies: - css-select "^1.1.0" - dom-converter "^0.2" - htmlparser2 "^3.3.0" - strip-ansi "^3.0.0" - utila "^0.4.0" - -repeat-element@^1.1.2: - version "1.1.3" - resolved "https://registry.yarnpkg.com/repeat-element/-/repeat-element-1.1.3.tgz#782e0d825c0c5a3bb39731f84efee6b742e6b1ce" - integrity sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g== - -repeat-string@^1.6.1: - version "1.6.1" - resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" - integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= - -request-promise-core@1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.2.tgz#339f6aababcafdb31c799ff158700336301d3346" - integrity sha512-UHYyq1MO8GsefGEt7EprS8UrXsm1TxEvFUX1IMTuSLU2Rh7fTIdFtl8xD7JiEYiWU2dl+NYAjCTksTehQUxPag== - dependencies: - lodash "^4.17.11" - -request-promise-native@^1.0.5: - version "1.0.7" - resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.7.tgz#a49868a624bdea5069f1251d0a836e0d89aa2c59" - integrity sha512-rIMnbBdgNViL37nZ1b3L/VfPOpSi0TqVDQPAvO6U14lMzOLrt5nilxCQqtDKhZeDiW0/hkCXGoQjhgJd/tCh6w== - dependencies: - request-promise-core "1.1.2" - stealthy-require "^1.1.1" - tough-cookie "^2.3.3" - -request@^2.83.0, request@^2.87.0, request@^2.88.0: - version "2.88.0" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" - integrity sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - har-validator "~5.1.0" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - oauth-sign "~0.9.0" - performance-now "^2.1.0" - qs "~6.5.2" - safe-buffer "^5.1.2" - tough-cookie "~2.4.3" - tunnel-agent "^0.6.0" - uuid "^3.3.2" - -require-directory@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" - integrity sha1-jGStX9MNqxyXbiNE/+f3kqam30I= - -require-main-filename@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-1.0.1.tgz#97f717b69d48784f5f526a6c5aa8ffdda055a4d1" - integrity sha1-l/cXtp1IeE9fUmpsWqj/3aBVpNE= - -require-main-filename@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" - integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== - -requireindex@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/requireindex/-/requireindex-1.2.0.tgz#3463cdb22ee151902635aa6c9535d4de9c2ef1ef" - integrity 
sha512-L9jEkOi3ASd9PYit2cwRfyppc9NoABujTP8/5gFcbERmo5jUoAKovIC3fsF17pkTnGsrByysqX+Kxd2OTNI1ww== - -requires-port@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" - integrity sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8= - -resize-observer-polyfill@^1.5.0, resize-observer-polyfill@^1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/resize-observer-polyfill/-/resize-observer-polyfill-1.5.1.tgz#0e9020dd3d21024458d4ebd27e23e40269810464" - integrity sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg== - -resolve-cwd@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-2.0.0.tgz#00a9f7387556e27038eae232caa372a6a59b665a" - integrity sha1-AKn3OHVW4nA46uIyyqNypqWbZlo= - dependencies: - resolve-from "^3.0.0" - -resolve-from@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" - integrity sha1-six699nWiBvItuZTM17rywoYh0g= - -resolve-from@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" - integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== - -resolve-pathname@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/resolve-pathname/-/resolve-pathname-2.2.0.tgz#7e9ae21ed815fd63ab189adeee64dc831eefa879" - integrity sha512-bAFz9ld18RzJfddgrO2e/0S2O81710++chRMUxHjXOYKF6jTAMrUNZrEZ1PvV0zlhfjidm08iRPdTLPno1FuRg== - -resolve-url@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" - integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= - -resolve@1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.1.7.tgz#203114d82ad2c5ed9e8e0411b3932875e889e97b" - integrity sha1-IDEU2CrSxe2ejgQRs5ModeiJ6Xs= - -resolve@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.10.0.tgz#3bdaaeaf45cc07f375656dfd2e54ed0810b101ba" - integrity sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg== - dependencies: - path-parse "^1.0.6" - -resolve@^1.10.0, resolve@^1.3.2, resolve@^1.5.0, resolve@^1.8.1, resolve@^1.9.0: - version "1.12.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.12.0.tgz#3fc644a35c84a48554609ff26ec52b66fa577df6" - integrity sha512-B/dOmuoAik5bKcD6s6nXDCjzUKnaDvdkRyAk6rsmsKLipWj4797iothd7jmmUhWTfinVMU+wc56rYKsit2Qy4w== - dependencies: - path-parse "^1.0.6" - -restore-cursor@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" - integrity sha1-n37ih/gv0ybU/RYpI9YhKe7g368= - dependencies: - onetime "^2.0.0" - signal-exit "^3.0.2" - -ret@~0.1.10: - version "0.1.15" - resolved "https://registry.yarnpkg.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" - integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== - -rgb-regex@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" - integrity sha1-wODWiC3w4jviVKR16O3UGRX+rrE= - -rgba-regex@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3" - integrity sha1-QzdOLiyglosO8VI0YLfXMP8i7rM= - -rimraf@2.6.3: - version "2.6.3" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab" - integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA== - dependencies: - glob "^7.1.3" - -rimraf@^2.2.8, rimraf@^2.5.4, rimraf@^2.6.1, rimraf@^2.6.3: - version "2.7.1" - resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.7.1.tgz#35797f13a7fdadc566142c29d4f07ccad483e3ec" - integrity sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w== - dependencies: - glob "^7.1.3" - -ripemd160@^2.0.0, ripemd160@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.2.tgz#a1c1a6f624751577ba5d07914cbc92850585890c" - integrity sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA== - dependencies: - hash-base "^3.0.0" - inherits "^2.0.1" - -rmc-feedback@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/rmc-feedback/-/rmc-feedback-2.0.0.tgz#cbc6cb3ae63c7a635eef0e25e4fbaf5ac366eeaa" - integrity sha512-5PWOGOW7VXks/l3JzlOU9NIxRpuaSS8d9zA3UULUCuTKnpwBHNvv1jSJzxgbbCQeYzROWUpgKI4za3X4C/mKmQ== - dependencies: - babel-runtime "6.x" - classnames "^2.2.5" - -rsvp@^4.8.4: - version "4.8.5" - resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-4.8.5.tgz#c8f155311d167f68f21e168df71ec5b083113734" - integrity sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA== - -run-async@^2.2.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.3.0.tgz#0371ab4ae0bdd720d4166d7dfda64ff7a445a6c0" - integrity sha1-A3GrSuC91yDUFm19/aZP96RFpsA= - dependencies: - is-promise "^2.1.0" - -run-queue@^1.0.0, run-queue@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/run-queue/-/run-queue-1.0.3.tgz#e848396f057d223f24386924618e25694161ec47" - integrity sha1-6Eg5bwV9Ij8kOGkkYY4laUFh7Ec= - dependencies: - aproba "^1.1.1" - -rxjs@^6.4.0: - version "6.5.2" - resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.5.2.tgz#2e35ce815cd46d84d02a209fb4e5921e051dbec7" - integrity sha512-HUb7j3kvb7p7eCUHE3FqjoDsC1xfZQ4AHFWfTKSpZ+sAhhz5X1WX0ZuUqWbzB2QhSLp3DoLUG+hMdEDKqWo2Zg== - dependencies: - tslib "^1.9.0" - -safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" - integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== - -safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.0.tgz#b74daec49b1148f88c64b68d49b1e815c1f2f519" - integrity sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg== - -safe-regex@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/safe-regex/-/safe-regex-1.1.0.tgz#40a3669f3b077d1e943d44629e157dd48023bf2e" - integrity sha1-QKNmnzsHfR6UPURinhV91IAjvy4= - dependencies: - ret "~0.1.10" - -"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: - version "2.1.2" - resolved 
"https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== - -sane@^4.0.3: - version "4.1.0" - resolved "https://registry.yarnpkg.com/sane/-/sane-4.1.0.tgz#ed881fd922733a6c461bc189dc2b6c006f3ffded" - integrity sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA== - dependencies: - "@cnakazawa/watch" "^1.0.3" - anymatch "^2.0.0" - capture-exit "^2.0.0" - exec-sh "^0.3.2" - execa "^1.0.0" - fb-watchman "^2.0.0" - micromatch "^3.1.4" - minimist "^1.1.1" - walker "~1.0.5" - -sass-loader@7.1.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/sass-loader/-/sass-loader-7.1.0.tgz#16fd5138cb8b424bf8a759528a1972d72aad069d" - integrity sha512-+G+BKGglmZM2GUSfT9TLuEp6tzehHPjAMoRRItOojWIqIGPloVCMhNIQuG639eJ+y033PaGTSjLaTHts8Kw79w== - dependencies: - clone-deep "^2.0.1" - loader-utils "^1.0.1" - lodash.tail "^4.1.1" - neo-async "^2.5.0" - pify "^3.0.0" - semver "^5.5.0" - -sax@^1.2.4, sax@~1.2.4: - version "1.2.4" - resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9" - integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw== - -saxes@^3.1.9: - version "3.1.11" - resolved "https://registry.yarnpkg.com/saxes/-/saxes-3.1.11.tgz#d59d1fd332ec92ad98a2e0b2ee644702384b1c5b" - integrity sha512-Ydydq3zC+WYDJK1+gRxRapLIED9PWeSuuS41wqyoRmzvhhh9nc+QQrVMKJYzJFULazeGhzSV0QleN2wD3boh2g== - dependencies: - xmlchars "^2.1.1" - -scheduler@^0.15.0: - version "0.15.0" - resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.15.0.tgz#6bfcf80ff850b280fed4aeecc6513bc0b4f17f8e" - integrity sha512-xAefmSfN6jqAa7Kuq7LIJY0bwAPG3xlCj0HMEBQk1lxYiDKZscY2xJ5U/61ZTrYbmNQbXa+gc7czPkVo11tnCg== - dependencies: - loose-envify "^1.1.0" - object-assign "^4.1.1" - -schema-utils@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-1.0.0.tgz#0b79a93204d7b600d4b2850d1f66c2a34951c770" - integrity sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g== - dependencies: - ajv "^6.1.0" - ajv-errors "^1.0.0" - ajv-keywords "^3.1.0" - -select-hose@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/select-hose/-/select-hose-2.0.0.tgz#625d8658f865af43ec962bfc376a37359a4994ca" - integrity sha1-Yl2GWPhlr0Psliv8N2o3NZpJlMo= - -selfsigned@^1.9.1: - version "1.10.4" - resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-1.10.4.tgz#cdd7eccfca4ed7635d47a08bf2d5d3074092e2cd" - integrity sha512-9AukTiDmHXGXWtWjembZ5NDmVvP2695EtpgbCsxCa68w3c88B+alqbmZ4O3hZ4VWGXeGWzEVdvqgAJD8DQPCDw== - dependencies: - node-forge "0.7.5" - -"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.4.1, semver@^5.5.0, semver@^5.5.1, semver@^5.6.0: - version "5.7.1" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7" - integrity sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ== - -semver@5.5.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-5.5.0.tgz#dc4bbc7a6ca9d916dee5d43516f0092b58f7b8ab" - integrity sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA== - -semver@6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.0.0.tgz#05e359ee571e5ad7ed641a6eec1e547ba52dea65" - 
integrity sha512-0UewU+9rFapKFnlbirLi3byoOuhrSsli/z/ihNnvM24vgF+8sNBiI1LZPBSH9wJKUwaUbw+s3hToDLCXkrghrQ== - -semver@^6.0.0, semver@^6.2.0, semver@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" - integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== - -send@0.17.1: - version "0.17.1" - resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" - integrity sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg== - dependencies: - debug "2.6.9" - depd "~1.1.2" - destroy "~1.0.4" - encodeurl "~1.0.2" - escape-html "~1.0.3" - etag "~1.8.1" - fresh "0.5.2" - http-errors "~1.7.2" - mime "1.6.0" - ms "2.1.1" - on-finished "~2.3.0" - range-parser "~1.2.1" - statuses "~1.5.0" - -serialize-javascript@^1.4.0, serialize-javascript@^1.7.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-1.8.0.tgz#9515fc687232e2321aea1ca7a529476eb34bb480" - integrity sha512-3tHgtF4OzDmeKYj6V9nSyceRS0UJ3C7VqyD2Yj28vC/z2j6jG5FmFGahOKMD9CrglxTm3tETr87jEypaYV8DUg== - -serve-index@^1.7.2: - version "1.9.1" - resolved "https://registry.yarnpkg.com/serve-index/-/serve-index-1.9.1.tgz#d3768d69b1e7d82e5ce050fff5b453bea12a9239" - integrity sha1-03aNabHn2C5c4FD/9bRTvqEqkjk= - dependencies: - accepts "~1.3.4" - batch "0.6.1" - debug "2.6.9" - escape-html "~1.0.3" - http-errors "~1.6.2" - mime-types "~2.1.17" - parseurl "~1.3.2" - -serve-static@1.14.1: - version "1.14.1" - resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.14.1.tgz#666e636dc4f010f7ef29970a88a674320898b2f9" - integrity sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg== - dependencies: - encodeurl "~1.0.2" - escape-html "~1.0.3" - parseurl "~1.3.3" - send "0.17.1" - -set-blocking@^2.0.0, set-blocking@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" - integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= - -set-value@^2.0.0, set-value@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/set-value/-/set-value-2.0.1.tgz#a18d40530e6f07de4228c7defe4227af8cad005b" - integrity sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw== - dependencies: - extend-shallow "^2.0.1" - is-extendable "^0.1.1" - is-plain-object "^2.0.3" - split-string "^3.0.1" - -setimmediate@^1.0.4, setimmediate@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/setimmediate/-/setimmediate-1.0.5.tgz#290cbb232e306942d7d7ea9b83732ab7856f8285" - integrity sha1-KQy7Iy4waULX1+qbg3Mqt4VvgoU= - -setprototypeof@1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.0.tgz#d0bd85536887b6fe7c0d818cb962d9d91c54e656" - integrity sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ== - -setprototypeof@1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683" - integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw== - -sha.js@^2.4.0, sha.js@^2.4.8: - version "2.4.11" - resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7" - integrity 
sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ== - dependencies: - inherits "^2.0.1" - safe-buffer "^5.0.1" - -shallow-clone@^0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-0.1.2.tgz#5909e874ba77106d73ac414cfec1ffca87d97060" - integrity sha1-WQnodLp3EG1zrEFM/sH/yofZcGA= - dependencies: - is-extendable "^0.1.1" - kind-of "^2.0.1" - lazy-cache "^0.2.3" - mixin-object "^2.0.1" - -shallow-clone@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-1.0.0.tgz#4480cd06e882ef68b2ad88a3ea54832e2c48b571" - integrity sha512-oeXreoKR/SyNJtRJMAKPDSvd28OqEwG4eR/xc856cRGBII7gX9lvAqDxusPm0846z/w/hWYjI1NpKwJ00NHzRA== - dependencies: - is-extendable "^0.1.1" - kind-of "^5.0.0" - mixin-object "^2.0.1" - -shallow-equal@^1.0.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/shallow-equal/-/shallow-equal-1.2.0.tgz#fd828d2029ff4e19569db7e19e535e94e2d1f5cc" - integrity sha512-Z21pVxR4cXsfwpMKMhCEIO1PCi5sp7KEp+CmOpBQ+E8GpHwKOw2sEzk7sgblM3d/j4z4gakoWEoPcjK0VJQogA== - -shallowequal@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-0.2.2.tgz#1e32fd5bcab6ad688a4812cb0cc04efc75c7014e" - integrity sha1-HjL9W8q2rWiKSBLLDMBO/HXHAU4= - dependencies: - lodash.keys "^3.1.2" - -shallowequal@^1.0.1, shallowequal@^1.0.2, shallowequal@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" - integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== - -shebang-command@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" - integrity sha1-RKrGW2lbAzmJaMOfNj/uXer98eo= - dependencies: - shebang-regex "^1.0.0" - -shebang-regex@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" - integrity sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM= - -shell-quote@1.6.1: - version "1.6.1" - resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.6.1.tgz#f4781949cce402697127430ea3b3c5476f481767" - integrity sha1-9HgZSczkAmlxJ0MOo7PFR29IF2c= - dependencies: - array-filter "~0.0.0" - array-map "~0.0.0" - array-reduce "~0.0.0" - jsonify "~0.0.0" - -shellwords@^0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/shellwords/-/shellwords-0.1.1.tgz#d6b9181c1a48d397324c84871efbcfc73fc0654b" - integrity sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww== - -signal-exit@^3.0.0, signal-exit@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.2.tgz#b5fdc08f1287ea1178628e415e25132b73646c6d" - integrity sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0= - -simple-swizzle@^0.2.2: - version "0.2.2" - resolved "https://registry.yarnpkg.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" - integrity sha1-pNprY1/8zMoz9w0Xy5JZLeleVXo= - dependencies: - is-arrayish "^0.3.1" - -sisteransi@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.3.tgz#98168d62b79e3a5e758e27ae63c4a053d748f4eb" - integrity sha512-SbEG75TzH8G7eVXFSN5f9EExILKfly7SUvVY5DhhYLvfhKqhDFY0OzevWa/zwak0RLRfWS5AvfMWpd9gJvr5Yg== - -slash@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" - integrity sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU= - -slash@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" - integrity sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A== - -slice-ansi@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-2.1.0.tgz#cacd7693461a637a5788d92a7dd4fba068e81636" - integrity sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ== - dependencies: - ansi-styles "^3.2.0" - astral-regex "^1.0.0" - is-fullwidth-code-point "^2.0.0" - -snapdragon-node@^2.0.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/snapdragon-node/-/snapdragon-node-2.1.1.tgz#6c175f86ff14bdb0724563e8f3c1b021a286853b" - integrity sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw== - dependencies: - define-property "^1.0.0" - isobject "^3.0.0" - snapdragon-util "^3.0.1" - -snapdragon-util@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/snapdragon-util/-/snapdragon-util-3.0.1.tgz#f956479486f2acd79700693f6f7b805e45ab56e2" - integrity sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ== - dependencies: - kind-of "^3.2.0" - -snapdragon@^0.8.1: - version "0.8.2" - resolved "https://registry.yarnpkg.com/snapdragon/-/snapdragon-0.8.2.tgz#64922e7c565b0e14204ba1aa7d6964278d25182d" - integrity sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg== - dependencies: - base "^0.11.1" - debug "^2.2.0" - define-property "^0.2.5" - extend-shallow "^2.0.1" - map-cache "^0.2.2" - source-map "^0.5.6" - source-map-resolve "^0.5.0" - use "^3.1.0" - -sockjs-client@1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/sockjs-client/-/sockjs-client-1.3.0.tgz#12fc9d6cb663da5739d3dc5fb6e8687da95cb177" - integrity sha512-R9jxEzhnnrdxLCNln0xg5uGHqMnkhPSTzUZH2eXcR03S/On9Yvoq2wyUZILRUhZCNVu2PmwWVoyuiPz8th8zbg== - dependencies: - debug "^3.2.5" - eventsource "^1.0.7" - faye-websocket "~0.11.1" - inherits "^2.0.3" - json3 "^3.3.2" - url-parse "^1.4.3" - -sockjs@0.3.19: - version "0.3.19" - resolved "https://registry.yarnpkg.com/sockjs/-/sockjs-0.3.19.tgz#d976bbe800af7bd20ae08598d582393508993c0d" - integrity sha512-V48klKZl8T6MzatbLlzzRNhMepEys9Y4oGFpypBFFn1gLI/QQ9HtLLyWJNbPlwGLelOVOEijUbTTJeLLI59jLw== - dependencies: - faye-websocket "^0.10.0" - uuid "^3.0.1" - -source-list-map@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" - integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== - -source-map-resolve@^0.5.0: - version "0.5.2" - resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.2.tgz#72e2cc34095543e43b2c62b2c4c10d4a9054f259" - integrity sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA== - dependencies: - atob "^2.1.1" - decode-uri-component "^0.2.0" - resolve-url "^0.2.1" - source-map-url "^0.4.0" - urix "^0.1.0" - -source-map-support@^0.5.6, source-map-support@~0.5.10, source-map-support@~0.5.12: - version "0.5.13" - resolved 
"https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932" - integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w== - dependencies: - buffer-from "^1.0.0" - source-map "^0.6.0" - -source-map-url@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/source-map-url/-/source-map-url-0.4.0.tgz#3e935d7ddd73631b97659956d55128e87b5084a3" - integrity sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM= - -source-map@^0.5.0, source-map@^0.5.3, source-map@^0.5.6: - version "0.5.7" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" - integrity sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w= - -source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0, source-map@~0.6.1: - version "0.6.1" - resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" - integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== - -spdx-correct@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.1.0.tgz#fb83e504445268f154b074e218c87c003cd31df4" - integrity sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q== - dependencies: - spdx-expression-parse "^3.0.0" - spdx-license-ids "^3.0.0" - -spdx-exceptions@^2.1.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz#2ea450aee74f2a89bfb94519c07fcd6f41322977" - integrity sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA== - -spdx-expression-parse@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz#99e119b7a5da00e05491c9fa338b7904823b41d0" - integrity sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg== - dependencies: - spdx-exceptions "^2.1.0" - spdx-license-ids "^3.0.0" - -spdx-license-ids@^3.0.0: - version "3.0.5" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654" - integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q== - -spdy-transport@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/spdy-transport/-/spdy-transport-3.0.0.tgz#00d4863a6400ad75df93361a1608605e5dcdcf31" - integrity sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw== - dependencies: - debug "^4.1.0" - detect-node "^2.0.4" - hpack.js "^2.1.6" - obuf "^1.1.2" - readable-stream "^3.0.6" - wbuf "^1.7.3" - -spdy@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/spdy/-/spdy-4.0.1.tgz#6f12ed1c5db7ea4f24ebb8b89ba58c87c08257f2" - integrity sha512-HeZS3PBdMA+sZSu0qwpCxl3DeALD5ASx8pAX0jZdKXSpPWbQ6SYGnlg3BBmYLx5LtiZrmkAZfErCm2oECBcioA== - dependencies: - debug "^4.1.0" - handle-thing "^2.0.0" - http-deceiver "^1.2.7" - select-hose "^2.0.0" - spdy-transport "^3.0.0" - -split-string@^3.0.1, split-string@^3.0.2: - version "3.1.0" - resolved "https://registry.yarnpkg.com/split-string/-/split-string-3.1.0.tgz#7cb09dda3a86585705c64b39a6466038682e8fe2" - integrity sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw== - dependencies: - extend-shallow "^3.0.0" - -sprintf-js@~1.0.2: - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" - integrity sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw= - -sshpk@^1.7.0: - version "1.16.1" - resolved "https://registry.yarnpkg.com/sshpk/-/sshpk-1.16.1.tgz#fb661c0bef29b39db40769ee39fa70093d6f6877" - integrity sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg== - dependencies: - asn1 "~0.2.3" - assert-plus "^1.0.0" - bcrypt-pbkdf "^1.0.0" - dashdash "^1.12.0" - ecc-jsbn "~0.1.1" - getpass "^0.1.1" - jsbn "~0.1.0" - safer-buffer "^2.0.2" - tweetnacl "~0.14.0" - -ssri@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ssri/-/ssri-6.0.1.tgz#2a3c41b28dd45b62b63676ecb74001265ae9edd8" - integrity sha512-3Wge10hNcT1Kur4PDFwEieXSCMCJs/7WvSACcrMYrNp+b8kDL1/0wJch5Ni2WrtwEa2IO8OsVfeKIciKCDx/QA== - dependencies: - figgy-pudding "^3.5.1" - -stable@^0.1.8: - version "0.1.8" - resolved "https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" - integrity sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w== - -stack-utils@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-1.0.2.tgz#33eba3897788558bebfc2db059dc158ec36cebb8" - integrity sha512-MTX+MeG5U994cazkjd/9KNAapsHnibjMLnfXodlkXw76JEea0UiNzrqidzo1emMwk7w5Qhc9jd4Bn9TBb1MFwA== - -static-extend@^0.1.1: - version "0.1.2" - resolved "https://registry.yarnpkg.com/static-extend/-/static-extend-0.1.2.tgz#60809c39cbff55337226fd5e0b520f341f1fb5c6" - integrity sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY= - dependencies: - define-property "^0.2.5" - object-copy "^0.1.0" - -"statuses@>= 1.4.0 < 2", "statuses@>= 1.5.0 < 2", statuses@~1.5.0: - version "1.5.0" - resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" - integrity sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow= - -stealthy-require@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/stealthy-require/-/stealthy-require-1.1.1.tgz#35b09875b4ff49f26a777e509b3090a3226bf24b" - integrity sha1-NbCYdbT/SfJqd35QmzCQoyJr8ks= - -stream-browserify@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/stream-browserify/-/stream-browserify-2.0.2.tgz#87521d38a44aa7ee91ce1cd2a47df0cb49dd660b" - integrity sha512-nX6hmklHs/gr2FuxYDltq8fJA1GDlxKQCz8O/IM4atRqBH8OORmBNgfvW5gG10GT/qQ9u0CzIvr2X5Pkt6ntqg== - dependencies: - inherits "~2.0.1" - readable-stream "^2.0.2" - -stream-each@^1.1.0: - version "1.2.3" - resolved "https://registry.yarnpkg.com/stream-each/-/stream-each-1.2.3.tgz#ebe27a0c389b04fbcc233642952e10731afa9bae" - integrity sha512-vlMC2f8I2u/bZGqkdfLQW/13Zihpej/7PmSiMQsbYddxuTsJp8vRe2x2FvVExZg7FaOds43ROAuFJwPR4MTZLw== - dependencies: - end-of-stream "^1.1.0" - stream-shift "^1.0.0" - -stream-http@^2.7.2: - version "2.8.3" - resolved "https://registry.yarnpkg.com/stream-http/-/stream-http-2.8.3.tgz#b2d242469288a5a27ec4fe8933acf623de6514fc" - integrity sha512-+TSkfINHDo4J+ZobQLWiMouQYB+UVYFttRA94FpEzzJ7ZdqcL4uUUQ7WkdkI4DSozGmgBUE/a47L+38PenXhUw== - dependencies: - builtin-status-codes "^3.0.0" - inherits "^2.0.1" - readable-stream "^2.3.6" - to-arraybuffer "^1.0.0" - xtend "^4.0.0" - -stream-shift@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.0.tgz#d5c752825e5367e786f78e18e445ea223a155952" - integrity sha1-1cdSgl5TZ+eG944Y5EXqIjoVWVI= - -string-convert@^0.2.0: - version "0.2.1" - resolved 
"https://registry.yarnpkg.com/string-convert/-/string-convert-0.2.1.tgz#6982cc3049fbb4cd85f8b24568b9d9bf39eeff97" - integrity sha1-aYLMMEn7tM2F+LJFaLnZvznu/5c= - -string-length@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/string-length/-/string-length-2.0.0.tgz#d40dbb686a3ace960c1cffca562bf2c45f8363ed" - integrity sha1-1A27aGo6zpYMHP/KVivyxF+DY+0= - dependencies: - astral-regex "^1.0.0" - strip-ansi "^4.0.0" - -string-width@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" - integrity sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M= - dependencies: - code-point-at "^1.0.0" - is-fullwidth-code-point "^1.0.0" - strip-ansi "^3.0.0" - -"string-width@^1.0.2 || 2", string-width@^2.0.0, string-width@^2.1.0, string-width@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" - integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== - dependencies: - is-fullwidth-code-point "^2.0.0" - strip-ansi "^4.0.0" - -string-width@^3.0.0, string-width@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" - integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== - dependencies: - emoji-regex "^7.0.1" - is-fullwidth-code-point "^2.0.0" - strip-ansi "^5.1.0" - -string_decoder@^1.0.0, string_decoder@^1.1.1: - version "1.3.0" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" - integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== - dependencies: - safe-buffer "~5.2.0" - -string_decoder@~1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" - integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== - dependencies: - safe-buffer "~5.1.0" - -stringify-object@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" - integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw== - dependencies: - get-own-enumerable-property-symbols "^3.0.0" - is-obj "^1.0.1" - is-regexp "^1.0.0" - -strip-ansi@5.2.0, strip-ansi@^5.0.0, strip-ansi@^5.1.0, strip-ansi@^5.2.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" - integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== - dependencies: - ansi-regex "^4.1.0" - -strip-ansi@^3.0.0, strip-ansi@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" - integrity sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8= - dependencies: - ansi-regex "^2.0.0" - -strip-ansi@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" - integrity sha1-qEeQIusaw2iocTibY1JixQXuNo8= - dependencies: - ansi-regex "^3.0.0" - -strip-bom@^3.0.0: - version "3.0.0" - resolved 
"https://registry.yarnpkg.com/strip-bom/-/strip-bom-3.0.0.tgz#2334c18e9c759f7bdd56fdef7e9ae3d588e68ed3" - integrity sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM= - -strip-comments@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/strip-comments/-/strip-comments-1.0.2.tgz#82b9c45e7f05873bee53f37168af930aa368679d" - integrity sha512-kL97alc47hoyIQSV165tTt9rG5dn4w1dNnBhOQ3bOU1Nc1hel09jnXANaHJ7vzHLd4Ju8kseDGzlev96pghLFw== - dependencies: - babel-extract-comments "^1.0.0" - babel-plugin-transform-object-rest-spread "^6.26.0" - -strip-eof@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" - integrity sha1-u0P/VZim6wXYm1n80SnJgzE2Br8= - -strip-json-comments@^2.0.1, strip-json-comments@~2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= - -style-loader@0.23.1: - version "0.23.1" - resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-0.23.1.tgz#cb9154606f3e771ab6c4ab637026a1049174d925" - integrity sha512-XK+uv9kWwhZMZ1y7mysB+zoihsEj4wneFWAS5qoiLwzW0WzSqMrrsIy+a3zkQJq0ipFtBpX5W3MqyRIBF/WFGg== - dependencies: - loader-utils "^1.1.0" - schema-utils "^1.0.0" - -stylehacks@^4.0.0: - version "4.0.3" - resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-4.0.3.tgz#6718fcaf4d1e07d8a1318690881e8d96726a71d5" - integrity sha512-7GlLk9JwlElY4Y6a/rmbH2MhVlTyVmiJd1PfTCqFaIBEGMYNsrO/v3SeGTdhBThLg4Z+NbOk/qFMwCa+J+3p/g== - dependencies: - browserslist "^4.0.0" - postcss "^7.0.0" - postcss-selector-parser "^3.0.0" - -supports-color@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" - integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= - -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" - -supports-color@^6.1.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-6.1.0.tgz#0764abc69c63d5ac842dd4867e8d025e880df8f3" - integrity sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ== - dependencies: - has-flag "^3.0.0" - -svg-parser@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/svg-parser/-/svg-parser-2.0.2.tgz#d134cc396fa2681dc64f518330784e98bd801ec8" - integrity sha512-1gtApepKFweigFZj3sGO8KT8LvVZK8io146EzXrpVuWCDAbISz/yMucco3hWTkpZNoPabM+dnMOpy6Swue68Zg== - -svgo@^1.0.0, svgo@^1.2.2: - version "1.3.0" - resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.3.0.tgz#bae51ba95ded9a33a36b7c46ce9c359ae9154313" - integrity sha512-MLfUA6O+qauLDbym+mMZgtXCGRfIxyQoeH6IKVcFslyODEe/ElJNwr0FohQ3xG4C6HK6bk3KYPPXwHVJk3V5NQ== - dependencies: - chalk "^2.4.1" - coa "^2.0.2" - css-select "^2.0.0" - css-select-base-adapter "^0.1.1" - css-tree "1.0.0-alpha.33" - csso "^3.5.1" - js-yaml "^3.13.1" - mkdirp "~0.5.1" - object.values "^1.1.0" - sax "~1.2.4" - stable "^0.1.8" - unquote "~1.1.1" - util.promisify "~1.0.0" - -symbol-tree@^3.2.2: - version "3.2.4" - resolved "https://registry.yarnpkg.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" - integrity 
sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== - -table@^5.2.3: - version "5.4.6" - resolved "https://registry.yarnpkg.com/table/-/table-5.4.6.tgz#1292d19500ce3f86053b05f0e8e7e4a3bb21079e" - integrity sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug== - dependencies: - ajv "^6.10.2" - lodash "^4.17.14" - slice-ansi "^2.1.0" - string-width "^3.0.0" - -tapable@^1.0.0, tapable@^1.1.0: - version "1.1.3" - resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" - integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA== - -tar@^4: - version "4.4.10" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.10.tgz#946b2810b9a5e0b26140cf78bea6b0b0d689eba1" - integrity sha512-g2SVs5QIxvo6OLp0GudTqEf05maawKUxXru104iaayWA09551tFCTI8f1Asb4lPfkBr91k07iL4c11XO3/b0tA== - dependencies: - chownr "^1.1.1" - fs-minipass "^1.2.5" - minipass "^2.3.5" - minizlib "^1.2.1" - mkdirp "^0.5.0" - safe-buffer "^5.1.2" - yallist "^3.0.3" - -terser-webpack-plugin@1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.2.3.tgz#3f98bc902fac3e5d0de730869f50668561262ec8" - integrity sha512-GOK7q85oAb/5kE12fMuLdn2btOS9OBZn4VsecpHDywoUC/jLhSAKOiYo0ezx7ss2EXPMzyEWFoE0s1WLE+4+oA== - dependencies: - cacache "^11.0.2" - find-cache-dir "^2.0.0" - schema-utils "^1.0.0" - serialize-javascript "^1.4.0" - source-map "^0.6.1" - terser "^3.16.1" - webpack-sources "^1.1.0" - worker-farm "^1.5.2" - -terser-webpack-plugin@^1.1.0: - version "1.4.1" - resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-1.4.1.tgz#61b18e40eaee5be97e771cdbb10ed1280888c2b4" - integrity sha512-ZXmmfiwtCLfz8WKZyYUuuHf3dMYEjg8NrjHMb0JqHVHVOSkzp3cW2/XG1fP3tRhqEqSzMwzzRQGtAPbs4Cncxg== - dependencies: - cacache "^12.0.2" - find-cache-dir "^2.1.0" - is-wsl "^1.1.0" - schema-utils "^1.0.0" - serialize-javascript "^1.7.0" - source-map "^0.6.1" - terser "^4.1.2" - webpack-sources "^1.4.0" - worker-farm "^1.7.0" - -terser@^3.16.1: - version "3.17.0" - resolved "https://registry.yarnpkg.com/terser/-/terser-3.17.0.tgz#f88ffbeda0deb5637f9d24b0da66f4e15ab10cb2" - integrity sha512-/FQzzPJmCpjAH9Xvk2paiWrFq+5M6aVOf+2KRbwhByISDX/EujxsK+BAvrhb6H+2rtrLCHK9N01wO014vrIwVQ== - dependencies: - commander "^2.19.0" - source-map "~0.6.1" - source-map-support "~0.5.10" - -terser@^4.1.2: - version "4.2.1" - resolved "https://registry.yarnpkg.com/terser/-/terser-4.2.1.tgz#1052cfe17576c66e7bc70fcc7119f22b155bdac1" - integrity sha512-cGbc5utAcX4a9+2GGVX4DsenG6v0x3glnDi5hx8816X1McEAwPlPgRtXPJzSBsbpILxZ8MQMT0KvArLuE0HP5A== - dependencies: - commander "^2.20.0" - source-map "~0.6.1" - source-map-support "~0.5.12" - -test-exclude@^5.2.3: - version "5.2.3" - resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-5.2.3.tgz#c3d3e1e311eb7ee405e092dac10aefd09091eac0" - integrity sha512-M+oxtseCFO3EDtAaGH7iiej3CBkzXqFMbzqYAACdzKui4eZA+pq3tZEwChvOdNfa7xxy8BfbmgJSIr43cC/+2g== - dependencies: - glob "^7.1.3" - minimatch "^3.0.4" - read-pkg-up "^4.0.0" - require-main-filename "^2.0.0" - -text-table@0.2.0, text-table@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" - integrity sha1-f17oI66AUgfACvLfSoTsP8+lcLQ= - -throat@^4.0.0: - version "4.1.0" - resolved 
"https://registry.yarnpkg.com/throat/-/throat-4.1.0.tgz#89037cbc92c56ab18926e6ba4cbb200e15672a6a" - integrity sha1-iQN8vJLFarGJJua6TLsgDhVnKmo= - -through2@^2.0.0: - version "2.0.5" - resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" - integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ== - dependencies: - readable-stream "~2.3.6" - xtend "~4.0.1" - -through@^2.3.6: - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= - -thunky@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/thunky/-/thunky-1.0.3.tgz#f5df732453407b09191dae73e2a8cc73f381a826" - integrity sha512-YwT8pjmNcAXBZqrubu22P4FYsh2D4dxRmnWBOL8Jk8bUcRUtc5326kx32tuTmFDAZtLOGEVNl8POAR8j896Iow== - -timers-browserify@^2.0.4: - version "2.0.11" - resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.11.tgz#800b1f3eee272e5bc53ee465a04d0e804c31211f" - integrity sha512-60aV6sgJ5YEbzUdn9c8kYGIqOubPoUdqQCul3SBAsRCZ40s6Y5cMcrW4dt3/k/EsbLVJNl9n6Vz3fTc+k2GeKQ== - dependencies: - setimmediate "^1.0.4" - -timsort@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/timsort/-/timsort-0.3.0.tgz#405411a8e7e6339fe64db9a234de11dc31e02bd4" - integrity sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q= - -tiny-invariant@^1.0.2: - version "1.0.6" - resolved "https://registry.yarnpkg.com/tiny-invariant/-/tiny-invariant-1.0.6.tgz#b3f9b38835e36a41c843a3b0907a5a7b3755de73" - integrity sha512-FOyLWWVjG+aC0UqG76V53yAWdXfH8bO6FNmyZOuUrzDzK8DI3/JRY25UD7+g49JWM1LXwymsKERB+DzI0dTEQA== - -tiny-warning@^1.0.0, tiny-warning@^1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/tiny-warning/-/tiny-warning-1.0.3.tgz#94a30db453df4c643d0fd566060d60a875d84754" - integrity sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA== - -tinycolor2@^1.4.1: - version "1.4.1" - resolved "https://registry.yarnpkg.com/tinycolor2/-/tinycolor2-1.4.1.tgz#f4fad333447bc0b07d4dc8e9209d8f39a8ac77e8" - integrity sha1-9PrTM0R7wLB9TcjpIJ2POaisd+g= - -tmp@^0.0.33: - version "0.0.33" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" - integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== - dependencies: - os-tmpdir "~1.0.2" - -tmpl@1.0.x: - version "1.0.4" - resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1" - integrity sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE= - -to-arraybuffer@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" - integrity sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M= - -to-fast-properties@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" - integrity sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4= - -to-object-path@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/to-object-path/-/to-object-path-0.3.0.tgz#297588b7b0e7e0ac08e04e672f85c1f4999e17af" - integrity sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68= - dependencies: - kind-of "^3.0.2" - -to-regex-range@^2.1.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-2.1.1.tgz#7c80c17b9dfebe599e27367e0d4dd5590141db38" - integrity 
sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg= - dependencies: - is-number "^3.0.0" - repeat-string "^1.6.1" - -to-regex@^3.0.1, to-regex@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/to-regex/-/to-regex-3.0.2.tgz#13cfdd9b336552f30b51f33a8ae1b42a7a7599ce" - integrity sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw== - dependencies: - define-property "^2.0.2" - extend-shallow "^3.0.2" - regex-not "^1.0.2" - safe-regex "^1.1.0" - -toggle-selection@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/toggle-selection/-/toggle-selection-1.0.6.tgz#6e45b1263f2017fa0acc7d89d78b15b8bf77da32" - integrity sha1-bkWxJj8gF/oKzH2J14sVuL932jI= - -toidentifier@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.0.tgz#7e1be3470f1e77948bc43d94a3c8f4d7752ba553" - integrity sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw== - -tough-cookie@^2.3.3, tough-cookie@^2.3.4, tough-cookie@^2.5.0: - version "2.5.0" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" - integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== - dependencies: - psl "^1.1.28" - punycode "^2.1.1" - -tough-cookie@~2.4.3: - version "2.4.3" - resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.4.3.tgz#53f36da3f47783b0925afa06ff9f3b165280f781" - integrity sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ== - dependencies: - psl "^1.1.24" - punycode "^1.4.1" - -tr46@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-1.0.1.tgz#a8b13fd6bfd2489519674ccde55ba3693b706d09" - integrity sha1-qLE/1r/SSJUZZ0zN5VujaTtwbQk= - dependencies: - punycode "^2.1.0" - -trim-right@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" - integrity sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM= - -ts-pnp@^1.0.0: - version "1.1.4" - resolved "https://registry.yarnpkg.com/ts-pnp/-/ts-pnp-1.1.4.tgz#ae27126960ebaefb874c6d7fa4729729ab200d90" - integrity sha512-1J/vefLC+BWSo+qe8OnJQfWTYRS6ingxjwqmHMqaMxXMj7kFtKLgAaYW3JeX3mktjgUL+etlU8/B4VUAUI9QGw== - -tslib@^1.8.1, tslib@^1.9.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.10.0.tgz#c3c19f95973fb0a62973fb09d90d961ee43e5c8a" - integrity sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ== - -tsutils@^3.7.0: - version "3.17.1" - resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.17.1.tgz#ed719917f11ca0dee586272b2ac49e015a2dd759" - integrity sha512-kzeQ5B8H3w60nFY2g8cJIuH7JDpsALXySGtwGJ0p2LSjLgay3NdIpqq5SoOBe46bKDW2iq25irHCr8wjomUS2g== - dependencies: - tslib "^1.8.1" - -tty-browserify@0.0.0: - version "0.0.0" - resolved "https://registry.yarnpkg.com/tty-browserify/-/tty-browserify-0.0.0.tgz#a157ba402da24e9bf957f9aa69d524eed42901a6" - integrity sha1-oVe6QC2iTpv5V/mqadUk7tQpAaY= - -tunnel-agent@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd" - integrity sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0= - dependencies: - safe-buffer "^5.0.1" - -tweetnacl@^0.14.3, tweetnacl@~0.14.0: - version "0.14.5" - resolved "https://registry.yarnpkg.com/tweetnacl/-/tweetnacl-0.14.5.tgz#5ae68177f192d4456269d108afa93ff8743f4f64" - integrity 
sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q= - -type-check@~0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" - integrity sha1-WITKtRLPHTVeP7eE8wgEsrUg23I= - dependencies: - prelude-ls "~1.1.2" - -type-is@~1.6.17, type-is@~1.6.18: - version "1.6.18" - resolved "https://registry.yarnpkg.com/type-is/-/type-is-1.6.18.tgz#4e552cd05df09467dcbc4ef739de89f2cf37c131" - integrity sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g== - dependencies: - media-typer "0.3.0" - mime-types "~2.1.24" - -typedarray@^0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/typedarray/-/typedarray-0.0.6.tgz#867ac74e3864187b1d3d47d996a78ec5c8830777" - integrity sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c= - -typescript@3.4.5: - version "3.4.5" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-3.4.5.tgz#2d2618d10bb566572b8d7aad5180d84257d70a99" - integrity sha512-YycBxUb49UUhdNMU5aJ7z5Ej2XGmaIBL0x34vZ82fn3hGvD+bgrMrVDpatgz2f7YxUMJxMkbWxJZeAvDxVe7Vw== - -ua-parser-js@^0.7.18: - version "0.7.20" - resolved "https://registry.yarnpkg.com/ua-parser-js/-/ua-parser-js-0.7.20.tgz#7527178b82f6a62a0f243d1f94fd30e3e3c21098" - integrity sha512-8OaIKfzL5cpx8eCMAhhvTlft8GYF8b2eQr6JkCyVdrgjcytyOmPCXrqXFcUnhonRpLlh5yxEZVohm6mzaowUOw== - -uglify-js@3.4.x: - version "3.4.10" - resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.4.10.tgz#9ad9563d8eb3acdfb8d38597d2af1d815f6a755f" - integrity sha512-Y2VsbPVs0FIshJztycsO2SfPk7/KAF/T72qzv9u5EpQ4kB2hQoHlhNQTsNyy6ul7lQtqJN/AoWeS23OzEiEFxw== - dependencies: - commander "~2.19.0" - source-map "~0.6.1" - -uglify-js@^3.1.4: - version "3.6.0" - resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.6.0.tgz#704681345c53a8b2079fb6cec294b05ead242ff5" - integrity sha512-W+jrUHJr3DXKhrsS7NUVxn3zqMOFn0hL/Ei6v0anCIMoKC93TjcflTagwIHLW7SfMFfiQuktQyFVCFHGUE0+yg== - dependencies: - commander "~2.20.0" - source-map "~0.6.1" - -unicode-canonical-property-names-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" - integrity sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ== - -unicode-match-property-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" - integrity sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg== - dependencies: - unicode-canonical-property-names-ecmascript "^1.0.4" - unicode-property-aliases-ecmascript "^1.0.4" - -unicode-match-property-value-ecmascript@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz#5b4b426e08d13a80365e0d657ac7a6c1ec46a277" - integrity sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g== - -unicode-property-aliases-ecmascript@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz#a9cc6cc7ce63a0a3023fc99e341b94431d405a57" - integrity sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw== - -union-value@^1.0.0: - version "1.0.1" - 
resolved "https://registry.yarnpkg.com/union-value/-/union-value-1.0.1.tgz#0b6fe7b835aecda61c6ea4d4f02c14221e109847" - integrity sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg== - dependencies: - arr-union "^3.1.0" - get-value "^2.0.6" - is-extendable "^0.1.1" - set-value "^2.0.1" - -uniq@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/uniq/-/uniq-1.0.1.tgz#b31c5ae8254844a3a8281541ce2b04b865a734ff" - integrity sha1-sxxa6CVIRKOoKBVBzisEuGWnNP8= - -uniqs@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/uniqs/-/uniqs-2.0.0.tgz#ffede4b36b25290696e6e165d4a59edb998e6b02" - integrity sha1-/+3ks2slKQaW5uFl1KWe25mOawI= - -unique-filename@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/unique-filename/-/unique-filename-1.1.1.tgz#1d69769369ada0583103a1e6ae87681b56573230" - integrity sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ== - dependencies: - unique-slug "^2.0.0" - -unique-slug@^2.0.0: - version "2.0.2" - resolved "https://registry.yarnpkg.com/unique-slug/-/unique-slug-2.0.2.tgz#baabce91083fc64e945b0f3ad613e264f7cd4e6c" - integrity sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w== - dependencies: - imurmurhash "^0.1.4" - -universalify@^0.1.0: - version "0.1.2" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" - integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== - -unpipe@1.0.0, unpipe@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" - integrity sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw= - -unquote@~1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/unquote/-/unquote-1.1.1.tgz#8fded7324ec6e88a0ff8b905e7c098cdc086d544" - integrity sha1-j97XMk7G6IoP+LkF58CYzcCG1UQ= - -unset-value@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/unset-value/-/unset-value-1.0.0.tgz#8376873f7d2335179ffb1e6fc3a8ed0dfc8ab559" - integrity sha1-g3aHP30jNRef+x5vw6jtDfyKtVk= - dependencies: - has-value "^0.3.1" - isobject "^3.0.0" - -upath@^1.1.1: - version "1.1.2" - resolved "https://registry.yarnpkg.com/upath/-/upath-1.1.2.tgz#3db658600edaeeccbe6db5e684d67ee8c2acd068" - integrity sha512-kXpym8nmDmlCBr7nKdIx8P2jNBa+pBpIUFRnKJ4dr8htyYGJFokkr2ZvERRtUN+9SY+JqXouNgUPtv6JQva/2Q== - -upper-case@^1.1.1: - version "1.1.3" - resolved "https://registry.yarnpkg.com/upper-case/-/upper-case-1.1.3.tgz#f6b4501c2ec4cdd26ba78be7222961de77621598" - integrity sha1-9rRQHC7EzdJrp4vnIilh3ndiFZg= - -uri-js@^4.2.2: - version "4.2.2" - resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.2.2.tgz#94c540e1ff772956e2299507c010aea6c8838eb0" - integrity sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ== - dependencies: - punycode "^2.1.0" - -urix@^0.1.0: - version "0.1.0" - resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72" - integrity sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI= - -url-loader@1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/url-loader/-/url-loader-1.1.2.tgz#b971d191b83af693c5e3fea4064be9e1f2d7f8d8" - integrity sha512-dXHkKmw8FhPqu8asTc1puBfe3TehOCo2+RmOOev5suNCIYBcT626kxiWg1NBVkwc4rO8BGa7gP70W7VXuqHrjg== - dependencies: - loader-utils "^1.1.0" - mime "^2.0.3" - schema-utils "^1.0.0" - -url-parse@^1.4.3: - 
version "1.4.7" - resolved "https://registry.yarnpkg.com/url-parse/-/url-parse-1.4.7.tgz#a8a83535e8c00a316e403a5db4ac1b9b853ae278" - integrity sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg== - dependencies: - querystringify "^2.1.1" - requires-port "^1.0.0" - -url@^0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" - integrity sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE= - dependencies: - punycode "1.3.2" - querystring "0.2.0" - -use@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f" - integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== - -util-deprecate@^1.0.1, util-deprecate@~1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" - integrity sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8= - -util.promisify@1.0.0, util.promisify@^1.0.0, util.promisify@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/util.promisify/-/util.promisify-1.0.0.tgz#440f7165a459c9a16dc145eb8e72f35687097030" - integrity sha512-i+6qA2MPhvoKLuxnJNpXAGhg7HphQOSUq2LKMZD0m15EiskXUkMvKdF4Uui0WYeCUGea+o2cw/ZuwehtfsrNkA== - dependencies: - define-properties "^1.1.2" - object.getownpropertydescriptors "^2.0.3" - -util@0.10.3: - version "0.10.3" - resolved "https://registry.yarnpkg.com/util/-/util-0.10.3.tgz#7afb1afe50805246489e3db7fe0ed379336ac0f9" - integrity sha1-evsa/lCAUkZInj23/g7TeTNqwPk= - dependencies: - inherits "2.0.1" - -util@^0.11.0: - version "0.11.1" - resolved "https://registry.yarnpkg.com/util/-/util-0.11.1.tgz#3236733720ec64bb27f6e26f421aaa2e1b588d61" - integrity sha512-HShAsny+zS2TZfaXxD9tYj4HQGlBezXZMZuM/S5PKLLoZkShZiGk9o5CzukI1LVHZvjdvZ2Sj1aW/Ndn2NB/HQ== - dependencies: - inherits "2.0.3" - -utila@^0.4.0, utila@~0.4: - version "0.4.0" - resolved "https://registry.yarnpkg.com/utila/-/utila-0.4.0.tgz#8a16a05d445657a3aea5eecc5b12a4fa5379772c" - integrity sha1-ihagXURWV6Oupe7MWxKk+lN5dyw= - -utils-merge@1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" - integrity sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM= - -uuid@^3.0.1, uuid@^3.3.2: - version "3.3.3" - resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.3.tgz#4568f0216e78760ee1dbf3a4d2cf53e224112866" - integrity sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ== - -validate-npm-package-license@^3.0.1: - version "3.0.4" - resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" - integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew== - dependencies: - spdx-correct "^3.0.0" - spdx-expression-parse "^3.0.0" - -value-equal@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/value-equal/-/value-equal-0.4.0.tgz#c5bdd2f54ee093c04839d71ce2e4758a6890abc7" - integrity sha512-x+cYdNnaA3CxvMaTX0INdTCN8m8aF2uY9BvEqmxuYp8bL09cs/kWVQPVGcA35fMktdOsP69IgU7wFj/61dJHEw== - -vary@~1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" - integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw= - -vendors@^1.0.0: - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/vendors/-/vendors-1.0.3.tgz#a6467781abd366217c050f8202e7e50cc9eef8c0" - integrity sha512-fOi47nsJP5Wqefa43kyWSg80qF+Q3XA6MUkgi7Hp1HQaKDQW4cQrK2D0P7mmbFtsV1N89am55Yru/nyEwRubcw== - -verror@1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" - integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA= - dependencies: - assert-plus "^1.0.0" - core-util-is "1.0.2" - extsprintf "^1.2.0" - -vm-browserify@^1.0.1: - version "1.1.0" - resolved "https://registry.yarnpkg.com/vm-browserify/-/vm-browserify-1.1.0.tgz#bd76d6a23323e2ca8ffa12028dc04559c75f9019" - integrity sha512-iq+S7vZJE60yejDYM0ek6zg308+UZsdtPExWP9VZoCFCz1zkJoXFnAX7aZfd/ZwrkidzdUZL0C/ryW+JwAiIGw== - -w3c-hr-time@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.1.tgz#82ac2bff63d950ea9e3189a58a65625fedf19045" - integrity sha1-gqwr/2PZUOqeMYmlimViX+3xkEU= - dependencies: - browser-process-hrtime "^0.1.2" - -w3c-xmlserializer@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/w3c-xmlserializer/-/w3c-xmlserializer-1.1.2.tgz#30485ca7d70a6fd052420a3d12fd90e6339ce794" - integrity sha512-p10l/ayESzrBMYWRID6xbuCKh2Fp77+sA0doRuGn4tTIMrrZVeqfpKjXHY+oDh3K4nLdPgNwMTVP6Vp4pvqbNg== - dependencies: - domexception "^1.0.1" - webidl-conversions "^4.0.2" - xml-name-validator "^3.0.0" - -walker@^1.0.7, walker@~1.0.5: - version "1.0.7" - resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb" - integrity sha1-L3+bj9ENZ3JisYqITijRlhjgKPs= - dependencies: - makeerror "1.0.x" - -warning@4.x, warning@^4.0.1, warning@^4.0.2, warning@^4.0.3, warning@~4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/warning/-/warning-4.0.3.tgz#16e9e077eb8a86d6af7d64aa1e05fd85b4678ca3" - integrity sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w== - dependencies: - loose-envify "^1.0.0" - -warning@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/warning/-/warning-3.0.0.tgz#32e5377cb572de4ab04753bdf8821c01ed605b7c" - integrity sha1-MuU3fLVy3kqwR1O9+IIcAe1gW3w= - dependencies: - loose-envify "^1.0.0" - -watchpack@^1.5.0: - version "1.6.0" - resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-1.6.0.tgz#4bc12c2ebe8aa277a71f1d3f14d685c7b446cd00" - integrity sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA== - dependencies: - chokidar "^2.0.2" - graceful-fs "^4.1.2" - neo-async "^2.5.0" - -wbuf@^1.1.0, wbuf@^1.7.3: - version "1.7.3" - resolved "https://registry.yarnpkg.com/wbuf/-/wbuf-1.7.3.tgz#c1d8d149316d3ea852848895cb6a0bfe887b87df" - integrity sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA== - dependencies: - minimalistic-assert "^1.0.0" - -webidl-conversions@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-4.0.2.tgz#a855980b1f0b6b359ba1d5d9fb39ae941faa63ad" - integrity sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg== - -webpack-dev-middleware@^3.5.1: - version "3.7.0" - resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-3.7.0.tgz#ef751d25f4e9a5c8a35da600c5fda3582b5c6cff" - integrity sha512-qvDesR1QZRIAZHOE3iQ4CXLZZSQ1lAUsSpnQmlB1PBfoN/xdRjmge3Dok0W4IdaVLJOGJy3sGI4sZHwjRU0PCA== - dependencies: - memory-fs "^0.4.1" - mime "^2.4.2" - range-parser "^1.2.1" - 
webpack-log "^2.0.0" - -webpack-dev-server@3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/webpack-dev-server/-/webpack-dev-server-3.2.1.tgz#1b45ce3ecfc55b6ebe5e36dab2777c02bc508c4e" - integrity sha512-sjuE4mnmx6JOh9kvSbPYw3u/6uxCLHNWfhWaIPwcXWsvWOPN+nc5baq4i9jui3oOBRXGonK9+OI0jVkaz6/rCw== - dependencies: - ansi-html "0.0.7" - bonjour "^3.5.0" - chokidar "^2.0.0" - compression "^1.5.2" - connect-history-api-fallback "^1.3.0" - debug "^4.1.1" - del "^3.0.0" - express "^4.16.2" - html-entities "^1.2.0" - http-proxy-middleware "^0.19.1" - import-local "^2.0.0" - internal-ip "^4.2.0" - ip "^1.1.5" - killable "^1.0.0" - loglevel "^1.4.1" - opn "^5.1.0" - portfinder "^1.0.9" - schema-utils "^1.0.0" - selfsigned "^1.9.1" - semver "^5.6.0" - serve-index "^1.7.2" - sockjs "0.3.19" - sockjs-client "1.3.0" - spdy "^4.0.0" - strip-ansi "^3.0.0" - supports-color "^6.1.0" - url "^0.11.0" - webpack-dev-middleware "^3.5.1" - webpack-log "^2.0.0" - yargs "12.0.2" - -webpack-log@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/webpack-log/-/webpack-log-2.0.0.tgz#5b7928e0637593f119d32f6227c1e0ac31e1b47f" - integrity sha512-cX8G2vR/85UYG59FgkoMamwHUIkSSlV3bBMRsbxVXVUk2j6NleCKjQ/WE9eYg9WY4w25O9w8wKP4rzNZFmUcUg== - dependencies: - ansi-colors "^3.0.0" - uuid "^3.3.2" - -webpack-manifest-plugin@2.0.4: - version "2.0.4" - resolved "https://registry.yarnpkg.com/webpack-manifest-plugin/-/webpack-manifest-plugin-2.0.4.tgz#e4ca2999b09557716b8ba4475fb79fab5986f0cd" - integrity sha512-nejhOHexXDBKQOj/5v5IZSfCeTO3x1Dt1RZEcGfBSul891X/eLIcIVH31gwxPDdsi2Z8LKKFGpM4w9+oTBOSCg== - dependencies: - fs-extra "^7.0.0" - lodash ">=3.5 <5" - tapable "^1.0.0" - -webpack-sources@^1.1.0, webpack-sources@^1.3.0, webpack-sources@^1.4.0: - version "1.4.3" - resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-1.4.3.tgz#eedd8ec0b928fbf1cbfe994e22d2d890f330a933" - integrity sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ== - dependencies: - source-list-map "^2.0.0" - source-map "~0.6.1" - -webpack@4.29.6: - version "4.29.6" - resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.29.6.tgz#66bf0ec8beee4d469f8b598d3988ff9d8d90e955" - integrity sha512-MwBwpiE1BQpMDkbnUUaW6K8RFZjljJHArC6tWQJoFm0oQtfoSebtg4Y7/QHnJ/SddtjYLHaKGX64CFjG5rehJw== - dependencies: - "@webassemblyjs/ast" "1.8.5" - "@webassemblyjs/helper-module-context" "1.8.5" - "@webassemblyjs/wasm-edit" "1.8.5" - "@webassemblyjs/wasm-parser" "1.8.5" - acorn "^6.0.5" - acorn-dynamic-import "^4.0.0" - ajv "^6.1.0" - ajv-keywords "^3.1.0" - chrome-trace-event "^1.0.0" - enhanced-resolve "^4.1.0" - eslint-scope "^4.0.0" - json-parse-better-errors "^1.0.2" - loader-runner "^2.3.0" - loader-utils "^1.1.0" - memory-fs "~0.4.1" - micromatch "^3.1.8" - mkdirp "~0.5.0" - neo-async "^2.5.0" - node-libs-browser "^2.0.0" - schema-utils "^1.0.0" - tapable "^1.1.0" - terser-webpack-plugin "^1.1.0" - watchpack "^1.5.0" - webpack-sources "^1.3.0" - -websocket-driver@>=0.5.1: - version "0.7.3" - resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.3.tgz#a2d4e0d4f4f116f1e6297eba58b05d430100e9f9" - integrity sha512-bpxWlvbbB459Mlipc5GBzzZwhoZgGEZLuqPaR0INBGnPAY1vdBX6hPnoFXiw+3yWxDuHyQjO2oXTMyS8A5haFg== - dependencies: - http-parser-js ">=0.4.0 <0.4.11" - safe-buffer ">=5.1.0" - websocket-extensions ">=0.1.1" - -websocket-extensions@>=0.1.1: - version "0.1.3" - resolved 
"https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29" - integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg== - -whatwg-encoding@^1.0.1, whatwg-encoding@^1.0.3, whatwg-encoding@^1.0.5: - version "1.0.5" - resolved "https://registry.yarnpkg.com/whatwg-encoding/-/whatwg-encoding-1.0.5.tgz#5abacf777c32166a51d085d6b4f3e7d27113ddb0" - integrity sha512-b5lim54JOPN9HtzvK9HFXvBma/rnfFeqsic0hSpjtDbVxR3dJKLc+KB4V6GgiGOvl7CY/KNh8rxSo9DKQrnUEw== - dependencies: - iconv-lite "0.4.24" - -whatwg-fetch@3.0.0, whatwg-fetch@>=0.10.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.0.0.tgz#fc804e458cc460009b1a2b966bc8817d2578aefb" - integrity sha512-9GSJUgz1D4MfyKU7KRqwOjXCXTqWdFNvEr7eUBYchQiVc744mqK/MzXPNR2WsPkmkOa4ywfg8C2n8h+13Bey1Q== - -whatwg-mimetype@^2.1.0, whatwg-mimetype@^2.2.0, whatwg-mimetype@^2.3.0: - version "2.3.0" - resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" - integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g== - -whatwg-url@^6.4.1: - version "6.5.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-6.5.0.tgz#f2df02bff176fd65070df74ad5ccbb5a199965a8" - integrity sha512-rhRZRqx/TLJQWUpQ6bmrt2UV4f0HCQ463yQuONJqC6fO2VoEb1pTYddbe59SkYq87aoM5A3bdhMZiUiVws+fzQ== - dependencies: - lodash.sortby "^4.7.0" - tr46 "^1.0.1" - webidl-conversions "^4.0.2" - -whatwg-url@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-7.0.0.tgz#fde926fa54a599f3adf82dff25a9f7be02dc6edd" - integrity sha512-37GeVSIJ3kn1JgKyjiYNmSLP1yzbpb29jdmwBSgkD9h40/hyrR/OifpVUndji3tmwGgD8qpw7iQu3RSbCrBpsQ== - dependencies: - lodash.sortby "^4.7.0" - tr46 "^1.0.1" - webidl-conversions "^4.0.2" - -which-module@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.0.tgz#d9ef07dce77b9902b8a3a8fa4b31c3e3f7e6e87a" - integrity sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho= - -which@^1.2.9, which@^1.3.0, which@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" - integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== - dependencies: - isexe "^2.0.0" - -wide-align@^1.1.0: - version "1.1.3" - resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" - integrity sha512-QGkOQc8XL6Bt5PwnsExKBPuMKBxnGxWWW3fU55Xt4feHozMUhdUMaBCk290qpm/wG5u/RSKzwdAC4i51YigihA== - dependencies: - string-width "^1.0.2 || 2" - -wordwrap@~0.0.2: - version "0.0.3" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-0.0.3.tgz#a3d5da6cd5c0bc0008d37234bbaf1bed63059107" - integrity sha1-o9XabNXAvAAI03I0u68b7WMFkQc= - -wordwrap@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/wordwrap/-/wordwrap-1.0.0.tgz#27584810891456a4171c8d0226441ade90cbcaeb" - integrity sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus= - -workbox-background-sync@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-background-sync/-/workbox-background-sync-4.3.1.tgz#26821b9bf16e9e37fd1d640289edddc08afd1950" - integrity sha512-1uFkvU8JXi7L7fCHVBEEnc3asPpiAL33kO495UMcD5+arew9IbKW2rV5lpzhoWcm/qhGB89YfO4PmB/0hQwPRg== - dependencies: - workbox-core "^4.3.1" - -workbox-broadcast-update@^4.3.1: - version "4.3.1" - 
resolved "https://registry.yarnpkg.com/workbox-broadcast-update/-/workbox-broadcast-update-4.3.1.tgz#e2c0280b149e3a504983b757606ad041f332c35b" - integrity sha512-MTSfgzIljpKLTBPROo4IpKjESD86pPFlZwlvVG32Kb70hW+aob4Jxpblud8EhNb1/L5m43DUM4q7C+W6eQMMbA== - dependencies: - workbox-core "^4.3.1" - -workbox-build@^4.2.0: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-build/-/workbox-build-4.3.1.tgz#414f70fb4d6de47f6538608b80ec52412d233e64" - integrity sha512-UHdwrN3FrDvicM3AqJS/J07X0KXj67R8Cg0waq1MKEOqzo89ap6zh6LmaLnRAjpB+bDIz+7OlPye9iii9KBnxw== - dependencies: - "@babel/runtime" "^7.3.4" - "@hapi/joi" "^15.0.0" - common-tags "^1.8.0" - fs-extra "^4.0.2" - glob "^7.1.3" - lodash.template "^4.4.0" - pretty-bytes "^5.1.0" - stringify-object "^3.3.0" - strip-comments "^1.0.2" - workbox-background-sync "^4.3.1" - workbox-broadcast-update "^4.3.1" - workbox-cacheable-response "^4.3.1" - workbox-core "^4.3.1" - workbox-expiration "^4.3.1" - workbox-google-analytics "^4.3.1" - workbox-navigation-preload "^4.3.1" - workbox-precaching "^4.3.1" - workbox-range-requests "^4.3.1" - workbox-routing "^4.3.1" - workbox-strategies "^4.3.1" - workbox-streams "^4.3.1" - workbox-sw "^4.3.1" - workbox-window "^4.3.1" - -workbox-cacheable-response@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-cacheable-response/-/workbox-cacheable-response-4.3.1.tgz#f53e079179c095a3f19e5313b284975c91428c91" - integrity sha512-Rp5qlzm6z8IOvnQNkCdO9qrDgDpoPNguovs0H8C+wswLuPgSzSp9p2afb5maUt9R1uTIwOXrVQMmPfPypv+npw== - dependencies: - workbox-core "^4.3.1" - -workbox-core@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-core/-/workbox-core-4.3.1.tgz#005d2c6a06a171437afd6ca2904a5727ecd73be6" - integrity sha512-I3C9jlLmMKPxAC1t0ExCq+QoAMd0vAAHULEgRZ7kieCdUd919n53WC0AfvokHNwqRhGn+tIIj7vcb5duCjs2Kg== - -workbox-expiration@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-expiration/-/workbox-expiration-4.3.1.tgz#d790433562029e56837f341d7f553c4a78ebe921" - integrity sha512-vsJLhgQsQouv9m0rpbXubT5jw0jMQdjpkum0uT+d9tTwhXcEZks7qLfQ9dGSaufTD2eimxbUOJfWLbNQpIDMPw== - dependencies: - workbox-core "^4.3.1" - -workbox-google-analytics@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-google-analytics/-/workbox-google-analytics-4.3.1.tgz#9eda0183b103890b5c256e6f4ea15a1f1548519a" - integrity sha512-xzCjAoKuOb55CBSwQrbyWBKqp35yg1vw9ohIlU2wTy06ZrYfJ8rKochb1MSGlnoBfXGWss3UPzxR5QL5guIFdg== - dependencies: - workbox-background-sync "^4.3.1" - workbox-core "^4.3.1" - workbox-routing "^4.3.1" - workbox-strategies "^4.3.1" - -workbox-navigation-preload@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-navigation-preload/-/workbox-navigation-preload-4.3.1.tgz#29c8e4db5843803b34cd96dc155f9ebd9afa453d" - integrity sha512-K076n3oFHYp16/C+F8CwrRqD25GitA6Rkd6+qAmLmMv1QHPI2jfDwYqrytOfKfYq42bYtW8Pr21ejZX7GvALOw== - dependencies: - workbox-core "^4.3.1" - -workbox-precaching@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-precaching/-/workbox-precaching-4.3.1.tgz#9fc45ed122d94bbe1f0ea9584ff5940960771cba" - integrity sha512-piSg/2csPoIi/vPpp48t1q5JLYjMkmg5gsXBQkh/QYapCdVwwmKlU9mHdmy52KsDGIjVaqEUMFvEzn2LRaigqQ== - dependencies: - workbox-core "^4.3.1" - -workbox-range-requests@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-range-requests/-/workbox-range-requests-4.3.1.tgz#f8a470188922145cbf0c09a9a2d5e35645244e74" - integrity 
sha512-S+HhL9+iTFypJZ/yQSl/x2Bf5pWnbXdd3j57xnb0V60FW1LVn9LRZkPtneODklzYuFZv7qK6riZ5BNyc0R0jZA== - dependencies: - workbox-core "^4.3.1" - -workbox-routing@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-routing/-/workbox-routing-4.3.1.tgz#a675841af623e0bb0c67ce4ed8e724ac0bed0cda" - integrity sha512-FkbtrODA4Imsi0p7TW9u9MXuQ5P4pVs1sWHK4dJMMChVROsbEltuE79fBoIk/BCztvOJ7yUpErMKa4z3uQLX+g== - dependencies: - workbox-core "^4.3.1" - -workbox-strategies@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-strategies/-/workbox-strategies-4.3.1.tgz#d2be03c4ef214c115e1ab29c9c759c9fe3e9e646" - integrity sha512-F/+E57BmVG8dX6dCCopBlkDvvhg/zj6VDs0PigYwSN23L8hseSRwljrceU2WzTvk/+BSYICsWmRq5qHS2UYzhw== - dependencies: - workbox-core "^4.3.1" - -workbox-streams@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-streams/-/workbox-streams-4.3.1.tgz#0b57da70e982572de09c8742dd0cb40a6b7c2cc3" - integrity sha512-4Kisis1f/y0ihf4l3u/+ndMkJkIT4/6UOacU3A4BwZSAC9pQ9vSvJpIi/WFGQRH/uPXvuVjF5c2RfIPQFSS2uA== - dependencies: - workbox-core "^4.3.1" - -workbox-sw@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-sw/-/workbox-sw-4.3.1.tgz#df69e395c479ef4d14499372bcd84c0f5e246164" - integrity sha512-0jXdusCL2uC5gM3yYFT6QMBzKfBr2XTk0g5TPAV4y8IZDyVNDyj1a8uSXy3/XrvkVTmQvLN4O5k3JawGReXr9w== - -workbox-webpack-plugin@4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/workbox-webpack-plugin/-/workbox-webpack-plugin-4.2.0.tgz#c94c3f69ff39c8a5b0c7e6bebc382cb53410a63d" - integrity sha512-YZsiA+y/ns/GdWRaBsfYv8dln1ebWtGnJcTOg1ppO0pO1tScAHX0yGtHIjndxz3L/UUhE8b0NQE9KeLNwJwA5A== - dependencies: - "@babel/runtime" "^7.0.0" - json-stable-stringify "^1.0.1" - workbox-build "^4.2.0" - -workbox-window@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/workbox-window/-/workbox-window-4.3.1.tgz#ee6051bf10f06afa5483c9b8dfa0531994ede0f3" - integrity sha512-C5gWKh6I58w3GeSc0wp2Ne+rqVw8qwcmZnQGpjiek8A2wpbxSJb1FdCoQVO+jDJs35bFgo/WETgl1fqgsxN0Hg== - dependencies: - workbox-core "^4.3.1" - -worker-farm@^1.5.2, worker-farm@^1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/worker-farm/-/worker-farm-1.7.0.tgz#26a94c5391bbca926152002f69b84a4bf772e5a8" - integrity sha512-rvw3QTZc8lAxyVrqcSGVm5yP/IJ2UcB3U0graE3LCFoZ0Yn2x4EoVSqJKdB/T5M+FLcRPjz4TDacRf3OCfNUzw== - dependencies: - errno "~0.1.7" - -worker-rpc@^0.1.0: - version "0.1.1" - resolved "https://registry.yarnpkg.com/worker-rpc/-/worker-rpc-0.1.1.tgz#cb565bd6d7071a8f16660686051e969ad32f54d5" - integrity sha512-P1WjMrUB3qgJNI9jfmpZ/htmBEjFh//6l/5y8SD9hg1Ef5zTTVVoRjTrTEzPrNBQvmhMxkoTsjOXN10GWU7aCg== - dependencies: - microevent.ts "~0.1.1" - -wrap-ansi@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-2.1.0.tgz#d8fc3d284dd05794fe84973caecdd1cf824fdd85" - integrity sha1-2Pw9KE3QV5T+hJc8rs3Rz4JP3YU= - dependencies: - string-width "^1.0.1" - strip-ansi "^3.0.1" - -wrap-ansi@^5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-5.1.0.tgz#1fd1f67235d5b6d0fee781056001bfb694c03b09" - integrity sha512-QC1/iN/2/RPVJ5jYK8BGttj5z83LmSKmvbvrXPNCLZSEb32KKVDJDl/MOt2N01qU2H/FkzEa9PKto1BqDjtd7Q== - dependencies: - ansi-styles "^3.2.0" - string-width "^3.0.0" - strip-ansi "^5.0.0" - -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= - -write-file-atomic@2.4.1: - version 
"2.4.1" - resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.1.tgz#d0b05463c188ae804396fd5ab2a370062af87529" - integrity sha512-TGHFeZEZMnv+gBFRfjAcxL5bPHrsGKtnb4qsFAws7/vlh+QfwAaySIw4AXP9ZskTTh5GWu3FLuJhsWVdiJPGvg== - dependencies: - graceful-fs "^4.1.11" - imurmurhash "^0.1.4" - signal-exit "^3.0.2" - -write@1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/write/-/write-1.0.3.tgz#0800e14523b923a387e415123c865616aae0f5c3" - integrity sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig== - dependencies: - mkdirp "^0.5.1" - -ws@^5.2.0: - version "5.2.2" - resolved "https://registry.yarnpkg.com/ws/-/ws-5.2.2.tgz#dffef14866b8e8dc9133582514d1befaf96e980f" - integrity sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA== - dependencies: - async-limiter "~1.0.0" - -ws@^6.1.2: - version "6.2.1" - resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.1.tgz#442fdf0a47ed64f59b6a5d8ff130f4748ed524fb" - integrity sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA== - dependencies: - async-limiter "~1.0.0" - -xml-name-validator@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/xml-name-validator/-/xml-name-validator-3.0.0.tgz#6ae73e06de4d8c6e47f9fb181f78d648ad457c6a" - integrity sha512-A5CUptxDsvxKJEU3yO6DuWBSJz/qizqzJKOMIfUJHETbBw/sFaDxgd6fxm1ewUaM0jZ444Fc5vC5ROYurg/4Pw== - -xmlchars@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.1.1.tgz#ef1a81c05bff629c2280007f12daca21bd6f6c93" - integrity sha512-7hew1RPJ1iIuje/Y01bGD/mXokXxegAgVS+e+E0wSi2ILHQkYAH1+JXARwTjZSM4Z4Z+c73aKspEcqj+zPPL/w== - -xregexp@4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/xregexp/-/xregexp-4.0.0.tgz#e698189de49dd2a18cc5687b05e17c8e43943020" - integrity sha512-PHyM+sQouu7xspQQwELlGwwd05mXUFqwFYfqPO0cC7x4fxyHnnuetmQr6CjJiafIDoH4MogHb9dOoJzR/Y4rFg== - -xtend@^4.0.0, xtend@~4.0.1: - version "4.0.2" - resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" - integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== - -"y18n@^3.2.1 || ^4.0.0", y18n@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.0.tgz#95ef94f85ecc81d007c264e190a120f0a3c8566b" - integrity sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w== - -yallist@^3.0.0, yallist@^3.0.2, yallist@^3.0.3: - version "3.0.3" - resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.0.3.tgz#b4b049e314be545e3ce802236d6cd22cd91c3de9" - integrity sha512-S+Zk8DEWE6oKpV+vI3qWkaK+jSbIK86pCwe2IF/xwIpQ8jEuxpw9NyaGjmp9+BoJv5FV2piqCDcoCtStppiq2A== - -yargs-parser@^10.1.0: - version "10.1.0" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-10.1.0.tgz#7202265b89f7e9e9f2e5765e0fe735a905edbaa8" - integrity sha512-VCIyR1wJoEBZUqk5PA+oOBF6ypbwh5aNB3I50guxAL/quggdfs4TtNHQrSazFA3fYZ+tEqfs0zIGlv0c/rgjbQ== - dependencies: - camelcase "^4.1.0" - -yargs-parser@^13.1.1: - version "13.1.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-13.1.1.tgz#d26058532aa06d365fe091f6a1fc06b2f7e5eca0" - integrity sha512-oVAVsHz6uFrg3XQheFII8ESO2ssAf9luWuAd6Wexsu4F3OtIW0o8IribPXYrD4WC24LWtPrJlGy87y5udK+dxQ== - dependencies: - camelcase "^5.0.0" - decamelize "^1.2.0" - -yargs@12.0.2: - version "12.0.2" - resolved 
"https://registry.yarnpkg.com/yargs/-/yargs-12.0.2.tgz#fe58234369392af33ecbef53819171eff0f5aadc" - integrity sha512-e7SkEx6N6SIZ5c5H22RTZae61qtn3PYUE8JYbBFlK9sYmh3DMQ6E5ygtaG/2BW0JZi4WGgTR2IV5ChqlqrDGVQ== - dependencies: - cliui "^4.0.0" - decamelize "^2.0.0" - find-up "^3.0.0" - get-caller-file "^1.0.1" - os-locale "^3.0.0" - require-directory "^2.1.1" - require-main-filename "^1.0.1" - set-blocking "^2.0.0" - string-width "^2.0.0" - which-module "^2.0.0" - y18n "^3.2.1 || ^4.0.0" - yargs-parser "^10.1.0" - -yargs@^13.3.0: - version "13.3.0" - resolved "https://registry.yarnpkg.com/yargs/-/yargs-13.3.0.tgz#4c657a55e07e5f2cf947f8a366567c04a0dedc83" - integrity sha512-2eehun/8ALW8TLoIl7MVaRUrg+yCnenu8B4kBlRxj3GJGDKU1Og7sMXPNm1BYyM1DOJmTZ4YeN/Nwxv+8XJsUA== - dependencies: - cliui "^5.0.0" - find-up "^3.0.0" - get-caller-file "^2.0.1" - require-directory "^2.1.1" - require-main-filename "^2.0.0" - set-blocking "^2.0.0" - string-width "^3.0.0" - which-module "^2.0.0" - y18n "^4.0.0" - yargs-parser "^13.1.1" diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java deleted file mode 100644 index ff3765af120..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/AbstractOMMetadataManagerTest.java +++ /dev/null @@ -1,232 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; -import static org.junit.Assert.assertNotNull; - -import java.io.File; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.recovery.ReconOmMetadataManagerImpl; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; - -/** - * Utility methods for test classes. - */ -public abstract class AbstractOMMetadataManagerTest { - - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - /** - * Create a new OM Metadata manager instance with default volume and bucket. - * @throws IOException ioEx - */ - protected OMMetadataManager initializeNewOmMetadataManager() - throws IOException { - File omDbDir = temporaryFolder.newFolder(); - OzoneConfiguration omConfiguration = new OzoneConfiguration(); - omConfiguration.set(OZONE_OM_DB_DIRS, - omDbDir.getAbsolutePath()); - OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( - omConfiguration); - - String volumeKey = omMetadataManager.getVolumeKey("sampleVol"); - OmVolumeArgs args = - OmVolumeArgs.newBuilder() - .setVolume("sampleVol") - .setAdminName("TestUser") - .setOwnerName("TestUser") - .build(); - omMetadataManager.getVolumeTable().put(volumeKey, args); - - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - - String bucketKey = omMetadataManager.getBucketKey( - bucketInfo.getVolumeName(), bucketInfo.getBucketName()); - - omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); - - return omMetadataManager; - } - - /** - * Create an empty OM Metadata manager instance. - * @throws IOException ioEx - */ - protected OMMetadataManager initializeEmptyOmMetadataManager() - throws IOException { - File omDbDir = temporaryFolder.newFolder(); - OzoneConfiguration omConfiguration = new OzoneConfiguration(); - omConfiguration.set(OZONE_OM_DB_DIRS, - omDbDir.getAbsolutePath()); - return new OmMetadataManagerImpl(omConfiguration); - } - - /** - * Get an instance of Recon OM Metadata manager. - * @return ReconOMMetadataManager - * @throws IOException when creating the RocksDB instance. 
- */ - protected ReconOMMetadataManager getTestMetadataManager( - OMMetadataManager omMetadataManager) - throws IOException { - - DBCheckpoint checkpoint = omMetadataManager.getStore() - .getCheckpoint(true); - assertNotNull(checkpoint.getCheckpointLocation()); - - File reconOmDbDir = temporaryFolder.newFolder(); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir - .getAbsolutePath()); - - ReconOMMetadataManager reconOMMetaMgr = - new ReconOmMetadataManagerImpl(configuration, new ReconUtils()); - reconOMMetaMgr.start(configuration); - - reconOMMetaMgr.updateOmDB( - checkpoint.getCheckpointLocation().toFile()); - return reconOMMetaMgr; - } - - /** - * Write a key to OM instance. - * @throws IOException while writing. - */ - public void writeDataToOm(OMMetadataManager omMetadataManager, - String key) throws IOException { - - String omKey = omMetadataManager.getOzoneKey("sampleVol", - "bucketOne", key); - - omMetadataManager.getKeyTable().put(omKey, - new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName(key) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build()); - } - - /** - * Write a key to OM instance. - * @throws IOException while writing. - */ - protected void writeDataToOm(OMMetadataManager omMetadataManager, - String key, - String bucket, - String volume, - List - omKeyLocationInfoGroupList) - throws IOException { - - String omKey = omMetadataManager.getOzoneKey(volume, - bucket, key); - - omMetadataManager.getKeyTable().put(omKey, - new OmKeyInfo.Builder() - .setBucketName(bucket) - .setVolumeName(volume) - .setKeyName(key) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .setOmKeyLocationInfos(omKeyLocationInfoGroupList) - .build()); - } - - /** - * Write a key to OM instance. - * @throws IOException while writing. - */ - protected void writeDataToOm(OMMetadataManager omMetadataManager, - String key, - String bucket, - String volume, - Long dataSize, - List - omKeyLocationInfoGroupList) - throws IOException { - - String omKey = omMetadataManager.getOzoneKey(volume, - bucket, key); - - omMetadataManager.getKeyTable().put(omKey, - new OmKeyInfo.Builder() - .setBucketName(bucket) - .setVolumeName(volume) - .setKeyName(key) - .setDataSize(dataSize) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .setOmKeyLocationInfos(omKeyLocationInfoGroupList) - .build()); - } - - /** - * Return random pipeline. - * @return pipeline - */ - protected Pipeline getRandomPipeline() { - return Pipeline.newBuilder() - .setFactor(HddsProtos.ReplicationFactor.ONE) - .setId(PipelineID.randomId()) - .setNodes(Collections.EMPTY_LIST) - .setState(Pipeline.PipelineState.OPEN) - .setType(HddsProtos.ReplicationType.STAND_ALONE) - .build(); - } - - /** - * Get new OmKeyLocationInfo for given BlockID and Pipeline. 
- * @param blockID blockId - * @param pipeline pipeline - * @return new instance of OmKeyLocationInfo - */ - protected OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID, - Pipeline pipeline) { - return new OmKeyLocationInfo.Builder() - .setBlockID(blockID) - .setPipeline(pipeline) - .build(); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java deleted file mode 100644 index 6f16c1ceaed..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/GuiceInjectorUtilsForTestsImpl.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.ozone.recon.types.GuiceInjectorUtilsForTests; - -/** - * Implementation for GuiceInjectorUtilsForTests. - */ -public class GuiceInjectorUtilsForTestsImpl implements - GuiceInjectorUtilsForTests { -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java deleted file mode 100644 index 772c661dcca..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import java.io.IOException; - -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.spi.impl.ContainerKeyPrefixCodec; -import org.apache.hadoop.hdds.utils.db.Codec; -import org.apache.hadoop.hdds.utils.db.IntegerCodec; -import org.junit.Assert; -import org.junit.Test; - -/** - * Unit Tests for Codecs used in Recon. - */ -public class TestReconCodecs { - - @Test - public void testContainerKeyPrefixCodec() throws IOException { - ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( - System.currentTimeMillis(), "TestKeyPrefix", 0); - - Codec codec = new ContainerKeyPrefixCodec(); - byte[] persistedFormat = codec.toPersistedFormat(containerKeyPrefix); - Assert.assertTrue(persistedFormat != null); - ContainerKeyPrefix fromPersistedFormat = - codec.fromPersistedFormat(persistedFormat); - Assert.assertEquals(containerKeyPrefix, fromPersistedFormat); - } - - @Test - public void testIntegerCodec() throws IOException { - Integer i = 1000; - Codec codec = new IntegerCodec(); - byte[] persistedFormat = codec.toPersistedFormat(i); - Assert.assertTrue(persistedFormat != null); - Integer fromPersistedFormat = - codec.fromPersistedFormat(persistedFormat); - Assert.assertEquals(i, fromPersistedFormat); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java deleted file mode 100644 index 6bb8993decb..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon; - -import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.Charset; -import java.nio.file.Paths; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.http.HttpEntity; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.impl.client.CloseableHttpClient; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Test Recon Utility methods. - */ -public class TestReconUtils { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - @Test - public void testGetReconDbDir() throws Exception { - - String filePath = folder.getRoot().getAbsolutePath(); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set("TEST_DB_DIR", filePath); - - File file = new ReconUtils().getReconDbDir(configuration, - "TEST_DB_DIR"); - Assert.assertEquals(filePath, file.getAbsolutePath()); - } - - @Test - public void testCreateTarFile() throws Exception { - - File tempSnapshotDir = null; - FileInputStream fis = null; - FileOutputStream fos = null; - File tarFile = null; - - try { - String testDirName = System.getProperty("java.io.tmpdir"); - if (!testDirName.endsWith("/")) { - testDirName += "/"; - } - testDirName += "TestCreateTarFile_Dir" + System.currentTimeMillis(); - tempSnapshotDir = new File(testDirName); - tempSnapshotDir.mkdirs(); - - File file = new File(testDirName + "/temp1.txt"); - FileWriter writer = new FileWriter(file); - writer.write("Test data 1"); - writer.close(); - - file = new File(testDirName + "/temp2.txt"); - writer = new FileWriter(file); - writer.write("Test data 2"); - writer.close(); - - tarFile = createTarFile(Paths.get(testDirName)); - Assert.assertNotNull(tarFile); - - } finally { - org.apache.hadoop.io.IOUtils.closeStream(fis); - org.apache.hadoop.io.IOUtils.closeStream(fos); - FileUtils.deleteDirectory(tempSnapshotDir); - FileUtils.deleteQuietly(tarFile); - } - } - - @Test - public void testUntarCheckpointFile() throws Exception { - - File newDir = folder.newFolder(); - - File file1 = Paths.get(newDir.getAbsolutePath(), "file1") - .toFile(); - String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); - writer.write(str); - writer.close(); - - File file2 = Paths.get(newDir.getAbsolutePath(), "file2") - .toFile(); - str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); - writer.write(str); - writer.close(); - - //Create test tar file. 
- File tarFile = createTarFile(newDir.toPath()); - File outputDir = folder.newFolder(); - new ReconUtils().untarCheckpointFile(tarFile, outputDir.toPath()); - - assertTrue(outputDir.isDirectory()); - assertTrue(outputDir.listFiles().length == 2); - } - - @Test - public void testMakeHttpCall() throws Exception { - - CloseableHttpClient httpClientMock = mock(CloseableHttpClient.class); - String url = "http://localhost:9874/dbCheckpoint"; - - CloseableHttpResponse httpResponseMock = mock(CloseableHttpResponse.class); - when(httpClientMock.execute(any(HttpGet.class))) - .thenReturn(httpResponseMock); - - StatusLine statusLineMock = mock(StatusLine.class); - when(statusLineMock.getStatusCode()).thenReturn(200); - when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock); - - HttpEntity httpEntityMock = mock(HttpEntity.class); - when(httpResponseMock.getEntity()).thenReturn(httpEntityMock); - File file1 = Paths.get(folder.getRoot().getPath(), "file1") - .toFile(); - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); - writer.write("File 1 Contents"); - writer.close(); - InputStream fileInputStream = new FileInputStream(file1); - - when(httpEntityMock.getContent()).thenReturn(new InputStream() { - @Override - public int read() throws IOException { - return fileInputStream.read(); - } - }); - - InputStream inputStream = new ReconUtils() - .makeHttpCall(httpClientMock, url); - String contents = IOUtils.toString(inputStream, Charset.defaultCharset()); - - assertEquals("File 1 Contents", contents); - } - - @Test - public void testGetLastKnownDB() throws IOException { - File newDir = folder.newFolder(); - - File file1 = Paths.get(newDir.getAbsolutePath(), "valid_1") - .toFile(); - String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); - writer.write(str); - writer.close(); - - File file2 = Paths.get(newDir.getAbsolutePath(), "valid_2") - .toFile(); - str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); - writer.write(str); - writer.close(); - - - File file3 = Paths.get(newDir.getAbsolutePath(), "invalid_3") - .toFile(); - str = "File3 Contents"; - writer = new BufferedWriter(new FileWriter(file3.getAbsolutePath())); - writer.write(str); - writer.close(); - - ReconUtils reconUtils = new ReconUtils(); - File latestValidFile = reconUtils.getLastKnownDB(newDir, "valid"); - assertTrue(latestValidFile.getName().equals("valid_2")); - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java deleted file mode 100644 index 9cca5a71000..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerKeyService.java +++ /dev/null @@ -1,373 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; - -import javax.sql.DataSource; -import javax.ws.rs.core.Response; - -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest; -import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.api.types.ContainersResponse; -import org.apache.hadoop.ozone.recon.api.types.KeyMetadata; -import org.apache.hadoop.ozone.recon.api.types.KeysResponse; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; -import org.apache.hadoop.hdds.utils.db.Table; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.jooq.impl.DSL; -import org.jooq.impl.DefaultConfiguration; -import org.junit.Before; -import org.junit.Test; - -import com.google.inject.AbstractModule; -import com.google.inject.Injector; - -/** - * Test for container key service. 
- */ -public class TestContainerKeyService extends AbstractOMMetadataManagerTest { - - private ContainerDBServiceProvider containerDbServiceProvider; - private Injector injector; - private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider; - private ContainerKeyService containerKeyService; - private GuiceInjectorUtilsForTestsImpl guiceInjectorTest = - new GuiceInjectorUtilsForTestsImpl(); - private boolean isSetupDone = false; - private ReconOMMetadataManager reconOMMetadataManager; - private void initializeInjector() throws Exception { - reconOMMetadataManager = getTestMetadataManager( - initializeNewOmMetadataManager()); - ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider(); - - Injector parentInjector = guiceInjectorTest.getInjector( - ozoneManagerServiceProvider, reconOMMetadataManager, temporaryFolder); - - injector = parentInjector.createChildInjector(new AbstractModule() { - @Override - protected void configure() { - containerKeyService = new ContainerKeyService(); - bind(ContainerKeyService.class).toInstance(containerKeyService); - } - }); - } - - @Before - public void setUp() throws Exception { - // The following setup runs only once - if (!isSetupDone) { - initializeInjector(); - - DSL.using(new DefaultConfiguration().set( - injector.getInstance(DataSource.class))); - - containerDbServiceProvider = injector.getInstance( - ContainerDBServiceProvider.class); - - StatsSchemaDefinition schemaDefinition = injector.getInstance( - StatsSchemaDefinition.class); - schemaDefinition.initializeSchema(); - - isSetupDone = true; - } - - //Write Data to OM - Pipeline pipeline = getRandomPipeline(); - - List omKeyLocationInfoList = new ArrayList<>(); - BlockID blockID1 = new BlockID(1, 101); - OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, - pipeline); - omKeyLocationInfoList.add(omKeyLocationInfo1); - - BlockID blockID2 = new BlockID(2, 102); - OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, - pipeline); - omKeyLocationInfoList.add(omKeyLocationInfo2); - - OmKeyLocationInfoGroup omKeyLocationInfoGroup = new - OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - - //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ] - writeDataToOm(reconOMMetadataManager, - "key_one", "bucketOne", "sampleVol", - Collections.singletonList(omKeyLocationInfoGroup)); - - List infoGroups = new ArrayList<>(); - BlockID blockID3 = new BlockID(1, 103); - OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3, - pipeline); - - List omKeyLocationInfoListNew = new ArrayList<>(); - omKeyLocationInfoListNew.add(omKeyLocationInfo3); - infoGroups.add(new OmKeyLocationInfoGroup(0, - omKeyLocationInfoListNew)); - - BlockID blockID4 = new BlockID(1, 104); - OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, - pipeline); - - omKeyLocationInfoListNew = new ArrayList<>(); - omKeyLocationInfoListNew.add(omKeyLocationInfo4); - infoGroups.add(new OmKeyLocationInfoGroup(1, - omKeyLocationInfoListNew)); - - //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ] - writeDataToOm(reconOMMetadataManager, - "key_two", "bucketOne", "sampleVol", infoGroups); - - List omKeyLocationInfoList2 = new ArrayList<>(); - BlockID blockID5 = new BlockID(2, 2); - OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5, - pipeline); - omKeyLocationInfoList2.add(omKeyLocationInfo5); - - BlockID blockID6 = new BlockID(2, 3); - OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6, - pipeline); 
- omKeyLocationInfoList2.add(omKeyLocationInfo6); - - OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new - OmKeyLocationInfoGroup(0, omKeyLocationInfoList2); - - //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ] - writeDataToOm(reconOMMetadataManager, - "key_three", "bucketOne", "sampleVol", - Collections.singletonList(omKeyLocationInfoGroup2)); - - //Generate Recon container DB data. - OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); - Table tableMock = mock(Table.class); - when(tableMock.getName()).thenReturn("KeyTable"); - when(omMetadataManagerMock.getKeyTable()).thenReturn(tableMock); - ContainerKeyMapperTask containerKeyMapperTask = - new ContainerKeyMapperTask(containerDbServiceProvider); - containerKeyMapperTask.reprocess(reconOMMetadataManager); - } - - @Test - public void testGetKeysForContainer() { - - Response response = containerKeyService.getKeysForContainer(1L, -1, ""); - - KeysResponse responseObject = (KeysResponse) response.getEntity(); - KeysResponse.KeysResponseData data = responseObject.getKeysResponseData(); - Collection keyMetadataList = data.getKeys(); - - assertEquals(3, data.getTotalCount()); - assertEquals(2, keyMetadataList.size()); - - Iterator iterator = keyMetadataList.iterator(); - - KeyMetadata keyMetadata = iterator.next(); - assertEquals("key_one", keyMetadata.getKey()); - assertEquals(1, keyMetadata.getVersions().size()); - assertEquals(1, keyMetadata.getBlockIds().size()); - Map> blockIds = - keyMetadata.getBlockIds(); - assertEquals(101, blockIds.get(0L).iterator().next().getLocalID()); - - keyMetadata = iterator.next(); - assertEquals("key_two", keyMetadata.getKey()); - assertEquals(2, keyMetadata.getVersions().size()); - assertTrue(keyMetadata.getVersions().contains(0L) && keyMetadata - .getVersions().contains(1L)); - assertEquals(2, keyMetadata.getBlockIds().size()); - blockIds = keyMetadata.getBlockIds(); - assertEquals(103, blockIds.get(0L).iterator().next().getLocalID()); - assertEquals(104, blockIds.get(1L).iterator().next().getLocalID()); - - response = containerKeyService.getKeysForContainer(3L, -1, ""); - responseObject = (KeysResponse) response.getEntity(); - data = responseObject.getKeysResponseData(); - keyMetadataList = data.getKeys(); - assertTrue(keyMetadataList.isEmpty()); - assertEquals(0, data.getTotalCount()); - - // test if limit works as expected - response = containerKeyService.getKeysForContainer(1L, 1, ""); - responseObject = (KeysResponse) response.getEntity(); - data = responseObject.getKeysResponseData(); - keyMetadataList = data.getKeys(); - assertEquals(1, keyMetadataList.size()); - assertEquals(3, data.getTotalCount()); - } - - @Test - public void testGetKeysForContainerWithPrevKey() { - // test if prev-key param works as expected - Response response = containerKeyService.getKeysForContainer( - 1L, -1, "/sampleVol/bucketOne/key_one"); - - KeysResponse responseObject = - (KeysResponse) response.getEntity(); - - KeysResponse.KeysResponseData data = - responseObject.getKeysResponseData(); - assertEquals(3, data.getTotalCount()); - - Collection keyMetadataList = data.getKeys(); - assertEquals(1, keyMetadataList.size()); - - Iterator iterator = keyMetadataList.iterator(); - KeyMetadata keyMetadata = iterator.next(); - - assertEquals("key_two", keyMetadata.getKey()); - assertEquals(2, keyMetadata.getVersions().size()); - assertEquals(2, keyMetadata.getBlockIds().size()); - - response = containerKeyService.getKeysForContainer( - 1L, -1, StringUtils.EMPTY); - responseObject = 
(KeysResponse) response.getEntity(); - data = responseObject.getKeysResponseData(); - keyMetadataList = data.getKeys(); - - assertEquals(3, data.getTotalCount()); - assertEquals(2, keyMetadataList.size()); - iterator = keyMetadataList.iterator(); - keyMetadata = iterator.next(); - assertEquals("key_one", keyMetadata.getKey()); - - // test for negative cases - response = containerKeyService.getKeysForContainer( - 1L, -1, "/sampleVol/bucketOne/invalid_key"); - responseObject = (KeysResponse) response.getEntity(); - data = responseObject.getKeysResponseData(); - keyMetadataList = data.getKeys(); - assertEquals(3, data.getTotalCount()); - assertEquals(0, keyMetadataList.size()); - - response = containerKeyService.getKeysForContainer( - 5L, -1, ""); - responseObject = (KeysResponse) response.getEntity(); - data = responseObject.getKeysResponseData(); - keyMetadataList = data.getKeys(); - assertEquals(0, keyMetadataList.size()); - assertEquals(0, data.getTotalCount()); - } - - @Test - public void testGetContainers() { - - Response response = containerKeyService.getContainers(-1, 0L); - - ContainersResponse responseObject = - (ContainersResponse) response.getEntity(); - - ContainersResponse.ContainersResponseData data = - responseObject.getContainersResponseData(); - assertEquals(2, data.getTotalCount()); - - List containers = new ArrayList<>(data.getContainers()); - - Iterator iterator = containers.iterator(); - - ContainerMetadata containerMetadata = iterator.next(); - assertEquals(1L, containerMetadata.getContainerID()); - // Number of keys for CID:1 should be 3 because of two different versions - // of key_two stored in CID:1 - assertEquals(3L, containerMetadata.getNumberOfKeys()); - - containerMetadata = iterator.next(); - assertEquals(2L, containerMetadata.getContainerID()); - assertEquals(2L, containerMetadata.getNumberOfKeys()); - - // test if limit works as expected - response = containerKeyService.getContainers(1, 0L); - responseObject = (ContainersResponse) response.getEntity(); - data = responseObject.getContainersResponseData(); - containers = new ArrayList<>(data.getContainers()); - assertEquals(1, containers.size()); - assertEquals(2, data.getTotalCount()); - } - - @Test - public void testGetContainersWithPrevKey() { - - Response response = containerKeyService.getContainers(1, 1L); - - ContainersResponse responseObject = - (ContainersResponse) response.getEntity(); - - ContainersResponse.ContainersResponseData data = - responseObject.getContainersResponseData(); - assertEquals(2, data.getTotalCount()); - - List containers = new ArrayList<>(data.getContainers()); - - Iterator iterator = containers.iterator(); - - ContainerMetadata containerMetadata = iterator.next(); - - assertEquals(1, containers.size()); - assertEquals(2L, containerMetadata.getContainerID()); - - response = containerKeyService.getContainers(-1, 0L); - responseObject = (ContainersResponse) response.getEntity(); - data = responseObject.getContainersResponseData(); - containers = new ArrayList<>(data.getContainers()); - assertEquals(2, containers.size()); - assertEquals(2, data.getTotalCount()); - iterator = containers.iterator(); - containerMetadata = iterator.next(); - assertEquals(1L, containerMetadata.getContainerID()); - - // test for negative cases - response = containerKeyService.getContainers(-1, 5L); - responseObject = (ContainersResponse) response.getEntity(); - data = responseObject.getContainersResponseData(); - containers = new ArrayList<>(data.getContainers()); - assertEquals(0, containers.size()); - 
assertEquals(2, data.getTotalCount()); - - response = containerKeyService.getContainers(-1, -1L); - responseObject = (ContainersResponse) response.getEntity(); - data = responseObject.getContainersResponseData(); - containers = new ArrayList<>(data.getContainers()); - assertEquals(2, containers.size()); - assertEquals(2, data.getTotalCount()); - } - - private OzoneManagerServiceProviderImpl getMockOzoneManagerServiceProvider() { - OzoneManagerServiceProviderImpl omServiceProviderMock = - mock(OzoneManagerServiceProviderImpl.class); - return omServiceProviderMock; - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java deleted file mode 100644 index a3265b82a6d..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestUtilizationService.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.api; - -import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; -import org.junit.Test; - -import javax.ws.rs.core.Response; -import java.util.ArrayList; -import java.util.List; - -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Test for File size count service. - */ -public class TestUtilizationService { - private UtilizationService utilizationService; - private int maxBinSize = 42; - - private List setUpResultList() { - List resultList = new ArrayList<>(); - for (int i = 0; i < maxBinSize; i++) { - if (i == maxBinSize - 1) { - // for last bin file count is 41. - resultList.add(new FileCountBySize(Long.MAX_VALUE, (long) i)); - } else { - // count of files of upperBound is equal to it's index. - resultList.add(new FileCountBySize((long) Math.pow(2, (10+i)), - (long) i)); - } - } - return resultList; - } - - @Test - public void testGetFileCounts() { - List resultList = setUpResultList(); - - FileCountBySizeDao fileCountBySizeDao = mock(FileCountBySizeDao.class); - utilizationService = mock(UtilizationService.class); - when(utilizationService.getFileCounts()).thenCallRealMethod(); - when(utilizationService.getDao()).thenReturn(fileCountBySizeDao); - when(fileCountBySizeDao.findAll()).thenReturn(resultList); - - Response response = utilizationService.getFileCounts(); - // get result list from Response entity - List responseList = - (List) response.getEntity(); - - verify(fileCountBySizeDao, times(1)).findAll(); - assertEquals(maxBinSize, responseList.size()); - - assertEquals(resultList, responseList); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java deleted file mode 100644 index faf2658a79c..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The classes in this package test the Rest API layer of Recon. - */ -package org.apache.hadoop.ozone.recon.api; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java deleted file mode 100644 index d0066a38554..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Package for recon server tests. - */ -package org.apache.hadoop.ozone.recon; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java deleted file mode 100644 index 898dd19967f..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractSqlDatabaseTest.java +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.persistence; - -import java.io.File; -import java.io.IOException; - -import javax.sql.DataSource; - -import org.jooq.DSLContext; -import org.jooq.SQLDialect; -import org.jooq.impl.DSL; -import org.jooq.impl.DefaultConfiguration; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.rules.TemporaryFolder; - -import com.google.inject.AbstractModule; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Provider; - -/** - * Create an injector for tests that need to access the SQl database. - */ -public abstract class AbstractSqlDatabaseTest { - - @ClassRule - public static TemporaryFolder temporaryFolder = new TemporaryFolder(); - - private static Injector injector; - private static DSLContext dslContext; - - @BeforeClass - public static void setup() throws IOException { - File tempDir = temporaryFolder.newFolder(); - - DataSourceConfigurationProvider configurationProvider = - new DataSourceConfigurationProvider(tempDir); - - JooqPersistenceModule persistenceModule = - new JooqPersistenceModule(configurationProvider); - - injector = Guice.createInjector(persistenceModule, new AbstractModule() { - @Override - public void configure() { - bind(DataSourceConfiguration.class).toProvider(configurationProvider); - } - }); - dslContext = DSL.using(new DefaultConfiguration().set( - injector.getInstance(DataSource.class))); - } - - @AfterClass - public static void tearDown() { - temporaryFolder.delete(); - } - - protected Injector getInjector() { - return injector; - } - - protected DSLContext getDslContext() { - return dslContext; - } - - /** - * Local Sqlite datasource provider. 
- */ - public static class DataSourceConfigurationProvider implements - Provider { - - private final File tempDir; - - public DataSourceConfigurationProvider(File tempDir) { - this.tempDir = tempDir; - } - - @Override - public DataSourceConfiguration get() { - return new DataSourceConfiguration() { - @Override - public String getDriverClass() { - return "org.sqlite.JDBC"; - } - - @Override - public String getJdbcUrl() { - return "jdbc:sqlite:" + tempDir.getAbsolutePath() + - File.separator + "sqlite_recon.db"; - } - - @Override - public String getUserName() { - return null; - } - - @Override - public String getPassword() { - return null; - } - - @Override - public boolean setAutoCommit() { - return true; - } - - @Override - public long getConnectionTimeout() { - return 10000; - } - - @Override - public String getSqlDialect() { - return SQLDialect.SQLITE.toString(); - } - - @Override - public Integer getMaxActiveConnections() { - return 2; - } - - @Override - public Integer getMaxConnectionAge() { - return 120; - } - - @Override - public Integer getMaxIdleConnectionAge() { - return 120; - } - - @Override - public String getConnectionTestStatement() { - return "SELECT 1"; - } - - @Override - public Integer getIdleConnectionTestPeriod() { - return 30; - } - }; - } - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java deleted file mode 100644 index 150007ef58b..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.persistence; - -import static org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition.RECON_TASK_STATUS_TABLE_NAME; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.Types; -import java.util.ArrayList; -import java.util.List; - -import javax.sql.DataSource; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.jooq.Configuration; -import org.junit.Assert; -import org.junit.Test; - -/** - * Class used to test ReconInternalSchemaDefinition. - */ -public class TestReconInternalSchemaDefinition extends AbstractSqlDatabaseTest { - - @Test - public void testSchemaCreated() throws Exception { - ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance( - ReconInternalSchemaDefinition.class); - - schemaDefinition.initializeSchema(); - - Connection connection = - getInjector().getInstance(DataSource.class).getConnection(); - // Verify table definition - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getColumns(null, null, - RECON_TASK_STATUS_TABLE_NAME, null); - - List> expectedPairs = new ArrayList<>(); - - expectedPairs.add(new ImmutablePair<>("task_name", Types.VARCHAR)); - expectedPairs.add(new ImmutablePair<>("last_updated_timestamp", - Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("last_updated_seq_number", - Types.INTEGER)); - - List> actualPairs = new ArrayList<>(); - - while (resultSet.next()) { - actualPairs.add(new ImmutablePair<>( - resultSet.getString("COLUMN_NAME"), - resultSet.getInt("DATA_TYPE"))); - } - - Assert.assertEquals(3, actualPairs.size()); - Assert.assertEquals(expectedPairs, actualPairs); - } - - @Test - public void testReconTaskStatusCRUDOperations() throws Exception { - // Verify table exists - ReconInternalSchemaDefinition schemaDefinition = getInjector().getInstance( - ReconInternalSchemaDefinition.class); - - schemaDefinition.initializeSchema(); - - DataSource ds = getInjector().getInstance(DataSource.class); - Connection connection = ds.getConnection(); - - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getTables(null, null, - RECON_TASK_STATUS_TABLE_NAME, null); - - while (resultSet.next()) { - Assert.assertEquals(RECON_TASK_STATUS_TABLE_NAME, - resultSet.getString("TABLE_NAME")); - } - - ReconTaskStatusDao dao = new ReconTaskStatusDao(getInjector().getInstance( - Configuration.class)); - - long now = System.currentTimeMillis(); - ReconTaskStatus newRecord = new ReconTaskStatus(); - newRecord.setTaskName("HelloWorldTask"); - newRecord.setLastUpdatedTimestamp(now); - newRecord.setLastUpdatedSeqNumber(100L); - - // Create - dao.insert(newRecord); - - ReconTaskStatus newRecord2 = new ReconTaskStatus(); - newRecord2.setTaskName("GoodbyeWorldTask"); - newRecord2.setLastUpdatedTimestamp(now); - newRecord2.setLastUpdatedSeqNumber(200L); - // Create - 
dao.insert(newRecord2); - - // Read - ReconTaskStatus dbRecord = dao.findById("HelloWorldTask"); - - Assert.assertEquals("HelloWorldTask", dbRecord.getTaskName()); - Assert.assertEquals(Long.valueOf(now), dbRecord.getLastUpdatedTimestamp()); - Assert.assertEquals(Long.valueOf(100), dbRecord.getLastUpdatedSeqNumber()); - - // Update - dbRecord.setLastUpdatedSeqNumber(150L); - dao.update(dbRecord); - - // Read updated - dbRecord = dao.findById("HelloWorldTask"); - Assert.assertEquals(Long.valueOf(150), dbRecord.getLastUpdatedSeqNumber()); - - // Delete - dao.deleteById("GoodbyeWorldTask"); - - // Verify - dbRecord = dao.findById("GoodbyeWorldTask"); - - Assert.assertNull(dbRecord); - } - -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java deleted file mode 100644 index 864e59e2bff..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.persistence; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; -import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; -import org.jooq.Configuration; -import org.junit.Assert; -import org.junit.Test; - -import javax.sql.DataSource; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.ArrayList; -import java.util.List; - -import static org.hadoop.ozone.recon.schema.StatsSchemaDefinition.GLOBAL_STATS_TABLE_NAME; - -/** - * Class used to test StatsSchemaDefinition. - */ -public class TestStatsSchemaDefinition extends AbstractSqlDatabaseTest { - - @Test - public void testIfStatsSchemaCreated() throws Exception { - StatsSchemaDefinition schemaDefinition = getInjector().getInstance( - StatsSchemaDefinition.class); - - schemaDefinition.initializeSchema(); - - Connection connection = - getInjector().getInstance(DataSource.class).getConnection(); - // Verify table definition - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getColumns(null, null, - GLOBAL_STATS_TABLE_NAME, null); - - List> expectedPairs = new ArrayList<>(); - - expectedPairs.add(new ImmutablePair<>("key", Types.VARCHAR)); - expectedPairs.add(new ImmutablePair<>("value", Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("last_updated_timestamp", - Types.VARCHAR)); - - List> actualPairs = new ArrayList<>(); - - while (resultSet.next()) { - actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"), - resultSet.getInt("DATA_TYPE"))); - } - - Assert.assertEquals(3, actualPairs.size()); - Assert.assertEquals(expectedPairs, actualPairs); - } - - @Test - public void testGlobalStatsCRUDOperations() throws Exception { - // Verify table exists - StatsSchemaDefinition schemaDefinition = getInjector().getInstance( - StatsSchemaDefinition.class); - - schemaDefinition.initializeSchema(); - - DataSource ds = getInjector().getInstance(DataSource.class); - Connection connection = ds.getConnection(); - - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getTables(null, null, - GLOBAL_STATS_TABLE_NAME, null); - - while (resultSet.next()) { - Assert.assertEquals(GLOBAL_STATS_TABLE_NAME, - resultSet.getString("TABLE_NAME")); - } - - GlobalStatsDao dao = new GlobalStatsDao( - getInjector().getInstance(Configuration.class)); - - long now = System.currentTimeMillis(); - GlobalStats newRecord = new GlobalStats(); - newRecord.setLastUpdatedTimestamp(new Timestamp(now)); - newRecord.setKey("key1"); - newRecord.setValue(500L); - - // Create - dao.insert(newRecord); - GlobalStats newRecord2 = new GlobalStats(); - newRecord2.setLastUpdatedTimestamp(new Timestamp(now + 1000L)); - newRecord2.setKey("key2"); - newRecord2.setValue(10L); - dao.insert(newRecord2); - - // Read - GlobalStats dbRecord = dao.findById("key1"); - - Assert.assertEquals("key1", dbRecord.getKey()); - Assert.assertEquals(Long.valueOf(500), 
dbRecord.getValue()); - Assert.assertEquals(new Timestamp(now), dbRecord.getLastUpdatedTimestamp()); - - dbRecord = dao.findById("key2"); - Assert.assertEquals("key2", dbRecord.getKey()); - Assert.assertEquals(Long.valueOf(10), dbRecord.getValue()); - Assert.assertEquals(new Timestamp(now + 1000L), - dbRecord.getLastUpdatedTimestamp()); - - // Update - dbRecord.setValue(100L); - dbRecord.setLastUpdatedTimestamp(new Timestamp(now + 2000L)); - dao.update(dbRecord); - - // Read updated - dbRecord = dao.findById("key2"); - - Assert.assertEquals(new Timestamp(now + 2000L), - dbRecord.getLastUpdatedTimestamp()); - Assert.assertEquals(Long.valueOf(100L), dbRecord.getValue()); - - // Delete - dao.deleteById("key1"); - - // Verify - dbRecord = dao.findById("key1"); - - Assert.assertNull(dbRecord); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java deleted file mode 100644 index 22cc55b3aea..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon.persistence; - -import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.CLUSTER_GROWTH_DAILY_TABLE_NAME; -import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.FILE_COUNT_BY_SIZE_TABLE_NAME; -import static org.hadoop.ozone.recon.schema.tables.ClusterGrowthDailyTable.CLUSTER_GROWTH_DAILY; -import static org.junit.Assert.assertEquals; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.sql.Types; -import java.util.ArrayList; -import java.util.List; - -import javax.sql.DataSource; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; -import org.hadoop.ozone.recon.schema.tables.daos.ClusterGrowthDailyDao; -import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ClusterGrowthDaily; -import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; -import org.hadoop.ozone.recon.schema.tables.records.FileCountBySizeRecord; -import org.jooq.Configuration; -import org.jooq.Table; -import org.jooq.UniqueKey; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test persistence module provides connection and transaction awareness. - */ -public class TestUtilizationSchemaDefinition extends AbstractSqlDatabaseTest { - - @Test - public void testReconSchemaCreated() throws Exception { - UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance( - UtilizationSchemaDefinition.class); - - schemaDefinition.initializeSchema(); - - Connection connection = - getInjector().getInstance(DataSource.class).getConnection(); - // Verify table definition - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getColumns(null, null, - CLUSTER_GROWTH_DAILY_TABLE_NAME, null); - - List> expectedPairs = new ArrayList<>(); - - expectedPairs.add(new ImmutablePair<>("timestamp", Types.VARCHAR)); - expectedPairs.add(new ImmutablePair<>("datanode_id", Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("datanode_host", Types.VARCHAR)); - expectedPairs.add(new ImmutablePair<>("rack_id", Types.VARCHAR)); - expectedPairs.add(new ImmutablePair<>("available_size", Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("used_size", Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("container_count", Types.INTEGER)); - expectedPairs.add(new ImmutablePair<>("block_count", Types.INTEGER)); - - List> actualPairs = new ArrayList<>(); - - while (resultSet.next()) { - actualPairs.add(new ImmutablePair<>(resultSet.getString("COLUMN_NAME"), - resultSet.getInt("DATA_TYPE"))); - } - - Assert.assertEquals(8, actualPairs.size()); - Assert.assertEquals(expectedPairs, actualPairs); - - ResultSet resultSetFileCount = metaData.getColumns(null, null, - FILE_COUNT_BY_SIZE_TABLE_NAME, null); - - List> expectedPairsFileCount = new ArrayList<>(); - expectedPairsFileCount.add( - new ImmutablePair<>("file_size", Types.INTEGER)); - expectedPairsFileCount.add( - new ImmutablePair<>("count", 
Types.INTEGER)); - - List> actualPairsFileCount = new ArrayList<>(); - while(resultSetFileCount.next()) { - actualPairsFileCount.add(new ImmutablePair<>(resultSetFileCount.getString( - "COLUMN_NAME"), resultSetFileCount.getInt( - "DATA_TYPE"))); - } - assertEquals("Unexpected number of columns", - 2, actualPairsFileCount.size()); - assertEquals("Columns Do not Match ", - expectedPairsFileCount, actualPairsFileCount); - } - - @Test - public void testClusterGrowthDailyCRUDOperations() throws Exception { - // Verify table exists - UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance( - UtilizationSchemaDefinition.class); - schemaDefinition.initializeSchema(); - - DataSource ds = getInjector().getInstance(DataSource.class); - Connection connection = ds.getConnection(); - - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getTables(null, null, - CLUSTER_GROWTH_DAILY_TABLE_NAME, null); - - while (resultSet.next()) { - Assert.assertEquals(CLUSTER_GROWTH_DAILY_TABLE_NAME, - resultSet.getString("TABLE_NAME")); - } - - ClusterGrowthDailyDao dao = new ClusterGrowthDailyDao( - getInjector().getInstance(Configuration.class)); - - long now = System.currentTimeMillis(); - ClusterGrowthDaily newRecord = new ClusterGrowthDaily(); - newRecord.setTimestamp(new Timestamp(now)); - newRecord.setDatanodeId(10); - newRecord.setDatanodeHost("host1"); - newRecord.setRackId("rack1"); - newRecord.setAvailableSize(1024L); - newRecord.setUsedSize(512L); - newRecord.setContainerCount(10); - newRecord.setBlockCount(25); - - // Create - dao.insert(newRecord); - - // Read - ClusterGrowthDaily dbRecord = - dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP, - CLUSTER_GROWTH_DAILY.DATANODE_ID) - .value1(new Timestamp(now)).value2(10)); - - Assert.assertEquals("host1", dbRecord.getDatanodeHost()); - Assert.assertEquals("rack1", dbRecord.getRackId()); - Assert.assertEquals(Long.valueOf(1024), dbRecord.getAvailableSize()); - Assert.assertEquals(Long.valueOf(512), dbRecord.getUsedSize()); - Assert.assertEquals(Integer.valueOf(10), dbRecord.getContainerCount()); - Assert.assertEquals(Integer.valueOf(25), dbRecord.getBlockCount()); - - // Update - dbRecord.setUsedSize(700L); - dbRecord.setBlockCount(30); - dao.update(dbRecord); - - // Read updated - dbRecord = - dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP, - CLUSTER_GROWTH_DAILY.DATANODE_ID) - .value1(new Timestamp(now)).value2(10)); - - Assert.assertEquals(Long.valueOf(700), dbRecord.getUsedSize()); - Assert.assertEquals(Integer.valueOf(30), dbRecord.getBlockCount()); - - // Delete - dao.deleteById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP, - CLUSTER_GROWTH_DAILY.DATANODE_ID) - .value1(new Timestamp(now)).value2(10)); - - // Verify - dbRecord = - dao.findById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP, - CLUSTER_GROWTH_DAILY.DATANODE_ID) - .value1(new Timestamp(now)).value2(10)); - - Assert.assertNull(dbRecord); - } - - @Test - public void testFileCountBySizeCRUDOperations() throws SQLException { - UtilizationSchemaDefinition schemaDefinition = getInjector().getInstance( - UtilizationSchemaDefinition.class); - schemaDefinition.initializeSchema(); - - DataSource ds = getInjector().getInstance(DataSource.class); - Connection connection = ds.getConnection(); - - DatabaseMetaData metaData = connection.getMetaData(); - ResultSet resultSet = metaData.getTables(null, null, - FILE_COUNT_BY_SIZE_TABLE_NAME, null); - - while (resultSet.next()) { - 
Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME, - resultSet.getString("TABLE_NAME")); - } - - FileCountBySizeDao fileCountBySizeDao = new FileCountBySizeDao( - getInjector().getInstance(Configuration.class)); - - FileCountBySize newRecord = new FileCountBySize(); - newRecord.setFileSize(1024L); - newRecord.setCount(1L); - - fileCountBySizeDao.insert(newRecord); - - FileCountBySize dbRecord = fileCountBySizeDao.findById(1024L); - assertEquals(Long.valueOf(1), dbRecord.getCount()); - - dbRecord.setCount(2L); - fileCountBySizeDao.update(dbRecord); - - dbRecord = fileCountBySizeDao.findById(1024L); - assertEquals(Long.valueOf(2), dbRecord.getCount()); - - - - Table fileCountBySizeRecordTable = - fileCountBySizeDao.getTable(); - List> tableKeys = - fileCountBySizeRecordTable.getKeys(); - for (UniqueKey key : tableKeys) { - String name = key.getName(); - } - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java deleted file mode 100644 index 63b8505d0bd..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * End to end tests for persistence classes. - */ -package org.apache.hadoop.ozone.recon.persistence; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java deleted file mode 100644 index a9e6aea42c7..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/TestReconOmMetadataManagerImpl.java +++ /dev/null @@ -1,187 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.recovery; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; - -import java.io.File; -import java.io.IOException; - -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.junit.Assert; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -/** - * Test Recon OM Metadata Manager implementation. - */ -public class TestReconOmMetadataManagerImpl { - - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - - @Test - public void testStart() throws Exception { - - OMMetadataManager omMetadataManager = getOMMetadataManager(); - - //Take checkpoint of the above OM DB. - DBCheckpoint checkpoint = omMetadataManager.getStore() - .getCheckpoint(true); - File snapshotFile = new File( - checkpoint.getCheckpointLocation().getParent() + "/" + - "om.snapshot.db_" + System.currentTimeMillis()); - checkpoint.getCheckpointLocation().toFile().renameTo(snapshotFile); - - //Create new Recon OM Metadata manager instance. - File reconOmDbDir = temporaryFolder.newFolder(); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir - .getAbsolutePath()); - FileUtils.copyDirectory(snapshotFile.getParentFile(), reconOmDbDir); - - ReconOMMetadataManager reconOMMetadataManager = - new ReconOmMetadataManagerImpl(configuration, new ReconUtils()); - reconOMMetadataManager.start(configuration); - - Assert.assertNotNull(reconOMMetadataManager.getBucketTable()); - Assert.assertNotNull(reconOMMetadataManager.getVolumeTable() - .get("/sampleVol")); - Assert.assertNotNull(reconOMMetadataManager.getBucketTable() - .get("/sampleVol/bucketOne")); - Assert.assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_one")); - Assert.assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_two")); - } - - @Test - public void testUpdateOmDB() throws Exception { - - OMMetadataManager omMetadataManager = getOMMetadataManager(); - //Make sure OM Metadata reflects the keys that were inserted. - Assert.assertNotNull(omMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_one")); - Assert.assertNotNull(omMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_two")); - - //Take checkpoint of OM DB. - DBCheckpoint checkpoint = omMetadataManager.getStore() - .getCheckpoint(true); - Assert.assertNotNull(checkpoint.getCheckpointLocation()); - - //Create new Recon OM Metadata manager instance. 
- File reconOmDbDir = temporaryFolder.newFolder(); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir - .getAbsolutePath()); - ReconOMMetadataManager reconOMMetadataManager = - new ReconOmMetadataManagerImpl(configuration, new ReconUtils()); - reconOMMetadataManager.start(configuration); - - //Before accepting a snapshot, the metadata should have null tables. - Assert.assertNull(reconOMMetadataManager.getBucketTable()); - - //Update Recon OM DB with the OM DB checkpoint location. - reconOMMetadataManager.updateOmDB( - checkpoint.getCheckpointLocation().toFile()); - - //Now, the tables should have been initialized. - Assert.assertNotNull(reconOMMetadataManager.getBucketTable()); - - // Check volume and bucket entries. - Assert.assertNotNull(reconOMMetadataManager.getVolumeTable() - .get("/sampleVol")); - Assert.assertNotNull(reconOMMetadataManager.getBucketTable() - .get("/sampleVol/bucketOne")); - - //Verify Keys inserted in OM DB are available in Recon OM DB. - Assert.assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_one")); - Assert.assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_two")); - - } - - /** - * Get test OM metadata manager. - * @return OMMetadataManager instance - * @throws IOException - */ - private OMMetadataManager getOMMetadataManager() throws IOException { - //Create a new OM Metadata Manager instance + DB. - File omDbDir = temporaryFolder.newFolder(); - OzoneConfiguration omConfiguration = new OzoneConfiguration(); - omConfiguration.set(OZONE_OM_DB_DIRS, - omDbDir.getAbsolutePath()); - OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( - omConfiguration); - - //Create a volume + bucket + 2 keys. 
- String volumeKey = omMetadataManager.getVolumeKey("sampleVol"); - OmVolumeArgs args = - OmVolumeArgs.newBuilder() - .setVolume("sampleVol") - .setAdminName("TestUser") - .setOwnerName("TestUser") - .build(); - omMetadataManager.getVolumeTable().put(volumeKey, args); - - OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("sampleVol") - .setBucketName("bucketOne") - .build(); - - String bucketKey = - omMetadataManager.getBucketKey(bucketInfo.getVolumeName(), - bucketInfo.getBucketName()); - omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); - - - omMetadataManager.getKeyTable().put("/sampleVol/bucketOne/key_one", - new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_one") - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build()); - omMetadataManager.getKeyTable().put("/sampleVol/bucketOne/key_two", - new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_two") - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build()); - - return omMetadataManager; - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java deleted file mode 100644 index c3b0b343389..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/recovery/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Package for recon server - OM service specific tests. - */ -package org.apache.hadoop.ozone.recon.recovery; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java deleted file mode 100644 index 2392f8ab4de..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java +++ /dev/null @@ -1,405 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.jooq.impl.DSL; -import org.jooq.impl.DefaultConfiguration; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import com.google.inject.Injector; - -import javax.sql.DataSource; - -/** - * Unit Tests for ContainerDBServiceProviderImpl. - */ -public class TestContainerDBServiceProviderImpl { - - @ClassRule - public static TemporaryFolder tempFolder = new TemporaryFolder(); - private static ContainerDBServiceProvider containerDbServiceProvider; - private static Injector injector; - private static GuiceInjectorUtilsForTestsImpl guiceInjectorTest = - new GuiceInjectorUtilsForTestsImpl(); - - private String keyPrefix1 = "V3/B1/K1"; - private String keyPrefix2 = "V3/B1/K2"; - private String keyPrefix3 = "V3/B2/K1"; - - private void populateKeysInContainers(long containerId1, long containerId2) - throws Exception { - - ContainerKeyPrefix containerKeyPrefix1 = new - ContainerKeyPrefix(containerId1, keyPrefix1, 0); - containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix1, - 1); - - ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix( - containerId1, keyPrefix2, 0); - containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix2, - 2); - - ContainerKeyPrefix containerKeyPrefix3 = new ContainerKeyPrefix( - containerId2, keyPrefix3, 0); - - containerDbServiceProvider.storeContainerKeyMapping(containerKeyPrefix3, - 3); - } - - private static void initializeInjector() throws Exception { - injector = guiceInjectorTest.getInjector( - null, null, tempFolder); - } - - @BeforeClass - public static void setupOnce() throws Exception { - - initializeInjector(); - - DSL.using(new DefaultConfiguration().set( - injector.getInstance(DataSource.class))); - - containerDbServiceProvider = injector.getInstance( - ContainerDBServiceProvider.class); - - StatsSchemaDefinition schemaDefinition = injector.getInstance( - StatsSchemaDefinition.class); - schemaDefinition.initializeSchema(); - } - - @Before - public void setUp() throws Exception { - // Reset containerDB before running each test - containerDbServiceProvider.initNewContainerDB(null); - } - - @Test - public void testInitNewContainerDB() throws Exception { - long containerId = System.currentTimeMillis(); - Map prefixCounts = new HashMap<>(); - - ContainerKeyPrefix ckp1 = new ContainerKeyPrefix(containerId, - "V1/B1/K1", 0); - prefixCounts.put(ckp1, 1); - - ContainerKeyPrefix ckp2 = new ContainerKeyPrefix(containerId, - "V1/B1/K2", 0); - 
prefixCounts.put(ckp2, 2); - - ContainerKeyPrefix ckp3 = new ContainerKeyPrefix(containerId, - "V1/B2/K3", 0); - prefixCounts.put(ckp3, 3); - - for (ContainerKeyPrefix prefix : prefixCounts.keySet()) { - containerDbServiceProvider.storeContainerKeyMapping( - prefix, prefixCounts.get(prefix)); - } - - assertEquals(1, containerDbServiceProvider - .getCountForContainerKeyPrefix(ckp1).intValue()); - - prefixCounts.clear(); - prefixCounts.put(ckp2, 12); - prefixCounts.put(ckp3, 13); - ContainerKeyPrefix ckp4 = new ContainerKeyPrefix(containerId, - "V1/B3/K1", 0); - prefixCounts.put(ckp4, 14); - ContainerKeyPrefix ckp5 = new ContainerKeyPrefix(containerId, - "V1/B3/K2", 0); - prefixCounts.put(ckp5, 15); - - containerDbServiceProvider.initNewContainerDB(prefixCounts); - Map keyPrefixesForContainer = - containerDbServiceProvider.getKeyPrefixesForContainer(containerId); - - assertEquals(4, keyPrefixesForContainer.size()); - assertEquals(12, keyPrefixesForContainer.get(ckp2).intValue()); - assertEquals(13, keyPrefixesForContainer.get(ckp3).intValue()); - assertEquals(14, keyPrefixesForContainer.get(ckp4).intValue()); - assertEquals(15, keyPrefixesForContainer.get(ckp5).intValue()); - - assertEquals(0, containerDbServiceProvider - .getCountForContainerKeyPrefix(ckp1).intValue()); - } - - @Test - public void testStoreContainerKeyMapping() throws Exception { - - long containerId = System.currentTimeMillis(); - Map prefixCounts = new HashMap<>(); - prefixCounts.put(keyPrefix1, 1); - prefixCounts.put(keyPrefix2, 2); - prefixCounts.put(keyPrefix3, 3); - - for (String prefix : prefixCounts.keySet()) { - ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( - containerId, prefix, 0); - containerDbServiceProvider.storeContainerKeyMapping( - containerKeyPrefix, prefixCounts.get(prefix)); - } - - Assert.assertEquals(1, - containerDbServiceProvider.getCountForContainerKeyPrefix( - new ContainerKeyPrefix(containerId, keyPrefix1, - 0)).longValue()); - Assert.assertEquals(2, - containerDbServiceProvider.getCountForContainerKeyPrefix( - new ContainerKeyPrefix(containerId, keyPrefix2, - 0)).longValue()); - Assert.assertEquals(3, - containerDbServiceProvider.getCountForContainerKeyPrefix( - new ContainerKeyPrefix(containerId, keyPrefix3, - 0)).longValue()); - } - - @Test - public void testStoreContainerKeyCount() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - containerDbServiceProvider.storeContainerKeyCount(containerId, 2L); - containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L); - - assertEquals(2, - containerDbServiceProvider.getKeyCountForContainer(containerId)); - assertEquals(3, - containerDbServiceProvider.getKeyCountForContainer(nextContainerId)); - - containerDbServiceProvider.storeContainerKeyCount(containerId, 20L); - assertEquals(20, - containerDbServiceProvider.getKeyCountForContainer(containerId)); - } - - @Test - public void testGetKeyCountForContainer() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - containerDbServiceProvider.storeContainerKeyCount(containerId, 2L); - containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L); - - assertEquals(2, - containerDbServiceProvider.getKeyCountForContainer(containerId)); - assertEquals(3, - containerDbServiceProvider.getKeyCountForContainer(nextContainerId)); - - assertEquals(0, - containerDbServiceProvider.getKeyCountForContainer(5L)); - } - - @Test - public void testDoesContainerExists() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - 
containerDbServiceProvider.storeContainerKeyCount(containerId, 2L); - containerDbServiceProvider.storeContainerKeyCount(nextContainerId, 3L); - - assertTrue(containerDbServiceProvider.doesContainerExists(containerId)); - assertTrue(containerDbServiceProvider.doesContainerExists(nextContainerId)); - assertFalse(containerDbServiceProvider.doesContainerExists(0L)); - assertFalse(containerDbServiceProvider.doesContainerExists(3L)); - } - - @Test - public void testGetCountForContainerKeyPrefix() throws Exception { - long containerId = System.currentTimeMillis(); - - containerDbServiceProvider.storeContainerKeyMapping(new - ContainerKeyPrefix(containerId, keyPrefix1), 2); - - Integer count = containerDbServiceProvider. - getCountForContainerKeyPrefix(new ContainerKeyPrefix(containerId, - keyPrefix1)); - assertEquals(2L, count.longValue()); - - count = containerDbServiceProvider. - getCountForContainerKeyPrefix(new ContainerKeyPrefix(containerId, - "invalid")); - assertEquals(0L, count.longValue()); - } - - @Test - public void testGetKeyPrefixesForContainer() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - populateKeysInContainers(containerId, nextContainerId); - - ContainerKeyPrefix containerKeyPrefix1 = new - ContainerKeyPrefix(containerId, keyPrefix1, 0); - ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix( - containerId, keyPrefix2, 0); - ContainerKeyPrefix containerKeyPrefix3 = new ContainerKeyPrefix( - nextContainerId, keyPrefix3, 0); - - - Map keyPrefixMap = - containerDbServiceProvider.getKeyPrefixesForContainer(containerId); - assertEquals(2, keyPrefixMap.size()); - - assertEquals(1, keyPrefixMap.get(containerKeyPrefix1).longValue()); - assertEquals(2, keyPrefixMap.get(containerKeyPrefix2).longValue()); - - keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer( - nextContainerId); - assertEquals(1, keyPrefixMap.size()); - assertEquals(3, keyPrefixMap.get(containerKeyPrefix3).longValue()); - } - - @Test - public void testGetKeyPrefixesForContainerWithKeyPrefix() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - populateKeysInContainers(containerId, nextContainerId); - - ContainerKeyPrefix containerKeyPrefix2 = new ContainerKeyPrefix( - containerId, keyPrefix2, 0); - - Map keyPrefixMap = - containerDbServiceProvider.getKeyPrefixesForContainer(containerId, - keyPrefix1); - assertEquals(1, keyPrefixMap.size()); - assertEquals(2, keyPrefixMap.get(containerKeyPrefix2).longValue()); - - keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer( - nextContainerId, keyPrefix3); - assertEquals(0, keyPrefixMap.size()); - - // test for negative cases - keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer( - containerId, "V3/B1/invalid"); - assertEquals(0, keyPrefixMap.size()); - - keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer( - containerId, keyPrefix3); - assertEquals(0, keyPrefixMap.size()); - - keyPrefixMap = containerDbServiceProvider.getKeyPrefixesForContainer( - 10L, ""); - assertEquals(0, keyPrefixMap.size()); - } - - @Test - public void testGetContainersWithPrevContainer() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - populateKeysInContainers(containerId, nextContainerId); - - Map containerMap = - containerDbServiceProvider.getContainers(-1, 0L); - assertEquals(2, containerMap.size()); - - assertEquals(3, containerMap.get(containerId).getNumberOfKeys()); - assertEquals(3, containerMap.get(nextContainerId).getNumberOfKeys()); - - // 
test if limit works - containerMap = containerDbServiceProvider.getContainers( - 1, 0L); - assertEquals(1, containerMap.size()); - assertNull(containerMap.get(nextContainerId)); - - // test for prev key - containerMap = containerDbServiceProvider.getContainers( - -1, containerId); - assertEquals(1, containerMap.size()); - // containerId must be skipped from containerMap result - assertNull(containerMap.get(containerId)); - - containerMap = containerDbServiceProvider.getContainers( - -1, nextContainerId); - assertEquals(0, containerMap.size()); - - // test for negative cases - containerMap = containerDbServiceProvider.getContainers( - -1, 10L); - assertEquals(0, containerMap.size()); - - containerMap = containerDbServiceProvider.getContainers( - 0, containerId); - assertEquals(0, containerMap.size()); - } - - @Test - public void testDeleteContainerMapping() throws Exception { - long containerId = 1L; - long nextContainerId = 2L; - populateKeysInContainers(containerId, nextContainerId); - - Map keyPrefixMap = - containerDbServiceProvider.getKeyPrefixesForContainer(containerId); - assertEquals(2, keyPrefixMap.size()); - - containerDbServiceProvider.deleteContainerMapping(new ContainerKeyPrefix( - containerId, keyPrefix2, 0)); - keyPrefixMap = - containerDbServiceProvider.getKeyPrefixesForContainer(containerId); - assertEquals(1, keyPrefixMap.size()); - } - - @Test - public void testGetCountForContainers() throws Exception { - - assertEquals(0, containerDbServiceProvider.getCountForContainers()); - - containerDbServiceProvider.storeContainerCount(5L); - - assertEquals(5L, containerDbServiceProvider.getCountForContainers()); - containerDbServiceProvider.incrementContainerCountBy(1L); - - assertEquals(6L, containerDbServiceProvider.getCountForContainers()); - - containerDbServiceProvider.storeContainerCount(10L); - assertEquals(10L, containerDbServiceProvider.getCountForContainers()); - } - - @Test - public void testStoreContainerCount() throws Exception { - containerDbServiceProvider.storeContainerCount(3L); - assertEquals(3L, containerDbServiceProvider.getCountForContainers()); - - containerDbServiceProvider.storeContainerCount(5L); - assertEquals(5L, containerDbServiceProvider.getCountForContainers()); - } - - @Test - public void testIncrementContainerCountBy() throws Exception { - assertEquals(0, containerDbServiceProvider.getCountForContainers()); - - containerDbServiceProvider.incrementContainerCountBy(1L); - assertEquals(1L, containerDbServiceProvider.getCountForContainers()); - - containerDbServiceProvider.incrementContainerCountBy(3L); - assertEquals(4L, containerDbServiceProvider.getCountForContainers()); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java deleted file mode 100644 index 63b414071d7..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ /dev/null @@ -1,338 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; -import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileWriter; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Paths; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest; -import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler; -import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch; -import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; -import org.apache.hadoop.hdds.utils.db.DBCheckpoint; -import org.apache.hadoop.hdds.utils.db.DBUpdatesWrapper; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.rocksdb.RocksDB; -import org.rocksdb.TransactionLogIterator; -import org.rocksdb.WriteBatch; - -/** - * Class to test Ozone Manager Service Provider Implementation. 
- */ -public class TestOzoneManagerServiceProviderImpl extends - AbstractOMMetadataManagerTest { - - private OzoneConfiguration configuration; - private OzoneManagerProtocol ozoneManagerProtocol; - - @Before - public void setUp() throws Exception { - configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, - temporaryFolder.newFolder().getAbsolutePath()); - configuration.set(OZONE_RECON_DB_DIR, - temporaryFolder.newFolder().getAbsolutePath()); - configuration.set("ozone.om.address", "localhost:9862"); - ozoneManagerProtocol = getMockOzoneManagerClient(new DBUpdatesWrapper()); - } - - @Test - public void testUpdateReconOmDBWithNewSnapshot() throws Exception { - - OMMetadataManager omMetadataManager = initializeNewOmMetadataManager(); - ReconOMMetadataManager reconOMMetadataManager = - getTestMetadataManager(omMetadataManager); - - writeDataToOm(omMetadataManager, "key_one"); - writeDataToOm(omMetadataManager, "key_two"); - - DBCheckpoint checkpoint = omMetadataManager.getStore() - .getCheckpoint(true); - File tarFile = createTarFile(checkpoint.getCheckpointLocation()); - InputStream inputStream = new FileInputStream(tarFile); - ReconUtils reconUtilsMock = getMockReconUtils(); - when(reconUtilsMock.makeHttpCall(any(), anyString())) - .thenReturn(inputStream); - - ReconTaskController reconTaskController = getMockTaskController(); - - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol); - - Assert.assertNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_one")); - Assert.assertNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_two")); - - assertTrue(ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot()); - - assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_one")); - assertNotNull(reconOMMetadataManager.getKeyTable() - .get("/sampleVol/bucketOne/key_two")); - } - - @Test - public void testGetOzoneManagerDBSnapshot() throws Exception { - - File reconOmSnapshotDbDir = temporaryFolder.newFolder(); - - File checkpointDir = Paths.get(reconOmSnapshotDbDir.getAbsolutePath(), - "testGetOzoneManagerDBSnapshot").toFile(); - checkpointDir.mkdir(); - - File file1 = Paths.get(checkpointDir.getAbsolutePath(), "file1") - .toFile(); - String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); - writer.write(str); - writer.close(); - - File file2 = Paths.get(checkpointDir.getAbsolutePath(), "file2") - .toFile(); - str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); - writer.write(str); - writer.close(); - - //Create test tar file. 
- File tarFile = createTarFile(checkpointDir.toPath()); - InputStream fileInputStream = new FileInputStream(tarFile); - ReconUtils reconUtilsMock = getMockReconUtils(); - when(reconUtilsMock.makeHttpCall(any(), anyString())) - .thenReturn(fileInputStream); - - ReconOMMetadataManager reconOMMetadataManager = - mock(ReconOMMetadataManager.class); - ReconTaskController reconTaskController = getMockTaskController(); - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol); - - DBCheckpoint checkpoint = ozoneManagerServiceProvider - .getOzoneManagerDBSnapshot(); - assertNotNull(checkpoint); - assertTrue(checkpoint.getCheckpointLocation().toFile().isDirectory()); - assertTrue(checkpoint.getCheckpointLocation().toFile() - .listFiles().length == 2); - } - - @Test - public void testGetAndApplyDeltaUpdatesFromOM() throws Exception { - - // Writing 2 Keys into a source OM DB and collecting it in a - // DBUpdatesWrapper. - OMMetadataManager sourceOMMetadataMgr = initializeNewOmMetadataManager(); - writeDataToOm(sourceOMMetadataMgr, "key_one"); - writeDataToOm(sourceOMMetadataMgr, "key_two"); - - RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb(); - TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L); - DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper(); - while(transactionLogIterator.isValid()) { - TransactionLogIterator.BatchResult result = - transactionLogIterator.getBatch(); - result.writeBatch().markWalTerminationPoint(); - WriteBatch writeBatch = result.writeBatch(); - dbUpdatesWrapper.addWriteBatch(writeBatch.data(), - result.sequenceNumber()); - transactionLogIterator.next(); - } - - // OM Service Provider's Metadata Manager. - OMMetadataManager omMetadataManager = initializeNewOmMetadataManager(); - - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new OzoneManagerServiceProviderImpl(configuration, - getTestMetadataManager(omMetadataManager), - getMockTaskController(), new ReconUtils(), - getMockOzoneManagerClient(dbUpdatesWrapper)); - - OMDBUpdatesHandler updatesHandler = - new OMDBUpdatesHandler(omMetadataManager); - ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM( - 0L, updatesHandler); - - // In this method, we have to assert the "GET" part and the "APPLY" path. - - // Assert GET path --> verify if the OMDBUpdatesHandler picked up the 4 - // events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs). - assertEquals(4, updatesHandler.getEvents().size()); - - // Assert APPLY path --> Verify if the OM service provider's RocksDB got - // the changes. - String fullKey = omMetadataManager.getOzoneKey("sampleVol", - "bucketOne", "key_one"); - assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance() - .getKeyTable().isExist(fullKey)); - fullKey = omMetadataManager.getOzoneKey("sampleVol", - "bucketOne", "key_two"); - assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance() - .getKeyTable().isExist(fullKey)); - } - - @Test - public void testSyncDataFromOMFullSnapshot() throws Exception { - - // Empty OM DB to start with. 
- ReconOMMetadataManager omMetadataManager = getTestMetadataManager( - initializeEmptyOmMetadataManager()); - ReconTaskStatusDao reconTaskStatusDaoMock = - mock(ReconTaskStatusDao.class); - doNothing().when(reconTaskStatusDaoMock) - .update(any(ReconTaskStatus.class)); - - ReconTaskController reconTaskControllerMock = getMockTaskController(); - when(reconTaskControllerMock.getReconTaskStatusDao()) - .thenReturn(reconTaskStatusDaoMock); - doNothing().when(reconTaskControllerMock) - .reInitializeTasks(omMetadataManager); - - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new MockOzoneServiceProvider(configuration, omMetadataManager, - reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol); - - // Should trigger full snapshot request. - ozoneManagerServiceProvider.syncDataFromOM(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(ReconTaskStatus.class); - verify(reconTaskStatusDaoMock, times(1)) - .update(captor.capture()); - assertTrue(captor.getValue().getTaskName().equals("OM_DB_FULL_SNAPSHOT")); - verify(reconTaskControllerMock, times(1)) - .reInitializeTasks(omMetadataManager); - } - - @Test - public void testSyncDataFromOMDeltaUpdates() throws Exception { - - // Non-Empty OM DB to start with. - ReconOMMetadataManager omMetadataManager = getTestMetadataManager( - initializeNewOmMetadataManager()); - ReconTaskStatusDao reconTaskStatusDaoMock = - mock(ReconTaskStatusDao.class); - doNothing().when(reconTaskStatusDaoMock) - .update(any(ReconTaskStatus.class)); - - ReconTaskController reconTaskControllerMock = getMockTaskController(); - when(reconTaskControllerMock.getReconTaskStatusDao()) - .thenReturn(reconTaskStatusDaoMock); - doNothing().when(reconTaskControllerMock) - .consumeOMEvents(any(OMUpdateEventBatch.class), - any(OMMetadataManager.class)); - - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new OzoneManagerServiceProviderImpl(configuration, omMetadataManager, - reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol); - - // Should trigger delta updates. - ozoneManagerServiceProvider.syncDataFromOM(); - - ArgumentCaptor captor = - ArgumentCaptor.forClass(ReconTaskStatus.class); - verify(reconTaskStatusDaoMock, times(1)) - .update(captor.capture()); - assertTrue(captor.getValue().getTaskName().equals("OM_DB_DELTA_UPDATES")); - - verify(reconTaskControllerMock, times(1)) - .consumeOMEvents(any(OMUpdateEventBatch.class), - any(OMMetadataManager.class)); - } - - private ReconTaskController getMockTaskController() { - ReconTaskController reconTaskControllerMock = - mock(ReconTaskController.class); - return reconTaskControllerMock; - } - - private ReconUtils getMockReconUtils() throws IOException { - ReconUtils reconUtilsMock = mock(ReconUtils.class); - when(reconUtilsMock.getReconDbDir(any(), anyString())).thenCallRealMethod(); - doCallRealMethod().when(reconUtilsMock).untarCheckpointFile(any(), any()); - return reconUtilsMock; - } - - private OzoneManagerProtocol getMockOzoneManagerClient( - DBUpdatesWrapper dbUpdatesWrapper) throws IOException { - OzoneManagerProtocol ozoneManagerProtocolMock = - mock(OzoneManagerProtocol.class); - when(ozoneManagerProtocolMock.getDBUpdates(any(OzoneManagerProtocolProtos - .DBUpdatesRequest.class))).thenReturn(dbUpdatesWrapper); - return ozoneManagerProtocolMock; - } -} - -/** - * Mock OzoneManagerServiceProviderImpl which overrides - * updateReconOmDBWithNewSnapshot. 
- */ -class MockOzoneServiceProvider extends OzoneManagerServiceProviderImpl { - - MockOzoneServiceProvider(OzoneConfiguration configuration, - ReconOMMetadataManager omMetadataManager, - ReconTaskController reconTaskController, - ReconUtils reconUtils, - OzoneManagerProtocol ozoneManagerClient) - throws IOException { - super(configuration, omMetadataManager, reconTaskController, reconUtils, - ozoneManagerClient); - } - - @Override - public boolean updateReconOmDBWithNewSnapshot() { - return true; - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java deleted file mode 100644 index ad1feeb68a3..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestReconContainerDBProvider.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.spi.impl; - -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; -import static org.junit.Assert.assertNotNull; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import com.google.inject.AbstractModule; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Singleton; - -/** - * Tests the class that provides the instance of the DB Store used by Recon to - * store its container - key data. - */ -public class TestReconContainerDBProvider { - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - - private Injector injector; - - @Before - public void setUp() throws IOException { - tempFolder.create(); - injector = Guice.createInjector(new AbstractModule() { - @Override - protected void configure() { - File dbDir = tempFolder.getRoot(); - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_DB_DIR, dbDir.getAbsolutePath()); - bind(OzoneConfiguration.class).toInstance(configuration); - bind(DBStore.class).toProvider(ReconContainerDBProvider.class).in( - Singleton.class); - } - }); - } - - @Test - public void testGet() throws Exception { - ReconContainerDBProvider reconContainerDBProvider = injector.getInstance( - ReconContainerDBProvider.class); - DBStore dbStore = reconContainerDBProvider.get(); - assertNotNull(dbStore); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java deleted file mode 100644 index 932c4375c8c..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Package for recon server impl tests. - */ -package org.apache.hadoop.ozone.recon.spi.impl; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java deleted file mode 100644 index 66be41eae1e..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/DummyReconDBTask.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import java.util.Collection; -import java.util.Collections; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.ozone.om.OMMetadataManager; - -/** - * Dummy Recon task that has 3 modes of operations. - * ALWAYS_FAIL / FAIL_ONCE / ALWAYS_PASS - */ -public class DummyReconDBTask implements ReconDBUpdateTask { - - private int numFailuresAllowed = Integer.MIN_VALUE; - private int callCtr = 0; - private String taskName; - - DummyReconDBTask(String taskName, TaskType taskType) { - this.taskName = taskName; - if (taskType.equals(TaskType.FAIL_ONCE)) { - numFailuresAllowed = 1; - } else if (taskType.equals(TaskType.ALWAYS_FAIL)) { - numFailuresAllowed = Integer.MAX_VALUE; - } - } - - @Override - public String getTaskName() { - return taskName; - } - - @Override - public Collection getTaskTables() { - return Collections.singletonList("volumeTable"); - } - - @Override - public Pair process(OMUpdateEventBatch events) { - if (++callCtr <= numFailuresAllowed) { - return new ImmutablePair<>(getTaskName(), false); - } else { - return new ImmutablePair<>(getTaskName(), true); - } - } - - @Override - public Pair reprocess(OMMetadataManager omMetadataManager) { - if (++callCtr <= numFailuresAllowed) { - return new ImmutablePair<>(getTaskName(), false); - } else { - return new ImmutablePair<>(getTaskName(), true); - } - } - - /** - * Type of the task. - */ - public enum TaskType { - ALWAYS_PASS, - FAIL_ONCE, - ALWAYS_FAIL - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java deleted file mode 100644 index 8634998b878..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.recon.AbstractOMMetadataManagerTest; -import org.apache.hadoop.ozone.recon.GuiceInjectorUtilsForTestsImpl; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.apache.hadoop.hdds.utils.db.Table; -import org.hadoop.ozone.recon.schema.StatsSchemaDefinition; -import org.jooq.impl.DSL; -import org.jooq.impl.DefaultConfiguration; -import org.junit.Before; -import org.junit.Test; -import com.google.inject.Injector; -import javax.sql.DataSource; - -/** - * Unit test for Container Key mapper task. 
- */ -public class TestContainerKeyMapperTask extends AbstractOMMetadataManagerTest { - - private ContainerDBServiceProvider containerDbServiceProvider; - private OMMetadataManager omMetadataManager; - private ReconOMMetadataManager reconOMMetadataManager; - private Injector injector; - private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider; - private boolean setUpIsDone = false; - private GuiceInjectorUtilsForTestsImpl guiceInjectorTest = - new GuiceInjectorUtilsForTestsImpl(); - - private Injector getInjector() { - return injector; - } - - private void initializeInjector() throws Exception { - omMetadataManager = initializeNewOmMetadataManager(); - ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider(); - reconOMMetadataManager = getTestMetadataManager(omMetadataManager); - - injector = guiceInjectorTest.getInjector( - ozoneManagerServiceProvider, reconOMMetadataManager, temporaryFolder); - } - - @Before - public void setUp() throws Exception { - // The following setup is run only once - if (!setUpIsDone) { - initializeInjector(); - - DSL.using(new DefaultConfiguration().set( - injector.getInstance(DataSource.class))); - - containerDbServiceProvider = injector.getInstance( - ContainerDBServiceProvider.class); - - StatsSchemaDefinition schemaDefinition = getInjector().getInstance( - StatsSchemaDefinition.class); - schemaDefinition.initializeSchema(); - - setUpIsDone = true; - } - - containerDbServiceProvider = injector.getInstance( - ContainerDBServiceProvider.class); - } - - @Test - public void testReprocessOMDB() throws Exception{ - - Map keyPrefixesForContainer = - containerDbServiceProvider.getKeyPrefixesForContainer(1); - assertTrue(keyPrefixesForContainer.isEmpty()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(2); - assertTrue(keyPrefixesForContainer.isEmpty()); - - Pipeline pipeline = getRandomPipeline(); - - List omKeyLocationInfoList = new ArrayList<>(); - BlockID blockID1 = new BlockID(1, 1); - OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, - pipeline); - - BlockID blockID2 = new BlockID(2, 1); - OmKeyLocationInfo omKeyLocationInfo2 - = getOmKeyLocationInfo(blockID2, pipeline); - - omKeyLocationInfoList.add(omKeyLocationInfo1); - omKeyLocationInfoList.add(omKeyLocationInfo2); - - OmKeyLocationInfoGroup omKeyLocationInfoGroup = new - OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - - writeDataToOm(reconOMMetadataManager, - "key_one", - "bucketOne", - "sampleVol", - Collections.singletonList(omKeyLocationInfoGroup)); - - ContainerKeyMapperTask containerKeyMapperTask = - new ContainerKeyMapperTask(containerDbServiceProvider); - containerKeyMapperTask.reprocess(reconOMMetadataManager); - - keyPrefixesForContainer = - containerDbServiceProvider.getKeyPrefixesForContainer(1); - assertEquals(1, keyPrefixesForContainer.size()); - String omKey = omMetadataManager.getOzoneKey("sampleVol", - "bucketOne", "key_one"); - ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix(1, - omKey, 0); - assertEquals(1, - keyPrefixesForContainer.get(containerKeyPrefix).intValue()); - - keyPrefixesForContainer = - containerDbServiceProvider.getKeyPrefixesForContainer(2); - assertEquals(1, keyPrefixesForContainer.size()); - containerKeyPrefix = new ContainerKeyPrefix(2, omKey, - 0); - assertEquals(1, - keyPrefixesForContainer.get(containerKeyPrefix).intValue()); - - // Test if container key counts are updated - assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L)); - assertEquals(1, 
containerDbServiceProvider.getKeyCountForContainer(2L)); - assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(3L)); - - // Test if container count is updated - assertEquals(2, containerDbServiceProvider.getCountForContainers()); - } - - @Test - public void testProcessOMEvents() throws IOException { - Map keyPrefixesForContainer = - containerDbServiceProvider.getKeyPrefixesForContainer(1); - assertTrue(keyPrefixesForContainer.isEmpty()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(2); - assertTrue(keyPrefixesForContainer.isEmpty()); - - Pipeline pipeline = getRandomPipeline(); - - List omKeyLocationInfoList = new ArrayList<>(); - BlockID blockID1 = new BlockID(1, 1); - OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, - pipeline); - - BlockID blockID2 = new BlockID(2, 1); - OmKeyLocationInfo omKeyLocationInfo2 - = getOmKeyLocationInfo(blockID2, pipeline); - - omKeyLocationInfoList.add(omKeyLocationInfo1); - omKeyLocationInfoList.add(omKeyLocationInfo2); - - OmKeyLocationInfoGroup omKeyLocationInfoGroup = new - OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - - String bucket = "bucketOne"; - String volume = "sampleVol"; - String key = "key_one"; - String omKey = omMetadataManager.getOzoneKey(volume, bucket, key); - OmKeyInfo omKeyInfo = buildOmKeyInfo(volume, bucket, key, - omKeyLocationInfoGroup); - - OMDBUpdateEvent keyEvent1 = new OMDBUpdateEvent. - OMUpdateEventBuilder() - .setKey(omKey) - .setValue(omKeyInfo) - .setTable(omMetadataManager.getKeyTable().getName()) - .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) - .build(); - - BlockID blockID3 = new BlockID(1, 2); - OmKeyLocationInfo omKeyLocationInfo3 = - getOmKeyLocationInfo(blockID3, pipeline); - - BlockID blockID4 = new BlockID(3, 1); - OmKeyLocationInfo omKeyLocationInfo4 - = getOmKeyLocationInfo(blockID4, pipeline); - - omKeyLocationInfoList = new ArrayList<>(); - omKeyLocationInfoList.add(omKeyLocationInfo3); - omKeyLocationInfoList.add(omKeyLocationInfo4); - omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, - omKeyLocationInfoList); - - String key2 = "key_two"; - writeDataToOm(reconOMMetadataManager, key2, bucket, volume, Collections - .singletonList(omKeyLocationInfoGroup)); - - omKey = omMetadataManager.getOzoneKey(volume, bucket, key2); - OMDBUpdateEvent keyEvent2 = new OMDBUpdateEvent. - OMUpdateEventBuilder() - .setKey(omKey) - .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) - .setTable(omMetadataManager.getKeyTable().getName()) - .build(); - - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(new - ArrayList() {{ - add(keyEvent1); - add(keyEvent2); - }}); - - ContainerKeyMapperTask containerKeyMapperTask = - new ContainerKeyMapperTask(containerDbServiceProvider); - containerKeyMapperTask.reprocess(reconOMMetadataManager); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(1); - assertEquals(1, keyPrefixesForContainer.size()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(2); - assertTrue(keyPrefixesForContainer.isEmpty()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(3); - assertEquals(1, keyPrefixesForContainer.size()); - - assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L)); - assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(2L)); - assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(3L)); - - // Process PUT & DELETE event. 
- containerKeyMapperTask.process(omUpdateEventBatch); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(1); - assertEquals(1, keyPrefixesForContainer.size()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(2); - assertEquals(1, keyPrefixesForContainer.size()); - - keyPrefixesForContainer = containerDbServiceProvider - .getKeyPrefixesForContainer(3); - assertTrue(keyPrefixesForContainer.isEmpty()); - - assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(1L)); - assertEquals(1, containerDbServiceProvider.getKeyCountForContainer(2L)); - assertEquals(0, containerDbServiceProvider.getKeyCountForContainer(3L)); - - // Test if container count is updated - assertEquals(3, containerDbServiceProvider.getCountForContainers()); - } - - private OmKeyInfo buildOmKeyInfo(String volume, - String bucket, - String key, - OmKeyLocationInfoGroup - omKeyLocationInfoGroup) { - return new OmKeyInfo.Builder() - .setBucketName(bucket) - .setVolumeName(volume) - .setKeyName(key) - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .setOmKeyLocationInfos(Collections.singletonList( - omKeyLocationInfoGroup)) - .build(); - } - - private OzoneManagerServiceProviderImpl getMockOzoneManagerServiceProvider() - throws IOException { - OzoneManagerServiceProviderImpl omServiceProviderMock = - mock(OzoneManagerServiceProviderImpl.class); - OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); - Table tableMock = mock(Table.class); - when(tableMock.getName()).thenReturn("keyTable"); - when(omMetadataManagerMock.getKeyTable()).thenReturn(tableMock); - when(omServiceProviderMock.getOMMetadataManagerInstance()) - .thenReturn(omMetadataManagerMock); - return omServiceProviderMock; - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java deleted file mode 100644 index b4b546733fc..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.utils.db.TypedTable; -import org.junit.Test; - -import java.io.IOException; - -import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; -import static org.junit.Assert.assertEquals; - -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.BDDMockito.given; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.when; - -/** - * Unit test for File Size Count Task. - */ -public class TestFileSizeCountTask { - @Test - public void testCalculateBinIndex() { - FileSizeCountTask fileSizeCountTask = mock(FileSizeCountTask.class); - - when(fileSizeCountTask.getMaxFileSizeUpperBound()). - thenReturn(1125899906842624L); // 1 PB - when(fileSizeCountTask.getOneKB()).thenReturn(1024L); - when(fileSizeCountTask.getMaxBinSize()).thenReturn(42); - when(fileSizeCountTask.calculateBinIndex(anyLong())).thenCallRealMethod(); - when(fileSizeCountTask.nextClosestPowerIndexOfTwo( - anyLong())).thenCallRealMethod(); - - long fileSize = 1024L; // 1 KB - int binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(1, binIndex); - - fileSize = 1023L; // 1KB - 1B - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(0, binIndex); - - fileSize = 562949953421312L; // 512 TB - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(40, binIndex); - - fileSize = 562949953421313L; // (512 TB + 1B) - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(40, binIndex); - - fileSize = 562949953421311L; // (512 TB - 1B) - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(39, binIndex); - - fileSize = 1125899906842624L; // 1 PB - last (extra) bin - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(41, binIndex); - - fileSize = 100000L; - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(7, binIndex); - - fileSize = 1125899906842623L; // (1 PB - 1B) - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(40, binIndex); - - fileSize = 1125899906842624L * 4; // 4 PB - last extra bin - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(41, binIndex); - - fileSize = Long.MAX_VALUE; // extra bin - binIndex = fileSizeCountTask.calculateBinIndex(fileSize); - assertEquals(41, binIndex); - } - - @Test - public void testFileCountBySizeReprocess() throws IOException { - OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class); - given(omKeyInfo1.getKeyName()).willReturn("key1"); - given(omKeyInfo1.getDataSize()).willReturn(1000L); - - OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); - TypedTable keyTable = mock(TypedTable.class); - - - TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable - .TypedTableIterator.class); - TypedTable.TypedKeyValue mockKeyValue = mock( - TypedTable.TypedKeyValue.class); - - 
when(keyTable.iterator()).thenReturn(mockKeyIter); - when(omMetadataManager.getKeyTable()).thenReturn(keyTable); - when(mockKeyIter.hasNext()).thenReturn(true).thenReturn(false); - when(mockKeyIter.next()).thenReturn(mockKeyValue); - when(mockKeyValue.getValue()).thenReturn(omKeyInfo1); - - FileSizeCountTask fileSizeCountTask = mock(FileSizeCountTask.class); - when(fileSizeCountTask.getMaxFileSizeUpperBound()). - thenReturn(4096L); - when(fileSizeCountTask.getOneKB()).thenReturn(1024L); - - when(fileSizeCountTask.reprocess(omMetadataManager)).thenCallRealMethod(); - //call reprocess() - fileSizeCountTask.reprocess(omMetadataManager); - verify(fileSizeCountTask, times(1)). - updateUpperBoundCount(omKeyInfo1, PUT); - verify(fileSizeCountTask, - times(1)).populateFileCountBySizeDB(); - } -} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java deleted file mode 100644 index 7056e7ebbc6..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.rocksdb.RocksDB; -import org.rocksdb.TransactionLogIterator; -import org.rocksdb.WriteBatch; - -/** - * Class used to test OMDBUpdatesHandler. - */ -public class TestOMDBUpdatesHandler { - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration createNewTestPath() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - File newFolder = folder.newFolder(); - if (!newFolder.exists()) { - assertTrue(newFolder.mkdirs()); - } - ServerUtils.setOzoneMetaDirPath(configuration, newFolder.toString()); - return configuration; - } - - @Test - public void testPut() throws Exception { - OzoneConfiguration configuration = createNewTestPath(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); - - String volumeKey = metaMgr.getVolumeKey("sampleVol"); - OmVolumeArgs args = - OmVolumeArgs.newBuilder() - .setVolume("sampleVol") - .setAdminName("bilbo") - .setOwnerName("bilbo") - .build(); - metaMgr.getVolumeTable().put(volumeKey, args); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_one") - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build(); - - metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo); - RDBStore rdbStore = (RDBStore) metaMgr.getStore(); - - RocksDB rocksDB = rdbStore.getDb(); - TransactionLogIterator transactionLogIterator = - rocksDB.getUpdatesSince(0); - List writeBatches = new ArrayList<>(); - - while(transactionLogIterator.isValid()) { - TransactionLogIterator.BatchResult result = - transactionLogIterator.getBatch(); - result.writeBatch().markWalTerminationPoint(); - WriteBatch writeBatch = result.writeBatch(); - writeBatches.add(writeBatch.data()); - transactionLogIterator.next(); - } - - OzoneConfiguration conf2 = createNewTestPath(); - OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2); - List events = new ArrayList<>(); - for (byte[] data : writeBatches) { - WriteBatch writeBatch = new WriteBatch(data); - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(reconOmmetaMgr); - writeBatch.iterate(omdbUpdatesHandler); - events.addAll(omdbUpdatesHandler.getEvents()); - } - assertNotNull(events); - assertTrue(events.size() == 2); - - 
OMDBUpdateEvent volEvent = events.get(0); - assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, volEvent.getAction()); - assertEquals(volumeKey, volEvent.getKey()); - assertEquals(args.getVolume(), ((OmVolumeArgs)volEvent.getValue()) - .getVolume()); - - OMDBUpdateEvent keyEvent = events.get(1); - assertEquals(OMDBUpdateEvent.OMDBUpdateAction.PUT, keyEvent.getAction()); - assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey()); - assertEquals(omKeyInfo.getBucketName(), - ((OmKeyInfo)keyEvent.getValue()).getBucketName()); - } - - @Test - public void testDelete() throws Exception { - OzoneConfiguration configuration = createNewTestPath(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); - - String volumeKey = metaMgr.getVolumeKey("sampleVol"); - OmVolumeArgs args = - OmVolumeArgs.newBuilder() - .setVolume("sampleVol") - .setAdminName("bilbo") - .setOwnerName("bilbo") - .build(); - metaMgr.getVolumeTable().put(volumeKey, args); - - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setBucketName("bucketOne") - .setVolumeName("sampleVol") - .setKeyName("key_one") - .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) - .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .build(); - - metaMgr.getKeyTable().put("/sampleVol/bucketOne/key_one", omKeyInfo); - - metaMgr.getKeyTable().delete("/sampleVol/bucketOne/key_one"); - metaMgr.getVolumeTable().delete(volumeKey); - - RDBStore rdbStore = (RDBStore) metaMgr.getStore(); - - RocksDB rocksDB = rdbStore.getDb(); - TransactionLogIterator transactionLogIterator = - rocksDB.getUpdatesSince(0); - List writeBatches = new ArrayList<>(); - - while(transactionLogIterator.isValid()) { - TransactionLogIterator.BatchResult result = - transactionLogIterator.getBatch(); - result.writeBatch().markWalTerminationPoint(); - WriteBatch writeBatch = result.writeBatch(); - writeBatches.add(writeBatch.data()); - transactionLogIterator.next(); - } - - OzoneConfiguration conf2 = createNewTestPath(); - OmMetadataManagerImpl reconOmmetaMgr = new OmMetadataManagerImpl(conf2); - List events = new ArrayList<>(); - for (byte[] data : writeBatches) { - WriteBatch writeBatch = new WriteBatch(data); - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(reconOmmetaMgr); - writeBatch.iterate(omdbUpdatesHandler); - events.addAll(omdbUpdatesHandler.getEvents()); - } - assertNotNull(events); - assertTrue(events.size() == 4); - - OMDBUpdateEvent keyEvent = events.get(2); - assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction()); - assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey()); - - OMDBUpdateEvent volEvent = events.get(3); - assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction()); - assertEquals(volumeKey, volEvent.getKey()); - } - - @Test - public void testGetValueType() throws IOException { - OzoneConfiguration configuration = createNewTestPath(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(metaMgr); - - assertEquals(OmKeyInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getKeyTable().getName())); - assertEquals(OmVolumeArgs.class, omdbUpdatesHandler.getValueType( - metaMgr.getVolumeTable().getName())); - assertEquals(OmBucketInfo.class, omdbUpdatesHandler.getValueType( - metaMgr.getBucketTable().getName())); - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java deleted file mode 100644 index 67608697273..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ /dev/null @@ -1,191 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.tasks; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Collections; -import java.util.HashSet; - -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.recon.persistence.AbstractSqlDatabaseTest; -import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.jooq.Configuration; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Class used to test ReconTaskControllerImpl. - */ -public class TestReconTaskControllerImpl extends AbstractSqlDatabaseTest { - - private ReconTaskController reconTaskController; - private Configuration sqlConfiguration; - - @Before - public void setUp() throws Exception { - - OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); - - sqlConfiguration = getInjector() - .getInstance(Configuration.class); - - ReconInternalSchemaDefinition schemaDefinition = getInjector(). 
- getInstance(ReconInternalSchemaDefinition.class); - schemaDefinition.initializeSchema(); - - reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, - sqlConfiguration, new HashSet<>()); - } - - @Test - public void testRegisterTask() throws Exception { - String taskName = "Dummy_" + System.currentTimeMillis(); - DummyReconDBTask dummyReconDBTask = - new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_PASS); - reconTaskController.registerTask(dummyReconDBTask); - assertTrue(reconTaskController.getRegisteredTasks().size() == 1); - assertTrue(reconTaskController.getRegisteredTasks() - .get(dummyReconDBTask.getTaskName()) == dummyReconDBTask); - } - - @Test - public void testConsumeOMEvents() throws Exception { - - ReconDBUpdateTask reconDBUpdateTaskMock = getMockTask("MockTask"); - when(reconDBUpdateTaskMock.process(any(OMUpdateEventBatch.class))) - .thenReturn(new ImmutablePair<>("MockTask", true)); - reconTaskController.registerTask(reconDBUpdateTaskMock); - OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); - when(omUpdateEventBatchMock.isEmpty()).thenReturn(false); - when(omUpdateEventBatchMock.filter(Collections.singleton("MockTable"))) - .thenReturn(omUpdateEventBatchMock); - reconTaskController.consumeOMEvents( - omUpdateEventBatchMock, - mock(OMMetadataManager.class)); - - verify(reconDBUpdateTaskMock, times(1)) - .process(any()); - } - - @Test - public void testFailedTaskRetryLogic() throws Exception { - String taskName = "Dummy_" + System.currentTimeMillis(); - DummyReconDBTask dummyReconDBTask = - new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.FAIL_ONCE); - reconTaskController.registerTask(dummyReconDBTask); - - long currentTime = System.currentTimeMillis(); - OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); - when(omUpdateEventBatchMock.isEmpty()).thenReturn(false); - when(omUpdateEventBatchMock.getLastSequenceNumber()).thenReturn(100L); - - reconTaskController.consumeOMEvents(omUpdateEventBatchMock, - mock(OMMetadataManager.class)); - assertFalse(reconTaskController.getRegisteredTasks().isEmpty()); - assertEquals(dummyReconDBTask, reconTaskController.getRegisteredTasks() - .get(dummyReconDBTask.getTaskName())); - - ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration); - ReconTaskStatus dbRecord = dao.findById(taskName); - - Assert.assertEquals(taskName, dbRecord.getTaskName()); - Assert.assertTrue( - dbRecord.getLastUpdatedTimestamp() > currentTime); - Assert.assertEquals(Long.valueOf(100L), dbRecord.getLastUpdatedSeqNumber()); - } - - @Test - public void testBadBehavedTaskBlacklisting() throws Exception { - String taskName = "Dummy_" + System.currentTimeMillis(); - DummyReconDBTask dummyReconDBTask = - new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_FAIL); - reconTaskController.registerTask(dummyReconDBTask); - - OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); - when(omUpdateEventBatchMock.isEmpty()).thenReturn(false); - when(omUpdateEventBatchMock.getLastSequenceNumber()).thenReturn(100L); - - OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); - for (int i = 0; i < 2; i++) { - reconTaskController.consumeOMEvents(omUpdateEventBatchMock, - omMetadataManagerMock); - - assertFalse(reconTaskController.getRegisteredTasks().isEmpty()); - assertEquals(dummyReconDBTask, reconTaskController.getRegisteredTasks() - .get(dummyReconDBTask.getTaskName())); - } - - //Should be blacklisted now. 
- reconTaskController.consumeOMEvents(omUpdateEventBatchMock, - omMetadataManagerMock); - assertTrue(reconTaskController.getRegisteredTasks().isEmpty()); - - ReconTaskStatusDao dao = new ReconTaskStatusDao(sqlConfiguration); - ReconTaskStatus dbRecord = dao.findById(taskName); - - Assert.assertEquals(taskName, dbRecord.getTaskName()); - Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp()); - Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber()); - } - - - @Test - public void testReInitializeTasks() throws Exception { - - OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); - ReconDBUpdateTask reconDBUpdateTaskMock = - getMockTask("MockTask2"); - when(reconDBUpdateTaskMock.reprocess(omMetadataManagerMock)) - .thenReturn(new ImmutablePair<>("MockTask2", true)); - - reconTaskController.registerTask(reconDBUpdateTaskMock); - reconTaskController.reInitializeTasks(omMetadataManagerMock); - - verify(reconDBUpdateTaskMock, times(1)) - .reprocess(omMetadataManagerMock); - } - - /** - * Helper method for getting a mocked Task. - * @param taskName name of the task. - * @return instance of ReconDBUpdateTask. - */ - private ReconDBUpdateTask getMockTask(String taskName) { - ReconDBUpdateTask reconDBUpdateTaskMock = mock(ReconDBUpdateTask.class); - when(reconDBUpdateTaskMock.getTaskTables()).thenReturn(Collections - .EMPTY_LIST); - when(reconDBUpdateTaskMock.getTaskName()).thenReturn(taskName); - when(reconDBUpdateTaskMock.getTaskTables()) - .thenReturn(Collections.singleton("MockTable")); - return reconDBUpdateTaskMock; - } -} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java deleted file mode 100644 index 9e1a31ad3b9..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * The classes in this package tests the various scheduled tasks used by - * Recon. - */ -package org.apache.hadoop.ozone.recon.tasks; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java deleted file mode 100644 index 77d910637b0..00000000000 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/types/GuiceInjectorUtilsForTests.java +++ /dev/null @@ -1,117 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.recon.types; - -import com.google.inject.AbstractModule; -import com.google.inject.Guice; -import com.google.inject.Injector; -import com.google.inject.Singleton; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.recon.persistence.AbstractSqlDatabaseTest; -import org.apache.hadoop.ozone.recon.persistence.DataSourceConfiguration; -import org.apache.hadoop.ozone.recon.persistence.JooqPersistenceModule; -import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; -import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl; -import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; -import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerDBProvider; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.junit.Assert; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; - -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; - -/** - * Utility methods to get guice injector and ozone configuration. - */ -public interface GuiceInjectorUtilsForTests { - - /** - * Get Guice Injector with bindings. - * @param ozoneManagerServiceProvider - * @param reconOMMetadataManager - * @param temporaryFolder - * @return Injector - * @throws IOException ioEx. - */ - default Injector getInjector( - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider, - ReconOMMetadataManager reconOMMetadataManager, - TemporaryFolder temporaryFolder - ) throws IOException { - - File tempDir = temporaryFolder.newFolder(); - AbstractSqlDatabaseTest.DataSourceConfigurationProvider - configurationProvider = - new AbstractSqlDatabaseTest.DataSourceConfigurationProvider(tempDir); - - JooqPersistenceModule jooqPersistenceModule = - new JooqPersistenceModule(configurationProvider); - - return Guice.createInjector(jooqPersistenceModule, - new AbstractModule() { - @Override - protected void configure() { - try { - bind(DataSourceConfiguration.class) - .toProvider(configurationProvider); - bind(OzoneConfiguration.class).toInstance( - getTestOzoneConfiguration(temporaryFolder)); - - if (reconOMMetadataManager != null) { - bind(ReconOMMetadataManager.class) - .toInstance(reconOMMetadataManager); - } - - if (ozoneManagerServiceProvider != null) { - bind(OzoneManagerServiceProvider.class) - .toInstance(ozoneManagerServiceProvider); - } - - bind(DBStore.class).toProvider(ReconContainerDBProvider.class). - in(Singleton.class); - bind(ContainerDBServiceProvider.class).to( - ContainerDBServiceProviderImpl.class).in(Singleton.class); - } catch (IOException e) { - Assert.fail(); - } - } - }); - } - - /** - * Get Test OzoneConfiguration instance. - * @return OzoneConfiguration - * @throws IOException ioEx. 
- */ - default OzoneConfiguration getTestOzoneConfiguration( - TemporaryFolder temporaryFolder) throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, - temporaryFolder.newFolder().getAbsolutePath()); - configuration.set(OZONE_RECON_DB_DIR, temporaryFolder.newFolder() - .getAbsolutePath()); - return configuration; - } -} diff --git a/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker deleted file mode 100644 index 3c9e1c8a697..00000000000 --- a/hadoop-ozone/recon/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker +++ /dev/null @@ -1,16 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -mock-maker-inline \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml deleted file mode 100644 index 32c95874540..00000000000 --- a/hadoop-ozone/s3gateway/pom.xml +++ /dev/null @@ -1,256 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-s3gateway - Apache Hadoop Ozone S3 Gateway - jar - 0.5.0-SNAPSHOT - - UTF-8 - true - - - - - org.apache.hadoop - hadoop-ozone-common - compile - - - org.jboss.weld.servlet - weld-servlet - 2.4.7.Final - - - org.glassfish.jersey.containers - jersey-container-servlet-core - 2.27 - - - org.glassfish.jersey.ext.cdi - jersey-cdi1x - 2.27 - - - org.glassfish.jersey.inject - jersey-hk2 - 2.27 - - - hk2-api - org.glassfish.hk2 - - - hk2-utils - org.glassfish.hk2 - - - aopalliance-repackaged - org.glassfish.hk2.external - - - - - org.glassfish.hk2 - hk2-api - 2.5.0 - - - com.fasterxml.jackson.dataformat - jackson-dataformat-xml - 2.9.0 - - - javax.enterprise - cdi-api - 1.2 - - - com.sun.xml.bind - jaxb-impl - - - com.sun.xml.bind - jaxb-core - - - javax.xml.bind - jaxb-api - - - javax.activation - activation - - - - org.apache.hadoop - hadoop-common - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-servlet - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - - - org.apache.hadoop - hadoop-hdfs - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-servlet - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - - - org.apache.hadoop - hadoop-common - test-jar - test - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-servlet - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - - - org.apache.hadoop - hadoop-hdfs - test-jar - test - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-servlet - - - com.sun.jersey - jersey-json - - - com.sun.jersey - jersey-server - - - - - org.apache.hadoop - hadoop-ozone-client - - - 
junit - junit - test - - - org.mockito - mockito-core - 2.15.0 - test - - - com.github.spotbugs - spotbugs - provided - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-common-html - prepare-package - - unpack - - - - - org.apache.hadoop - hadoop-hdds-server-framework - ${project.build.outputDirectory} - - webapps/static/**/*.* - - - org.apache.hadoop - hadoop-hdds-docs - - ${project.build.outputDirectory}/webapps/static - - docs/**/*.* - - - true - - - - - - - diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java deleted file mode 100644 index 88def0b6eec..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSAuthParser.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import java.nio.charset.Charset; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; - -/* - * Parser to request auth parser for http request. - * */ -interface AWSAuthParser { - - String UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD"; - String NEWLINE = "\n"; - String CONTENT_TYPE = "content-type"; - String X_AMAZ_DATE = "X-Amz-Date"; - String CONTENT_MD5 = "content-md5"; - String AUTHORIZATION_HEADER = "Authorization"; - Charset UTF_8 = Charset.forName("utf-8"); - String X_AMZ_CONTENT_SHA256 = "X-Amz-Content-SHA256"; - String HOST = "host"; - - String AWS4_TERMINATOR = "aws4_request"; - - String AWS4_SIGNING_ALGORITHM = "AWS4-HMAC-SHA256"; - - /** - * Seconds in a week, which is the max expiration time Sig-v4 accepts. - */ - long PRESIGN_URL_MAX_EXPIRATION_SECONDS = - 60 * 60 * 24 * 7; - - String X_AMZ_SECURITY_TOKEN = "X-Amz-Security-Token"; - - String X_AMZ_CREDENTIAL = "X-Amz-Credential"; - - String X_AMZ_DATE = "X-Amz-Date"; - - String X_AMZ_EXPIRES = "X-Amz-Expires"; - - String X_AMZ_SIGNED_HEADER = "X-Amz-SignedHeaders"; - - String X_AMZ_SIGNATURE = "X-Amz-Signature"; - - String X_AMZ_ALGORITHM = "X-Amz-Algorithm"; - - String AUTHORIZATION = "Authorization"; - - String HOST_HEADER = "Host"; - - DateTimeFormatter DATE_FORMATTER = - DateTimeFormatter.ofPattern("yyyyMMdd"); - - DateTimeFormatter TIME_FORMATTER = - DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'") - .withZone(ZoneOffset.UTC); - - /** - * API to return string to sign. 
- */ - String getStringToSign() throws Exception; -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java deleted file mode 100644 index 82ffa0c5c43..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AWSV4AuthParser.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.header.AuthorizationHeaderV4; -import org.apache.hadoop.ozone.s3.header.Credential; -import org.apache.kerby.util.Hex; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.MultivaluedMap; -import java.io.UnsupportedEncodingException; -import java.net.InetAddress; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLEncoder; -import java.net.UnknownHostException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.time.LocalDate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import static java.time.temporal.ChronoUnit.SECONDS; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_TOKEN_CREATION_ERROR; - -/** - * Parser to process AWS v4 auth request. Creates string to sign and auth - * header. For more details refer to AWS documentation https://docs.aws - * .amazon.com/general/latest/gr/sigv4-create-canonical-request.html. 
- **/ -public class AWSV4AuthParser implements AWSAuthParser { - - private final static Logger LOG = - LoggerFactory.getLogger(AWSV4AuthParser.class); - private MultivaluedMap headerMap; - private MultivaluedMap queryMap; - private String uri; - private String method; - private AuthorizationHeaderV4 v4Header; - private String stringToSign; - private String amzContentPayload; - - public AWSV4AuthParser(ContainerRequestContext context) - throws OS3Exception { - this.headerMap = context.getHeaders(); - this.queryMap = context.getUriInfo().getQueryParameters(); - try { - this.uri = new URI(context.getUriInfo().getRequestUri() - .getPath().replaceAll("\\/+", - "/")).normalize().getPath(); - } catch (URISyntaxException e) { - throw S3_TOKEN_CREATION_ERROR; - } - - this.method = context.getMethod(); - v4Header = new AuthorizationHeaderV4( - headerMap.getFirst(AUTHORIZATION_HEADER)); - } - - public void parse() throws Exception { - StringBuilder strToSign = new StringBuilder(); - - // According to AWS sigv4 documentation, authorization header should be - // in following format. - // Authorization: algorithm Credential=access key ID/credential scope, - // SignedHeaders=SignedHeaders, Signature=signature - - // Construct String to sign in below format. - // StringToSign = - // Algorithm + \n + - // RequestDateTime + \n + - // CredentialScope + \n + - // HashedCanonicalRequest - String algorithm, requestDateTime, credentialScope, canonicalRequest; - algorithm = v4Header.getAlgorithm(); - requestDateTime = headerMap.getFirst(X_AMAZ_DATE); - Credential credential = v4Header.getCredentialObj(); - credentialScope = String.format("%s/%s/%s/%s", credential.getDate(), - credential.getAwsRegion(), credential.getAwsService(), - credential.getAwsRequest()); - - // If the absolute path is empty, use a forward slash (/) - uri = (uri.trim().length() > 0) ? uri : "/"; - // Encode URI and preserve forward slashes - strToSign.append(algorithm + NEWLINE); - strToSign.append(requestDateTime + NEWLINE); - strToSign.append(credentialScope + NEWLINE); - - canonicalRequest = buildCanonicalRequest(); - strToSign.append(hash(canonicalRequest)); - if (LOG.isDebugEnabled()) { - LOG.debug("canonicalRequest:[{}]", canonicalRequest); - } - - if (LOG.isTraceEnabled()) { - headerMap.keySet().forEach(k -> LOG.trace("Header:{},value:{}", k, - headerMap.get(k))); - } - - LOG.debug("StringToSign:[{}]", strToSign); - stringToSign = strToSign.toString(); - } - - private String buildCanonicalRequest() throws OS3Exception { - Iterable parts = split("/", uri); - List encParts = new ArrayList<>(); - for (String p : parts) { - encParts.add(urlEncode(p)); - } - String canonicalUri = join("/", encParts); - - String canonicalQueryStr = getQueryParamString(); - - StringBuilder canonicalHeaders = new StringBuilder(); - - for (String header : v4Header.getSignedHeaders()) { - List headerValue = new ArrayList<>(); - canonicalHeaders.append(header.toLowerCase()); - canonicalHeaders.append(":"); - for (String originalHeader : headerMap.keySet()) { - if (originalHeader.toLowerCase().equals(header)) { - headerValue.add(headerMap.getFirst(originalHeader).trim()); - } - } - - if (headerValue.size() == 0) { - throw new RuntimeException("Header " + header + " not present in " + - "request"); - } - if (headerValue.size() > 1) { - Collections.sort(headerValue); - } - - // Set for testing purpose only to skip date and host validation. 
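The canonical request assembled by buildCanonicalRequest() follows the six-part layout defined by Sig V4. A self-contained sketch of that layout, with a hypothetical host and path:

public class CanonicalRequestShape {
  public static void main(String[] args) {
    String canonicalHeaders =
        "host:s3g.example.com\n"
        + "x-amz-content-sha256:UNSIGNED-PAYLOAD\n"
        + "x-amz-date:20191024T143500Z\n";      // each canonical header line ends with \n

    String canonicalRequest = String.join("\n",
        "GET",                                   // HTTP method
        "/mybucket/mykey",                       // URL-encoded path, "/" if empty
        "",                                      // canonical query string (none here)
        canonicalHeaders,                        // canonical headers block
        "host;x-amz-content-sha256;x-amz-date",  // signed header names
        "UNSIGNED-PAYLOAD");                     // payload hash, or UNSIGNED-PAYLOAD
    System.out.println(canonicalRequest);
  }
}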
- validateSignedHeader(header, headerValue.get(0)); - - canonicalHeaders.append(join(",", headerValue)); - canonicalHeaders.append(NEWLINE); - } - - String payloadHash; - if (UNSIGNED_PAYLOAD.equals( - headerMap.get(X_AMZ_CONTENT_SHA256))) { - payloadHash = UNSIGNED_PAYLOAD; - } else { - payloadHash = headerMap.getFirst(X_AMZ_CONTENT_SHA256); - } - - String signedHeaderStr = v4Header.getSignedHeaderString(); - String canonicalRequest = method + NEWLINE - + canonicalUri + NEWLINE - + canonicalQueryStr + NEWLINE - + canonicalHeaders + NEWLINE - + signedHeaderStr + NEWLINE - + payloadHash; - - return canonicalRequest; - } - - @VisibleForTesting - void validateSignedHeader(String header, String headerValue) - throws OS3Exception { - switch (header) { - case HOST: - try { - URI hostUri = new URI(headerValue); - InetAddress.getByName(hostUri.getHost()); - // TODO: Validate if current request is coming from same host. - } catch (UnknownHostException|URISyntaxException e) { - LOG.error("Host value mentioned in signed header is not valid. " + - "Host:{}", headerValue); - throw S3_TOKEN_CREATION_ERROR; - } - break; - case X_AMAZ_DATE: - LocalDate date = LocalDate.parse(headerValue, TIME_FORMATTER); - LocalDate now = LocalDate.now(); - if (date.isBefore(now.minus(PRESIGN_URL_MAX_EXPIRATION_SECONDS, SECONDS)) - || date.isAfter(now.plus(PRESIGN_URL_MAX_EXPIRATION_SECONDS, - SECONDS))) { - LOG.error("AWS date not in valid range. Request timestamp:{} should " + - "not be older than {} seconds.", headerValue, - PRESIGN_URL_MAX_EXPIRATION_SECONDS); - throw S3_TOKEN_CREATION_ERROR; - } - break; - case X_AMZ_CONTENT_SHA256: - // TODO: Construct request payload and match HEX(SHA256(requestPayload)) - break; - default: - break; - } - } - - /** - * String join that also works with empty strings. - * - * @return joined string - */ - private static String join(String glue, List parts) { - StringBuilder result = new StringBuilder(); - boolean addSeparator = false; - for (String p : parts) { - if (addSeparator) { - result.append(glue); - } - result.append(p); - addSeparator = true; - } - return result.toString(); - } - - /** - * Returns matching strings. - * - * @param regex Regular expression to split by - * @param whole The string to split - * @return pieces - */ - private static Iterable split(String regex, String whole) { - Pattern p = Pattern.compile(regex); - Matcher m = p.matcher(whole); - List result = new ArrayList<>(); - int pos = 0; - while (m.find()) { - result.add(whole.substring(pos, m.start())); - pos = m.end(); - } - result.add(whole.substring(pos)); - return result; - } - - private String urlEncode(String str) { - try { - - return URLEncoder.encode(str, UTF_8.name()) - .replaceAll("\\+", "%20") - .replaceAll("%7E", "~"); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - } - - private String getQueryParamString() { - List params = new ArrayList<>(queryMap.keySet()); - - // Sort by name, then by value - Collections.sort(params, (o1, o2) -> o1.equals(o2) ? 
- queryMap.getFirst(o1).compareTo(queryMap.getFirst(o2)) : - o1.compareTo(o2)); - - StringBuilder result = new StringBuilder(); - for (String p : params) { - if (result.length() > 0) { - result.append("&"); - } - result.append(urlEncode(p)); - result.append('='); - - result.append(urlEncode(queryMap.getFirst(p))); - } - return result.toString(); - } - - public static String hash(String payload) throws NoSuchAlgorithmException { - MessageDigest md = MessageDigest.getInstance("SHA-256"); - md.update(payload.getBytes(UTF_8)); - return Hex.encode(md.digest()).toLowerCase(); - } - - public String getAwsAccessId() { - return v4Header.getAccessKeyID(); - } - - public String getSignature() { - return v4Header.getSignature(); - } - - public String getStringToSign() throws Exception { - return stringToSign; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java deleted file mode 100644 index 27f792e65b4..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/CommonHeadersContainerResponseFilter.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.inject.Inject; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.container.ContainerResponseContext; -import javax.ws.rs.container.ContainerResponseFilter; -import javax.ws.rs.ext.Provider; -import java.io.IOException; - -/** - * This class adds common header responses for all the requests. - */ -@Provider -public class CommonHeadersContainerResponseFilter implements - ContainerResponseFilter { - - @Inject - private RequestIdentifier requestIdentifier; - - @Override - public void filter(ContainerRequestContext containerRequestContext, - ContainerResponseContext containerResponseContext) throws IOException { - - containerResponseContext.getHeaders().add("Server", "Ozone"); - containerResponseContext.getHeaders() - .add("x-amz-id-2", requestIdentifier.getAmzId()); - containerResponseContext.getHeaders() - .add("x-amz-request-id", requestIdentifier.getRequestId()); - - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java deleted file mode 100644 index 061a2d7d0d6..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
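The hash() helper above returns the lower-case hex SHA-256 of its input via org.apache.kerby's Hex class; the same result can be produced with only the JDK, as in this illustrative sketch:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Sha256Hex {

  // Lower-case hex SHA-256 of a UTF-8 string, equivalent in spirit to hash() above.
  static String sha256Hex(String payload) throws NoSuchAlgorithmException {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    byte[] digest = md.digest(payload.getBytes(StandardCharsets.UTF_8));
    StringBuilder hex = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    return hex.toString();
  }

  public static void main(String[] args) throws NoSuchAlgorithmException {
    // SHA-256 of the empty string:
    // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
    System.out.println(sha256Hex(""));
  }
}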
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import java.io.IOException; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import picocli.CommandLine.Command; - -/** - * This class is used to start/stop S3 compatible rest server. - */ -@Command(name = "ozone s3g", - hidden = true, description = "S3 compatible rest server.", - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class Gateway extends GenericCli { - - private static final Logger LOG = LoggerFactory.getLogger(Gateway.class); - - private S3GatewayHttpServer httpServer; - - public static void main(String[] args) throws Exception { - new Gateway().run(args); - } - - @Override - public Void call() throws Exception { - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); - OzoneConfigurationHolder.setConfiguration(ozoneConfiguration); - httpServer = new S3GatewayHttpServer(ozoneConfiguration, "s3gateway"); - start(); - return null; - } - - public void start() throws IOException { - LOG.info("Starting Ozone S3 gateway"); - httpServer.start(); - } - - public void stop() throws Exception { - LOG.info("Stopping Ozone S3 gateway"); - httpServer.stop(); - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java deleted file mode 100644 index c5a291b4450..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import org.glassfish.jersey.server.ResourceConfig; - -/** - * JaxRS resource definition. 
- */ -public class GatewayApplication extends ResourceConfig { - public GatewayApplication() { - packages("org.apache.hadoop.ozone.s3"); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java deleted file mode 100644 index db94bbbb750..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/HeaderPreprocessor.java +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.annotation.Priority; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.container.ContainerRequestFilter; -import javax.ws.rs.container.PreMatching; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.Provider; -import java.io.IOException; - -/** - * Filter to adjust request headers for compatible reasons. - * - * It should be executed AFTER signature check (VirtualHostStyleFilter) as the - * original Content-Type could be part of the base of the signature. - */ -@Provider -@PreMatching -@Priority(VirtualHostStyleFilter.PRIORITY - + S3GatewayHttpServer.FILTER_PRIORITY_DO_AFTER) -public class HeaderPreprocessor implements ContainerRequestFilter { - - public static final String MULTIPART_UPLOAD_MARKER = "ozone/mpu"; - - @Override - public void filter(ContainerRequestContext requestContext) throws - IOException { - MultivaluedMap queryParameters = - requestContext.getUriInfo().getQueryParameters(); - - if (queryParameters.containsKey("delete")) { - //aws cli doesn't send proper Content-Type and by default POST requests - //processed as form-url-encoded. Here we can fix this. - requestContext.getHeaders() - .putSingle("Content-Type", MediaType.APPLICATION_XML); - } - - if (queryParameters.containsKey("uploadId")) { - //aws cli doesn't send proper Content-Type and by default POST requests - //processed as form-url-encoded. Here we can fix this. - requestContext.getHeaders() - .putSingle("Content-Type", MediaType.APPLICATION_XML); - } else if (queryParameters.containsKey("uploads")) { - // uploads defined but uploadId is not --> this is the creation of the - // multi-part-upload requests. - // - //In AWS SDK for go uses application/octet-stream which also - //should be fixed to route the request to the right jaxrs method. - // - //Should be empty instead of XML as the body is empty which can not be - //serialized as as CompleteMultipartUploadRequest - requestContext.getHeaders() - .putSingle("Content-Type", MULTIPART_UPLOAD_MARKER); - } - - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java deleted file mode 100644 index d42c005e583..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
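The HeaderPreprocessor above rewrites Content-Type from query parameters because the aws cli and some SDKs send multipart and multi-delete POSTs without a usable Content-Type. A stripped-down sketch of the same mechanism; the class name, priority value and forced media type are illustrative, not the production filter:

import java.io.IOException;
import javax.annotation.Priority;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.ext.Provider;

@Provider
@PreMatching
@Priority(150)  // runs after the signature-related filters, as in the original ordering
public class ForceXmlContentTypeFilter implements ContainerRequestFilter {

  @Override
  public void filter(ContainerRequestContext requestContext) throws IOException {
    // If the client sent ?delete or ?uploadId, force an XML Content-Type so the
    // request is routed to the JAX-RS method that expects an XML body.
    if (requestContext.getUriInfo().getQueryParameters().containsKey("delete")
        || requestContext.getUriInfo().getQueryParameters().containsKey("uploadId")) {
      requestContext.getHeaders().putSingle("Content-Type", MediaType.APPLICATION_XML);
    }
  }
}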
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.enterprise.context.RequestScoped; -import javax.enterprise.inject.Produces; -import javax.inject.Inject; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.Context; -import java.io.IOException; -import java.net.URISyntaxException; - -import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMTokenProto.Type.S3TOKEN; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.AUTHORIZATION_HEADER; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.UTF_8; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.AUTH_PROTOCOL_NOT_SUPPORTED; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.S3_TOKEN_CREATION_ERROR; - -/** - * This class creates the OzoneClient for the Rest endpoints. - */ -@RequestScoped -public class OzoneClientProducer { - - private final static Logger LOG = - LoggerFactory.getLogger(OzoneClientProducer.class); - - @Context - private ContainerRequestContext context; - - @Inject - private OzoneConfiguration ozoneConfiguration; - - @Inject - private Text omService; - - @Inject - private String omServiceID; - - - @Produces - public OzoneClient createClient() throws IOException { - return getClient(ozoneConfiguration); - } - - private OzoneClient getClient(OzoneConfiguration config) throws IOException { - try { - if (OzoneSecurityUtil.isSecurityEnabled(config)) { - LOG.debug("Creating s3 token for client."); - if (context.getHeaderString(AUTHORIZATION_HEADER).startsWith("AWS4")) { - try { - AWSV4AuthParser v4RequestParser = new AWSV4AuthParser(context); - v4RequestParser.parse(); - - OzoneTokenIdentifier identifier = new OzoneTokenIdentifier(); - identifier.setTokenType(S3TOKEN); - identifier.setStrToSign(v4RequestParser.getStringToSign()); - identifier.setSignature(v4RequestParser.getSignature()); - identifier.setAwsAccessId(v4RequestParser.getAwsAccessId()); - identifier.setOwner(new Text(v4RequestParser.getAwsAccessId())); - if (LOG.isTraceEnabled()) { - LOG.trace("Adding token for service:{}", omService); - } - Token token = new Token(identifier.getBytes(), - identifier.getSignature().getBytes(UTF_8), - identifier.getKind(), - omService); - UserGroupInformation remoteUser = - UserGroupInformation.createRemoteUser( - v4RequestParser.getAwsAccessId()); - remoteUser.addToken(token); - UserGroupInformation.setLoginUser(remoteUser); - } catch (OS3Exception | URISyntaxException ex) { - LOG.error("S3 token creation failed."); - throw S3_TOKEN_CREATION_ERROR; - } - } else { - throw AUTH_PROTOCOL_NOT_SUPPORTED; - } - } - } catch (Exception e) { - LOG.error("Error: ", e); - } - - if (omServiceID == null) { - return OzoneClientFactory.getClient(ozoneConfiguration); - } else { - // As in HA case, we need to pass om service ID. 
- return OzoneClientFactory.getRpcClient(omServiceID, ozoneConfiguration); - } - } - - @VisibleForTesting - public void setContext(ContainerRequestContext context) { - this.context = context; - } - - @VisibleForTesting - public void setOzoneConfiguration(OzoneConfiguration config) { - this.ozoneConfiguration = config; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java deleted file mode 100644 index 4aeab1f3c4a..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneConfigurationHolder.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.enterprise.inject.Produces; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -/** - * Ozone Configuration factory. - *

    - * As the OzoneConfiguration is created by the CLI application here we inject - * it via a singleton instance to the Jax-RS/CDI instances. - */ -public class OzoneConfigurationHolder { - - private static OzoneConfiguration configuration; - - @Produces - public OzoneConfiguration configuration() { - return configuration; - } - - public static void setConfiguration( - OzoneConfiguration conf) { - OzoneConfigurationHolder.configuration = conf; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java deleted file mode 100644 index b98426c2014..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.s3.util.OzoneS3Util; -import org.apache.hadoop.security.SecurityUtil; - -import javax.annotation.PostConstruct; -import javax.enterprise.context.ApplicationScoped; -import javax.enterprise.inject.Produces; -import javax.inject.Inject; - -import java.util.Arrays; -import java.util.Collection; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -/** - * This class creates the OM service . - */ -@ApplicationScoped -public class OzoneServiceProvider { - - private Text omServiceAddr; - - private String omserviceID; - - @Inject - private OzoneConfiguration conf; - - @PostConstruct - public void init() { - Collection serviceIdList = - conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); - if (serviceIdList.size() == 0) { - // Non-HA cluster - omServiceAddr = SecurityUtil.buildTokenService(OmUtils. - getOmAddressForClients(conf)); - } else { - // HA cluster. - //For now if multiple service id's are configured we throw exception. - // As if multiple service id's are configured, S3Gateway will not be - // knowing which one to talk to. In future, if OM federation is supported - // we can resolve this by having another property like - // ozone.om.internal.service.id. - // TODO: Revisit this later. - if (serviceIdList.size() > 1) { - throw new IllegalArgumentException("Multiple serviceIds are " + - "configured. 
" + Arrays.toString(serviceIdList.toArray())); - } else { - String serviceId = serviceIdList.iterator().next(); - Collection omNodeIds = OmUtils.getOMNodeIds(conf, serviceId); - if (omNodeIds.size() == 0) { - throw new IllegalArgumentException(OZONE_OM_NODES_KEY - + "." + serviceId + " is not defined"); - } - omServiceAddr = new Text(OzoneS3Util.buildServiceNameForToken(conf, - serviceId, omNodeIds)); - omserviceID = serviceId; - } - } - } - - - @Produces - public Text getService() { - return omServiceAddr; - } - - @Produces - public String getOmServiceID() { - return omserviceID; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java deleted file mode 100644 index 379393cfc7c..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RequestIdentifier.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.enterprise.context.RequestScoped; - -import org.apache.hadoop.ozone.web.utils.OzoneUtils; - -import org.apache.commons.lang3.RandomStringUtils; - -/** - * Request specific identifiers. - */ -@RequestScoped -public class RequestIdentifier { - - private final String requestId; - - private final String amzId; - - public RequestIdentifier() { - amzId = RandomStringUtils.randomAlphanumeric(8, 16); - requestId = OzoneUtils.getRequestID(); - } - - public String getRequestId() { - return requestId; - } - - public String getAmzId() { - return amzId; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java deleted file mode 100644 index 4a5570aef86..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * This class contains constants for configuration keys used in S3G. - */ -@InterfaceAudience.Public -@InterfaceStability.Unstable -public final class S3GatewayConfigKeys { - - public static final String OZONE_S3G_HTTP_ENABLED_KEY = - "ozone.s3g.http.enabled"; - public static final String OZONE_S3G_HTTP_BIND_HOST_KEY = - "ozone.s3g.http-bind-host"; - public static final String OZONE_S3G_HTTPS_BIND_HOST_KEY = - "ozone.s3g.https-bind-host"; - public static final String OZONE_S3G_HTTP_ADDRESS_KEY = - "ozone.s3g.http-address"; - public static final String OZONE_S3G_HTTPS_ADDRESS_KEY = - "ozone.s3g.https-address"; - public static final String OZONE_S3G_KEYTAB_FILE = - "ozone.s3g.keytab.file"; - public static final String OZONE_S3G_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; - public static final int OZONE_S3G_HTTP_BIND_PORT_DEFAULT = 9878; - public static final int OZONE_S3G_HTTPS_BIND_PORT_DEFAULT = 9879; - public static final String OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL = - "ozone.s3g.authentication.kerberos.principal"; - public static final String OZONE_S3G_DOMAIN_NAME = "ozone.s3g.domain.name"; - - /** - * Never constructed. - */ - private S3GatewayConfigKeys() { - - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java deleted file mode 100644 index f3d83412ae6..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.server.BaseHttpServer; - -/** - * S3 Gateway specific configuration keys. - */ -public class S3GatewayHttpServer extends BaseHttpServer { - - /** - * Default offset between two filters. 
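These keys follow the usual Hadoop configuration conventions, so the listener address can be overridden in ozone-site.xml or programmatically. A hypothetical illustration using the keys and defaults defined above:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class S3GatewayAddressExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Defaults are 0.0.0.0:9878 (HTTP) and 0.0.0.0:9879 (HTTPS), per the constants above.
    conf.set("ozone.s3g.http-address", "0.0.0.0:9878");
    conf.setBoolean("ozone.s3g.http.enabled", true);
    System.out.println(conf.get("ozone.s3g.http-address"));
  }
}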
- */ - public static final int FILTER_PRIORITY_DO_AFTER = 50; - - public S3GatewayHttpServer(Configuration conf, - String name) throws IOException { - super(conf, name); - } - - @Override - protected String getHttpAddressKey() { - return S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY; - } - - @Override - protected String getHttpBindHostKey() { - return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY; - } - - @Override - protected String getHttpsAddressKey() { - return S3GatewayConfigKeys.OZONE_S3G_HTTPS_ADDRESS_KEY; - } - - @Override - protected String getHttpsBindHostKey() { - return S3GatewayConfigKeys.OZONE_S3G_HTTPS_BIND_HOST_KEY; - } - - @Override - protected String getBindHostDefault() { - return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_DEFAULT; - } - - @Override - protected int getHttpBindPortDefault() { - return S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_PORT_DEFAULT; - } - - @Override - protected int getHttpsBindPortDefault() { - return S3GatewayConfigKeys.OZONE_S3G_HTTPS_BIND_PORT_DEFAULT; - } - - @Override - protected String getKeytabFile() { - return S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; - } - - @Override - protected String getSpnegoPrincipal() { - return S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; - } - - @Override - protected String getEnabledKey() { - return S3GatewayConfigKeys.OZONE_S3G_HTTP_ENABLED_KEY; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java deleted file mode 100644 index 1074ef2dfb7..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/SignedChunksInputStream.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.s3; - -import java.io.IOException; -import java.io.InputStream; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * Input stream implementation to read body with chunked signatures. - *
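With aws-chunked signed uploads, every chunk is preceded by a header line of the form "<hex-size>;chunk-signature=<signature>", followed by CRLF, the chunk data, and another CRLF. A standalone sketch of parsing one such header line, using the same pattern as the stream below (the signature value is hypothetical):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ChunkHeaderParseExample {

  // Hex chunk length, then the per-chunk signature.
  private static final Pattern SIGNATURE_LINE =
      Pattern.compile("([0-9A-Fa-f]+);chunk-signature=.*");

  public static void main(String[] args) {
    String headerLine = "400;chunk-signature="
        + "0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497";
    Matcher matcher = SIGNATURE_LINE.matcher(headerLine);
    if (matcher.matches()) {
      // 0x400 = 1024 bytes of chunk data follow the CRLF after this line.
      int chunkLength = Integer.parseInt(matcher.group(1), 16);
      System.out.println("Chunk data length: " + chunkLength);
    }
  }
}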

    - * see: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html - */ -public class SignedChunksInputStream extends InputStream { - - private Pattern signatureLinePattern = - Pattern.compile("([0-9A-Fa-f]+);chunk-signature=.*"); - - private InputStream originalStream; - - /** - * Numer of following databits. If zero, the signature line should be parsed. - */ - private int remainingData = 0; - - public SignedChunksInputStream(InputStream inputStream) { - originalStream = inputStream; - } - - @Override - public int read() throws IOException { - if (remainingData > 0) { - int curr = originalStream.read(); - remainingData--; - if (remainingData == 0) { - //read the "\r\n" at the end of the data section - originalStream.read(); - originalStream.read(); - } - return curr; - } else { - remainingData = readHeader(); - if (remainingData == -1) { - return -1; - } - return read(); - } - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - if (b == null) { - throw new NullPointerException(); - } else if (off < 0 || len < 0 || len > b.length - off) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { - return 0; - } - int currentOff = off; - int currentLen = len; - int totalReadBytes = 0; - int realReadLen = 0; - int maxReadLen = 0; - do { - if (remainingData > 0) { - maxReadLen = Math.min(remainingData, currentLen); - realReadLen = originalStream.read(b, currentOff, maxReadLen); - if (realReadLen == -1) { - break; - } - currentOff += realReadLen; - currentLen -= realReadLen; - totalReadBytes += realReadLen; - remainingData -= realReadLen; - if (remainingData == 0) { - //read the "\r\n" at the end of the data section - originalStream.read(); - originalStream.read(); - } - } else { - remainingData = readHeader(); - if (remainingData == -1) { - break; - } - } - } while (currentLen > 0); - return totalReadBytes > 0 ? totalReadBytes : -1; - } - - private int readHeader() throws IOException { - int prev = -1; - int curr = 0; - StringBuilder buf = new StringBuilder(); - - //read everything until the next \r\n - while (!eol(prev, curr) && curr != -1) { - int next = originalStream.read(); - if (next != -1) { - buf.append((char) next); - } - prev = curr; - curr = next; - } - String signatureLine = buf.toString().trim(); - if (signatureLine.length() == 0) { - return -1; - } - - //parse the data length. - Matcher matcher = signatureLinePattern.matcher(signatureLine); - if (matcher.matches()) { - return Integer.parseInt(matcher.group(1), 16); - } else { - throw new IOException("Invalid signature line: " + signatureLine); - } - } - - private boolean eol(int prev, int curr) { - return prev == 13 && curr == 10; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java deleted file mode 100644 index 9ce98e11ee1..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ /dev/null @@ -1,169 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.annotation.Priority; -import javax.inject.Inject; -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.container.ContainerRequestFilter; -import javax.ws.rs.container.PreMatching; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.UriBuilder; -import javax.ws.rs.ext.Provider; - -import java.io.IOException; -import java.net.URI; -import java.util.Arrays; - - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.fs.InvalidRequestException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME; - -/** - * Filter used to convert virtual host style pattern to path style pattern. - */ - -@Provider -@PreMatching -@Priority(VirtualHostStyleFilter.PRIORITY) -public class VirtualHostStyleFilter implements ContainerRequestFilter { - - public static final int PRIORITY = 100; - - private static final Logger LOG = LoggerFactory.getLogger( - VirtualHostStyleFilter.class); - - @Inject - private OzoneConfiguration conf; - - @Inject - private AuthenticationHeaderParser authenticationHeaderParser; - - private String[] domains; - - @Override - public void filter(ContainerRequestContext requestContext) throws - IOException { - - authenticationHeaderParser.setAuthHeader(requestContext.getHeaderString( - HttpHeaders.AUTHORIZATION)); - domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME); - - if (domains.length == 0) { - // domains is not configured, might be it is path style. - // So, do not continue further, just return. - return; - } - //Get the value of the host - String host = requestContext.getHeaderString(HttpHeaders.HOST); - host = checkHostWithoutPort(host); - String domain = getDomainName(host); - - if (domain == null) { - throw getException("Invalid S3 Gateway request {" + requestContext - .getUriInfo().getRequestUri().toString() + " }: No matching domain " + - "{" + Arrays.toString(domains) + "} for the host {" + host + "}"); - } - - LOG.debug("Http header host name is {}", host); - LOG.debug("Domain name matched is {}", domain); - - //Check if we have a Virtual Host style request, host length greater than - // address length means it is virtual host style, we need to convert to - // path style. 
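The conversion performed here can be reproduced in isolation: find the longest configured domain that is a suffix of the Host header, strip it plus the trailing dot, and treat the remainder as the bucket name. The helper below is an illustrative sketch with hypothetical domains, not the production filter:

import java.util.Arrays;
import java.util.List;

public class VirtualHostBucketResolver {

  // Returns the bucket name for a virtual-host-style Host header, or null for path style.
  static String resolveBucket(String host, List<String> domains) {
    String match = null;
    for (String domain : domains) {
      if (host.endsWith(domain) && (match == null || domain.length() > match.length())) {
        match = domain;  // keep the longest matching suffix
      }
    }
    if (match == null || host.length() == match.length()) {
      return null;  // unknown domain, or plain path-style request
    }
    String bucketAndDot = host.substring(0, host.length() - match.length());
    if (!bucketAndDot.endsWith(".")) {
      throw new IllegalArgumentException("Malformed virtual-host-style host: " + host);
    }
    return bucketAndDot.substring(0, bucketAndDot.length() - 1);
  }

  public static void main(String[] args) {
    List<String> domains = Arrays.asList("s3g.example.com");
    System.out.println(resolveBucket("mybucket.s3g.example.com", domains)); // mybucket
    System.out.println(resolveBucket("s3g.example.com", domains));          // null
  }
}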
- if (host.length() > domain.length()) { - String bucketName = host.substring(0, host.length() - domain.length()); - - if(!bucketName.endsWith(".")) { - //Checking this as the virtual host style pattern is http://bucket.host/ - throw getException("Invalid S3 Gateway request {" + requestContext - .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host - + " is in invalid format"); - } else { - bucketName = bucketName.substring(0, bucketName.length() - 1); - } - LOG.debug("Bucket name is {}", bucketName); - - URI baseURI = requestContext.getUriInfo().getBaseUri(); - String currentPath = requestContext.getUriInfo().getPath(); - String newPath = bucketName; - if (currentPath != null) { - newPath += String.format("%s", currentPath); - } - MultivaluedMap queryParams = requestContext.getUriInfo() - .getQueryParameters(); - UriBuilder requestAddrBuilder = UriBuilder.fromUri(baseURI).path(newPath); - queryParams.forEach((k, v) -> requestAddrBuilder.queryParam(k, - v.toArray())); - URI requestAddr = requestAddrBuilder.build(); - requestContext.setRequestUri(baseURI, requestAddr); - } - } - - private InvalidRequestException getException(String message) { - return new InvalidRequestException(message); - } - - @VisibleForTesting - public void setConfiguration(OzoneConfiguration config) { - this.conf = config; - } - - - /** - * This method finds the longest match with the domain name. - * @param host - * @return domain name matched with the host. if none of them are matching, - * return null. - */ - private String getDomainName(String host) { - String match = null; - int length=0; - for (String domainVal : domains) { - if (host.endsWith(domainVal)) { - int len = domainVal.length(); - if (len > length) { - length = len; - match = domainVal; - } - } - } - return match; - } - - private String checkHostWithoutPort(String host) { - if (host.contains(":")){ - return host.substring(0, host.lastIndexOf(":")); - } else { - return host; - } - } - - @VisibleForTesting - public void setAuthenticationHeaderParser(AuthenticationHeaderParser parser) { - this.authenticationHeaderParser = parser; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java deleted file mode 100644 index 04f8ffd60e1..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/BucketMetadata.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.s3.commontypes; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; - -/** - * Metadata object represents one bucket. - */ -@XmlAccessorType(XmlAccessType.FIELD) -public class BucketMetadata { - @XmlElement(name = "Name") - private String name; - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "CreationDate") - private Instant creationDate; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Instant getCreationDate() { - return creationDate; - } - - public void setCreationDate(Instant creationDate) { - this.creationDate = creationDate; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java deleted file mode 100644 index 83e60476c7e..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/CommonPrefix.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.commontypes; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; - -/** - * Directory name ("key prefix") in case of listing. - */ -@XmlAccessorType(XmlAccessType.FIELD) -public class CommonPrefix { - - @XmlElement(name = "Prefix") - private String prefix; - - public CommonPrefix(String prefix) { - this.prefix = prefix; - } - - public CommonPrefix() { - } - - public String getPrefix() { - return prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java deleted file mode 100644 index cb04870e237..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/IsoDateAdapter.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.commontypes; - -import javax.xml.bind.annotation.adapters.XmlAdapter; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; - -/** - * A converter to convert Instant to standard date string. - */ -public class IsoDateAdapter extends XmlAdapter { - - private DateTimeFormatter iso8861Formatter; - - public IsoDateAdapter() { - iso8861Formatter = - DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX") - .withZone(ZoneOffset.UTC); - } - - @Override - public Instant unmarshal(String v) throws Exception { - throw new UnsupportedOperationException(); - } - - @Override - public String marshal(Instant v) throws Exception { - return iso8861Formatter.format(v); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java deleted file mode 100644 index 34cea281949..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.commontypes; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; - -/** - * Metadata object represents one key in the object store. 
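The CreationDate and LastModified fields in these response types are serialized through the IsoDateAdapter above, which renders an Instant in the S3-style ISO 8601 form. A quick illustration of that formatting (the class name is hypothetical):

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;

public class S3TimestampExample {
  public static void main(String[] args) {
    // Same pattern the adapter uses: millisecond precision, UTC, trailing 'Z'.
    DateTimeFormatter formatter =
        DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSSX").withZone(ZoneOffset.UTC);
    System.out.println(formatter.format(Instant.EPOCH)); // 1970-01-01T00:00:00.000Z
  }
}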
- */ -@XmlAccessorType(XmlAccessType.FIELD) -public class KeyMetadata { - - @XmlElement(name = "Key") - private String key; // or the Object Name - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "LastModified") - private Instant lastModified; - - @XmlElement(name = "ETag") - private String eTag; - - @XmlElement(name = "Size") - private long size; - - @XmlElement(name = "StorageClass") - private String storageClass; - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public Instant getLastModified() { - return lastModified; - } - - public void setLastModified(Instant lastModified) { - this.lastModified = lastModified; - } - - public String getETag() { - return eTag; - } - - public void setETag(String tag) { - this.eTag = tag; - } - - public long getSize() { - return size; - } - - public void setSize(long size) { - this.size = size; - } - - public String getStorageClass() { - return storageClass; - } - - public void setStorageClass(String storageClass) { - this.storageClass = storageClass; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java deleted file mode 100644 index dd916e88396..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/package-info.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Common classes required for S3 rest API's. - */ -@javax.xml.bind.annotation.XmlSchema( - namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/", elementFormDefault = - javax.xml.bind.annotation.XmlNsForm.QUALIFIED, - xmlns = { - @javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws" - + ".com/doc/2006-03-01/", prefix = "")}) -package org.apache.hadoop.ozone.s3.commontypes; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java deleted file mode 100644 index e4db6cc2827..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ /dev/null @@ -1,347 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.HEAD; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.Status; -import java.io.IOException; -import java.io.InputStream; -import java.time.Instant; -import java.util.Iterator; - -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata; -import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject; -import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.DeletedObject; -import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.util.ContinueToken; -import org.apache.hadoop.ozone.s3.util.S3StorageType; - -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import org.apache.commons.lang3.StringUtils; -import static org.apache.hadoop.ozone.s3.util.OzoneS3Util.getVolumeName; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE; -import org.apache.http.HttpStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Bucket level rest endpoints. - */ -@Path("/{bucket}") -public class BucketEndpoint extends EndpointBase { - - private static final Logger LOG = - LoggerFactory.getLogger(BucketEndpoint.class); - - /** - * Rest endpoint to list objects in a specific bucket. - *
    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html - * for more details. - */ - @GET - @SuppressFBWarnings - @SuppressWarnings("parameternumber") - public Response list( - @PathParam("bucket") String bucketName, - @QueryParam("delimiter") String delimiter, - @QueryParam("encoding-type") String encodingType, - @QueryParam("marker") String marker, - @DefaultValue("1000") @QueryParam("max-keys") int maxKeys, - @QueryParam("prefix") String prefix, - @QueryParam("browser") String browser, - @QueryParam("continuation-token") String continueToken, - @QueryParam("start-after") String startAfter, - @QueryParam("uploads") String uploads, - @Context HttpHeaders hh) throws OS3Exception, IOException { - - if (browser != null) { - InputStream browserPage = getClass() - .getResourceAsStream("/browser.html"); - return Response.ok(browserPage, - MediaType.TEXT_HTML_TYPE) - .build(); - - } - - if (uploads != null) { - return listMultipartUploads(bucketName, prefix); - } - - if (prefix == null) { - prefix = ""; - } - - OzoneBucket bucket = getBucket(bucketName); - - Iterator ozoneKeyIterator; - - ContinueToken decodedToken = - ContinueToken.decodeFromString(continueToken); - - if (startAfter != null && continueToken != null) { - // If continuation token and start after both are provided, then we - // ignore start After - ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey()); - } else if (startAfter != null && continueToken == null) { - ozoneKeyIterator = bucket.listKeys(prefix, startAfter); - } else if (startAfter == null && continueToken != null){ - ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey()); - } else { - ozoneKeyIterator = bucket.listKeys(prefix); - } - - - ListObjectResponse response = new ListObjectResponse(); - response.setDelimiter(delimiter); - response.setName(bucketName); - response.setPrefix(prefix); - response.setMarker(""); - response.setMaxKeys(maxKeys); - response.setEncodingType(ENCODING_TYPE); - response.setTruncated(false); - response.setContinueToken(continueToken); - - String prevDir = null; - if (continueToken != null) { - prevDir = decodedToken.getLastDir(); - } - String lastKey = null; - int count = 0; - while (ozoneKeyIterator.hasNext()) { - OzoneKey next = ozoneKeyIterator.next(); - String relativeKeyName = next.getName().substring(prefix.length()); - - int depth = StringUtils.countMatches(relativeKeyName, delimiter); - if (delimiter != null) { - if (depth > 0) { - // means key has multiple delimiters in its value. - // ex: dir/dir1/dir2, where delimiter is "/" and prefix is dir/ - String dirName = relativeKeyName.substring(0, relativeKeyName - .indexOf(delimiter)); - if (!dirName.equals(prevDir)) { - response.addPrefix(prefix + dirName + delimiter); - prevDir = dirName; - count++; - } - } else if (relativeKeyName.endsWith(delimiter)) { - // means or key is same as prefix with delimiter at end and ends with - // delimiter. ex: dir/, where prefix is dir and delimiter is / - response.addPrefix(relativeKeyName); - count++; - } else { - // means our key is matched with prefix if prefix is given and it - // does not have any common prefix. 
- addKey(response, next); - count++; - } - } else { - addKey(response, next); - count++; - } - - if (count == maxKeys) { - lastKey = next.getName(); - break; - } - } - - response.setKeyCount(count); - - if (count < maxKeys) { - response.setTruncated(false); - } else if(ozoneKeyIterator.hasNext()) { - response.setTruncated(true); - ContinueToken nextToken = new ContinueToken(lastKey, prevDir); - response.setNextToken(nextToken.encodeToString()); - } else { - response.setTruncated(false); - } - - response.setKeyCount( - response.getCommonPrefixes().size() + response.getContents().size()); - return Response.ok(response).build(); - } - - @PUT - public Response put(@PathParam("bucket") String bucketName, @Context - HttpHeaders httpHeaders) throws IOException, OS3Exception { - - String volumeName = getVolumeName(getAuthenticationHeaderParser(). - getAccessKeyID()); - - String location = createS3Bucket(volumeName, bucketName); - - LOG.info("Location is {}", location); - return Response.status(HttpStatus.SC_OK).header("Location", location) - .build(); - - } - - public Response listMultipartUploads( - @PathParam("bucket") String bucketName, - @QueryParam("prefix") String prefix) - throws OS3Exception, IOException { - - OzoneBucket bucket = getBucket(bucketName); - - OzoneMultipartUploadList ozoneMultipartUploadList = - bucket.listMultipartUploads(prefix); - - ListMultipartUploadsResult result = new ListMultipartUploadsResult(); - result.setBucket(bucketName); - - ozoneMultipartUploadList.getUploads().forEach(upload -> result.addUpload( - new ListMultipartUploadsResult.Upload( - upload.getKeyName(), - upload.getUploadId(), - upload.getCreationTime(), - S3StorageType.fromReplicationType(upload.getReplicationType(), - upload.getReplicationFactor()) - ))); - return Response.ok(result).build(); - } - /** - * Rest endpoint to check the existence of a bucket. - *
    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html - * for more details. - */ - @HEAD - public Response head(@PathParam("bucket") String bucketName) - throws OS3Exception, IOException { - try { - getBucket(bucketName); - } catch (OS3Exception ex) { - LOG.error("Exception occurred in headBucket", ex); - //TODO: use a subclass fo OS3Exception and catch it here. - if (ex.getCode().contains("NoSuchBucket")) { - return Response.status(Status.BAD_REQUEST).build(); - } else { - throw ex; - } - } - return Response.ok().build(); - } - - /** - * Rest endpoint to delete specific bucket. - *
    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETE.html - * for more details. - */ - @DELETE - public Response delete(@PathParam("bucket") String bucketName) - throws IOException, OS3Exception { - - try { - deleteS3Bucket(bucketName); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.BUCKET_NOT_EMPTY) { - throw S3ErrorTable.newError(S3ErrorTable - .BUCKET_NOT_EMPTY, bucketName); - } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable - .NO_SUCH_BUCKET, bucketName); - } else { - throw ex; - } - } - - return Response - .status(HttpStatus.SC_NO_CONTENT) - .build(); - - } - - /** - * Implement multi delete. - *
    - * see: https://docs.aws.amazon - * .com/AmazonS3/latest/API/multiobjectdeleteapi.html - */ - @POST - @Produces(MediaType.APPLICATION_XML) - public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName, - @QueryParam("delete") String delete, - MultiDeleteRequest request) throws OS3Exception, IOException { - OzoneBucket bucket = getBucket(bucketName); - MultiDeleteResponse result = new MultiDeleteResponse(); - if (request.getObjects() != null) { - for (DeleteObject keyToDelete : request.getObjects()) { - try { - bucket.deleteKey(keyToDelete.getKey()); - - if (!request.isQuiet()) { - result.addDeleted(new DeletedObject(keyToDelete.getKey())); - } - } catch (OMException ex) { - if (ex.getResult() != ResultCodes.KEY_NOT_FOUND) { - result.addError( - new Error(keyToDelete.getKey(), "InternalError", - ex.getMessage())); - } else if (!request.isQuiet()) { - result.addDeleted(new DeletedObject(keyToDelete.getKey())); - } - } catch (Exception ex) { - result.addError( - new Error(keyToDelete.getKey(), "InternalError", - ex.getMessage())); - } - } - } - return result; - } - - private void addKey(ListObjectResponse response, OzoneKey next) { - KeyMetadata keyMetadata = new KeyMetadata(); - keyMetadata.setKey(next.getName()); - keyMetadata.setSize(next.getDataSize()); - keyMetadata.setETag("" + next.getModificationTime()); - if (next.getReplicationType().toString().equals(ReplicationType - .STAND_ALONE.toString())) { - keyMetadata.setStorageClass(S3StorageType.REDUCED_REDUNDANCY.toString()); - } else { - keyMetadata.setStorageClass(S3StorageType.STANDARD.toString()); - } - keyMetadata.setLastModified(Instant.ofEpochMilli( - next.getModificationTime())); - response.addKey(keyMetadata); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java deleted file mode 100644 index 6120ad6ec91..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; -import java.util.List; - -/** - * Request for Complete Multipart Upload request. - */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "CompleteMultipartUpload") -public class CompleteMultipartUploadRequest { - - @XmlElement(name = "Part") - private List partList = new ArrayList<>(); - - public List getPartList() { - return partList; - } - - public void setPartList(List partList) { - this.partList = partList; - } - - /** - * JAXB entity for child element. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Part") - public static class Part { - - @XmlElement(name = "PartNumber") - private int partNumber; - - @XmlElement(name = "ETag") - private String eTag; - - public int getPartNumber() { - return partNumber; - } - - public void setPartNumber(int partNumber) { - this.partNumber = partNumber; - } - - public String geteTag() { - return eTag; - } - - public void seteTag(String eTag) { - this.eTag = eTag; - } - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java deleted file mode 100644 index c636f36b175..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; - -/** - * Complete Multipart Upload request response. - */ - -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "CompleteMultipartUploadResult", namespace = - "http://s3.amazonaws.com/doc/2006-03-01/") -public class CompleteMultipartUploadResponse { - - @XmlElement(name = "Location") - private String location; - - @XmlElement(name = "Bucket") - private String bucket; - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "ETag") - private String eTag; - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getETag() { - return eTag; - } - - public void setETag(String tag) { - this.eTag = tag; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java deleted file mode 100644 index f090791937e..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; - -/** - * Copy object Response. 
- */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "ListAllMyBucketsResult", - namespace = "http://s3.amazonaws.com/doc/2006-03-01/") -public class CopyObjectResponse { - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "LastModified") - private Instant lastModified; - - @XmlElement(name = "ETag") - private String eTag; - - - public Instant getLastModified() { - return lastModified; - } - - public void setLastModified(Instant lastModified) { - this.lastModified = lastModified; - } - - public String getETag() { - return eTag; - } - - public void setETag(String tag) { - this.eTag = tag; - } - - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java deleted file mode 100644 index c4e65aa38ff..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; - -import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; - -/** - * Copy object Response. - */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "CopyPartResult", - namespace = "http://s3.amazonaws.com/doc/2006-03-01/") -public class CopyPartResult { - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "LastModified") - private Instant lastModified; - - @XmlElement(name = "ETag") - private String eTag; - - public CopyPartResult() { - } - - public CopyPartResult(String eTag) { - this.eTag = eTag; - this.lastModified = Instant.now(); - } - - public Instant getLastModified() { - return lastModified; - } - - public void setLastModified(Instant lastModified) { - this.lastModified = lastModified; - } - - public String getETag() { - return eTag; - } - - public void setETag(String tag) { - this.eTag = tag; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java deleted file mode 100644 index 19329a409b9..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.inject.Inject; -import javax.ws.rs.NotFoundException; -import java.io.IOException; -import java.util.Iterator; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser; - -import com.google.common.annotations.VisibleForTesting; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Basic helpers for all the REST endpoints. 
- */ -public class EndpointBase { - - private static final Logger LOG = - LoggerFactory.getLogger(EndpointBase.class); - - @Inject - private OzoneClient client; - - @Inject - private AuthenticationHeaderParser authenticationHeaderParser; - - protected OzoneBucket getBucket(String volumeName, String bucketName) - throws IOException { - return getVolume(volumeName).getBucket(bucketName); - } - - protected OzoneBucket getBucket(OzoneVolume volume, String bucketName) - throws OS3Exception, IOException { - OzoneBucket bucket; - try { - bucket = volume.getBucket(bucketName); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName); - } else { - throw ex; - } - } - return bucket; - } - - protected OzoneBucket getBucket(String bucketName) - throws OS3Exception, IOException { - OzoneBucket bucket; - try { - OzoneVolume volume = getVolume(getOzoneVolumeName(bucketName)); - bucket = volume.getBucket(bucketName); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND - || ex.getResult() == ResultCodes.S3_BUCKET_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName); - } else { - throw ex; - } - } - return bucket; - } - - protected OzoneVolume getVolume(String volumeName) throws IOException { - OzoneVolume volume = null; - try { - volume = client.getObjectStore().getVolume(volumeName); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) { - throw new NotFoundException("Volume " + volumeName + " is not found"); - } else { - throw ex; - } - } - return volume; - } - - /** - * Create an S3Bucket, and also it creates mapping needed to access via - * ozone and S3. - * @param userName - * @param bucketName - * @return location of the S3Bucket. - * @throws IOException - */ - protected String createS3Bucket(String userName, String bucketName) throws - IOException { - try { - client.getObjectStore().createS3Bucket(userName, bucketName); - } catch (OMException ex) { - if (ex.getResult() != ResultCodes.S3_BUCKET_ALREADY_EXISTS) { - // S3 does not return error for bucket already exists, it just - // returns the location. - throw ex; - } - } - - // Not required to call as bucketname is same, but calling now in future - // if mapping changes we get right location. - String location = client.getObjectStore().getOzoneBucketName( - bucketName); - return "/"+location; - } - - /** - * Deletes an s3 bucket and removes mapping of Ozone volume/bucket. - * @param s3BucketName - S3 Bucket Name. - * @throws IOException in case the bucket cannot be deleted. - */ - public void deleteS3Bucket(String s3BucketName) - throws IOException { - client.getObjectStore().deleteS3Bucket(s3BucketName); - } - - /** - * Returns the Ozone Namespace for the S3Bucket. It will return the - * OzoneVolume/OzoneBucketName. - * @param s3BucketName - S3 Bucket Name. - * @return String - The Ozone canonical name for this s3 bucket. This - * string is useful for mounting an OzoneFS. - * @throws IOException - Error is throw if the s3bucket does not exist. - */ - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - return client.getObjectStore().getOzoneBucketMapping(s3BucketName); - } - - /** - * Returns the corresponding Ozone volume given an S3 Bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone Volume name. - * @throws IOException - Throws if the s3Bucket does not exist. 
- */ - public String getOzoneVolumeName(String s3BucketName) throws IOException { - return client.getObjectStore().getOzoneVolumeName(s3BucketName); - } - - /** - * Returns the corresponding Ozone bucket name for the given S3 bucket. - * @param s3BucketName - S3Bucket Name. - * @return String - Ozone bucket Name. - * @throws IOException - Throws if the s3bucket does not exist. - */ - public String getOzoneBucketName(String s3BucketName) throws IOException { - return client.getObjectStore().getOzoneBucketName(s3BucketName); - } - - /** - * Returns Iterator to iterate over all buckets for a specific user. - * The result can be restricted using bucket prefix, will return all - * buckets if bucket prefix is null. - * - * @param userName - * @param prefix - * @return {@code Iterator} - */ - public Iterator listS3Buckets(String userName, - String prefix) { - return client.getObjectStore().listS3Buckets(userName, prefix); - } - - /** - * Returns Iterator to iterate over all buckets after prevBucket for a - * specific user. If prevBucket is null it returns an iterator to iterate - * over all buckets for this user. The result can be restricted using - * bucket prefix, will return all buckets if bucket prefix is null. - * - * @param prefix Bucket prefix to match - * @param previousBucket Buckets are listed after this bucket - * @return {@code Iterator} - */ - public Iterator listS3Buckets(String userName, - String prefix, - String previousBucket) { - return client.getObjectStore().listS3Buckets(userName, prefix, - previousBucket); - } - - public AuthenticationHeaderParser getAuthenticationHeaderParser() { - return authenticationHeaderParser; - } - - @VisibleForTesting - public void setAuthenticationHeaderParser(AuthenticationHeaderParser parser) { - this.authenticationHeaderParser = parser; - } - - @VisibleForTesting - public void setClient(OzoneClient ozoneClient) { - this.client = ozoneClient; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java deleted file mode 100644 index b9f87026f73..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListBucketResponse.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata; - -import javax.xml.bind.annotation.*; -import java.util.ArrayList; -import java.util.List; - -/** - * Response from the ListBucket RPC Call. 
- */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "ListAllMyBucketsResult", - namespace = "http://s3.amazonaws.com/doc/2006-03-01/") -public class ListBucketResponse { - @XmlElementWrapper(name = "Buckets") - @XmlElement(name = "Bucket") - private List buckets = new ArrayList<>(); - - public List getBuckets() { - return buckets; - } - - @VisibleForTesting - public int getBucketsNum() { - return buckets.size(); - } - - public void setBuckets(List buckets) { - this.buckets = buckets; - } - - public void addBucket(BucketMetadata bucket) { - buckets.add(bucket); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java deleted file mode 100644 index 20dc9cdaac1..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListMultipartUploadsResult.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; -import org.apache.hadoop.ozone.s3.util.S3StorageType; - -/** - * AWS compatible REST response for list multipart upload. 
- */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "ListMultipartUploadsResult", namespace = - "http://s3.amazonaws.com/doc/2006-03-01/") -public class ListMultipartUploadsResult { - - public static final Owner - NOT_SUPPORTED_OWNER = new Owner("NOT-SUPPORTED", "Not Supported"); - - @XmlElement(name = "Bucket") - private String bucket; - - @XmlElement(name = "KeyMarker") - private String keyMarker; - - @XmlElement(name = "UploadIdMarker") - private String uploadIdMarker; - - @XmlElement(name = "NextKeyMarker") - private String nextKeyMarker; - - @XmlElement(name = "NextUploadIdMarker") - private String nextUploadIdMarker; - - @XmlElement(name = "MaxUploads") - private int maxUploads = 1000; - - @XmlElement(name = "IsTruncated") - private boolean isTruncated = false; - - @XmlElement(name = "Upload") - private List uploads = new ArrayList<>(); - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKeyMarker() { - return keyMarker; - } - - public void setKeyMarker(String keyMarker) { - this.keyMarker = keyMarker; - } - - public String getUploadIdMarker() { - return uploadIdMarker; - } - - public void setUploadIdMarker(String uploadIdMarker) { - this.uploadIdMarker = uploadIdMarker; - } - - public String getNextKeyMarker() { - return nextKeyMarker; - } - - public void setNextKeyMarker(String nextKeyMarker) { - this.nextKeyMarker = nextKeyMarker; - } - - public String getNextUploadIdMarker() { - return nextUploadIdMarker; - } - - public void setNextUploadIdMarker(String nextUploadIdMarker) { - this.nextUploadIdMarker = nextUploadIdMarker; - } - - public int getMaxUploads() { - return maxUploads; - } - - public void setMaxUploads(int maxUploads) { - this.maxUploads = maxUploads; - } - - public boolean isTruncated() { - return isTruncated; - } - - public void setTruncated(boolean truncated) { - isTruncated = truncated; - } - - public List getUploads() { - return uploads; - } - - public void setUploads( - List uploads) { - this.uploads = uploads; - } - - public void addUpload(Upload upload) { - this.uploads.add(upload); - } - - /** - * Upload information. 
- */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Upload") - public static class Upload { - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "UploadId") - private String uploadId; - - @XmlElement(name = "Owner") - private Owner owner = NOT_SUPPORTED_OWNER; - - @XmlElement(name = "Initiator") - private Owner initiator = NOT_SUPPORTED_OWNER; - - @XmlElement(name = "StorageClass") - private String storageClass = "STANDARD"; - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "Initiated") - private Instant initiated; - - public Upload() { - } - - public Upload(String key, String uploadId, Instant initiated) { - this.key = key; - this.uploadId = uploadId; - this.initiated = initiated; - } - - public Upload(String key, String uploadId, Instant initiated, - S3StorageType storageClass) { - this.key = key; - this.uploadId = uploadId; - this.initiated = initiated; - this.storageClass = storageClass.toString(); - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getUploadId() { - return uploadId; - } - - public void setUploadId(String uploadId) { - this.uploadId = uploadId; - } - - public Owner getOwner() { - return owner; - } - - public void setOwner( - Owner owner) { - this.owner = owner; - } - - public Owner getInitiator() { - return initiator; - } - - public void setInitiator( - Owner initiator) { - this.initiator = initiator; - } - - public String getStorageClass() { - return storageClass; - } - - public void setStorageClass(String storageClass) { - this.storageClass = storageClass; - } - - public Instant getInitiated() { - return initiated; - } - - public void setInitiated(Instant initiated) { - this.initiated = initiated; - } - } - - /** - * Upload information. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Owner") - public static class Owner { - - @XmlElement(name = "ID") - private String id; - - @XmlElement(name = "DisplayName") - private String displayName; - - public Owner() { - } - - public Owner(String id, String displayName) { - this.id = id; - this.displayName = displayName; - } - - public String getId() { - return id; - } - - public void setId(String id) { - this.id = id; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java deleted file mode 100644 index adb5f20e301..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.ozone.s3.commontypes.CommonPrefix; -import org.apache.hadoop.ozone.s3.commontypes.KeyMetadata; - -/** - * Response from the ListObject RPC Call. - */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "ListBucketResult", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") -public class ListObjectResponse { - - @XmlElement(name = "Name") - private String name; - - @XmlElement(name = "Prefix") - private String prefix; - - @XmlElement(name = "Marker") - private String marker; - - @XmlElement(name = "MaxKeys") - private int maxKeys; - - @XmlElement(name = "KeyCount") - private int keyCount; - - @XmlElement(name = "Delimiter") - private String delimiter = "/"; - - @XmlElement(name = "EncodingType") - private String encodingType = "url"; - - @XmlElement(name = "IsTruncated") - private boolean isTruncated; - - @XmlElement(name = "NextContinuationToken") - private String nextToken; - - @XmlElement(name = "continueToken") - private String continueToken; - - @XmlElement(name = "Contents") - private List contents = new ArrayList<>(); - - @XmlElement(name = "CommonPrefixes") - private List commonPrefixes = new ArrayList<>(); - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPrefix() { - return prefix; - } - - public void setPrefix(String prefix) { - this.prefix = prefix; - } - - public String getMarker() { - return marker; - } - - public void setMarker(String marker) { - this.marker = marker; - } - - public int getMaxKeys() { - return maxKeys; - } - - public void setMaxKeys(int maxKeys) { - this.maxKeys = maxKeys; - } - - public String getDelimiter() { - return delimiter; - } - - public void setDelimiter(String delimiter) { - this.delimiter = delimiter; - } - - public String getEncodingType() { - return encodingType; - } - - public void setEncodingType(String encodingType) { - this.encodingType = encodingType; - } - - public boolean isTruncated() { - return isTruncated; - } - - public void setTruncated(boolean truncated) { - isTruncated = truncated; - } - - public List getContents() { - return contents; - } - - public void setContents( - List contents) { - this.contents = contents; - } - - public List getCommonPrefixes() { - return commonPrefixes; - } - - public void setCommonPrefixes( - List commonPrefixes) { - this.commonPrefixes = commonPrefixes; - } - - public void addKey(KeyMetadata keyMetadata) { - contents.add(keyMetadata); - } - - public void addPrefix(String relativeKeyName) { - commonPrefixes.add(new CommonPrefix(relativeKeyName)); - } - - public String getNextToken() { - return nextToken; - } - - public void setNextToken(String nextToken) { - this.nextToken = nextToken; - } - - public String getContinueToken() { - return continueToken; - } - - public void setContinueToken(String continueToken) { - this.continueToken = continueToken; - } - - public int getKeyCount() { - return keyCount; - } - - public void setKeyCount(int keyCount) { - this.keyCount = keyCount; - } -} diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java deleted file mode 100644 index fc9da14133c..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; - -/** - * Request for list parts of a multipart upload request. - */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "ListPartsResult", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") -public class ListPartsResponse { - - @XmlElement(name = "Bucket") - private String bucket; - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "UploadId") - private String uploadID; - - @XmlElement(name = "StorageClass") - private String storageClass; - - @XmlElement(name = "PartNumberMarker") - private int partNumberMarker; - - @XmlElement(name = "NextPartNumberMarker") - private int nextPartNumberMarker; - - @XmlElement(name = "MaxParts") - private int maxParts; - - @XmlElement(name = "IsTruncated") - private boolean truncated; - - @XmlElement(name = "Part") - private List partList = new ArrayList<>(); - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getUploadID() { - return uploadID; - } - - public void setUploadID(String uploadID) { - this.uploadID = uploadID; - } - - public String getStorageClass() { - return storageClass; - } - - public void setStorageClass(String storageClass) { - this.storageClass = storageClass; - } - - public int getPartNumberMarker() { - return partNumberMarker; - } - - public void setPartNumberMarker(int partNumberMarker) { - this.partNumberMarker = partNumberMarker; - } - - public int getNextPartNumberMarker() { - return nextPartNumberMarker; - } - - public void setNextPartNumberMarker(int nextPartNumberMarker) { - this.nextPartNumberMarker = nextPartNumberMarker; - } - - public int getMaxParts() { - return maxParts; - } - - public void setMaxParts(int maxParts) { - this.maxParts = maxParts; - } - - public boolean getTruncated() { - return truncated; - } - - public void setTruncated(boolean truncated) { - this.truncated = truncated; - } - - public List getPartList() { - return partList; - } - - public void setPartList(List partList) { - this.partList = partList; - } - - public void addPart(Part part) { - this.partList.add(part); - } - - /** - * Part information. 
- */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Part") - public static class Part { - - @XmlElement(name = "PartNumber") - private int partNumber; - - @XmlJavaTypeAdapter(IsoDateAdapter.class) - @XmlElement(name = "LastModified") - private Instant lastModified; - - @XmlElement(name = "ETag") - private String eTag; - - - @XmlElement(name = "Size") - private long size; - - public int getPartNumber() { - return partNumber; - } - - public void setPartNumber(int partNumber) { - this.partNumber = partNumber; - } - - public Instant getLastModified() { - return lastModified; - } - - public void setLastModified(Instant lastModified) { - this.lastModified = lastModified; - } - - public String getETag() { - return eTag; - } - - public void setETag(String tag) { - this.eTag = tag; - } - - public long getSize() { - return size; - } - - public void setSize(long size) { - this.size = size; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java deleted file mode 100644 index 45b8322de52..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; -import java.util.List; - -/** - * Request for multi object delete request. - */ - -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "Delete", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") -public class MultiDeleteRequest { - - @XmlElement(name = "Quiet") - private Boolean quiet = Boolean.FALSE; - - @XmlElement(name = "Object") - private List objects = new ArrayList<>(); - - public boolean isQuiet() { - return quiet; - } - - public void setQuiet(boolean quiet) { - this.quiet = quiet; - } - - public List getObjects() { - return objects; - } - - public void setObjects( - List objects) { - this.objects = objects; - } - - /** - * JAXB entity for child element. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Object", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") - public static class DeleteObject { - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "VersionId") - private String versionId; - - public DeleteObject() { - } - - public DeleteObject(String key) { - this.key = key; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getVersionId() { - return versionId; - } - - public void setVersionId(String versionId) { - this.versionId = versionId; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java deleted file mode 100644 index e8ed5159229..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.Produces; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; -import javax.ws.rs.ext.Provider; -import javax.xml.bind.JAXBContext; -import javax.xml.bind.UnmarshallerHandler; -import javax.xml.parsers.SAXParserFactory; -import java.io.IOException; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -import org.xml.sax.InputSource; -import org.xml.sax.XMLReader; - -/** - * Custom unmarshaller to read MultiDeleteRequest w/wo namespace. - */ -@Provider -@Produces(MediaType.APPLICATION_XML) -public class MultiDeleteRequestUnmarshaller - implements MessageBodyReader { - - private final JAXBContext context; - private final XMLReader xmlReader; - - public MultiDeleteRequestUnmarshaller() { - try { - context = JAXBContext.newInstance(MultiDeleteRequest.class); - SAXParserFactory saxParserFactory = SAXParserFactory.newInstance(); - xmlReader = saxParserFactory.newSAXParser().getXMLReader(); - } catch (Exception ex) { - throw new AssertionError("Can't instantiate MultiDeleteRequest parser", - ex); - } - } - - @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return type.equals(MultiDeleteRequest.class); - } - - @Override - public MultiDeleteRequest readFrom(Class type, - Type genericType, Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream entityStream) - throws IOException, WebApplicationException { - try { - UnmarshallerHandler unmarshallerHandler = - context.createUnmarshaller().getUnmarshallerHandler(); - - XmlNamespaceFilter filter = - new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/"); - filter.setContentHandler(unmarshallerHandler); - filter.setParent(xmlReader); - filter.parse(new InputSource(entityStream)); - return (MultiDeleteRequest) unmarshallerHandler.getResult(); - } catch (Exception e) { - throw new WebApplicationException("Can't parse request body to XML.", e); - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java deleted file mode 100644 index f2e21e6cb54..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteResponse.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; -import java.util.ArrayList; -import java.util.List; - -/** - * Response for multi object delete request. - */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "DeleteResult", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") -public class MultiDeleteResponse { - - @XmlElement(name = "Deleted") - private List deletedObjects = new ArrayList<>(); - - @XmlElement(name = "Error") - private List errors = new ArrayList<>(); - - public void addDeleted(DeletedObject deletedObject) { - deletedObjects.add(deletedObject); - } - - public void addError(Error error) { - errors.add(error); - } - - public List getDeletedObjects() { - return deletedObjects; - } - - public void setDeletedObjects( - List deletedObjects) { - this.deletedObjects = deletedObjects; - } - - public List getErrors() { - return errors; - } - - public void setErrors( - List errors) { - this.errors = errors; - } - - /** - * JAXB entity for child element. - */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Deleted", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") - public static class DeletedObject { - - @XmlElement(name = "Key") - private String key; - - private String versionId; - - public DeletedObject() { - } - - public DeletedObject(String key) { - this.key = key; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getVersionId() { - return versionId; - } - - public void setVersionId(String versionId) { - this.versionId = versionId; - } - } - - /** - * JAXB entity for child element. 
- */ - @XmlAccessorType(XmlAccessType.FIELD) - @XmlRootElement(name = "Error", namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/") - public static class Error { - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "Code") - private String code; - - @XmlElement(name = "Message") - private String message; - - public Error() { - } - - public Error(String key, String code, String message) { - this.key = key; - this.code = code; - this.message = message; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getCode() { - return code; - } - - public void setCode(String code) { - this.code = code; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java deleted file mode 100644 index c038820c0fe..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartUploadInitiateResponse.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlRootElement; - - -/** - * Response for Initiate Multipart Upload request. 
- */ -@XmlAccessorType(XmlAccessType.FIELD) -@XmlRootElement(name = "InitiateMultipartUploadResult", - namespace = "http://s3.amazonaws.com/doc/2006-03-01/") -public class MultipartUploadInitiateResponse { - - @XmlElement(name = "Bucket") - private String bucket; - - @XmlElement(name = "Key") - private String key; - - @XmlElement(name = "UploadId") - private String uploadID; - - public String getBucket() { - return bucket; - } - - public void setBucket(String bucket) { - this.bucket = bucket; - } - - public String getKey() { - return key; - } - - public void setKey(String key) { - this.key = key; - } - - public String getUploadID() { - return uploadID; - } - - public void setUploadID(String uploadID) { - this.uploadID = uploadID; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java deleted file mode 100644 index b947c9ee520..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ /dev/null @@ -1,766 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.Consumes; -import javax.ws.rs.DELETE; -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.HEAD; -import javax.ws.rs.HeaderParam; -import javax.ws.rs.POST; -import javax.ws.rs.PUT; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.core.Context; -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.ResponseBuilder; -import javax.ws.rs.core.Response.Status; -import javax.ws.rs.core.StreamingOutput; -import java.io.IOException; -import java.io.InputStream; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.s3.HeaderPreprocessor; -import org.apache.hadoop.ozone.s3.SignedChunksInputStream; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.ozone.s3.io.S3WrapperInputStream; -import org.apache.hadoop.ozone.s3.util.RFC1123Util; -import org.apache.hadoop.ozone.s3.util.RangeHeader; -import org.apache.hadoop.ozone.s3.util.RangeHeaderParserUtil; -import org.apache.hadoop.ozone.s3.util.S3StorageType; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.util.Time; - -import com.google.common.annotations.VisibleForTesting; -import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; -import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; -import org.apache.commons.io.IOUtils; - -import org.apache.commons.lang3.tuple.Pair; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ENTITY_TOO_SMALL; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD; -import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE; -import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT; -import 
static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import org.apache.http.HttpStatus; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Key level rest endpoints. - */ -@Path("/{bucket}/{path:.+}") -public class ObjectEndpoint extends EndpointBase { - - private static final Logger LOG = - LoggerFactory.getLogger(ObjectEndpoint.class); - - @Context - private HttpHeaders headers; - - private List<String> customizableGetHeaders = new ArrayList<>(); - - public ObjectEndpoint() { - customizableGetHeaders.add("Content-Type"); - customizableGetHeaders.add("Content-Language"); - customizableGetHeaders.add("Expires"); - customizableGetHeaders.add("Cache-Control"); - customizableGetHeaders.add("Content-Disposition"); - customizableGetHeaders.add("Content-Encoding"); - } - - /** - * Rest endpoint to upload object to a bucket. - *

    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for - * more details. - */ - @PUT - public Response put( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @HeaderParam("Content-Length") long length, - @QueryParam("partNumber") int partNumber, - @QueryParam("uploadId") @DefaultValue("") String uploadID, - InputStream body) throws IOException, OS3Exception { - - OzoneOutputStream output = null; - - if (uploadID != null && !uploadID.equals("")) { - // If uploadID is specified, it is a request for upload part - return createMultipartKey(bucketName, keyPath, length, - partNumber, uploadID, body); - } - - try { - String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); - String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); - - ReplicationType replicationType; - ReplicationFactor replicationFactor; - boolean storageTypeDefault; - if (storageType == null || storageType.equals("")) { - replicationType = S3StorageType.getDefault().getType(); - replicationFactor = S3StorageType.getDefault().getFactor(); - storageTypeDefault = true; - } else { - try { - replicationType = S3StorageType.valueOf(storageType).getType(); - replicationFactor = S3StorageType.valueOf(storageType).getFactor(); - } catch (IllegalArgumentException ex) { - throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, - storageType); - } - storageTypeDefault = false; - } - - if (copyHeader != null) { - //Copy object, as copy source available. - CopyObjectResponse copyObjectResponse = copyObject( - copyHeader, bucketName, keyPath, replicationType, - replicationFactor, storageTypeDefault); - return Response.status(Status.OK).entity(copyObjectResponse).header( - "Connection", "close").build(); - } - - // Normal put object - OzoneBucket bucket = getBucket(bucketName); - - output = bucket.createKey(keyPath, length, replicationType, - replicationFactor, new HashMap<>()); - - if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new SignedChunksInputStream(body); - } - - IOUtils.copy(body, output); - - return Response.ok().status(HttpStatus.SC_OK) - .build(); - } catch (IOException ex) { - LOG.error("Exception occurred in PutObject", ex); - throw ex; - } finally { - if (output != null) { - output.close(); - } - } - } - - /** - * Rest endpoint to download object from a bucket, if query param uploadId - * is specified, request for list parts of a multipart upload key with - * specific uploadId. - *
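// A sketch of how the PUT handler above routes a request; the literal values
// of COPY_SOURCE_HEADER and STORAGE_CLASS_HEADER are assumed to be the
// standard AWS header names "x-amz-copy-source" and "x-amz-storage-class":
//
//   ?uploadId=<id> present            -> createMultipartKey(...)   (upload one part)
//   x-amz-copy-source header present  -> copyObject(...)           (server-side copy)
//   otherwise                         -> bucket.createKey(...)     (plain upload)
//
//   x-amz-content-sha256 == "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
//                                     -> body is wrapped in SignedChunksInputStream
//                                        before it is copied into the key.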

    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html - * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html - * for more details. - */ - @GET - public Response get( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @QueryParam("uploadId") String uploadId, - @QueryParam("max-parts") @DefaultValue("1000") int maxParts, - @QueryParam("part-number-marker") String partNumberMarker, - InputStream body) throws IOException, OS3Exception { - try { - - if (uploadId != null) { - // When we have uploadId, this is the request for list Parts. - int partMarker = 0; - if (partNumberMarker != null) { - partMarker = Integer.parseInt(partNumberMarker); - } - return listParts(bucketName, keyPath, uploadId, - partMarker, maxParts); - } - - OzoneBucket bucket = getBucket(bucketName); - - OzoneKeyDetails keyDetails = bucket.getKey(keyPath); - - long length = keyDetails.getDataSize(); - - LOG.debug("Data length of the key {} is {}", keyPath, length); - - String rangeHeaderVal = headers.getHeaderString(RANGE_HEADER); - RangeHeader rangeHeader = null; - - LOG.debug("range Header provided value is {}", rangeHeaderVal); - - if (rangeHeaderVal != null) { - rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, - length); - LOG.debug("range Header provided value is {}", rangeHeader); - if (rangeHeader.isInValidRange()) { - OS3Exception exception = S3ErrorTable.newError(S3ErrorTable - .INVALID_RANGE, rangeHeaderVal); - throw exception; - } - } - ResponseBuilder responseBuilder; - - if (rangeHeaderVal == null || rangeHeader.isReadFull()) { - StreamingOutput output = dest -> { - try (OzoneInputStream key = bucket.readKey(keyPath)) { - IOUtils.copy(key, dest); - } - }; - responseBuilder = Response - .ok(output) - .header(CONTENT_LENGTH, keyDetails.getDataSize()); - - } else { - LOG.debug("range Header provided value is {}", rangeHeader); - OzoneInputStream key = bucket.readKey(keyPath); - - long startOffset = rangeHeader.getStartOffset(); - long endOffset = rangeHeader.getEndOffset(); - long copyLength; - if (startOffset == endOffset) { - // if range header is given as bytes=0-0, then we should return 1 - // byte from start offset - copyLength = 1; - } else { - copyLength = rangeHeader.getEndOffset() - rangeHeader - .getStartOffset() + 1; - } - StreamingOutput output = dest -> { - try (S3WrapperInputStream s3WrapperInputStream = - new S3WrapperInputStream( - key.getInputStream())) { - IOUtils.copyLarge(s3WrapperInputStream, dest, startOffset, - copyLength); - } - }; - responseBuilder = Response - .ok(output) - .header(CONTENT_LENGTH, copyLength); - - String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " + - rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() + - "/" + length; - - responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal); - } - responseBuilder.header(ACCEPT_RANGE_HEADER, - RANGE_HEADER_SUPPORTED_UNIT); - for (String responseHeader : customizableGetHeaders) { - String headerValue = headers.getHeaderString(responseHeader); - if (headerValue != null) { - responseBuilder.header(responseHeader, headerValue); - } - } - addLastModifiedDate(responseBuilder, keyDetails); - return responseBuilder.build(); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable - .NO_SUCH_KEY, keyPath); - } else { - throw ex; - } - } - } - - private void addLastModifiedDate( - ResponseBuilder responseBuilder, OzoneKeyDetails key) { - - ZonedDateTime 
lastModificationTime = - Instant.ofEpochMilli(key.getModificationTime()) - .atZone(ZoneId.of("GMT")); - - responseBuilder - .header(LAST_MODIFIED, - RFC1123Util.FORMAT.format(lastModificationTime)); - } - - /** - * Rest endpoint to check existence of an object in a bucket. - *
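// A minimal sketch of the range arithmetic used by get() above, assuming a
// 10-byte key, the request header "Range: bytes=0-0", and that
// RANGE_HEADER_SUPPORTED_UNIT is "bytes":
//
//   long startOffset = 0, endOffset = 0, length = 10;
//   long copyLength = (startOffset == endOffset)
//       ? 1                               // bytes=0-0 still returns one byte
//       : endOffset - startOffset + 1;    // otherwise the range is inclusive
//   String contentRange = "bytes " + startOffset + "-" + endOffset + "/" + length;
//   // copyLength == 1, contentRange == "bytes 0-0/10"; Content-Length is set
//   // to copyLength and Content-Range to contentRange in the ranged response.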

    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html - * for more details. - */ - @HEAD - public Response head( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath) throws Exception { - OzoneKeyDetails key; - - try { - key = getBucket(bucketName).getKey(keyPath); - // TODO: return the specified range bytes of this object. - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { - // Just return 404 with no content - return Response.status(Status.NOT_FOUND).build(); - } else { - throw ex; - } - } - - ResponseBuilder response = Response.ok().status(HttpStatus.SC_OK) - .header("ETag", "" + key.getModificationTime()) - .header("Content-Length", key.getDataSize()) - .header("Content-Type", "binary/octet-stream"); - addLastModifiedDate(response, key); - return response - .build(); - } - - /** - * Abort multipart upload request. - * @param bucket - * @param key - * @param uploadId - * @return Response - * @throws IOException - * @throws OS3Exception - */ - private Response abortMultipartUpload(String bucket, String key, String - uploadId) throws IOException, OS3Exception { - try { - OzoneBucket ozoneBucket = getBucket(bucket); - ozoneBucket.abortMultipartUpload(key, uploadId); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId); - } - throw ex; - } - return Response - .status(Status.NO_CONTENT) - .build(); - } - - - /** - * Delete a specific object from a bucket, if query param uploadId is - * specified, this request is for abort multipart upload. - *

    - * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html - * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html - * for more details. - */ - @DELETE - @SuppressWarnings("emptyblock") - public Response delete( - @PathParam("bucket") String bucketName, - @PathParam("path") String keyPath, - @QueryParam("uploadId") @DefaultValue("") String uploadId) throws - IOException, OS3Exception { - - try { - if (uploadId != null && !uploadId.equals("")) { - return abortMultipartUpload(bucketName, keyPath, uploadId); - } - OzoneBucket bucket = getBucket(bucketName); - bucket.getKey(keyPath); - bucket.deleteKey(keyPath); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable - .NO_SUCH_BUCKET, bucketName); - } else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { - //NOT_FOUND is not a problem, AWS doesn't throw exception for missing - // keys. Just return 204 - } else { - throw ex; - } - - } - return Response - .status(Status.NO_CONTENT) - .build(); - - } - - /** - * Initialize MultiPartUpload request. - *

    - * Note: the specific content type is set by the HeaderPreprocessor. - */ - @POST - @Produces(MediaType.APPLICATION_XML) - @Consumes(HeaderPreprocessor.MULTIPART_UPLOAD_MARKER) - public Response initializeMultipartUpload( - @PathParam("bucket") String bucket, - @PathParam("path") String key - ) - throws IOException, OS3Exception { - try { - OzoneBucket ozoneBucket = getBucket(bucket); - String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER); - - ReplicationType replicationType; - ReplicationFactor replicationFactor; - if (storageType == null || storageType.equals("")) { - replicationType = S3StorageType.getDefault().getType(); - replicationFactor = S3StorageType.getDefault().getFactor(); - } else { - try { - replicationType = S3StorageType.valueOf(storageType).getType(); - replicationFactor = S3StorageType.valueOf(storageType).getFactor(); - } catch (IllegalArgumentException ex) { - throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, - storageType); - } - } - - OmMultipartInfo multipartInfo = ozoneBucket - .initiateMultipartUpload(key, replicationType, replicationFactor); - - MultipartUploadInitiateResponse multipartUploadInitiateResponse = new - MultipartUploadInitiateResponse(); - - multipartUploadInitiateResponse.setBucket(bucket); - multipartUploadInitiateResponse.setKey(key); - multipartUploadInitiateResponse.setUploadID(multipartInfo.getUploadID()); - - return Response.status(Status.OK).entity( - multipartUploadInitiateResponse).build(); - } catch (IOException ex) { - LOG.error("Error in Initiate Multipart Upload Request for bucket: " + - bucket + ", key: " + key, ex); - throw ex; - } - } - - /** - * Complete a multipart upload. - */ - @POST - @Produces(MediaType.APPLICATION_XML) - public Response completeMultipartUpload(@PathParam("bucket") String bucket, - @PathParam("path") String key, - @QueryParam("uploadId") @DefaultValue("") String uploadID, - CompleteMultipartUploadRequest multipartUploadRequest) - throws IOException, OS3Exception { - OzoneBucket ozoneBucket = getBucket(bucket); - Map partsMap = new TreeMap<>(); - List partList = - multipartUploadRequest.getPartList(); - - for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); - } - - LOG.debug("Parts map {}", partsMap.toString()); - - OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; - try { - omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload( - key, uploadID, partsMap); - CompleteMultipartUploadResponse completeMultipartUploadResponse = - new CompleteMultipartUploadResponse(); - completeMultipartUploadResponse.setBucket(bucket); - completeMultipartUploadResponse.setKey(key); - completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo - .getHash()); - // Location also setting as bucket name. 
- completeMultipartUploadResponse.setLocation(bucket); - return Response.status(Status.OK).entity(completeMultipartUploadResponse) - .build(); - } catch (OMException ex) { - LOG.error("Error in Complete Multipart Upload Request for bucket: " + - bucket + ", key: " + key, ex); - if (ex.getResult() == ResultCodes.MISMATCH_MULTIPART_LIST) { - OS3Exception oex = - S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key); - throw oex; - } else if (ex.getResult() == ResultCodes.MISSING_UPLOAD_PARTS) { - OS3Exception oex = - S3ErrorTable.newError(S3ErrorTable.INVALID_PART_ORDER, key); - throw oex; - } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - OS3Exception os3Exception = S3ErrorTable.newError(NO_SUCH_UPLOAD, - uploadID); - throw os3Exception; - } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) { - OS3Exception os3Exception = S3ErrorTable.newError(ENTITY_TOO_SMALL, - key); - throw os3Exception; - } - throw ex; - } - } - - private Response createMultipartKey(String bucket, String key, long length, - int partNumber, String uploadID, - InputStream body) - throws IOException, OS3Exception { - try { - OzoneBucket ozoneBucket = getBucket(bucket); - OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( - key, length, partNumber, uploadID); - - String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); - if (copyHeader != null) { - Pair result = parseSourceHeader(copyHeader); - - String sourceBucket = result.getLeft(); - String sourceKey = result.getRight(); - - try (OzoneInputStream sourceObject = - getBucket(sourceBucket).readKey(sourceKey)) { - - String range = - headers.getHeaderString(COPY_SOURCE_HEADER_RANGE); - if (range != null) { - RangeHeader rangeHeader = - RangeHeaderParserUtil.parseRangeHeader(range, 0); - IOUtils.copyLarge(sourceObject, ozoneOutputStream, - rangeHeader.getStartOffset(), - rangeHeader.getEndOffset() - rangeHeader.getStartOffset()); - - } else { - IOUtils.copy(sourceObject, ozoneOutputStream); - } - } - - } else { - IOUtils.copy(body, ozoneOutputStream); - } - ozoneOutputStream.close(); - OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = - ozoneOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); - - if (copyHeader != null) { - return Response.ok(new CopyPartResult(eTag)).build(); - } else { - return Response.ok().header("ETag", - eTag).build(); - } - - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - throw S3ErrorTable.newError(NO_SUCH_UPLOAD, - uploadID); - } - throw ex; - } - - } - - /** - * Returns response for the listParts request. 
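// Sketch of the two ways createMultipartKey() above obtains the part body;
// the literal values of COPY_SOURCE_HEADER and COPY_SOURCE_HEADER_RANGE are
// assumed to be the standard AWS names "x-amz-copy-source" and
// "x-amz-copy-source-range":
//
//   copy header absent   -> the request body is streamed into the part
//   copy header present  -> the referenced source key is read and streamed in,
//                           optionally restricted by the copy-source range
//
// Either way, the ETag handed back to the client is the part name reported by
// OmMultipartCommitUploadPartInfo once the part's output stream is closed.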
- * See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html - * @param bucket - * @param key - * @param uploadID - * @param partNumberMarker - * @param maxParts - * @return - * @throws IOException - * @throws OS3Exception - */ - private Response listParts(String bucket, String key, String uploadID, - int partNumberMarker, int maxParts) throws IOException, OS3Exception { - ListPartsResponse listPartsResponse = new ListPartsResponse(); - try { - OzoneBucket ozoneBucket = getBucket(bucket); - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - ozoneBucket.listParts(key, uploadID, partNumberMarker, maxParts); - listPartsResponse.setBucket(bucket); - listPartsResponse.setKey(key); - listPartsResponse.setUploadID(uploadID); - listPartsResponse.setMaxParts(maxParts); - listPartsResponse.setPartNumberMarker(partNumberMarker); - listPartsResponse.setTruncated(false); - - listPartsResponse.setStorageClass(S3StorageType.fromReplicationType( - ozoneMultipartUploadPartListParts.getReplicationType(), - ozoneMultipartUploadPartListParts.getReplicationFactor()).toString()); - - if (ozoneMultipartUploadPartListParts.isTruncated()) { - listPartsResponse.setTruncated( - ozoneMultipartUploadPartListParts.isTruncated()); - listPartsResponse.setNextPartNumberMarker( - ozoneMultipartUploadPartListParts.getNextPartNumberMarker()); - } - - ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { - ListPartsResponse.Part part = new ListPartsResponse.Part(); - part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); - part.setSize(partInfo.getSize()); - part.setLastModified(Instant.ofEpochMilli( - partInfo.getModificationTime())); - listPartsResponse.addPart(part); - }); - - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - throw S3ErrorTable.newError(NO_SUCH_UPLOAD, - uploadID); - } - throw ex; - } - return Response.status(Status.OK).entity(listPartsResponse).build(); - } - - @VisibleForTesting - public void setHeaders(HttpHeaders headers) { - this.headers = headers; - } - - private CopyObjectResponse copyObject(String copyHeader, - String destBucket, - String destkey, - ReplicationType replicationType, - ReplicationFactor replicationFactor, - boolean storageTypeDefault) - throws OS3Exception, IOException { - - Pair result = parseSourceHeader(copyHeader); - - String sourceBucket = result.getLeft(); - String sourceKey = result.getRight(); - OzoneInputStream sourceInputStream = null; - OzoneOutputStream destOutputStream = null; - boolean closed = false; - try { - // Checking whether we trying to copying to it self. 
- - if (sourceBucket.equals(destBucket)) { - if (sourceKey.equals(destkey)) { - // When copying to same storage type when storage type is provided, - // we should not throw exception, as aws cli checks if any of the - // options like storage type are provided or not when source and - // dest are given same - if (storageTypeDefault) { - OS3Exception ex = S3ErrorTable.newError(S3ErrorTable - .INVALID_REQUEST, copyHeader); - ex.setErrorMessage("This copy request is illegal because it is " + - "trying to copy an object to it self itself without changing " + - "the object's metadata, storage class, website redirect " + - "location or encryption attributes."); - throw ex; - } else { - // TODO: Actually here we should change storage type, as ozone - // still does not support this just returning dummy response - // for now - CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); - copyObjectResponse.setLastModified(Instant.ofEpochMilli( - Time.now())); - return copyObjectResponse; - } - } - } - - - OzoneBucket sourceOzoneBucket = getBucket(sourceBucket); - OzoneBucket destOzoneBucket = getBucket(destBucket); - - OzoneKeyDetails sourceKeyDetails = sourceOzoneBucket.getKey(sourceKey); - long sourceKeyLen = sourceKeyDetails.getDataSize(); - - sourceInputStream = sourceOzoneBucket.readKey(sourceKey); - - destOutputStream = destOzoneBucket.createKey(destkey, sourceKeyLen, - replicationType, replicationFactor, new HashMap<>()); - - IOUtils.copy(sourceInputStream, destOutputStream); - - // Closing here, as if we don't call close this key will not commit in - // OM, and getKey fails. - sourceInputStream.close(); - destOutputStream.close(); - closed = true; - - OzoneKeyDetails destKeyDetails = destOzoneBucket.getKey(destkey); - - CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); - copyObjectResponse.setLastModified(Instant.ofEpochMilli(destKeyDetails - .getModificationTime())); - return copyObjectResponse; - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, sourceKey); - } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { - throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket); - } - throw ex; - } finally { - if (!closed) { - if (sourceInputStream != null) { - sourceInputStream.close(); - } - if (destOutputStream != null) { - destOutputStream.close(); - } - } - } - } - - /** - * Parse the key and bucket name from copy header. 
- */ - @VisibleForTesting - public static Pair parseSourceHeader(String copyHeader) - throws OS3Exception { - String header = copyHeader; - if (header.startsWith("/")) { - header = copyHeader.substring(1); - } - int pos = header.indexOf("/"); - if (pos == -1) { - OS3Exception ex = S3ErrorTable.newError(S3ErrorTable - .INVALID_ARGUMENT, header); - ex.setErrorMessage("Copy Source must mention the source bucket and " + - "key: sourcebucket/sourcekey"); - throw ex; - } - - return Pair.of(header.substring(0, pos), header.substring(pos + 1)); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java deleted file mode 100644 index 599b4731947..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/PlainTextMultipartUploadReader.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.Consumes; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.ext.MessageBodyReader; -import javax.ws.rs.ext.Provider; -import java.io.IOException; -import java.io.InputStream; -import java.lang.annotation.Annotation; -import java.lang.reflect.Type; - -/** - * Body reader to accept plain text MPU. - *

    - * The AWS S3 client sends the multipart upload request with the content - * type 'text/plain' when 'aws s3 cp' is used (instead of 'aws s3api'). - *

    - * Our generic ObjectEndpoint.multipartUpload has a - * CompleteMultipartUploadRequest parameter, which is required only for the - * completion request. - *

    - * But JaxRS tries to parse it from the body for the requests and in case of - * text/plain requests this parsing is failed. This simple BodyReader enables - * to parse an empty text/plain message and return with an empty completion - * request. - */ -@Provider -@Consumes("text/plain") -public class PlainTextMultipartUploadReader - implements MessageBodyReader { - - @Override - public boolean isReadable(Class type, Type genericType, - Annotation[] annotations, MediaType mediaType) { - return type.equals(CompleteMultipartUploadRequest.class) - && mediaType.equals(MediaType.TEXT_PLAIN_TYPE); - } - - @Override - public CompleteMultipartUploadRequest readFrom( - Class type, Type genericType, - Annotation[] annotations, MediaType mediaType, - MultivaluedMap httpHeaders, InputStream entityStream) - throws IOException, WebApplicationException { - return new CompleteMultipartUploadRequest(); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java deleted file mode 100644 index 23d02e9f2b9..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.Status; -import java.io.IOException; -import java.time.Instant; -import java.util.Iterator; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.s3.commontypes.BucketMetadata; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import static org.apache.hadoop.ozone.s3.util.OzoneS3Util.getVolumeName; - -/** - * Top level rest endpoint. - */ -@Path("/") -public class RootEndpoint extends EndpointBase { - - private static final Logger LOG = - LoggerFactory.getLogger(RootEndpoint.class); - - /** - * Rest endpoint to list all the buckets of the current user. - * - * See https://docs.aws.amazon.com/AmazonS3/latest/API/RESTServiceGET.html - * for more details. - */ - @GET - public Response get() - throws OS3Exception, IOException { - OzoneVolume volume; - ListBucketResponse response = new ListBucketResponse(); - - AuthenticationHeaderParser authenticationHeaderParser = - getAuthenticationHeaderParser(); - - if (!authenticationHeaderParser.doesAuthenticationInfoExists()) { - return Response.status(Status.TEMPORARY_REDIRECT) - .header("Location", "/static/") - .build(); - } - String volumeName = getVolumeName(authenticationHeaderParser. - getAccessKeyID()); - Iterator bucketIterator = listS3Buckets(volumeName, - null); - - while (bucketIterator.hasNext()) { - OzoneBucket next = bucketIterator.next(); - BucketMetadata bucketMetadata = new BucketMetadata(); - bucketMetadata.setName(next.getName()); - bucketMetadata.setCreationDate(Instant.ofEpochMilli(next - .getCreationTime())); - response.addBucket(bucketMetadata); - } - - return Response.ok(response).build(); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java deleted file mode 100644 index a49ecf61240..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import org.xml.sax.Attributes; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.XMLFilterImpl; - -/** - * SAX filter to force namespace usage. - *

    - * This filter will read the XML content as namespace qualified content - * independent from the current namespace usage. - */ -public class XmlNamespaceFilter extends XMLFilterImpl { - - private String namespace; - - /** - * Create the filter. - * - * @param namespace to add to every elements. - */ - public XmlNamespaceFilter(String namespace) { - this.namespace = namespace; - } - - @Override - public void startElement(String uri, String localName, String qName, - Attributes atts) throws SAXException { - super.startElement(namespace, localName, qName, atts); - } - - @Override - public void endElement(String uri, String localName, String qName) - throws SAXException { - super.endElement(namespace, localName, qName); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java deleted file mode 100644 index c55cdf4ecd4..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Rest endpoint implementation for the s3 gateway. - */ -@javax.xml.bind.annotation.XmlSchema( - namespace = "http://s3.amazonaws" - + ".com/doc/2006-03-01/", elementFormDefault = - javax.xml.bind.annotation.XmlNsForm.QUALIFIED, - xmlns = { - @javax.xml.bind.annotation.XmlNs(namespaceURI = "http://s3.amazonaws" - + ".com/doc/2006-03-01/", prefix = "")}) - -package org.apache.hadoop.ozone.s3.endpoint; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java deleted file mode 100644 index 722a4a1bd29..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
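// Sketch of how a filter like XmlNamespaceFilter above is typically wired for
// unmarshalling; the wiring itself is assumed, only the namespace string comes
// from the package-info above:
//
//   XMLReader reader = SAXParserFactory.newInstance()
//       .newSAXParser().getXMLReader();
//   XmlNamespaceFilter filter =
//       new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/");
//   filter.setParent(reader);
//   // 'filter' can now back a SAXSource, and every element it reports is
//   // qualified with the S3 namespace regardless of the input document.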
- */ -package org.apache.hadoop.ozone.s3.exception; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; -import com.fasterxml.jackson.dataformat.xml.XmlMapper; -import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.xml.bind.annotation.XmlAccessType; -import javax.xml.bind.annotation.XmlAccessorType; -import javax.xml.bind.annotation.XmlElement; -import javax.xml.bind.annotation.XmlTransient; -import javax.xml.bind.annotation.XmlRootElement; - - -/** - * This class represents exceptions raised from Ozone S3 service. - * - * Ref:https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - */ -@XmlRootElement(name = "Error") -@XmlAccessorType(XmlAccessType.NONE) -public class OS3Exception extends Exception { - private static final Logger LOG = - LoggerFactory.getLogger(OS3Exception.class); - private static ObjectMapper mapper; - - static { - mapper = new XmlMapper(); - mapper.registerModule(new JaxbAnnotationModule()); - mapper.enable(SerializationFeature.INDENT_OUTPUT); - } - @XmlElement(name = "Code") - private String code; - - @XmlElement(name = "Message") - private String errorMessage; - - @XmlElement(name = "Resource") - private String resource; - - @XmlElement(name = "RequestId") - private String requestId; - - @XmlTransient - private int httpCode; - - public OS3Exception() { - //Added for JaxB. - } - - /** - * Create an object OS3Exception. - * @param codeVal - * @param messageVal - * @param requestIdVal - * @param resourceVal - */ - public OS3Exception(String codeVal, String messageVal, String requestIdVal, - String resourceVal) { - this.code = codeVal; - this.errorMessage = messageVal; - this.requestId = requestIdVal; - this.resource = resourceVal; - } - - /** - * Create an object OS3Exception. - * @param codeVal - * @param messageVal - * @param httpCode - */ - public OS3Exception(String codeVal, String messageVal, int httpCode) { - this.code = codeVal; - this.errorMessage = messageVal; - this.httpCode = httpCode; - } - - public String getCode() { - return code; - } - - public void setCode(String code) { - this.code = code; - } - - public String getErrorMessage() { - return errorMessage; - } - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - public String getRequestId() { - return requestId; - } - - public void setRequestId(String requestId) { - this.requestId = requestId; - } - - public String getResource() { - return resource; - } - - public void setResource(String resource) { - this.resource = resource; - } - - public int getHttpCode() { - return httpCode; - } - - public void setHttpCode(int httpCode) { - this.httpCode = httpCode; - } - - public String toXml() { - try { - String val = mapper.writeValueAsString(this); - LOG.debug("toXml val is {}", val); - String xmlLine = "\n" - + val; - return xmlLine; - } catch (Exception ex) { - LOG.error("Exception occurred {}", ex); - } - - //When we get exception log it, and return exception as xml from actual - // exception data. So, falling back to construct from exception. 
- String formatString = "" + - "" + - "%s" + - "%s" + - "%s" + - "%s" + - ""; - return String.format(formatString, this.getCode(), - this.getErrorMessage(), this.getResource(), - this.getRequestId()); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java deleted file mode 100644 index 588dafae86a..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.exception; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.inject.Inject; -import javax.ws.rs.core.Response; -import javax.ws.rs.ext.ExceptionMapper; -import javax.ws.rs.ext.Provider; - -import org.apache.hadoop.ozone.s3.RequestIdentifier; - -/** - * Class the represents various errors returned by the - * Ozone S3 service. - */ -@Provider -public class OS3ExceptionMapper implements ExceptionMapper { - - private static final Logger LOG = - LoggerFactory.getLogger(OS3ExceptionMapper.class); - - @Inject - private RequestIdentifier requestIdentifier; - - @Override - public Response toResponse(OS3Exception exception) { - if (LOG.isDebugEnabled()) { - LOG.debug("Returning exception. ex: {}", exception.toString()); - } - exception.setRequestId(requestIdentifier.getRequestId()); - return Response.status(exception.getHttpCode()) - .entity(exception.toXml()).build(); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java deleted file mode 100644 index 1df04440ccb..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.exception; - - -import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; -import static java.net.HttpURLConnection.HTTP_CONFLICT; -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_NOT_SATISFIABLE; - -/** - * This class represents errors from Ozone S3 service. - * This class needs to be updated to add new errors when required. - */ -public final class S3ErrorTable { - - private S3ErrorTable() { - //No one should construct this object. - } - - public static final OS3Exception INVALID_URI = new OS3Exception("InvalidURI", - "Couldn't parse the specified URI.", HTTP_BAD_REQUEST); - - public static final OS3Exception NO_SUCH_VOLUME = new OS3Exception( - "NoSuchVolume", "The specified volume does not exist", HTTP_NOT_FOUND); - - public static final OS3Exception NO_SUCH_BUCKET = new OS3Exception( - "NoSuchBucket", "The specified bucket does not exist", HTTP_NOT_FOUND); - - public static final OS3Exception AUTH_PROTOCOL_NOT_SUPPORTED = - new OS3Exception("AuthProtocolNotSupported", "Auth protocol used for" + - " this request is not supported.", HTTP_BAD_REQUEST); - - public static final OS3Exception S3_TOKEN_CREATION_ERROR = - new OS3Exception("InvalidRequest", "Error creating s3 token creation.", - HTTP_BAD_REQUEST); - - public static final OS3Exception BUCKET_NOT_EMPTY = new OS3Exception( - "BucketNotEmpty", "The bucket you tried to delete is not empty.", - HTTP_CONFLICT); - - public static final OS3Exception MALFORMED_HEADER = new OS3Exception( - "AuthorizationHeaderMalformed", "The authorization header you provided " + - "is invalid.", HTTP_NOT_FOUND); - - public static final OS3Exception NO_SUCH_KEY = new OS3Exception( - "NoSuchKey", "The specified key does not exist", HTTP_NOT_FOUND); - - public static final OS3Exception INVALID_ARGUMENT = new OS3Exception( - "InvalidArgument", "Invalid Argument", HTTP_BAD_REQUEST); - - public static final OS3Exception INVALID_REQUEST = new OS3Exception( - "InvalidRequest", "Invalid Request", HTTP_BAD_REQUEST); - - public static final OS3Exception INVALID_RANGE = new OS3Exception( - "InvalidRange", "The requested range is not satisfiable", - RANGE_NOT_SATISFIABLE); - - public static final OS3Exception NO_SUCH_UPLOAD = new OS3Exception( - "NoSuchUpload", "The specified multipart upload does not exist. The " + - "upload ID might be invalid, or the multipart upload might have " + - "been aborted or completed.", HTTP_NOT_FOUND); - - public static final OS3Exception INVALID_PART = new OS3Exception( - "InvalidPart", "One or more of the specified parts could not be found." + - " The part might not have been uploaded, or the specified entity " + - "tag might not have matched the part's entity tag.", HTTP_BAD_REQUEST); - - public static final OS3Exception INVALID_PART_ORDER = new OS3Exception( - "InvalidPartOrder", "The list of parts was not in ascending order. The " + - "parts list must be specified in order by part number.", - HTTP_BAD_REQUEST); - - public static final OS3Exception ENTITY_TOO_SMALL = new OS3Exception( - "EntityTooSmall", "Your proposed upload is smaller than the minimum " + - "allowed object size. Each part must be at least 5 MB in size, except " + - "the last part.", HTTP_BAD_REQUEST); - - - /** - * Create a new instance of Error. 
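// Typical use of the table above, taken from call sites elsewhere in this
// patch: the static entries act as templates and newError() stamps a copy
// with the offending resource, e.g.
//
//   throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyPath);
//
// which produces a NoSuchKey error with HTTP status 404 and Resource=keyPath.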
- * @param e Error Template - * @param resource Resource associated with this exception - * @return creates a new instance of error based on the template - */ - public static OS3Exception newError(OS3Exception e, String resource) { - OS3Exception err = new OS3Exception(e.getCode(), e.getErrorMessage(), - e.getHttpCode()); - err.setResource(resource); - return err; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java deleted file mode 100644 index d295ae885a8..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains Ozone S3 exceptions. - */ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java deleted file mode 100644 index 7f17c9d3dc9..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthenticationHeaderParser.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.header; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.enterprise.context.RequestScoped; - -/** - * Authentication Header parser to parse HttpHeader Authentication. 
- */ -@RequestScoped -public class AuthenticationHeaderParser { - - private final static Logger LOG = LoggerFactory.getLogger( - AuthenticationHeaderParser.class); - - private String authHeader; - private String accessKeyID; - - public void parse() throws OS3Exception { - if (authHeader.startsWith("AWS4")) { - LOG.debug("V4 Header {}", authHeader); - AuthorizationHeaderV4 authorizationHeader = new AuthorizationHeaderV4( - authHeader); - accessKeyID = authorizationHeader.getAccessKeyID().toLowerCase(); - } else { - LOG.debug("V2 Header {}", authHeader); - AuthorizationHeaderV2 authorizationHeader = new AuthorizationHeaderV2( - authHeader); - accessKeyID = authorizationHeader.getAccessKeyID().toLowerCase(); - } - } - - public boolean doesAuthenticationInfoExists() { - return authHeader != null; - } - - public String getAccessKeyID() throws OS3Exception { - parse(); - return accessKeyID; - } - - public void setAuthHeader(String header) { - this.authHeader = header; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java deleted file mode 100644 index dfafc3a5aca..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV2.java +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.header; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; - -import static org.apache.commons.lang3.StringUtils.isBlank; - -/** - * Authorization Header v2. - */ -public class AuthorizationHeaderV2 { - - private final static String IDENTIFIER = "AWS"; - private String authHeader; - private String identifier; - private String accessKeyID; - private String signature; - - public AuthorizationHeaderV2(String auth) throws OS3Exception { - Preconditions.checkNotNull(auth); - this.authHeader = auth; - parseHeader(); - } - - /** - * This method parses the authorization header. - * - * Authorization header sample: - * AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg= - * - * @throws OS3Exception - */ - @SuppressWarnings("StringSplitter") - public void parseHeader() throws OS3Exception { - String[] split = authHeader.split(" "); - if (split.length != 2) { - throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader); - } - - identifier = split[0]; - if (!IDENTIFIER.equals(identifier)) { - throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader); - } - - String[] remainingSplit = split[1].split(":"); - - if (remainingSplit.length != 2) { - throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader); - } - - accessKeyID = remainingSplit[0]; - signature = remainingSplit[1]; - if (isBlank(accessKeyID) || isBlank(signature)) { - throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, authHeader); - } - } - - public String getAuthHeader() { - return authHeader; - } - - public void setAuthHeader(String authHeader) { - this.authHeader = authHeader; - } - - public String getIdentifier() { - return identifier; - } - - public String getAccessKeyID() { - return accessKeyID; - } - - public String getSignature() { - return signature; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java deleted file mode 100644 index 2637522de3d..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/AuthorizationHeaderV4.java +++ /dev/null @@ -1,253 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
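// Decomposition of the sample V2 header from the parseHeader() javadoc above
// ("AWS AKIAIOSFODNN7EXAMPLE:frJIUN8DYpKDtOLCwo//yllqDzg="), as a sketch:
//
//   identifier  = "AWS"
//   accessKeyID = "AKIAIOSFODNN7EXAMPLE"
//   signature   = "frJIUN8DYpKDtOLCwo//yllqDzg="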

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.s3.header; - - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.apache.hadoop.util.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.time.LocalDate; -import java.util.Collection; - -import static java.time.temporal.ChronoUnit.DAYS; -import static org.apache.commons.lang3.StringUtils.isAllEmpty; -import static org.apache.commons.lang3.StringUtils.isNoneEmpty; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; -import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.AWS4_SIGNING_ALGORITHM; -import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.DATE_FORMATTER; - -/** - * S3 Authorization header. - * Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using - * -authorization-header.html - */ -public class AuthorizationHeaderV4 { - - private final static Logger LOG = LoggerFactory.getLogger( - AuthorizationHeaderV4.class); - - private final static String CREDENTIAL = "Credential="; - private final static String SIGNEDHEADERS = "SignedHeaders="; - private final static String SIGNATURE = "Signature="; - - private String authHeader; - private String algorithm; - private String credential; - private String signedHeadersStr; - private String signature; - private Credential credentialObj; - private Collection signedHeaders; - - /** - * Construct AuthorizationHeader object. - * @param header - */ - public AuthorizationHeaderV4(String header) throws OS3Exception { - Preconditions.checkNotNull(header); - this.authHeader = header; - parseAuthHeader(); - } - - /** - * This method parses authorization header. - * - * Authorization Header sample: - * AWS4-HMAC-SHA256 Credential=AKIAJWFJK62WUTKNFJJA/20181009/us-east-1/s3 - * /aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, - * Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2 - * @throws OS3Exception - */ - @SuppressWarnings("StringSplitter") - public void parseAuthHeader() throws OS3Exception { - int firstSep = authHeader.indexOf(' '); - if (firstSep < 0) { - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - - //split the value parts of the authorization header - String[] split = authHeader.substring(firstSep + 1).trim().split(", *"); - - if (split.length != 3) { - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - - algorithm = authHeader.substring(0, firstSep); - validateAlgorithm(); - credential = split[0]; - signedHeadersStr = split[1]; - signature = split[2]; - validateCredentials(); - validateSignedHeaders(); - validateSignature(); - - } - - /** - * Validate Signed headers. 
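// Taking the sample header from the parseAuthHeader() javadoc above, the
// parser splits it into (sketch):
//
//   algorithm        = "AWS4-HMAC-SHA256"                (text before the first space)
//   credential       = "Credential=AKIAJWFJK62WUTKNFJJA/20181009/us-east-1/s3/aws4_request"
//   signedHeadersStr = "SignedHeaders=host;x-amz-content-sha256;x-amz-date"
//   signature        = "Signature=db81b057718d7c1b3b8dffa29933099551c51d787b3b13b9e0f9ebed45982bf2"
//
// The "Credential=", "SignedHeaders=" and "Signature=" prefixes are stripped
// by the validate* methods that follow.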
- * */ - private void validateSignedHeaders() throws OS3Exception { - if (isNoneEmpty(signedHeadersStr) - && signedHeadersStr.startsWith(SIGNEDHEADERS)) { - signedHeadersStr = signedHeadersStr.substring(SIGNEDHEADERS.length()); - signedHeaders = StringUtils.getStringCollection(signedHeadersStr, ";"); - if (signedHeaders.size() == 0) { - LOG.error("No signed headers found. Authheader:{}", authHeader); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } else { - LOG.error("No signed headers found. Authheader:{}", authHeader); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } - - /** - * Validate signature. - * */ - private void validateSignature() throws OS3Exception { - if (signature.startsWith(SIGNATURE)) { - signature = signature.substring(SIGNATURE.length()); - if (!isNoneEmpty(signature)) { - LOG.error("Signature can't be empty.", signature); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - try { - Hex.decodeHex(signature); - } catch (DecoderException e) { - LOG.error("Signature:{} should be in hexa-decimal encoding.", - signature); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } else { - LOG.error("Signature can't be empty.", signature); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } - - /** - * Validate credentials. - * */ - private void validateCredentials() throws OS3Exception { - if (isNoneEmpty(credential) && credential.startsWith(CREDENTIAL)) { - credential = credential.substring(CREDENTIAL.length()); - // Parse credential. Other parts of header are not validated yet. When - // security comes, it needs to be completed. - credentialObj = new Credential(credential); - } else { - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - - if (credentialObj.getAccessKeyID().isEmpty()) { - LOG.error("AWS access id shouldn't be empty. credential:{}", credential); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - if (credentialObj.getAwsRegion().isEmpty()) { - LOG.error("AWS region shouldn't be empty. credential:{}", credential); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - if (credentialObj.getAwsRequest().isEmpty()) { - LOG.error("AWS request shouldn't be empty. credential:{}", credential); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - if (credentialObj.getAwsService().isEmpty()) { - LOG.error("AWS service:{} shouldn't be empty. credential:{}", - credential); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - - // Date should not be empty and within valid range. - if (!credentialObj.getDate().isEmpty()) { - LocalDate date = LocalDate.parse(credentialObj.getDate(), DATE_FORMATTER); - LocalDate now = LocalDate.now(); - if (date.isBefore(now.minus(1, DAYS)) || - date.isAfter(now.plus(1, DAYS))) { - LOG.error("AWS date not in valid range. Date:{} should not be older " + - "than 1 day(i.e yesterday) and greater than 1 day(i.e " + - "tomorrow).", - getDate()); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } else { - LOG.error("AWS date shouldn't be empty. credential:{}", credential); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } - - /** - * Validate if algorithm is in expected format. - * */ - private void validateAlgorithm() throws OS3Exception { - if (isAllEmpty(algorithm) || !algorithm.equals(AWS4_SIGNING_ALGORITHM)) { - LOG.error("Unexpected hash algorithm. 
Algo:{}", algorithm); - throw S3ErrorTable.newError(MALFORMED_HEADER, authHeader); - } - } - - public String getAuthHeader() { - return authHeader; - } - - public String getAlgorithm() { - return algorithm; - } - - public String getCredential() { - return credential; - } - - public String getSignedHeaderString() { - return signedHeadersStr; - } - - public String getSignature() { - return signature; - } - - public String getAccessKeyID() { - return credentialObj.getAccessKeyID(); - } - - public String getDate() { - return credentialObj.getDate(); - } - - public String getAwsRegion() { - return credentialObj.getAwsRegion(); - } - - public String getAwsService() { - return credentialObj.getAwsService(); - } - - public String getAwsRequest() { - return credentialObj.getAwsRequest(); - } - - public Collection getSignedHeaders() { - return signedHeaders; - } - - public Credential getCredentialObj() { - return credentialObj; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java deleted file mode 100644 index 883980af7ec..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/Credential.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.header; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Credential in the AWS authorization header. - * Ref: https://docs.aws.amazon.com/AmazonS3/latest/API/ - * sigv4-auth-using-authorization-header.html - * - */ -public class Credential { - private static final Logger LOG = LoggerFactory.getLogger(Credential.class); - - private String accessKeyID; - private String date; - private String awsRegion; - private String awsService; - private String awsRequest; - private String credential; - - /** - * Construct Credential Object. - * @param cred - */ - Credential(String cred) throws OS3Exception { - this.credential = cred; - parseCredential(); - } - - /** - * Parse credential value. - * - * Sample credential value: - * Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request - * - * @throws OS3Exception - */ - @SuppressWarnings("StringSplitter") - public void parseCredential() throws OS3Exception { - String[] split = credential.split("/"); - switch (split.length) { - case 5: - // Ex: dkjad922329ddnks/20190321/us-west-1/s3/aws4_request - accessKeyID = split[0].trim(); - date = split[1].trim(); - awsRegion = split[2].trim(); - awsService = split[3].trim(); - awsRequest = split[4].trim(); - return; - case 6: - // Access id is kerberos principal. - // Ex: testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request - accessKeyID = split[0] + "/" +split[1]; - date = split[2].trim(); - awsRegion = split[3].trim(); - awsService = split[4].trim(); - awsRequest = split[5].trim(); - return; - default: - LOG.error("Credentials not in expected format. credential:{}", - credential); - throw S3ErrorTable.newError(S3ErrorTable.MALFORMED_HEADER, credential); - } - } - - public String getAccessKeyID() { - return accessKeyID; - } - - public String getDate() { - return date; - } - - public String getAwsRegion() { - return awsRegion; - } - - public String getAwsService() { - return awsService; - } - - public String getAwsRequest() { - return awsRequest; - } - - public String getCredential() { - return credential; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java deleted file mode 100644 index 40bc78bd337..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/header/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains Ozone S3 Authorization header. - */ -package org.apache.hadoop.ozone.s3.header; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java deleted file mode 100644 index 9efcc8738c6..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/S3WrapperInputStream.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.io; - -import org.apache.hadoop.fs.FSInputStream; -import org.apache.hadoop.ozone.client.io.KeyInputStream; - -import java.io.IOException; -import java.io.InputStream; - -/** - * S3Wrapper Input Stream which encapsulates KeyInputStream from ozone. - */ -public class S3WrapperInputStream extends FSInputStream { - private final KeyInputStream inputStream; - - /** - * Constructs S3WrapperInputStream with KeyInputStream. 
- * - * @param inputStream - */ - public S3WrapperInputStream(InputStream inputStream) { - this.inputStream = (KeyInputStream) inputStream; - } - - @Override - public int read() throws IOException { - return inputStream.read(); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - return inputStream.read(b, off, len); - } - - @Override - public synchronized void close() throws IOException { - inputStream.close(); - } - - @Override - public int available() throws IOException { - return inputStream.available(); - } - - public InputStream getInputStream() { - return inputStream; - } - - @Override - public void seek(long pos) throws IOException { - inputStream.seek(pos); - } - @Override - public long getPos() throws IOException { - return inputStream.getPos(); - } - - @Override - public boolean seekToNewSource(long targetPos) throws IOException { - return false; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java deleted file mode 100644 index 5167e60a995..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/io/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains Ozone S3 wrapper stream related classes. - */ - -package org.apache.hadoop.ozone.s3.io; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java deleted file mode 100644 index 9d41551c49c..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains the top level generic classes of s3 gateway. 
- */ -package org.apache.hadoop.ozone.s3; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java deleted file mode 100644 index 92ae6d44739..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/ContinueToken.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.util; - -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.Objects; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; - -import com.google.common.base.Preconditions; -import org.apache.commons.codec.DecoderException; -import org.apache.commons.codec.binary.Hex; -import org.apache.commons.codec.digest.DigestUtils; - -/** - * Token which holds enough information to continue the key iteration. - */ -public class ContinueToken { - - private String lastKey; - - private String lastDir; - - private static final String CONTINUE_TOKEN_SEPERATOR = "-"; - - public ContinueToken(String lastKey, String lastDir) { - Preconditions.checkNotNull(lastKey, - "The last key can't be null in the continue token."); - this.lastKey = lastKey; - if (lastDir != null && lastDir.length() > 0) { - this.lastDir = lastDir; - } - } - - /** - * Generate a continuation token which is used in get Bucket. - * - * @return if key is not null return continuation token, else returns null. - */ - public String encodeToString() { - if (this.lastKey != null) { - - ByteBuffer buffer = ByteBuffer - .allocate(4 + lastKey.length() - + (lastDir == null ? 0 : lastDir.length())); - buffer.putInt(lastKey.length()); - buffer.put(lastKey.getBytes(StandardCharsets.UTF_8)); - if (lastDir != null) { - buffer.put(lastDir.getBytes(StandardCharsets.UTF_8)); - } - - String hex = Hex.encodeHexString(buffer.array()); - String digest = DigestUtils.sha256Hex(hex); - return hex + CONTINUE_TOKEN_SEPERATOR + digest; - } else { - return null; - } - } - - /** - * Decode a continuation token which is used in get Bucket. - * - * @param key - * @return if key is not null return decoded token, otherwise returns null. 
- * @throws OS3Exception - */ - public static ContinueToken decodeFromString(String key) throws OS3Exception { - if (key != null) { - int indexSeparator = key.indexOf(CONTINUE_TOKEN_SEPERATOR); - if (indexSeparator == -1) { - throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, key); - } - String hex = key.substring(0, indexSeparator); - String digest = key.substring(indexSeparator + 1); - try { - checkHash(key, hex, digest); - - ByteBuffer buffer = ByteBuffer.wrap(Hex.decodeHex(hex)); - int keySize = buffer.getInt(); - - byte[] actualKeyBytes = new byte[keySize]; - buffer.get(actualKeyBytes); - - byte[] actualDirBytes = new byte[buffer.remaining()]; - buffer.get(actualDirBytes); - - return new ContinueToken( - new String(actualKeyBytes, StandardCharsets.UTF_8), - new String(actualDirBytes, StandardCharsets.UTF_8) - ); - - } catch (DecoderException ex) { - OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable - .INVALID_ARGUMENT, key); - os3Exception.setErrorMessage("The continuation token provided is " + - "incorrect"); - throw os3Exception; - } - } else { - return null; - } - } - - private static void checkHash(String key, String hex, String digest) - throws OS3Exception { - String digestActualKey = DigestUtils.sha256Hex(hex); - if (!digest.equals(digestActualKey)) { - OS3Exception ex = S3ErrorTable.newError(S3ErrorTable - .INVALID_ARGUMENT, key); - ex.setErrorMessage("The continuation token provided is incorrect"); - throw ex; - } - } - - public String getLastKey() { - return lastKey; - } - - public void setLastKey(String lastKey) { - this.lastKey = lastKey; - } - - public String getLastDir() { - return lastDir; - } - - public void setLastDir(String lastDir) { - this.lastDir = lastDir; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ContinueToken that = (ContinueToken) o; - return lastKey.equals(that.lastKey) && - Objects.equals(lastDir, that.lastDir); - } - - @Override - public int hashCode() { - return Objects.hash(lastKey); - } - - @Override - public String toString() { - return "ContinueToken{" + - "lastKey='" + lastKey + '\'' + - ", lastDir='" + lastDir + '\'' + - '}'; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java deleted file mode 100644 index ce7d4f2876a..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/OzoneS3Util.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.util; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.security.SecurityUtil; - -import javax.annotation.Nonnull; -import java.util.Collection; -import java.util.Objects; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; - -/** - * Ozone util for S3 related operations. - */ -public final class OzoneS3Util { - - private OzoneS3Util() { - } - - public static String getVolumeName(String userName) { - Objects.requireNonNull(userName); - return DigestUtils.md5Hex(userName); - } - - /** - * Generate service Name for token. - * @param configuration - * @param serviceId - ozone manager service ID - * @param omNodeIds - list of node ids for the given OM service. - * @return service Name. - */ - public static String buildServiceNameForToken( - @Nonnull OzoneConfiguration configuration, @Nonnull String serviceId, - @Nonnull Collection omNodeIds) { - StringBuilder rpcAddress = new StringBuilder(); - - int nodesLength = omNodeIds.size(); - int counter = 0; - for (String nodeId : omNodeIds) { - counter++; - String rpcAddrKey = OmUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, - serviceId, nodeId); - String rpcAddrStr = OmUtils.getOmRpcAddress(configuration, rpcAddrKey); - if (rpcAddrStr == null || rpcAddrStr.isEmpty()) { - throw new IllegalArgumentException("Could not find rpcAddress for " + - OZONE_OM_ADDRESS_KEY + "." + serviceId + "." + nodeId); - } - - if (counter != nodesLength) { - rpcAddress.append(SecurityUtil.buildTokenService( - NetUtils.createSocketAddr(rpcAddrStr)) + ","); - } else { - rpcAddress.append(SecurityUtil.buildTokenService( - NetUtils.createSocketAddr(rpcAddrStr))); - } - } - return rpcAddress.toString(); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java deleted file mode 100644 index 15a09b4bcef..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RFC1123Util.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.util; - -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.time.format.SignStyle; -import java.util.HashMap; -import java.util.Map; - -import static java.time.temporal.ChronoField.DAY_OF_MONTH; -import static java.time.temporal.ChronoField.DAY_OF_WEEK; -import static java.time.temporal.ChronoField.HOUR_OF_DAY; -import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; -import static java.time.temporal.ChronoField.MONTH_OF_YEAR; -import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; -import static java.time.temporal.ChronoField.YEAR; - -/** - * Stricter RFC1123 data format. - *

    - * This format always use two digits for the days to make it compatible with - * golang clients. - */ -public final class RFC1123Util { - - private RFC1123Util() { - } - - /** - * An RFC-1123 compatible file format which always use two digits for the - * days. - */ - public static final DateTimeFormatter FORMAT; - - static { - Map dow = new HashMap<>(); - dow.put(1L, "Mon"); - dow.put(2L, "Tue"); - dow.put(3L, "Wed"); - dow.put(4L, "Thu"); - dow.put(5L, "Fri"); - dow.put(6L, "Sat"); - dow.put(7L, "Sun"); - Map moy = new HashMap<>(); - moy.put(1L, "Jan"); - moy.put(2L, "Feb"); - moy.put(3L, "Mar"); - moy.put(4L, "Apr"); - moy.put(5L, "May"); - moy.put(6L, "Jun"); - moy.put(7L, "Jul"); - moy.put(8L, "Aug"); - moy.put(9L, "Sep"); - moy.put(10L, "Oct"); - moy.put(11L, "Nov"); - moy.put(12L, "Dec"); - FORMAT = new DateTimeFormatterBuilder() - .parseCaseInsensitive() - .parseLenient() - .optionalStart() - .appendText(DAY_OF_WEEK, dow) - .appendLiteral(", ") - .optionalEnd() - .appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE) - .appendLiteral(' ') - .appendText(MONTH_OF_YEAR, moy) - .appendLiteral(' ') - .appendValue(YEAR, 4) - .appendLiteral(' ') - .appendValue(HOUR_OF_DAY, 2) - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2) - .optionalEnd() - .appendLiteral(' ') - .appendOffset("+HHMM", "GMT") - .toFormatter(); - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java deleted file mode 100644 index 5f5c827433d..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeader.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.util; - -/** - * Ranger Header class which hold startoffset, endoffset of the Range header - * value provided as part of get object. - * - */ -public class RangeHeader { - private long startOffset; - private long endOffset; - private boolean readFull; - private boolean inValidRange; - - - /** - * Construct RangeHeader object. - * @param startOffset - * @param endOffset - */ - public RangeHeader(long startOffset, long endOffset, boolean full, - boolean invalid) { - this.startOffset = startOffset; - this.endOffset = endOffset; - this.readFull = full; - this.inValidRange = invalid; - } - - /** - * Return startOffset. - * - * @return startOffset - */ - public long getStartOffset() { - return startOffset; - } - - /** - * Return endoffset. 
- * - * @return endoffset - */ - public long getEndOffset() { - return endOffset; - } - - /** - * Return a flag whether after parsing range header, when the provided - * values are with in a range, and whole file read is required. - * - * @return readFull - */ - public boolean isReadFull() { - return readFull; - } - - /** - * Return a flag, whether range header values are correct or not. - * - * @return isInValidRange - */ - public boolean isInValidRange() { - return inValidRange; - } - - - public String toString() { - return "startOffset - [" + startOffset + "]" + "endOffset - [" + - endOffset + "]" + " readFull - [ " + readFull + "]" + " invalidRange " + - "- [ " + inValidRange + "]"; - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java deleted file mode 100644 index b1b61ccc643..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/RangeHeaderParserUtil.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.s3.util; - -import java.util.regex.Matcher; - -import org.apache.hadoop.classification.InterfaceAudience; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_MATCH_PATTERN; -/** - * Utility class for S3. - */ -@InterfaceAudience.Private -public final class RangeHeaderParserUtil { - - private RangeHeaderParserUtil() { - } - - /** - * Parse the rangeHeader and set the start and end offset. - * @param rangeHeaderVal - * @param length - * - * @return RangeHeader - */ - public static RangeHeader parseRangeHeader(String rangeHeaderVal, long - length) { - long start = 0; - long end = 0; - boolean noStart = false; - boolean readFull = false; - boolean inValidRange = false; - RangeHeader rangeHeader; - Matcher matcher = RANGE_HEADER_MATCH_PATTERN.matcher(rangeHeaderVal); - if (matcher.matches()) { - if (!matcher.group("start").equals("")) { - start = Integer.parseInt(matcher.group("start")); - } else { - noStart = true; - } - if (!matcher.group("end").equals("")) { - end = Integer.parseInt(matcher.group("end")); - } else { - end = length - 1; - } - if (noStart) { - if (end < length) { - start = length - end; - } else { - start = 0; - } - end = length - 1; - } else { - if (start >= length) { - readFull = true; - if (end >= length) { - inValidRange = true; - } else { - start = 0; - end = length - 1; - } - } else { - if (end >= length) { - end = length - 1; - } - } - } - } else { - // Byte specification is not matching or start and endoffset provided - // are not matching with regex. 
- start = 0; - end = length - 1; - readFull = true; - } - rangeHeader = new RangeHeader(start, end, readFull, inValidRange); - return rangeHeader; - - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java deleted file mode 100644 index 95168230b51..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.s3.util; - -import org.apache.hadoop.classification.InterfaceAudience; - -import java.util.regex.Pattern; - -/** - * Set of constants used for S3 implementation. - */ -@InterfaceAudience.Private -public final class S3Consts { - - //Never Constructed - private S3Consts() { - - } - - public static final String COPY_SOURCE_HEADER = "x-amz-copy-source"; - public static final String COPY_SOURCE_HEADER_RANGE = - "x-amz-copy-source-range"; - public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class"; - public static final String ENCODING_TYPE = "url"; - - // Constants related to Range Header - public static final String RANGE_HEADER_SUPPORTED_UNIT = "bytes"; - public static final String RANGE_HEADER = "Range"; - public static final String ACCEPT_RANGE_HEADER = "Accept-Ranges"; - public static final String CONTENT_RANGE_HEADER = "Content-Range"; - - - public static final Pattern RANGE_HEADER_MATCH_PATTERN = - Pattern.compile("bytes=(?[0-9]*)-(?[0-9]*)"); - - //Error code 416 is Range Not Satisfiable - public static final int RANGE_NOT_SATISFIABLE = 416; - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java deleted file mode 100644 index 7c0773bdf66..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.util; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; - -/** - * Maps S3 storage class values to Ozone replication values. - */ - -public enum S3StorageType { - - REDUCED_REDUNDANCY(ReplicationType.RATIS, ReplicationFactor.ONE), - STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE); - - private final ReplicationType type; - private final ReplicationFactor factor; - - S3StorageType( - ReplicationType type, - ReplicationFactor factor) { - this.type = type; - this.factor = factor; - } - - public ReplicationFactor getFactor() { - return factor; - } - - public ReplicationType getType() { - return type; - } - - public static S3StorageType getDefault() { - return STANDARD; - } - - public static S3StorageType fromReplicationType( - ReplicationType replicationType, ReplicationFactor factor) { - if ((replicationType == ReplicationType.STAND_ALONE) || - (factor == ReplicationFactor.ONE)) { - return S3StorageType.REDUCED_REDUNDANCY; - } else { - return S3StorageType.STANDARD; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java deleted file mode 100644 index af93f08eb9f..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package contains Ozone S3 Util classes. - */ -package org.apache.hadoop.ozone.s3.util; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml b/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml deleted file mode 100644 index cf00d2936af..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/META-INF/beans.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - - \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/resources/browser.html b/hadoop-ozone/s3gateway/src/main/resources/browser.html deleted file mode 100644 index 0405b17e90b..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/browser.html +++ /dev/null @@ -1,617 +0,0 @@ - - - - - - - - Ozone S3 Explorer - - - - - - - - - - - - - -

- [browser.html: static "Ozone S3 Explorer" page; the markup is not recoverable from this extract. Visible content: an "Ozone S3 Explorer" title bar with navigation and upload controls, and an object listing table with the columns Object, Folder, Last Modified, Timestamp, and Size.]
    - - - - - - - - - - - - - diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml deleted file mode 100644 index cf00d2936af..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/beans.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - - \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml deleted file mode 100644 index a3552f07006..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - jaxrs - org.glassfish.jersey.servlet.ServletContainer - - javax.ws.rs.Application - org.apache.hadoop.ozone.s3.GatewayApplication - - 1 - - - jaxrs - /* - - - - org.jboss.weld.environment.servlet.Listener - - - - \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico deleted file mode 100755 index 72886eabc1a00813dbc45b7762509dbb0b2a0361..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1150 zcmZ`&ZA@EL7(Nw@aVCn{vLE~)#`r^x2BT&p(YAc{i?+8wOD}D2dv8B)ziw}D3#FDq zdx4>Y;mV?;$$)9VB`ky0>4NJhBaFEbqWgii{4UzOphP{9gccDFBB7051y=RD$NQUJ$$M;4=Uq>@6*)EWzp4H{YnLe6gze zZJ=i5Kphq!$VOK7c#m}ZpfMx^WXfEg%JRHeC)HKfy;iF7%Su`}ZJD0a9c~J1p3{UB zPawPfIT`?U%=$`4uDgJz$T6qi@fmJ5t{QN|9o&MiSlz@Y7N*UBF^$je$779|j<9L& zTl-7|IzMfPRyPS1az(Met{zGyQm9lYx2!HAX9zXF{$CE1Bl(`?h{b09LHw>5GCkpS z>Z`=M|H=T&Ol8+xwDSXp-=4#ounmb=0tLHO1Jm^C9tYm3KjaEJuZYEBND#zttrmle zN_t}EFM2jy^F!MwE{-kvV?I3{b9-=;@mIt_x^2^1Ms)zyBarqwlq%&GhGBo6oH}*c zJDphe%qDNMH#3{Y`2lEO7=a(%oOzHLKXp8wjt$G@^1n42%_9?QtV1y{@!$-)n{t!O zL|0n{cRo2}zMkIJU*{pYG7N2Be+cn~e#ozk7xUjN-tFt{+o+aSLtB90)gd{cL!R3x zCRfNYhW0J(+NYya$u8UCz$0COhb`CopmC`enwGkY+^wOV!rdFi>}VGHqrS81kV51O zdsmUaBVeBt`Yb{*HANar*@?~|QQ#lS^ATt&bi<3^U)?xAs zh)ptQB@ziFNXuFaiaVkkvVw_6!|2GXG s0DJgd3c&q`0Q|TKK(PmatM33%5U_xd7wiUvv!?*~WdeY|gmdKgFK)uudjJ3c diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html deleted file mode 100644 index b20bf3530da..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ /dev/null @@ -1,83 +0,0 @@ - - - - - - - - - - - - S3 gateway -- Apache Hadoop Ozone - - - - - - - - - - - - -
- [index.html body text; the surrounding markup is not recoverable from this extract]
- S3 gateway
-
- This is an endpoint of Apache Hadoop Ozone S3 gateway. Use it with any AWS S3 compatible tool by setting this URL as the endpoint.
-
- For example, with aws-cli:
-
-   aws s3api --endpoint  create-bucket --bucket=wordcount
-
- For more information, please check the documentation.
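The same endpoint configuration works programmatically as well. Below is a minimal sketch using the AWS SDK for Java v1 (assuming aws-java-sdk-s3 is on the classpath); the endpoint URL, region, and credentials are illustrative placeholders, since the actual gateway address is elided above and is not defined anywhere in this patch.

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.client.builder.AwsClientBuilder;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;

    public final class S3GatewayClientSketch {
      public static void main(String[] args) {
        // Hypothetical gateway address; substitute the real s3g endpoint.
        String endpoint = "http://localhost:9878";

        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
            // Point the client at the S3 gateway instead of AWS.
            .withEndpointConfiguration(
                new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1"))
            // Placeholder credentials; real deployments supply their own.
            .withCredentials(new AWSStaticCredentialsProvider(
                new BasicAWSCredentials("accessKey", "secretKey")))
            // Use path-style requests; virtual-host-style DNS is not assumed here.
            .enablePathStyleAccess()
            .build();

        // Same operation as the aws-cli example above.
        s3.createBucket("wordcount");
        s3.listBuckets().forEach(b -> System.out.println(b.getName()));
      }
    }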
    - - - - - - - diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js b/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js deleted file mode 100644 index 8b1e977ba7e..00000000000 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -window.onload = function () { - var safeurl = window.location.protocol + "//" + window.location.host + window.location.pathname; - safeurl = safeurl.replace("static/", ""); - document.getElementById('s3gurl').innerHTML = safeurl; -}; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java deleted file mode 100644 index 4feaca66b45..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.hadoop.ozone.om.exceptions.OMException; - -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.S3_BUCKET_NOT_FOUND; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; - -/** - * ObjectStore implementation with in-memory state. 
- */ -public class ObjectStoreStub extends ObjectStore { - - public ObjectStoreStub() { - super(); - } - - private Map volumes = new HashMap<>(); - private Map bucketVolumeMap = new HashMap<>(); - private Map bucketEmptyStatus = new HashMap<>(); - private Map> userBuckets = new HashMap<>(); - - @Override - public void createVolume(String volumeName) throws IOException { - createVolume(volumeName, - VolumeArgs.newBuilder() - .setAdmin("root") - .setOwner("root") - .setQuota("" + Integer.MAX_VALUE) - .setAcls(new ArrayList<>()).build()); - } - - @Override - public void createVolume(String volumeName, VolumeArgs volumeArgs) - throws IOException { - OzoneVolumeStub volume = - new OzoneVolumeStub(volumeName, - volumeArgs.getAdmin(), - volumeArgs.getOwner(), - Long.parseLong(volumeArgs.getQuota()), - System.currentTimeMillis(), - volumeArgs.getAcls()); - volumes.put(volumeName, volume); - } - - @Override - public OzoneVolume getVolume(String volumeName) throws IOException { - if (volumes.containsKey(volumeName)) { - return volumes.get(volumeName); - } else { - throw new OMException("", VOLUME_NOT_FOUND); - } - } - - @Override - public Iterator listVolumes(String volumePrefix) - throws IOException { - return volumes.values() - .stream() - .filter(volume -> volume.getName().startsWith(volumePrefix)) - .collect(Collectors.toList()) - .iterator(); - - } - - @Override - public Iterator listVolumes(String volumePrefix, - String prevVolume) throws IOException { - return volumes.values() - .stream() - .filter(volume -> volume.getName().compareTo(prevVolume) > 0) - .filter(volume -> volume.getName().startsWith(volumePrefix)) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public Iterator listVolumesByUser(String user, - String volumePrefix, String prevVolume) throws IOException { - return volumes.values() - .stream() - .filter(volume -> volume.getOwner().equals(user)) - .filter(volume -> volume.getName().compareTo(prevVolume) < 0) - .filter(volume -> volume.getName().startsWith(volumePrefix)) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public void deleteVolume(String volumeName) throws IOException { - volumes.remove(volumeName); - } - - @Override - public void createS3Bucket(String userName, String s3BucketName) throws - IOException { - String volumeName = "s3" + userName; - if (bucketVolumeMap.get(s3BucketName) == null) { - bucketVolumeMap.put(s3BucketName, volumeName + "/" + s3BucketName); - bucketEmptyStatus.put(s3BucketName, true); - createVolume(volumeName); - volumes.get(volumeName).createBucket(s3BucketName); - } else { - throw new OMException("", BUCKET_ALREADY_EXISTS); - } - - if (userBuckets.get(userName) == null) { - List ozoneBuckets = new ArrayList<>(); - ozoneBuckets.add(volumes.get(volumeName).getBucket(s3BucketName)); - userBuckets.put(userName, ozoneBuckets); - } else { - userBuckets.get(userName).add(volumes.get(volumeName).getBucket( - s3BucketName)); - } - } - - public Iterator listS3Buckets(String userName, - String bucketPrefix) { - if (userBuckets.get(userName) == null) { - return new ArrayList().iterator(); - } else { - return userBuckets.get(userName).parallelStream() - .filter(ozoneBucket -> { - if (bucketPrefix != null) { - return ozoneBucket.getName().startsWith(bucketPrefix); - } else { - return true; - } - }).collect(Collectors.toList()) - .iterator(); - } - } - - public Iterator listS3Buckets(String userName, - String bucketPrefix, - String prevBucket) { - - if (userBuckets.get(userName) == null) { - return new 
ArrayList().iterator(); - } else { - //Sort buckets lexicographically - userBuckets.get(userName).sort( - (bucket1, bucket2) -> { - int compare = bucket1.getName().compareTo(bucket2.getName()); - if (compare < 0) { - return -1; - } else if (compare == 0) { - return 0; - } else { - return 1; - } - }); - return userBuckets.get(userName).stream() - .filter(ozoneBucket -> { - if (prevBucket != null) { - return ozoneBucket.getName().compareTo(prevBucket) > 0; - } else { - return true; - } - }) - .filter(ozoneBucket -> { - if (bucketPrefix != null) { - return ozoneBucket.getName().startsWith(bucketPrefix); - } else { - return true; - } - }).collect(Collectors.toList()) - .iterator(); - } - } - - @Override - public void deleteS3Bucket(String s3BucketName) throws - IOException { - if (bucketVolumeMap.containsKey(s3BucketName)) { - if (bucketEmptyStatus.get(s3BucketName)) { - bucketVolumeMap.remove(s3BucketName); - } else { - throw new OMException("", BUCKET_NOT_EMPTY); - } - } else { - throw new OMException("", BUCKET_NOT_FOUND); - } - } - - @Override - public String getOzoneBucketMapping(String s3BucketName) throws IOException { - if (bucketVolumeMap.get(s3BucketName) == null) { - throw new OMException("", S3_BUCKET_NOT_FOUND); - } - return bucketVolumeMap.get(s3BucketName); - } - - @Override - @SuppressWarnings("StringSplitter") - public String getOzoneVolumeName(String s3BucketName) throws IOException { - if (bucketVolumeMap.get(s3BucketName) == null) { - throw new OMException("", S3_BUCKET_NOT_FOUND); - } - return bucketVolumeMap.get(s3BucketName).split("/")[0]; - } - - @Override - @SuppressWarnings("StringSplitter") - public String getOzoneBucketName(String s3BucketName) throws IOException { - if (bucketVolumeMap.get(s3BucketName) == null) { - throw new OMException("", BUCKET_NOT_FOUND); - } - return bucketVolumeMap.get(s3BucketName).split("/")[1]; - } - - public void setBucketEmptyStatus(String bucketName, boolean status) { - bucketEmptyStatus.put(bucketName, status); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java deleted file mode 100644 index e8ebf02b0f8..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.client; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts.PartInfo; -import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.util.Time; - -/** - * In-memory ozone bucket for testing. - */ -public class OzoneBucketStub extends OzoneBucket { - - private Map keyDetails = new HashMap<>(); - - private Map keyContents = new HashMap<>(); - - private Map multipartUploadIdMap = new HashMap<>(); - - private Map> partList = new HashMap<>(); - - /** - * Constructs OzoneBucket instance. - * - * @param volumeName Name of the volume the bucket belongs to. - * @param bucketName Name of the bucket. - * @param storageType StorageType of the bucket. - * @param versioning versioning status of the bucket. - * @param creationTime creation time of the bucket. - */ - public OzoneBucketStub( - String volumeName, - String bucketName, - StorageType storageType, Boolean versioning, - long creationTime) { - super(volumeName, - bucketName, - ReplicationFactor.ONE, - ReplicationType.STAND_ALONE, - storageType, - versioning, - creationTime); - } - - @Override - public OzoneOutputStream createKey(String key, long size) throws IOException { - return createKey(key, size, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - } - - @Override - public OzoneOutputStream createKey(String key, long size, - ReplicationType type, - ReplicationFactor factor, - Map metadata) - throws IOException { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - keyContents.put(key, toByteArray()); - keyDetails.put(key, new OzoneKeyDetails( - getVolumeName(), - getName(), - key, - size, - System.currentTimeMillis(), - System.currentTimeMillis(), - new ArrayList<>(), type, metadata, null, - factor.getValue() - )); - super.close(); - } - }; - return new OzoneOutputStream(byteArrayOutputStream); - } - - @Override - public OzoneInputStream readKey(String key) throws IOException { - return new OzoneInputStream(new ByteArrayInputStream(keyContents.get(key))); - } - - @Override - public OzoneKeyDetails getKey(String key) throws IOException { - if (keyDetails.containsKey(key)) { - return keyDetails.get(key); - } else { - throw new OMException(ResultCodes.KEY_NOT_FOUND); - } - } - - @Override - public Iterator listKeys(String keyPrefix) { - Map sortedKey = new TreeMap(keyDetails); - return sortedKey.values() - .stream() - .filter(key -> key.getName().startsWith(keyPrefix)) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public Iterator listKeys(String keyPrefix, - String 
prevKey) { - Map sortedKey = new TreeMap(keyDetails); - return sortedKey.values() - .stream() - .filter(key -> key.getName().compareTo(prevKey) > 0) - .filter(key -> key.getName().startsWith(keyPrefix)) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public void deleteKey(String key) throws IOException { - keyDetails.remove(key); - } - - @Override - public void renameKey(String fromKeyName, String toKeyName) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - public OmMultipartInfo initiateMultipartUpload(String keyName, - ReplicationType type, - ReplicationFactor factor) - throws IOException { - String uploadID = UUID.randomUUID().toString(); - multipartUploadIdMap.put(keyName, uploadID); - return new OmMultipartInfo(getVolumeName(), getName(), keyName, uploadID); - } - - @Override - public OzoneOutputStream createMultipartKey(String key, long size, - int partNumber, String uploadID) - throws IOException { - String multipartUploadID = multipartUploadIdMap.get(key); - if (multipartUploadID == null || !multipartUploadID.equals(uploadID)) { - throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - Part part = new Part(key + size, - toByteArray()); - if (partList.get(key) == null) { - Map parts = new TreeMap<>(); - parts.put(partNumber, part); - partList.put(key, parts); - } else { - partList.get(key).put(partNumber, part); - } - } - }; - return new OzoneOutputStreamStub(byteArrayOutputStream, key + size); - } - } - - @Override - public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, - String uploadID, Map partsMap) throws IOException { - - if (multipartUploadIdMap.get(key) == null) { - throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - final Map partsList = partList.get(key); - - if (partsMap.size() != partsList.size()) { - throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST); - } - - int count = 1; - - ByteArrayOutputStream output = new ByteArrayOutputStream(); - - for (Map.Entry part: partsMap.entrySet()) { - Part recordedPart = partsList.get(part.getKey()); - if (part.getKey() != count) { - throw new OMException(ResultCodes.MISSING_UPLOAD_PARTS); - } else { - if (!part.getValue().equals(recordedPart.getPartName())) { - throw new OMException(ResultCodes.MISMATCH_MULTIPART_LIST); - } else { - count++; - output.write(recordedPart.getContent()); - } - } - } - keyContents.put(key, output.toByteArray()); - } - - return new OmMultipartUploadCompleteInfo(getVolumeName(), getName(), key, - DigestUtils.sha256Hex(key)); - } - - @Override - public void abortMultipartUpload(String keyName, String uploadID) throws - IOException { - if (multipartUploadIdMap.get(keyName) == null) { - throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - multipartUploadIdMap.remove(keyName); - } - } - - @Override - public OzoneMultipartUploadPartListParts listParts(String key, - String uploadID, int partNumberMarker, int maxParts) throws IOException { - if (multipartUploadIdMap.get(key) == null) { - throw new OMException(ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } - List partInfoList = new ArrayList<>(); - - if (partList.get(key) == null) { - return new OzoneMultipartUploadPartListParts(ReplicationType.RATIS, - ReplicationFactor.ONE, 0, false); - } else { - Map partMap = partList.get(key); - Iterator> partIterator = - 
partMap.entrySet().iterator(); - - int count = 0; - int nextPartNumberMarker = 0; - boolean truncated = false; - while (count < maxParts && partIterator.hasNext()) { - Map.Entry partEntry = partIterator.next(); - nextPartNumberMarker = partEntry.getKey(); - if (partEntry.getKey() > partNumberMarker) { - PartInfo partInfo = new PartInfo(partEntry.getKey(), - partEntry.getValue().getPartName(), - partEntry.getValue().getContent().length, Time.now()); - partInfoList.add(partInfo); - count++; - } - } - - if (partIterator.hasNext()) { - truncated = true; - } else { - truncated = false; - nextPartNumberMarker = 0; - } - - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - new OzoneMultipartUploadPartListParts(ReplicationType.RATIS, - ReplicationFactor.ONE, - nextPartNumberMarker, truncated); - ozoneMultipartUploadPartListParts.addAllParts(partInfoList); - - return ozoneMultipartUploadPartListParts; - - } - - } - - /** - * Class used to hold part information in a upload part request. - */ - public class Part { - private String partName; - private byte[] content; - - public Part(String name, byte[] data) { - this.partName = name; - this.content = data; - } - - public String getPartName() { - return partName; - } - - public byte[] getContent() { - return content; - } - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java deleted file mode 100644 index 3c7a2534658..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneClientStub.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.client; - -import java.io.IOException; - -/** - * In-memory OzoneClient for testing. - */ -public class OzoneClientStub extends OzoneClient { - - public OzoneClientStub() { - super(new ObjectStoreStub()); - } - - @Override - public void close() throws IOException { - //NOOP. - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java deleted file mode 100644 index 28e377b4282..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.client; - -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * OzoneOutputStream stub for testing. - */ -public class OzoneOutputStreamStub extends OzoneOutputStream { - - private final String partName; - - /** - * Constructs OzoneOutputStreamStub with outputStream and partName. - * - * @param outputStream - * @param name - partName - */ - public OzoneOutputStreamStub(OutputStream outputStream, String name) { - super(outputStream); - this.partName = name; - } - - @Override - public void write(int b) throws IOException { - getOutputStream().write(b); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - getOutputStream().write(b, off, len); - } - - @Override - public synchronized void flush() throws IOException { - getOutputStream().flush(); - } - - @Override - public synchronized void close() throws IOException { - //commitKey can be done here, if needed. - getOutputStream().close(); - } - - @Override - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return new OmMultipartCommitUploadPartInfo(partName); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java deleted file mode 100644 index 89972601e9b..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.client; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.ozone.OzoneAcl; - -/** - * Ozone volume with in-memory state for testing. 
- */ -public class OzoneVolumeStub extends OzoneVolume { - - private Map buckets = new HashMap<>(); - - public OzoneVolumeStub(String name, String admin, String owner, - long quotaInBytes, - long creationTime, List acls) { - super(name, admin, owner, quotaInBytes, creationTime, acls); - } - - @Override - public void createBucket(String bucketName) throws IOException { - createBucket(bucketName, new BucketArgs.Builder() - .setStorageType(StorageType.DEFAULT) - .setVersioning(false) - .build()); - } - - @Override - public void createBucket(String bucketName, BucketArgs bucketArgs) - throws IOException { - buckets.put(bucketName, new OzoneBucketStub( - getName(), - bucketName, - bucketArgs.getStorageType(), - bucketArgs.getVersioning(), - System.currentTimeMillis())); - - } - - @Override - public OzoneBucket getBucket(String bucketName) throws IOException { - if (buckets.containsKey(bucketName)) { - return buckets.get(bucketName); - } else { - throw new IOException("BUCKET_NOT_FOUND"); - } - - } - - @Override - public Iterator listBuckets(String bucketPrefix) { - return buckets.values() - .stream() - .filter(bucket -> { - if (bucketPrefix != null) { - return bucket.getName().startsWith(bucketPrefix); - } else { - return true; - } - }) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public Iterator listBuckets(String bucketPrefix, - String prevBucket) { - return buckets.values() - .stream() - .filter(bucket -> bucket.getName().compareTo(prevBucket) > 0) - .filter(bucket -> bucket.getName().startsWith(bucketPrefix)) - .collect(Collectors.toList()) - .iterator(); - } - - @Override - public void deleteBucket(String bucketName) throws IOException { - if (buckets.containsKey(bucketName)) { - buckets.remove(bucketName); - } else { - throw new IOException("BUCKET_NOT_FOUND"); - } - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index 10e42745b30..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * In-memory OzoneClient implementation to test REST endpoints. 
- */ -package org.apache.hadoop.ozone.client; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java deleted file mode 100644 index 252d87b307c..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.ws.rs.container.ContainerRequestContext; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.UriInfo; -import java.io.IOException; -import java.net.URI; -import java.util.Arrays; -import java.util.Collection; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.test.LambdaTestUtils; - -import static org.apache.hadoop.ozone.s3.AWSAuthParser.AUTHORIZATION_HEADER; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_MD5; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.CONTENT_TYPE; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.HOST_HEADER; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMAZ_DATE; -import static org.apache.hadoop.ozone.s3.AWSAuthParser.X_AMZ_CONTENT_SHA256; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.mockito.Mockito; - -/** - * Test class for @{@link OzoneClientProducer}. 
- * */ -@RunWith(Parameterized.class) -public class TestOzoneClientProducer { - - private OzoneClientProducer producer; - private MultivaluedMap headerMap; - private MultivaluedMap queryMap; - private String authHeader; - private String contentMd5; - private String host; - private String amzContentSha256; - private String date; - private String contentType; - - - private ContainerRequestContext context; - private UriInfo uriInfo; - - public TestOzoneClientProducer(String authHeader, String contentMd5, - String host, String amzContentSha256, String date, String contentType) - throws Exception { - this.authHeader = authHeader; - this.contentMd5 = contentMd5; - this.host = host; - this.amzContentSha256 = amzContentSha256; - this.date = date; - this.contentType = contentType; - producer = new OzoneClientProducer(); - headerMap = new MultivaluedHashMap<>(); - queryMap = new MultivaluedHashMap<>(); - uriInfo = Mockito.mock(UriInfo.class); - context = Mockito.mock(ContainerRequestContext.class); - OzoneConfiguration config = new OzoneConfiguration(); - config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true); - config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); - setupContext(); - producer.setContext(context); - producer.setOzoneConfiguration(config); - } - - @Test - public void testGetClientFailure() throws Exception { - LambdaTestUtils.intercept(IOException.class, "Couldn't create", - () -> producer.createClient()); - } - - private void setupContext() throws Exception { - headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); - headerMap.putSingle(CONTENT_MD5, contentMd5); - headerMap.putSingle(HOST_HEADER, host); - headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); - headerMap.putSingle(X_AMAZ_DATE, date); - headerMap.putSingle(CONTENT_TYPE, contentType); - - Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); - Mockito.when(uriInfo.getRequestUri()).thenReturn(new URI("")); - - Mockito.when(context.getUriInfo()).thenReturn(uriInfo); - Mockito.when(context.getHeaders()).thenReturn(headerMap); - Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) - .thenReturn(authHeader); - Mockito.when(context.getUriInfo().getQueryParameters()) - .thenReturn(queryMap); - } - - @Parameterized.Parameters - public static Collection data() { - return Arrays.asList(new Object[][]{ - { - "AWS4-HMAC-SHA256 Credential=testuser1/20190221/us-west-1/s3" + - "/aws4_request, SignedHeaders=content-md5;host;" + - "x-amz-content-sha256;x-amz-date, " + - "Signature" + - "=56ec73ba1974f8feda8365c3caef89c5d4a688d5f9baccf47" + - "65f46a14cd745ad", - "Zi68x2nPDDXv5qfDC+ZWTg==", - "s3g:9878", - "e2bd43f11c97cde3465e0e8d1aad77af7ec7aa2ed8e213cd0e24" + - "1e28375860c6", - "20190221T002037Z", - "" - }, - { - "AWS4-HMAC-SHA256 " + - "Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request," + - " SignedHeaders=content-type;host;x-amz-date, " + - "Signature=" + - "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400" + - "e06b5924a6f2b5d7", - "", - "iam.amazonaws.com", - "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "20150830T123600Z", - "application/x-www-form-urlencoded; charset=utf-8" - } - }); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java deleted file mode 100644 index 3599c059b1f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java +++ 
/dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.Charset; - -import org.apache.commons.io.IOUtils; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test input stream parsing with signatures. - */ -public class TestSignedChunksInputStream { - - @Test - public void emptyfile() throws IOException { - InputStream is = fileContent("0;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40"); - String result = IOUtils.toString(is, Charset.forName("UTF-8")); - Assert.assertEquals("", result); - - is = fileContent("0;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" - + "\n"); - result = IOUtils.toString(is, Charset.forName("UTF-8")); - Assert.assertEquals("", result); - } - - @Test - public void singlechunk() throws IOException { - //test simple read() - InputStream is = fileContent("0A;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" - + "\n1234567890\r\n"); - String result = IOUtils.toString(is, Charset.forName("UTF-8")); - Assert.assertEquals("1234567890", result); - - //test read(byte[],int,int) - is = fileContent("0A;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" - + "\n1234567890\r\n"); - byte[] bytes = new byte[10]; - IOUtils.read(is, bytes, 0, 10); - Assert.assertEquals("1234567890", new String(bytes)); - } - - @Test - public void singlechunkwithoutend() throws IOException { - //test simple read() - InputStream is = fileContent("0A;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" - + "\n1234567890"); - String result = IOUtils.toString(is, Charset.forName("UTF-8")); - Assert.assertEquals("1234567890", result); - - //test read(byte[],int,int) - is = fileContent("0A;chunk-signature" - + - "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" - + "\n1234567890"); - byte[] bytes = new byte[10]; - IOUtils.read(is, bytes, 0, 10); - Assert.assertEquals("1234567890", new String(bytes)); - } - - @Test - public void multichunks() throws IOException { - //test simple read() - InputStream is = fileContent("0a;chunk-signature=signature\r\n" - + "1234567890\r\n" - + "05;chunk-signature=signature\r\n" - + "abcde\r\n"); - String result = IOUtils.toString(is, Charset.forName("UTF-8")); - Assert.assertEquals("1234567890abcde", result); - - //test read(byte[],int,int) - is = fileContent("0a;chunk-signature=signature\r\n" - + "1234567890\r\n" - + "05;chunk-signature=signature\r\n" - + "abcde\r\n"); - byte[] bytes = new byte[15]; - 
IOUtils.read(is, bytes, 0, 15); - Assert.assertEquals("1234567890abcde", new String(bytes)); - } - - private InputStream fileContent(String content) { - return new SignedChunksInputStream( - new ByteArrayInputStream(content.getBytes())); - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java deleted file mode 100644 index eead44784ad..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ /dev/null @@ -1,220 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import org.apache.hadoop.fs.InvalidRequestException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser; -import org.apache.hadoop.test.GenericTestUtils; -import org.glassfish.jersey.internal.PropertiesDelegate; -import org.glassfish.jersey.server.ContainerRequest; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.SecurityContext; -import java.net.URI; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -/** - * This class test virtual host style mapping conversion to path style. - */ -public class TestVirtualHostStyleFilter { - - private static OzoneConfiguration conf; - private static String s3HttpAddr; - private AuthenticationHeaderParser authenticationHeaderParser; - - @Before - public void setup() { - conf = new OzoneConfiguration(); - s3HttpAddr = "localhost:9878"; - conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, s3HttpAddr); - s3HttpAddr = s3HttpAddr.substring(0, s3HttpAddr.lastIndexOf(":")); - conf.set(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME, s3HttpAddr); - authenticationHeaderParser = new AuthenticationHeaderParser(); - authenticationHeaderParser.setAuthHeader("AWS ozone:scret"); - } - - /** - * Create containerRequest object. 
- * @return ContainerRequest - * @throws Exception - */ - public ContainerRequest createContainerRequest(String host, String path, - String queryParams, - boolean virtualHostStyle) - throws Exception { - URI baseUri = new URI("http://" + s3HttpAddr); - URI virtualHostStyleUri; - if (path == null && queryParams == null) { - virtualHostStyleUri = new URI("http://" + s3HttpAddr); - } else if (path != null && queryParams == null) { - virtualHostStyleUri = new URI("http://" + s3HttpAddr + path); - } else if (path !=null && queryParams != null) { - virtualHostStyleUri = new URI("http://" + s3HttpAddr + path + - queryParams); - } else { - virtualHostStyleUri = new URI("http://" + s3HttpAddr + queryParams); - } - URI pathStyleUri; - if (queryParams == null) { - pathStyleUri = new URI("http://" + s3HttpAddr + path); - } else { - pathStyleUri = new URI("http://" + s3HttpAddr + path + queryParams); - } - String httpMethod = "DELETE"; - SecurityContext securityContext = Mockito.mock(SecurityContext.class); - PropertiesDelegate propertiesDelegate = Mockito.mock(PropertiesDelegate - .class); - ContainerRequest containerRequest; - if (virtualHostStyle) { - containerRequest = new ContainerRequest(baseUri, virtualHostStyleUri, - httpMethod, securityContext, propertiesDelegate); - containerRequest.header(HttpHeaders.HOST, host); - } else { - containerRequest = new ContainerRequest(baseUri, pathStyleUri, - httpMethod, securityContext, propertiesDelegate); - containerRequest.header(HttpHeaders.HOST, host); - } - return containerRequest; - } - - @Test - public void testVirtualHostStyle() throws Exception { - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", "/myfile", null, true); - virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + - "/mybucket/myfile"); - Assert.assertEquals(expected, containerRequest.getRequestUri()); - } - - @Test - public void testPathStyle() throws Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest(s3HttpAddr, - "/mybucket/myfile", null, false); - virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + - "/mybucket/myfile"); - Assert.assertEquals(expected, containerRequest.getRequestUri()); - - } - - @Test - public void testVirtualHostStyleWithCreateBucketRequest() throws Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", null, null, true); - virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket"); - Assert.assertEquals(expected, containerRequest.getRequestUri()); - - } - - @Test - public void testVirtualHostStyleWithQueryParams() throws Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - 
virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", null, "?prefix=bh", true); - virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/mybucket?prefix=bh"); - assertTrue(expected.toString().contains(containerRequest.getRequestUri() - .toString())); - - containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", null, "?prefix=bh&type=dir", true); - virtualHostStyleFilter.filter(containerRequest); - expected = new URI("http://" + s3HttpAddr + - "/mybucket?prefix=bh&type=dir"); - assertTrue(expected.toString().contains(containerRequest.getRequestUri() - .toString())); - - } - - @Test - public void testVirtualHostStyleWithNoMatchingDomain() throws Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - ".myhost:9999", null, null, true); - try { - virtualHostStyleFilter.filter(containerRequest); - fail("testVirtualHostStyleWithNoMatchingDomain"); - } catch (InvalidRequestException ex) { - GenericTestUtils.assertExceptionContains("No matching domain", ex); - } - - } - - @Test - public void testIncorrectVirtualHostStyle() throws - Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - virtualHostStyleFilter.setAuthenticationHeaderParser( - authenticationHeaderParser); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - "localhost:9878", null, null, true); - try { - virtualHostStyleFilter.filter(containerRequest); - fail("testIncorrectVirtualHostStyle failed"); - } catch (InvalidRequestException ex) { - GenericTestUtils.assertExceptionContains("invalid format", ex); - } - - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java deleted file mode 100644 index 912a769cd3f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; - - -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.when; - -/** - * This class tests abort multipart upload request. - */ -public class TestAbortMultipartUpload { - - - @Test - public void testAbortMultipartUpload() throws Exception { - - String bucket = "s3bucket"; - String key = "key1"; - OzoneClientStub client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("ozone", bucket); - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - ObjectEndpoint rest = new ObjectEndpoint(); - rest.setHeaders(headers); - rest.setClient(client); - - Response response = rest.initializeMultipartUpload(bucket, key); - - assertEquals(response.getStatus(), 200); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - - // Abort multipart upload - response = rest.delete(bucket, key, uploadID); - - assertEquals(204, response.getStatus()); - - // test with unknown upload Id. - try { - rest.delete(bucket, key, "random"); - } catch (OS3Exception ex) { - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); - assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), - ex.getErrorMessage()); - } - - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java deleted file mode 100644 index ea574d42ed5..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.Response; - -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.ObjectStoreStub; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; - -import org.apache.http.HttpStatus; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import org.junit.Before; -import org.junit.Test; - -/** - * This class tests delete bucket functionality. - */ -public class TestBucketDelete { - - private String bucketName = "myBucket"; - private OzoneClientStub clientStub; - private ObjectStore objectStoreStub; - private BucketEndpoint bucketEndpoint; - - @Before - public void setup() throws Exception { - - //Create client stub and object store stub. - clientStub = new OzoneClientStub(); - objectStoreStub = clientStub.getObjectStore(); - - objectStoreStub.createS3Bucket("ozone", bucketName); - - // Create HeadBucket and setClient to OzoneClientStub - bucketEndpoint = new BucketEndpoint(); - bucketEndpoint.setClient(clientStub); - - - } - - @Test - public void testBucketEndpoint() throws Exception { - Response response = bucketEndpoint.delete(bucketName); - assertEquals(HttpStatus.SC_NO_CONTENT, response.getStatus()); - - } - - @Test - public void testDeleteWithNoSuchBucket() throws Exception { - try { - bucketEndpoint.delete("unknownbucket"); - } catch (OS3Exception ex) { - assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), ex.getCode()); - assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(), - ex.getErrorMessage()); - return; - } - fail("testDeleteWithNoSuchBucket failed"); - } - - - @Test - public void testDeleteWithBucketNotEmpty() throws Exception { - try { - String bucket = "nonemptybucket"; - objectStoreStub.createS3Bucket("ozone1", bucket); - ObjectStoreStub stub = (ObjectStoreStub) objectStoreStub; - stub.setBucketEmptyStatus(bucket, false); - bucketEndpoint.delete(bucket); - } catch (OS3Exception ex) { - assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getCode(), ex.getCode()); - assertEquals(S3ErrorTable.BUCKET_NOT_EMPTY.getErrorMessage(), - ex.getErrorMessage()); - return; - } - fail("testDeleteWithBucketNotEmpty failed"); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java deleted file mode 100644 index 844f9bef434..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java +++ /dev/null @@ -1,380 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.IOException; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.junit.Assert; -import static org.junit.Assert.fail; -import org.junit.Test; - -/** - * Testing basic object list browsing. - */ -public class TestBucketGet { - - @Test - public void listRoot() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient client = createClientWithKeys("file1", "dir1/file2"); - - getBucket.setClient(client); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket - .list("b1", "/", null, null, 100, "", null, null, null, null, null) - .getEntity(); - - Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("dir1/", - getBucketResponse.getCommonPrefixes().get(0).getPrefix()); - - Assert.assertEquals(1, getBucketResponse.getContents().size()); - Assert.assertEquals("file1", - getBucketResponse.getContents().get(0).getKey()); - - } - - @Test - public void listDir() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2"); - - getBucket.setClient(client); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, 100, - "dir1", null, null, null, null, null).getEntity(); - - Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("dir1/", - getBucketResponse.getCommonPrefixes().get(0).getPrefix()); - - Assert.assertEquals(0, getBucketResponse.getContents().size()); - - } - - @Test - public void listSubDir() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2"); - - getBucket.setClient(ozoneClient); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket - .list("b1", "/", null, null, 100, "dir1/", null, null, - null, null, null) - .getEntity(); - - Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("dir1/dir2/", - getBucketResponse.getCommonPrefixes().get(0).getPrefix()); - - Assert.assertEquals(1, getBucketResponse.getContents().size()); - Assert.assertEquals("dir1/file2", - getBucketResponse.getContents().get(0).getKey()); - - } - - - @Test - public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2", "file2"); - - getBucket.setClient(ozoneClient); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, 100, - "dir1", null, null, null, null, null).getEntity(); - - Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size()); - - } - - @Test - public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2", "file2"); - - getBucket.setClient(ozoneClient); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) 
getBucket.list("b1", "/", null, null, 100, - "", null, null, null, null, null).getEntity(); - - Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("file2", getBucketResponse.getContents().get(0) - .getKey()); - - } - - @Test - public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2", "file2"); - - getBucket.setClient(ozoneClient); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, 100, - "dir1bh", null, null, "dir1/dir2/file2", null, null).getEntity(); - - Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size()); - - } - - @Test - public void listWithContinuationToken() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2", "file2"); - - getBucket.setClient(ozoneClient); - - int maxKeys = 2; - // As we have 5 keys, with max keys 2 we should call list 3 times. - - // First time - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys, - "", null, null, null, null, null).getEntity(); - - Assert.assertTrue(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 2); - - // 2nd time - String continueToken = getBucketResponse.getNextToken(); - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys, - "", null, continueToken, null, null, null).getEntity(); - Assert.assertTrue(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 2); - - - continueToken = getBucketResponse.getNextToken(); - - //3rd time - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys, - "", null, continueToken, null, null, null).getEntity(); - - Assert.assertFalse(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 1); - - } - - @Test - public void listWithContinuationTokenDirBreak() - throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys( - "test/dir1/file1", - "test/dir1/file2", - "test/dir1/file3", - "test/dir2/file4", - "test/dir2/file5", - "test/dir2/file6", - "test/dir3/file7", - "test/file8"); - - getBucket.setClient(ozoneClient); - - int maxKeys = 2; - - ListObjectResponse getBucketResponse; - - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys, - "test/", null, null, null, null, null).getEntity(); - - Assert.assertEquals(0, getBucketResponse.getContents().size()); - Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("test/dir1/", - getBucketResponse.getCommonPrefixes().get(0).getPrefix()); - Assert.assertEquals("test/dir2/", - getBucketResponse.getCommonPrefixes().get(1).getPrefix()); - - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys, - "test/", null, getBucketResponse.getNextToken(), null, null, null) - .getEntity(); - Assert.assertEquals(1, getBucketResponse.getContents().size()); - Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size()); - Assert.assertEquals("test/dir3/", - 
getBucketResponse.getCommonPrefixes().get(0).getPrefix()); - Assert.assertEquals("test/file8", - getBucketResponse.getContents().get(0).getKey()); - - } - - @Test - /** - * This test is with prefix and delimiter and verify continuation-token - * behavior. - */ - public void listWithContinuationToken1() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file1", "dir1bh/file1", - "dir1bha/file1", "dir0/file1", "dir2/file1"); - - getBucket.setClient(ozoneClient); - - int maxKeys = 2; - // As we have 5 keys, with max keys 2 we should call list 3 times. - - // First time - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys, - "dir", null, null, null, null, null).getEntity(); - - Assert.assertTrue(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2); - - // 2nd time - String continueToken = getBucketResponse.getNextToken(); - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys, - "dir", null, continueToken, null, null, null).getEntity(); - Assert.assertTrue(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2); - - //3rd time - continueToken = getBucketResponse.getNextToken(); - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys, - "dir", null, continueToken, null, null, null).getEntity(); - - Assert.assertFalse(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 1); - - } - - @Test - public void listWithContinuationTokenFail() throws OS3Exception, IOException { - - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file", - "dir1bha/file2", "dir1", "dir2", "dir3"); - - getBucket.setClient(ozoneClient); - - try { - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, 2, - "dir", null, "random", null, null, null).getEntity(); - fail("listWithContinuationTokenFail"); - } catch (OS3Exception ex) { - Assert.assertEquals("random", ex.getResource()); - Assert.assertEquals("Invalid Argument", ex.getErrorMessage()); - } - - } - - - @Test - public void testStartAfter() throws IOException, OS3Exception { - BucketEndpoint getBucket = new BucketEndpoint(); - - OzoneClient ozoneClient = - createClientWithKeys("dir1/file1", "dir1bh/file1", - "dir1bha/file1", "dir0/file1", "dir2/file1"); - - getBucket.setClient(ozoneClient); - - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, 1000, - null, null, null, null, null, null).getEntity(); - - Assert.assertFalse(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 5); - - //As our list output is sorted, after seeking to startAfter, we shall - // have 4 keys. 
- String startAfter = "dir0/file1"; - - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, - 1000, null, null, null, startAfter, null, null).getEntity(); - - Assert.assertFalse(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 4); - - getBucketResponse = - (ListObjectResponse) getBucket.list("b1", null, null, null, - 1000, null, null, null, "random", null, null).getEntity(); - - Assert.assertFalse(getBucketResponse.isTruncated()); - Assert.assertTrue(getBucketResponse.getContents().size() == 0); - - - } - - private OzoneClient createClientWithKeys(String... keys) throws IOException { - OzoneClient client = new OzoneClientStub(); - - client.getObjectStore().createS3Bucket("bilbo", "b1"); - String volume = client.getObjectStore().getOzoneVolumeName("b1"); - client.getObjectStore().getVolume(volume).createBucket("b1"); - OzoneBucket bucket = - client.getObjectStore().getVolume(volume).getBucket("b1"); - for (String key : keys) { - bucket.createKey(key, 0).close(); - } - return client; - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java deleted file mode 100644 index f06da703c21..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.Response; - -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClientStub; - -import org.junit.Assert; -import static org.junit.Assert.assertEquals; -import org.junit.Before; -import org.junit.Test; - -/** - * This class test HeadBucket functionality. - */ -public class TestBucketHead { - - private String bucketName = "myBucket"; - private String userName = "ozone"; - private OzoneClientStub clientStub; - private ObjectStore objectStoreStub; - private BucketEndpoint bucketEndpoint; - - @Before - public void setup() throws Exception { - - //Create client stub and object store stub. 
- clientStub = new OzoneClientStub(); - objectStoreStub = clientStub.getObjectStore(); - - objectStoreStub.createS3Bucket(userName, bucketName); - - // Create HeadBucket and setClient to OzoneClientStub - bucketEndpoint = new BucketEndpoint(); - bucketEndpoint.setClient(clientStub); - } - - @Test - public void testHeadBucket() throws Exception { - - Response response = bucketEndpoint.head(bucketName); - assertEquals(200, response.getStatus()); - - } - - @Test - public void testHeadFail() throws Exception { - Response response = bucketEndpoint.head("unknownbucket"); - Assert.assertEquals(400, response.getStatus()); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java deleted file mode 100644 index 7c5bfadad74..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketResponse.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.JAXBContext; -import javax.xml.bind.JAXBException; - -import org.junit.Test; - -/** - * Testing JAXB serialization. - */ -public class TestBucketResponse { - - @Test - public void serialize() throws JAXBException { - JAXBContext context = JAXBContext.newInstance(ListObjectResponse.class); - context.createMarshaller().marshal(new ListObjectResponse(), System.out); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java deleted file mode 100644 index 212721af00f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.when; - -/** - * This class tests Initiate Multipart Upload request. - */ -public class TestInitiateMultipartUpload { - - @Test - public void testInitiateMultipartUpload() throws Exception { - - String bucket = "s3bucket"; - String key = "key1"; - OzoneClientStub client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("ozone", bucket); - String volumeName = client.getObjectStore().getOzoneVolumeName(bucket); - OzoneVolume volume = client.getObjectStore().getVolume(volumeName); - OzoneBucket ozoneBucket = volume.getBucket("s3bucket"); - - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - ObjectEndpoint rest = new ObjectEndpoint(); - rest.setHeaders(headers); - rest.setClient(client); - - Response response = rest.initializeMultipartUpload(bucket, key); - - assertEquals(response.getStatus(), 200); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - // Calling again should return different uploadID. - response = rest.initializeMultipartUpload(bucket, key); - assertEquals(response.getStatus(), 200); - multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - assertNotEquals(multipartUploadInitiateResponse.getUploadID(), uploadID); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java deleted file mode 100644 index 21545ec9b07..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; - -import java.io.ByteArrayInputStream; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.when; - -/** - * This class test list parts request. - */ -public class TestListParts { - - - private final static ObjectEndpoint REST = new ObjectEndpoint(); - private final static String BUCKET = "s3bucket"; - private final static String KEY = "key1"; - private static String uploadID; - - @BeforeClass - public static void setUp() throws Exception { - - OzoneClientStub client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("ozone", BUCKET); - - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - REST.setHeaders(headers); - REST.setClient(client); - - Response response = REST.initializeMultipartUpload(BUCKET, KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(response.getStatus(), 200); - - String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body); - - assertNotNull(response.getHeaderString("ETag")); - - response = REST.put(BUCKET, KEY, content.length(), 2, uploadID, body); - - assertNotNull(response.getHeaderString("ETag")); - - response = REST.put(BUCKET, KEY, content.length(), 3, uploadID, body); - - assertNotNull(response.getHeaderString("ETag")); - } - - @Test - public void testListParts() throws Exception { - Response response = REST.get(BUCKET, KEY, uploadID, 3, "0", null); - - ListPartsResponse listPartsResponse = - (ListPartsResponse) response.getEntity(); - - Assert.assertFalse(listPartsResponse.getTruncated()); - Assert.assertTrue(listPartsResponse.getPartList().size() == 3); - - } - - @Test - public void testListPartsContinuation() throws Exception { - Response response = REST.get(BUCKET, KEY, uploadID, 2, "0", null); - ListPartsResponse listPartsResponse = - (ListPartsResponse) response.getEntity(); - - Assert.assertTrue(listPartsResponse.getTruncated()); - Assert.assertTrue(listPartsResponse.getPartList().size() == 2); - - // Continue - response = REST.get(BUCKET, KEY, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); - listPartsResponse = (ListPartsResponse) response.getEntity(); - - Assert.assertFalse(listPartsResponse.getTruncated()); - Assert.assertTrue(listPartsResponse.getPartList().size() == 1); - - } - - @Test - 
public void testListPartsWithUnknownUploadID() throws Exception { - try { - Response response = REST.get(BUCKET, KEY, uploadID, 2, "0", null); - } catch (OS3Exception ex) { - Assert.assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), - ex.getErrorMessage()); - } - } - - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java deleted file mode 100644 index c15a1280c37..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.ByteArrayInputStream; -import java.io.IOException; - -import org.junit.Assert; - -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.junit.Assert.*; -import org.junit.Test; - -/** - * Test custom marshalling of MultiDeleteRequest. - */ -public class TestMultiDeleteRequestUnmarshaller { - - @Test - public void fromStreamWithNamespace() throws IOException { - //GIVEN - ByteArrayInputStream inputBody = - new ByteArrayInputStream( - ("key1key2" - + "key3" - + "") - .getBytes(UTF_8)); - - //WHEN - MultiDeleteRequest multiDeleteRequest = - unmarshall(inputBody); - - //THEN - Assert.assertEquals(3, multiDeleteRequest.getObjects().size()); - } - - @Test - public void fromStreamWithoutNamespace() throws IOException { - //GIVEN - ByteArrayInputStream inputBody = - new ByteArrayInputStream( - ("key1key2" - + "key3" - + "") - .getBytes(UTF_8)); - - //WHEN - MultiDeleteRequest multiDeleteRequest = - unmarshall(inputBody); - - //THEN - Assert.assertEquals(3, multiDeleteRequest.getObjects().size()); - } - - private MultiDeleteRequest unmarshall(ByteArrayInputStream inputBody) - throws IOException { - return new MultiDeleteRequestUnmarshaller() - .readFrom(null, null, null, null, null, inputBody); - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java deleted file mode 100644 index b9e3885ac6f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; - -import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.when; - -/** - * Class to test Multipart upload end to end. - */ - -public class TestMultipartUploadComplete { - - private final static ObjectEndpoint REST = new ObjectEndpoint();; - private final static String BUCKET = "s3bucket"; - private final static String KEY = "key1"; - private final static OzoneClientStub CLIENT = new OzoneClientStub(); - - @BeforeClass - public static void setUp() throws Exception { - - CLIENT.getObjectStore().createS3Bucket("ozone", BUCKET); - - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - REST.setHeaders(headers); - REST.setClient(CLIENT); - } - - private String initiateMultipartUpload(String key) throws IOException, - OS3Exception { - Response response = REST.initializeMultipartUpload(BUCKET, key); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(response.getStatus(), 200); - - return uploadID; - - } - - private Part uploadPart(String key, String uploadID, int partNumber, String - content) throws IOException, OS3Exception { - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - Response response = REST.put(BUCKET, key, content.length(), partNumber, - uploadID, body); - assertEquals(response.getStatus(), 200); - assertNotNull(response.getHeaderString("ETag")); - Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); - part.setPartNumber(partNumber); - - return part; - } - - private void completeMultipartUpload(String key, - CompleteMultipartUploadRequest completeMultipartUploadRequest, - String uploadID) throws IOException, OS3Exception { - Response response = REST.completeMultipartUpload(BUCKET, key, uploadID, - completeMultipartUploadRequest); - - assertEquals(response.getStatus(), 200); - - CompleteMultipartUploadResponse completeMultipartUploadResponse = - 
(CompleteMultipartUploadResponse) response.getEntity(); - - assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET); - assertEquals(completeMultipartUploadResponse.getKey(), KEY); - assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET); - assertNotNull(completeMultipartUploadResponse.getETag()); - } - - @Test - public void testMultipart() throws Exception { - - // Initiate multipart upload - String uploadID = initiateMultipartUpload(KEY); - - List partsList = new ArrayList<>(); - - - // Upload parts - String content = "Multipart Upload 1"; - int partNumber = 1; - - Part part1 = uploadPart(KEY, uploadID, partNumber, content); - partsList.add(part1); - - content = "Multipart Upload 2"; - partNumber = 2; - Part part2 = uploadPart(KEY, uploadID, partNumber, content); - partsList.add(part2); - - // complete multipart upload - CompleteMultipartUploadRequest completeMultipartUploadRequest = new - CompleteMultipartUploadRequest(); - completeMultipartUploadRequest.setPartList(partsList); - - - completeMultipartUpload(KEY, completeMultipartUploadRequest, - uploadID); - - } - - - @Test - public void testMultipartInvalidPartOrderError() throws Exception { - - // Initiate multipart upload - String key = UUID.randomUUID().toString(); - String uploadID = initiateMultipartUpload(key); - - List partsList = new ArrayList<>(); - - // Upload parts - String content = "Multipart Upload 1"; - int partNumber = 1; - - Part part1 = uploadPart(key, uploadID, partNumber, content); - // Change part number - part1.setPartNumber(3); - partsList.add(part1); - - content = "Multipart Upload 2"; - partNumber = 2; - - Part part2 = uploadPart(key, uploadID, partNumber, content); - partsList.add(part2); - - // complete multipart upload - CompleteMultipartUploadRequest completeMultipartUploadRequest = new - CompleteMultipartUploadRequest(); - completeMultipartUploadRequest.setPartList(partsList); - try { - completeMultipartUpload(key, completeMultipartUploadRequest, uploadID); - fail("testMultipartInvalidPartOrderError"); - } catch (OS3Exception ex) { - assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART_ORDER.getCode()); - } - - } - - @Test - public void testMultipartInvalidPartError() throws Exception { - - // Initiate multipart upload - String key = UUID.randomUUID().toString(); - String uploadID = initiateMultipartUpload(key); - - List partsList = new ArrayList<>(); - - // Upload parts - String content = "Multipart Upload 1"; - int partNumber = 1; - - Part part1 = uploadPart(key, uploadID, partNumber, content); - // Change part number - part1.seteTag("random"); - partsList.add(part1); - - content = "Multipart Upload 2"; - partNumber = 2; - - Part part2 = uploadPart(key, uploadID, partNumber, content); - partsList.add(part2); - - // complete multipart upload - CompleteMultipartUploadRequest completeMultipartUploadRequest = new - CompleteMultipartUploadRequest(); - completeMultipartUploadRequest.setPartList(partsList); - try { - completeMultipartUpload(key, completeMultipartUploadRequest, uploadID); - fail("testMultipartInvalidPartOrderError"); - } catch (OS3Exception ex) { - assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode()); - } - - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java deleted file mode 100644 index 425bfc45e76..00000000000 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Scanner; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE; -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import org.junit.Assert; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; -import static org.mockito.Mockito.when; - -/** - * Class to test Multipart upload where parts are created with copy header. 
- */ - -public class TestMultipartUploadWithCopy { - - private final static ObjectEndpoint REST = new ObjectEndpoint(); - - private final static String BUCKET = "s3bucket"; - private final static String KEY = "key2"; - private final static String EXISTING_KEY = "key1"; - private static final String EXISTING_KEY_CONTENT = "testkey"; - private final static OzoneClientStub CLIENT = new OzoneClientStub(); - private static final int RANGE_FROM = 2; - private static final int RANGE_TO = 4; - - @BeforeClass - public static void setUp() throws Exception { - - ObjectStore objectStore = CLIENT.getObjectStore(); - objectStore.createS3Bucket("ozone", BUCKET); - - OzoneBucket bucket = getOzoneBucket(objectStore, BUCKET); - - byte[] keyContent = EXISTING_KEY_CONTENT.getBytes(); - try (OutputStream stream = bucket - .createKey(EXISTING_KEY, keyContent.length, ReplicationType.RATIS, - ReplicationFactor.THREE, new HashMap<>())) { - stream.write(keyContent); - } - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - REST.setHeaders(headers); - REST.setClient(CLIENT); - } - - @Test - public void testMultipart() throws Exception { - - // Initiate multipart upload - String uploadID = initiateMultipartUpload(KEY); - - List partsList = new ArrayList<>(); - - // Upload parts - String content = "Multipart Upload 1"; - int partNumber = 1; - - Part part1 = uploadPart(KEY, uploadID, partNumber, content); - partsList.add(part1); - - partNumber = 2; - Part part2 = - uploadPartWithCopy(KEY, uploadID, partNumber, - BUCKET + "/" + EXISTING_KEY, null); - partsList.add(part2); - - partNumber = 3; - Part part3 = - uploadPartWithCopy(KEY, uploadID, partNumber, - BUCKET + "/" + EXISTING_KEY, - "bytes=" + RANGE_FROM + "-" + RANGE_TO); - partsList.add(part3); - - // complete multipart upload - CompleteMultipartUploadRequest completeMultipartUploadRequest = new - CompleteMultipartUploadRequest(); - completeMultipartUploadRequest.setPartList(partsList); - - completeMultipartUpload(KEY, completeMultipartUploadRequest, - uploadID); - - OzoneBucket bucket = getOzoneBucket(CLIENT.getObjectStore(), BUCKET); - try (InputStream is = bucket.readKey(KEY)) { - String keyContent = new Scanner(is).useDelimiter("\\A").next(); - Assert.assertEquals(content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT - .substring(RANGE_FROM, RANGE_TO), keyContent); - } - } - - private String initiateMultipartUpload(String key) throws IOException, - OS3Exception { - setHeaders(); - Response response = REST.initializeMultipartUpload(BUCKET, key); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(response.getStatus(), 200); - - return uploadID; - - } - - private Part uploadPart(String key, String uploadID, int partNumber, String - content) throws IOException, OS3Exception { - setHeaders(); - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - Response response = REST.put(BUCKET, key, content.length(), partNumber, - uploadID, body); - assertEquals(response.getStatus(), 200); - assertNotNull(response.getHeaderString("ETag")); - Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); - part.setPartNumber(partNumber); - - return part; - } - - private Part uploadPartWithCopy(String key, String uploadID, int partNumber, 
- String keyOrigin, String range) throws IOException, OS3Exception { - Map additionalHeaders = new HashMap<>(); - additionalHeaders.put(COPY_SOURCE_HEADER, keyOrigin); - if (range != null) { - additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, range); - - } - setHeaders(additionalHeaders); - - ByteArrayInputStream body = new ByteArrayInputStream("".getBytes()); - Response response = REST.put(BUCKET, key, 0, partNumber, - uploadID, body); - assertEquals(response.getStatus(), 200); - - CopyPartResult result = (CopyPartResult) response.getEntity(); - assertNotNull(result.getETag()); - assertNotNull(result.getLastModified()); - Part part = new Part(); - part.seteTag(result.getETag()); - part.setPartNumber(partNumber); - - return part; - } - - private void completeMultipartUpload(String key, - CompleteMultipartUploadRequest completeMultipartUploadRequest, - String uploadID) throws IOException, OS3Exception { - setHeaders(); - Response response = REST.completeMultipartUpload(BUCKET, key, uploadID, - completeMultipartUploadRequest); - - assertEquals(response.getStatus(), 200); - - CompleteMultipartUploadResponse completeMultipartUploadResponse = - (CompleteMultipartUploadResponse) response.getEntity(); - - assertEquals(completeMultipartUploadResponse.getBucket(), BUCKET); - assertEquals(completeMultipartUploadResponse.getKey(), KEY); - assertEquals(completeMultipartUploadResponse.getLocation(), BUCKET); - assertNotNull(completeMultipartUploadResponse.getETag()); - } - - private void setHeaders(Map additionalHeaders) { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - additionalHeaders - .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v)); - REST.setHeaders(headers); - } - - private void setHeaders() { - setHeaders(new HashMap<>()); - } - - private static OzoneBucket getOzoneBucket(ObjectStore objectStore, - String bucketName) - throws IOException { - - String ozoneBucketName = objectStore.getOzoneBucketName(bucketName); - String ozoneVolumeName = objectStore.getOzoneVolumeName(bucketName); - - return objectStore.getVolume(ozoneVolumeName).getBucket(ozoneBucketName); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java deleted file mode 100644 index b5d0c931c64..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import java.io.IOException; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test delete object. - */ -public class TestObjectDelete { - - @Test - public void delete() throws IOException, OS3Exception { - //GIVEN - OzoneClient client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("bilbo", "b1"); - - String volumeName = client.getObjectStore().getOzoneVolumeName("b1"); - - OzoneBucket bucket = - client.getObjectStore().getVolume(volumeName).getBucket("b1"); - - bucket.createKey("key1", 0).close(); - - ObjectEndpoint rest = new ObjectEndpoint(); - rest.setClient(client); - - //WHEN - rest.delete("b1", "key1", null); - - //THEN - Assert.assertFalse("Bucket Should not contain any key after delete", - bucket.listKeys("").hasNext()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java deleted file mode 100644 index 070c827ac0b..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectEndpoint.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.apache.commons.lang3.tuple.Pair; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test static utility methods of the ObjectEndpoint. 
- */ -public class TestObjectEndpoint { - - @Test - public void parseSourceHeader() throws OS3Exception { - Pair bucketKey = - ObjectEndpoint.parseSourceHeader("bucket1/key1"); - - Assert.assertEquals("bucket1", bucketKey.getLeft()); - - Assert.assertEquals("key1", bucketKey.getRight()); - } - - @Test - public void parseSourceHeaderWithPrefix() throws OS3Exception { - Pair bucketKey = - ObjectEndpoint.parseSourceHeader("/bucket1/key1"); - - Assert.assertEquals("bucket1", bucketKey.getLeft()); - - Assert.assertEquals("key1", bucketKey.getRight()); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java deleted file mode 100644 index fcafe31feb5..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.nio.charset.Charset; -import java.time.format.DateTimeFormatter; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.apache.commons.io.IOUtils; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import static java.nio.charset.StandardCharsets.UTF_8; - -/** - * Test get object. 
- */ -public class TestObjectGet { - - public static final String CONTENT = "0123456789"; - - @Test - public void get() throws IOException, OS3Exception { - //GIVEN - OzoneClientStub client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("bilbo", "b1"); - String volumeName = client.getObjectStore().getOzoneVolumeName("b1"); - OzoneVolume volume = client.getObjectStore().getVolume(volumeName); - OzoneBucket bucket = - volume.getBucket("b1"); - OzoneOutputStream keyStream = - bucket.createKey("key1", CONTENT.getBytes(UTF_8).length); - keyStream.write(CONTENT.getBytes(UTF_8)); - keyStream.close(); - - ObjectEndpoint rest = new ObjectEndpoint(); - rest.setClient(client); - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - rest.setHeaders(headers); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - - //WHEN - Response response = rest.get("b1", "key1", null, 0, null, body); - - //THEN - OzoneInputStream ozoneInputStream = - volume.getBucket("b1") - .readKey("key1"); - String keyContent = - IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8")); - - Assert.assertEquals(CONTENT, keyContent); - Assert.assertEquals("" + keyContent.length(), - response.getHeaderString("Content-Length")); - - DateTimeFormatter.RFC_1123_DATE_TIME - .parse(response.getHeaderString("Last-Modified")); - - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java deleted file mode 100644 index ba39b285a8f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.Response; -import java.io.IOException; -import java.time.format.DateTimeFormatter; -import java.util.HashMap; - -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.lang3.RandomStringUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Test head object. 
- */ -public class TestObjectHead { - private String bucketName = "b1"; - private OzoneClientStub clientStub; - private ObjectStore objectStoreStub; - private ObjectEndpoint keyEndpoint; - private OzoneBucket bucket; - - @Before - public void setup() throws IOException { - //Create client stub and object store stub. - clientStub = new OzoneClientStub(); - objectStoreStub = clientStub.getObjectStore(); - - // Create volume and bucket - objectStoreStub.createS3Bucket("bilbo", bucketName); - String volName = objectStoreStub.getOzoneVolumeName(bucketName); - - bucket = objectStoreStub.getVolume(volName).getBucket(bucketName); - - // Create HeadBucket and setClient to OzoneClientStub - keyEndpoint = new ObjectEndpoint(); - keyEndpoint.setClient(clientStub); - } - - @Test - public void testHeadObject() throws Exception { - //GIVEN - String value = RandomStringUtils.randomAlphanumeric(32); - OzoneOutputStream out = bucket.createKey("key1", - value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, - ReplicationFactor.ONE, new HashMap<>()); - out.write(value.getBytes(UTF_8)); - out.close(); - - //WHEN - Response response = keyEndpoint.head(bucketName, "key1"); - - //THEN - Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(value.getBytes(UTF_8).length, - Long.parseLong(response.getHeaderString("Content-Length"))); - - DateTimeFormatter.RFC_1123_DATE_TIME - .parse(response.getHeaderString("Last-Modified")); - - } - - @Test - public void testHeadFailByBadName() throws Exception { - //Head an object that doesn't exist. - try { - Response response = keyEndpoint.head(bucketName, "badKeyName"); - Assert.assertEquals(404, response.getStatus()); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getCode().contains("NoSuchObject")); - Assert.assertTrue(ex.getErrorMessage().contains("object does not exist")); - Assert.assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - } - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java deleted file mode 100644 index f4c3b944459..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- * - */ -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.xml.bind.JAXBException; -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import com.google.common.collect.Sets; -import org.junit.Assert; -import org.junit.Test; - -/** - * Test object multi delete. - */ -public class TestObjectMultiDelete { - - @Test - public void delete() throws IOException, OS3Exception, JAXBException { - //GIVEN - OzoneClient client = new OzoneClientStub(); - OzoneBucket bucket = initTestData(client); - - BucketEndpoint rest = new BucketEndpoint(); - rest.setClient(client); - - MultiDeleteRequest mdr = new MultiDeleteRequest(); - mdr.getObjects().add(new DeleteObject("key1")); - mdr.getObjects().add(new DeleteObject("key2")); - mdr.getObjects().add(new DeleteObject("key4")); - - //WHEN - MultiDeleteResponse response = rest.multiDelete("b1", "", mdr); - - //THEN - Set keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream() - .map(OzoneKey::getName) - .collect(Collectors.toSet()); - - Set expectedResult = new HashSet<>(); - expectedResult.add("key3"); - - //THEN - Assert.assertEquals(expectedResult, keysAtTheEnd); - Assert.assertEquals(3, response.getDeletedObjects().size()); - Assert.assertEquals(0, response.getErrors().size()); - } - - @Test - public void deleteQuiet() throws IOException, OS3Exception, JAXBException { - //GIVEN - OzoneClient client = new OzoneClientStub(); - OzoneBucket bucket = initTestData(client); - - BucketEndpoint rest = new BucketEndpoint(); - rest.setClient(client); - - MultiDeleteRequest mdr = new MultiDeleteRequest(); - mdr.setQuiet(true); - mdr.getObjects().add(new DeleteObject("key1")); - mdr.getObjects().add(new DeleteObject("key2")); - mdr.getObjects().add(new DeleteObject("key4")); - - //WHEN - MultiDeleteResponse response = rest.multiDelete("b1", "", mdr); - - //THEN - Set keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream() - .map(OzoneKey::getName) - .collect(Collectors.toSet()); - - //THEN - Assert.assertEquals(0, response.getDeletedObjects().size()); - Assert.assertEquals(0, response.getErrors().size()); - } - - private OzoneBucket initTestData(OzoneClient client) throws IOException { - client.getObjectStore().createS3Bucket("bilbo", "b1"); - - String volumeName = client.getObjectStore().getOzoneVolumeName("b1"); - - OzoneBucket bucket = - client.getObjectStore().getVolume(volumeName).getBucket("b1"); - - bucket.createKey("key1", 0).close(); - bucket.createKey("key2", 0).close(); - bucket.createKey("key3", 0).close(); - return bucket; - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java deleted file mode 100644 index 839834cbdf1..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.nio.charset.Charset; - -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.when; - -/** - * Test put object. - */ -public class TestObjectPut { - public static final String CONTENT = "0123456789"; - private String userName = "ozone"; - private String bucketName = "b1"; - private String keyName = "key1"; - private String destBucket = "b2"; - private String destkey = "key2"; - private String nonexist = "nonexist"; - private OzoneClientStub clientStub; - private ObjectStore objectStoreStub; - private ObjectEndpoint objectEndpoint; - - @Before - public void setup() throws IOException { - //Create client stub and object store stub. 
- clientStub = new OzoneClientStub(); - objectStoreStub = clientStub.getObjectStore(); - - // Create bucket - objectStoreStub.createS3Bucket(userName, bucketName); - objectStoreStub.createS3Bucket("ozone1", destBucket); - - // Create PutObject and setClient to OzoneClientStub - objectEndpoint = new ObjectEndpoint(); - objectEndpoint.setClient(clientStub); - } - - @Test - public void testPutObject() throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); - objectEndpoint.setHeaders(headers); - - //WHEN - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - - //THEN - String volumeName = clientStub.getObjectStore() - .getOzoneVolumeName(bucketName); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8")); - - Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(CONTENT, keyContent); - } - - @Test - public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - - String chunkedContent = "0a;chunk-signature=signature\r\n" - + "1234567890\r\n" - + "05;chunk-signature=signature\r\n" - + "abcde\r\n"; - - when(headers.getHeaderString("x-amz-content-sha256")) - .thenReturn("STREAMING-AWS4-HMAC-SHA256-PAYLOAD"); - - //WHEN - Response response = objectEndpoint.put(bucketName, keyName, - chunkedContent.length(), 1, null, - new ByteArrayInputStream(chunkedContent.getBytes())); - - //THEN - String volumeName = clientStub.getObjectStore() - .getOzoneVolumeName(bucketName); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8")); - - Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals("1234567890abcde", keyContent); - } - - @Test - public void testCopyObject() throws IOException, OS3Exception { - // Put object in to source bucket - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - - Response response = objectEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, body); - - String volumeName = clientStub.getObjectStore().getOzoneVolumeName( - bucketName); - - OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getVolume( - volumeName).getBucket(bucketName).readKey(keyName); - - String keyContent = IOUtils.toString(ozoneInputStream, Charset.forName( - "UTF-8")); - - Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(CONTENT, keyContent); - - - // Add copy header, and then call put - when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + keyName); - - response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, - null, body); - - // Check destination key and response - volumeName = clientStub.getObjectStore().getOzoneVolumeName(destBucket); - ozoneInputStream = clientStub.getObjectStore().getVolume(volumeName) - .getBucket(destBucket).readKey(destkey); - - keyContent = IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8")); - - 
Assert.assertEquals(200, response.getStatus()); - Assert.assertEquals(CONTENT, keyContent); - - // source and dest same - try { - objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body); - fail("test copy object failed"); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getErrorMessage().contains("This copy request is " + - "illegal")); - } - - // source bucket not found - try { - when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + keyName); - objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, null, - body); - fail("test copy object failed"); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getCode().contains("NoSuchBucket")); - } - - // dest bucket not found - try { - when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + keyName); - objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body); - fail("test copy object failed"); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getCode().contains("NoSuchBucket")); - } - - //Both source and dest bucket not found - try { - when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + keyName); - objectEndpoint.put(nonexist, destkey, CONTENT.length(), 1, null, body); - fail("test copy object failed"); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getCode().contains("NoSuchBucket")); - } - - // source key not found - try { - when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + nonexist); - objectEndpoint.put("nonexistent", keyName, CONTENT.length(), 1, - null, body); - fail("test copy object failed"); - } catch (OS3Exception ex) { - Assert.assertTrue(ex.getCode().contains("NoSuchBucket")); - } - - } - - @Test - public void testInvalidStorageType() throws IOException { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); - - try { - Response response = objectEndpoint.put(bucketName, keyName, - CONTENT.length(), 1, null, body); - fail("testInvalidStorageType"); - } catch (OS3Exception ex) { - assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), - ex.getErrorMessage()); - assertEquals("random", ex.getResource()); - } - } - - @Test - public void testEmptyStorageType() throws IOException, OS3Exception { - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - String volumeName = clientStub.getObjectStore() - .getOzoneVolumeName(bucketName); - - OzoneKeyDetails key = - clientStub.getObjectStore().getVolume(volumeName).getBucket(bucketName) - .getKey(keyName); - - //default type is set - Assert.assertEquals(ReplicationType.RATIS, key.getReplicationType()); - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java deleted file mode 100644 index 3e91a77ffd4..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ /dev/null @@ 
-1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.BeforeClass; -import org.junit.Test; -import org.mockito.Mockito; - -import javax.ws.rs.core.HttpHeaders; -import javax.ws.rs.core.Response; - -import java.io.ByteArrayInputStream; - -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.when; - -/** - * This class tests Upload part request. - */ -public class TestPartUpload { - - private final static ObjectEndpoint REST = new ObjectEndpoint(); - private final static String BUCKET = "s3bucket"; - private final static String KEY = "key1"; - - @BeforeClass - public static void setUp() throws Exception { - - OzoneClientStub client = new OzoneClientStub(); - client.getObjectStore().createS3Bucket("ozone", BUCKET); - - - HttpHeaders headers = Mockito.mock(HttpHeaders.class); - when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( - "STANDARD"); - - REST.setHeaders(headers); - REST.setClient(client); - } - - - @Test - public void testPartUpload() throws Exception { - - Response response = REST.initializeMultipartUpload(BUCKET, KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(response.getStatus(), 200); - - String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body); - - assertNotNull(response.getHeaderString("ETag")); - - } - - @Test - public void testPartUploadWithOverride() throws Exception { - - Response response = REST.initializeMultipartUpload(BUCKET, KEY); - MultipartUploadInitiateResponse multipartUploadInitiateResponse = - (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - String uploadID = multipartUploadInitiateResponse.getUploadID(); - - assertEquals(response.getStatus(), 200); - - String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body); - - 
assertNotNull(response.getHeaderString("ETag")); - - String eTag = response.getHeaderString("ETag"); - - // Upload part again with same part Number, the ETag should be changed. - content = "Multipart Upload Changed"; - response = REST.put(BUCKET, KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); - - } - - - @Test - public void testPartUploadWithIncorrectUploadID() throws Exception { - try { - String content = "Multipart Upload With Incorrect uploadID"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); - REST.put(BUCKET, KEY, content.length(), 1, "random", body); - fail("testPartUploadWithIncorrectUploadID failed"); - } catch (OS3Exception ex) { - assertEquals("NoSuchUpload", ex.getCode()); - assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); - } - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java deleted file mode 100644 index b7512cb370d..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - */ - -package org.apache.hadoop.ozone.s3.endpoint; - -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClientStub; -import org.apache.hadoop.ozone.s3.header.AuthenticationHeaderParser; - -import static org.junit.Assert.assertEquals; -import org.apache.hadoop.ozone.s3.util.OzoneS3Util; -import org.junit.Before; -import org.junit.Test; - -/** - * This class test HeadBucket functionality. - */ -public class TestRootList { - - private OzoneClientStub clientStub; - private ObjectStore objectStoreStub; - private RootEndpoint rootEndpoint; - private String userName = "ozone"; - - @Before - public void setup() throws Exception { - - //Create client stub and object store stub. - clientStub = new OzoneClientStub(); - objectStoreStub = clientStub.getObjectStore(); - - // Create HeadBucket and setClient to OzoneClientStub - rootEndpoint = new RootEndpoint(); - rootEndpoint.setClient(clientStub); - - AuthenticationHeaderParser parser = new AuthenticationHeaderParser(); - parser.setAuthHeader("AWS " + userName +":secret"); - rootEndpoint.setAuthenticationHeaderParser(parser); - } - - @Test - public void testListBucket() throws Exception { - - // List operation should succeed even there is no bucket. 
- ListBucketResponse response = - (ListBucketResponse) rootEndpoint.get().getEntity(); - assertEquals(0, response.getBucketsNum()); - String volumeName = OzoneS3Util.getVolumeName(userName); - - String bucketBaseName = "bucket-" + getClass().getName(); - for(int i = 0; i < 10; i++) { - objectStoreStub.createS3Bucket(volumeName, bucketBaseName + i); - } - response = (ListBucketResponse) rootEndpoint.get().getEntity(); - assertEquals(10, response.getBucketsNum()); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java deleted file mode 100644 index d320041e5d4..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Unit tests for the rest endpoint implementations. - */ -package org.apache.hadoop.ozone.s3.endpoint; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java deleted file mode 100644 index fa6e2c7dfaf..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.exception; - -import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.junit.Assert; -import org.junit.Test; - -/** - * This class tests OS3Exception class. 
- */ -public class TestOS3Exception { - - @Test - public void testOS3Exception() { - OS3Exception ex = new OS3Exception("AccessDenied", "Access Denied", - 403); - String requestId = OzoneUtils.getRequestID(); - ex = S3ErrorTable.newError(ex, "bucket"); - ex.setRequestId(requestId); - String val = ex.toXml(); - String formatString = "\n" + - "\n" + - " %s\n" + - " %s\n" + - " %s\n" + - " %s\n" + - "\n"; - String expected = String.format(formatString, ex.getCode(), - ex.getErrorMessage(), ex.getResource(), - ex.getRequestId()); - Assert.assertEquals(expected, val); - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java deleted file mode 100644 index 31effe4fba2..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This package tests OS3Exception. - */ -package org.apache.hadoop.ozone.s3.exception; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java deleted file mode 100644 index 97f7fb4cc09..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV2.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.header; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.junit.Test; - - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -/** - * This class tests Authorization header format v2. - */ -public class TestAuthorizationHeaderV2 { - - @Test - public void testAuthHeaderV2() throws OS3Exception { - try { - String auth = "AWS accessKey:signature"; - AuthorizationHeaderV2 v2 = new AuthorizationHeaderV2(auth); - assertEquals(v2.getAccessKeyID(), "accessKey"); - assertEquals(v2.getSignature(), "signature"); - } catch (OS3Exception ex) { - fail("testAuthHeaderV2 failed"); - } - } - - @Test - public void testIncorrectHeader1() throws OS3Exception { - try { - String auth = "AAA accessKey:signature"; - new AuthorizationHeaderV2(auth); - fail("testIncorrectHeader"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } - - @Test - public void testIncorrectHeader2() throws OS3Exception { - try { - String auth = "AWS :accessKey"; - new AuthorizationHeaderV2(auth); - fail("testIncorrectHeader"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } - - @Test - public void testIncorrectHeader3() throws OS3Exception { - try { - String auth = "AWS :signature"; - new AuthorizationHeaderV2(auth); - fail("testIncorrectHeader"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } - - @Test - public void testIncorrectHeader4() throws OS3Exception { - try { - String auth = "AWS accessKey:"; - new AuthorizationHeaderV2(auth); - fail("testIncorrectHeader"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java deleted file mode 100644 index a8cffbe55b5..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java +++ /dev/null @@ -1,354 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.header; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Before; -import org.junit.Test; - -import java.time.LocalDate; - -import static java.time.temporal.ChronoUnit.DAYS; -import static org.apache.hadoop.ozone.s3.AWSV4AuthParser.DATE_FORMATTER; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -/** - * This class tests Authorization header format v2. - */ - -public class TestAuthorizationHeaderV4 { - private String curDate; - - @Before - public void setup() { - LocalDate now = LocalDate.now(); - curDate = DATE_FORMATTER.format(now); - } - - @Test - public void testV4HeaderWellFormed() throws Exception { - String auth = "AWS4-HMAC-SHA256 " + - "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " + - "SignedHeaders=host;range;x-amz-date, " + - "Signature=fe5f80f77d5fa3beca038a248ff027"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); - assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm()); - assertEquals("ozone", v4.getAccessKeyID()); - assertEquals(curDate, v4.getDate()); - assertEquals("us-east-1", v4.getAwsRegion()); - assertEquals("aws4_request", v4.getAwsRequest()); - assertEquals("host;range;x-amz-date", v4.getSignedHeaderString()); - assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature()); - } - - @Test - public void testV4HeaderMissingParts() { - try { - String auth = "AWS4-HMAC-SHA256 " + - "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " + - "SignedHeaders=host;range;x-amz-date,"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); - fail("Exception is expected in case of malformed header"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } - - @Test - public void testV4HeaderInvalidCredential() { - try { - String auth = "AWS4-HMAC-SHA256 " + - "Credential=" + curDate + "/us-east-1/s3/aws4_request, " + - "SignedHeaders=host;range;x-amz-date, " + - "Signature=fe5f80f77d5fa3beca038a248ff027"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); - fail("Exception is expected in case of malformed header"); - } catch (OS3Exception ex) { - assertEquals("AuthorizationHeaderMalformed", ex.getCode()); - } - } - - @Test - public void testV4HeaderWithoutSpace() throws OS3Exception { - - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); - - assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm()); - assertEquals("ozone", v4.getAccessKeyID()); - assertEquals(curDate, v4.getDate()); - assertEquals("us-east-1", v4.getAwsRegion()); - assertEquals("aws4_request", v4.getAwsRequest()); - assertEquals("host;x-amz-content-sha256;x-amz-date", - v4.getSignedHeaderString()); - assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature()); - - } - - @Test - public void testV4HeaderDateValidationSuccess() throws OS3Exception { - // Case 1: valid date within range. 
- LocalDate now = LocalDate.now(); - String dateStr = DATE_FORMATTER.format(now); - validateResponse(dateStr); - - // Case 2: Valid date with in range. - dateStr = DATE_FORMATTER.format(now.plus(1, DAYS)); - validateResponse(dateStr); - - // Case 3: Valid date with in range. - dateStr = DATE_FORMATTER.format(now.minus(1, DAYS)); - validateResponse(dateStr); - } - - @Test - public void testV4HeaderDateValidationFailure() throws Exception { - // Case 1: Empty date. - LocalDate now = LocalDate.now(); - String dateStr = ""; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> validateResponse(dateStr)); - - // Case 2: Date after yesterday. - String dateStr2 = DATE_FORMATTER.format(now.plus(2, DAYS)); - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> validateResponse(dateStr2)); - - // Case 3: Date before yesterday. - String dateStr3 = DATE_FORMATTER.format(now.minus(2, DAYS)); - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> validateResponse(dateStr3)); - } - - private void validateResponse(String dateStr) throws OS3Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + dateStr + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); - - assertEquals("AWS4-HMAC-SHA256", v4.getAlgorithm()); - assertEquals("ozone", v4.getAccessKeyID()); - assertEquals(dateStr, v4.getDate()); - assertEquals("us-east-1", v4.getAwsRegion()); - assertEquals("aws4_request", v4.getAwsRequest()); - assertEquals("host;x-amz-content-sha256;x-amz-date", - v4.getSignedHeaderString()); - assertEquals("fe5f80f77d5fa3beca038a248ff027", v4.getSignature()); - } - - @Test - public void testV4HeaderRegionValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "//s3/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027%"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - String auth2 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "s3/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027%"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - } - - @Test - public void testV4HeaderServiceValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1" + - "//aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - } - - @Test - public void testV4HeaderRequestValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/ ," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "AWS4-HMAC-SHA256 
Credential=ozone/" + curDate + "/us-east-1/s3" + - "/," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - - String auth3 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth3)); - } - - @Test - public void testV4HeaderSignedHeaderValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=;;," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - - String auth3 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "=x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth3)); - - String auth4 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "=," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth4)); - } - - @Test - public void testV4HeaderSignatureValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027%"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "="; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - - String auth3 = - "AWS4-HMAC-SHA256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "" - + "="; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth3)); - } - - @Test - public void testV4HeaderHashAlgoValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "SHA-256 Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - - String auth3 = - " Credential=ozone/" + curDate + "/us-east-1/s3" + - "/aws4_request," - + 
"SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth3)); - } - - @Test - public void testV4HeaderCredentialValidationFailure() throws Exception { - String auth = - "AWS4-HMAC-SHA Credential=/" + curDate + "//" + - "/," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth)); - - String auth2 = - "AWS4-HMAC-SHA =/" + curDate + "//" + - "/," - + "SignedHeaders=host;x-amz-content-sha256;x-amz-date," - + "Signature" - + "=fe5f80f77d5fa3beca038a248ff027"; - LambdaTestUtils.intercept(OS3Exception.class, "", - () -> new AuthorizationHeaderV4(auth2)); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java deleted file mode 100644 index e7e04ab9917..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -/** - * Unit tests for the bucket related rest endpoints. - */ -package org.apache.hadoop.ozone.s3; \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java deleted file mode 100644 index a590367dfa0..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestContinueToken.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3.util; - -import org.apache.hadoop.ozone.s3.exception.OS3Exception; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test encode/decode of the continue token. - */ -public class TestContinueToken { - - @Test - public void encodeDecode() throws OS3Exception { - ContinueToken ct = new ContinueToken("key1", "dir1"); - - ContinueToken parsedToken = - ContinueToken.decodeFromString(ct.encodeToString()); - - Assert.assertEquals(ct, parsedToken); - } - - @Test - public void encodeDecodeNullDir() throws OS3Exception { - ContinueToken ct = new ContinueToken("key1", null); - - ContinueToken parsedToken = - ContinueToken.decodeFromString(ct.encodeToString()); - - Assert.assertEquals(ct, parsedToken); - } - -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java deleted file mode 100644 index 8892a9784db..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestOzoneS3Util.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.s3.util; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.Collection; - -import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP; -import static org.junit.Assert.fail; - -/** - * Class used to test OzoneS3Util. - */ -public class TestOzoneS3Util { - - - private OzoneConfiguration configuration; - private String serviceID = "omService"; - - @Before - public void setConf() { - configuration = new OzoneConfiguration(); - - String nodeIDs = "om1,om2,om3"; - configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, serviceID); - configuration.set(OMConfigKeys.OZONE_OM_NODES_KEY + "." + serviceID, - nodeIDs); - configuration.setBoolean(HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false); - } - - @Test - public void testBuildServiceNameForToken() { - - Collection nodeIDList = OmUtils.getOMNodeIds(configuration, - serviceID); - - configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceID, "om1"), "om1:9862"); - configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceID, "om2"), "om2:9862"); - configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceID, "om3"), "om3:9862"); - - String expectedOmServiceAddress = buildServiceAddress(nodeIDList); - - SecurityUtil.setConfiguration(configuration); - String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration, - serviceID, nodeIDList); - - Assert.assertEquals(expectedOmServiceAddress, omserviceAddr); - } - - - @Test - public void testBuildServiceNameForTokenIncorrectConfig() { - - Collection nodeIDList = OmUtils.getOMNodeIds(configuration, - serviceID); - - // Don't set om3 node rpc address. Here we are skipping setting of one of - // the OM address. So buildServiceNameForToken will fail. - configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceID, "om1"), "om1:9862"); - configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY, - serviceID, "om2"), "om2:9862"); - - - SecurityUtil.setConfiguration(configuration); - - try { - OzoneS3Util.buildServiceNameForToken(configuration, - serviceID, nodeIDList); - fail("testBuildServiceNameForTokenIncorrectConfig failed"); - } catch (IllegalArgumentException ex) { - GenericTestUtils.assertExceptionContains("Could not find rpcAddress " + - "for", ex); - } - - - } - - /** - * Build serviceName from list of node ids. - * @param nodeIDList - * @return service name for token. 
- */ - private String buildServiceAddress(Collection nodeIDList) { - StringBuilder omServiceAddrBuilder = new StringBuilder(); - int nodesLength = nodeIDList.size(); - int counter = 0; - for (String nodeID : nodeIDList) { - counter++; - String addr = configuration.get(OmUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, serviceID, nodeID)); - - if (counter != nodesLength) { - omServiceAddrBuilder.append(addr + ","); - } else { - omServiceAddrBuilder.append(addr); - } - } - - return omServiceAddrBuilder.toString(); - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java deleted file mode 100644 index 75760250e2b..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRFC1123Util.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.ozone.s3.util; - -import java.time.temporal.TemporalAccessor; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Test for RFC1123 util. - */ -public class TestRFC1123Util { - - @Test - public void parse() { - //one digit day - String dateStr = "Mon, 5 Nov 2018 15:04:05 GMT"; - - TemporalAccessor date = RFC1123Util.FORMAT.parse(dateStr); - - String formatted = RFC1123Util.FORMAT.format(date); - - //two digits day - Assert.assertEquals("Mon, 05 Nov 2018 15:04:05 GMT", formatted); - - } -} \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java deleted file mode 100644 index 03c91bfde9b..00000000000 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/util/TestRangeHeaderParserUtil.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.ozone.s3.util; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; - -/** - * Test class to test RangeHeaderParserUtil. - */ -public class TestRangeHeaderParserUtil { - - @Test - public void testRangeHeaderParser() { - - RangeHeader rangeHeader; - - - //range is with in file length - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-8", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(8, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - //range is with in file length, both start and end offset are same - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=0-0", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(0, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - //range is not with in file length, both start and end offset are greater - // than length - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-10", 10); - assertEquals(true, rangeHeader.isInValidRange()); - - // range is satisfying, one of the range is with in the length. So, read - // full file - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=11-8", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - // bytes spec is wrong - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("mb=11-8", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - // range specified is invalid - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-11-8", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(true, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - //Last n bytes - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-6", 10); - assertEquals(4, rangeHeader.getStartOffset()); - assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isReadFull()); - assertEquals(false, rangeHeader.isInValidRange()); - - rangeHeader = RangeHeaderParserUtil.parseRangeHeader("bytes=-106", 10); - assertEquals(0, rangeHeader.getStartOffset()); - assertEquals(9, rangeHeader.getEndOffset()); - assertEquals(false, rangeHeader.isInValidRange()); - - - - } - -} diff --git a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties b/hadoop-ozone/s3gateway/src/test/resources/log4j.properties deleted file mode 100644 index b8ad21d6c7f..00000000000 --- a/hadoop-ozone/s3gateway/src/test/resources/log4j.properties +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# log4j configuration used during build and unit tests - -log4j.rootLogger=info,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n - -log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml deleted file mode 100644 index e6a345eea55..00000000000 --- a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml deleted file mode 100644 index d1ee9d590bd..00000000000 --- a/hadoop-ozone/tools/pom.xml +++ /dev/null @@ -1,146 +0,0 @@ - - - - 4.0.0 - - org.apache.hadoop - hadoop-ozone - 0.5.0-SNAPSHOT - - hadoop-ozone-tools - 0.5.0-SNAPSHOT - Apache Hadoop Ozone Tools - Apache Hadoop Ozone Tools - jar - - - - org.apache.hadoop - hadoop-ozone-ozone-manager - - - org.apache.hadoop - hadoop-ozone-common - - - - org.apache.hadoop - hadoop-hdds-server-scm - - - org.apache.hadoop - hadoop-ozone-client - - - org.apache.hadoop - hadoop-ozone-filesystem - - - org.apache.hadoop - hadoop-hdds-server-framework - - - org.apache.hadoop - hadoop-common - compile - - - org.apache.hadoop - hadoop-hdfs - compile - - - com.sun.xml.bind - jaxb-core - - - javax.xml.bind - jaxb-api - - - javax.activation - activation - - - io.dropwizard.metrics - metrics-core - 3.2.4 - - - org.openjdk.jmh - jmh-core - 1.19 - - - org.openjdk.jmh - jmh-generator-annprocess - 1.19 - - - io.dropwizard.metrics - metrics-core - - - com.amazonaws - aws-java-sdk-s3 - 1.11.615 - - - com.github.spotbugs - spotbugs - provided - - - junit - junit - test - - - org.apache.hadoop - hadoop-common - test - test-jar - - - org.apache.hadoop - hadoop-ozone-integration-test - test - test-jar - - - org.mockito - mockito-core - 2.15.0 - test - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - true - 2048 - - - - - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java deleted file mode 100644 index 5690296e0f7..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.audit.parser; - -import org.apache.hadoop.hdds.cli.GenericCli; -import org.apache.hadoop.ozone.audit.parser.handler.LoadCommandHandler; -import org.apache.hadoop.ozone.audit.parser.handler.QueryCommandHandler; -import org.apache.hadoop.ozone.audit.parser.handler.TemplateCommandHandler; -import picocli.CommandLine.Command; -import picocli.CommandLine.Parameters; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; - -/** - * Ozone audit parser tool. - */ -@Command(name = "ozone auditparser", - description = "Shell parser for Ozone Audit Logs", - subcommands = { - LoadCommandHandler.class, - TemplateCommandHandler.class, - QueryCommandHandler.class - }, - versionProvider = HddsVersionProvider.class, - mixinStandardHelpOptions = true) -public class AuditParser extends GenericCli { - /* - <.db file path> load - <.db file path> template